## @file
# Run a makefile as part of a PREBUILD or POSTBUILD action.
#
# Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

'''
RunMakefile.py
'''

import os
import sys
import argparse
import subprocess

#
# Globals for help information
#
__prog__        = 'RunMakefile'
__version__     = '%s Version %s' % (__prog__, '1.0')
__copyright__   = 'Copyright (c) 2017, Intel Corporation. All rights reserved.'
__description__ = 'Run a makefile as part of a PREBUILD or POSTBUILD action.\n'

#
# Globals
#
gArgs = None

def Log(Message):
  if not gArgs.Verbose:
    return
  sys.stdout.write (__prog__ + ': ' + Message + '\n')

def Error(Message, ExitValue=1):
  sys.stderr.write (__prog__ + ': ERROR: ' + Message + '\n')
  sys.exit (ExitValue)

def RelativePath(target):
  return os.path.relpath (target, gWorkspace)

def NormalizePath(target):
  if isinstance(target, tuple):
    return os.path.normpath (os.path.join (*target))
  else:
    return os.path.normpath (target)

if __name__ == '__main__':
  #
  # Create command line argument parser object
  #
  parser = argparse.ArgumentParser (
             prog = __prog__,
             version = __version__,
             description = __description__ + __copyright__,
             conflict_handler = 'resolve'
             )
  parser.add_argument (
           '-a', '--arch', dest = 'Arch', nargs = '+', action = 'append',
           required = True,
           help = '''ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC,
                     which overrides target.txt's TARGET_ARCH definition. To
                     specify more archs, please repeat this option.'''
           )
  parser.add_argument (
           '-t', '--tagname', dest = 'ToolChain', required = True,
           help = '''Using the Tool Chain Tagname to build the platform,
                     overriding target.txt's TOOL_CHAIN_TAG definition.'''
           )
  parser.add_argument (
           '-p', '--platform', dest = 'PlatformFile', required = True,
           help = '''Build the platform specified by the DSC file name argument,
                     overriding target.txt's ACTIVE_PLATFORM definition.'''
           )
  parser.add_argument (
           '-b', '--buildtarget', dest = 'BuildTarget', required = True,
           help = '''Using the TARGET to build the platform, overriding
                     target.txt's TARGET definition.'''
           )
  parser.add_argument (
           '--conf=', dest = 'ConfDirectory', required = True,
           help = '''Specify the customized Conf directory.'''
           )
  parser.add_argument (
           '-D', '--define', dest = 'Define', nargs = '*', action = 'append',
           help = '''Macro: "Name [= Value]".'''
           )
  parser.add_argument (
           '--makefile', dest = 'Makefile', required = True,
           help = '''Makefile to run passing in arguments as makefile defines.'''
           )
  parser.add_argument (
           '-v', '--verbose', dest = 'Verbose', action = 'store_true',
           help = '''Turn on verbose output with informational messages printed'''
           )

  #
  # Parse command line arguments
  #
  gArgs, remaining = parser.parse_known_args()
  gArgs.BuildType = 'all'
  for BuildType in ['all', 'fds', 'genc', 'genmake', 'clean', 'cleanall', 'modules', 'libraries', 'run']:
    if BuildType in remaining:
      gArgs.BuildType = BuildType
      remaining.remove(BuildType)
      break
  gArgs.Remaining = ' '.join(remaining)

  #
  # Start
  #
  Log ('Start')

  #
  # Find makefile in WORKSPACE or PACKAGES_PATH
  #
  PathList = ['']
  try:
    PathList.append(os.environ['WORKSPACE'])
  except:
    Error ('WORKSPACE environment variable not set')
  try:
    PathList += os.environ['PACKAGES_PATH'].split(os.pathsep)
  except:
    pass
  for Path in PathList:
    Makefile = NormalizePath((Path, gArgs.Makefile))
    if os.path.exists (Makefile):
      break
  if not os.path.exists(Makefile):
    Error ('makefile %s not found' % (gArgs.Makefile))

  #
  # Build command line arguments converting build arguments to makefile defines
  #
  CommandLine = [Makefile]
  CommandLine.append('TARGET_ARCH="%s"' % (' '.join([Item[0] for Item in gArgs.Arch])))
  CommandLine.append('TOOL_CHAIN_TAG="%s"' % (gArgs.ToolChain))
  CommandLine.append('TARGET="%s"' % (gArgs.BuildTarget))
  CommandLine.append('ACTIVE_PLATFORM="%s"' % (gArgs.PlatformFile))
  CommandLine.append('CONF_DIRECTORY="%s"' % (gArgs.ConfDirectory))
  if gArgs.Define:
    for Item in gArgs.Define:
      if '=' not in Item[0]:
        continue
      Item = Item[0].split('=', 1)
      CommandLine.append('%s="%s"' % (Item[0], Item[1]))
  CommandLine.append('EXTRA_FLAGS="%s"' % (gArgs.Remaining))
  CommandLine.append(gArgs.BuildType)
  if sys.platform == "win32":
    CommandLine = 'nmake /f %s' % (' '.join(CommandLine))
  else:
    CommandLine = 'make -f %s' % (' '.join(CommandLine))

  #
  # Run the makefile
  #
  try:
    Process = subprocess.Popen(CommandLine, shell=True)
  except:
    Error ('make command not available.  Please verify PATH')
  Process.communicate()

  #
  # Done
  #
  Log ('Done')

  #
  # Return status from running the makefile
  #
  sys.exit(Process.returncode)
edk2-master
BaseTools/Scripts/RunMakefile.py
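A minimal sketch of the command line the script above assembles from its build arguments; the makefile path, tool chain, and platform values below are illustrative, not defaults of the tool.

import sys

CommandLine = ['Scripts/PostBuild.mak']  # hypothetical makefile path
CommandLine.append('TARGET_ARCH="%s"' % ' '.join(['IA32', 'X64']))
CommandLine.append('TOOL_CHAIN_TAG="%s"' % 'GCC5')
CommandLine.append('TARGET="%s"' % 'DEBUG')
CommandLine.append('ACTIVE_PLATFORM="%s"' % 'OvmfPkg/OvmfPkgX64.dsc')
CommandLine.append('all')
Make = 'nmake /f' if sys.platform == 'win32' else 'make -f'
# Prints the shell command the script would hand to subprocess.Popen()
print('%s %s' % (Make, ' '.join(CommandLine)))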
# @file FormatDosFiles.py
# This script formats the source files to follow the DOS style.
# It supports both Python2.x and Python3.x.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

#
# Import Modules
#
from __future__ import print_function
import argparse
import os
import os.path
import re
import sys
import copy

__prog__        = 'FormatDosFiles'
__version__     = '%s Version %s' % (__prog__, '0.10 ')
__copyright__   = 'Copyright (c) 2018-2019, Intel Corporation. All rights reserved.'
__description__ = 'Convert source files to meet the EDKII C Coding Standards Specification.\n'
DEFAULT_EXT_LIST = ['.h', '.c', '.nasm', '.nasmb', '.asm', '.S', '.inf', '.dec', '.dsc',
                    '.fdf', '.uni', '.asl', '.aslc', '.vfr', '.idf', '.txt', '.bat', '.py']

# To work in both python2 and python3 environments, the re patterns use binary
# strings, which are bytes type in python3. In python3, reading a file in binary
# mode returns bytes, and bytes cannot be mixed with str.
def FormatFile(FilePath, Args):
    with open(FilePath, 'rb') as Fd:
        Content = Fd.read()
        # Convert the line endings to CRLF
        Content = re.sub(br'([^\r])\n', br'\1\r\n', Content)
        Content = re.sub(br'^\n', br'\r\n', Content, flags=re.MULTILINE)
        # Add a new empty line if the file does not end with one
        Content = re.sub(br'([^\r\n])$', br'\1\r\n', Content)
        # Remove trailing white spaces
        Content = re.sub(br'[ \t]+(\r\n)', br'\1', Content, flags=re.MULTILINE)
        # Replace '\t' with two spaces
        Content = re.sub(b'\t', b'  ', Content)
        with open(FilePath, 'wb') as Fd:
            Fd.write(Content)
            if not Args.Quiet:
                print(FilePath)

def FormatFilesInDir(DirPath, ExtList, Args):
    FileList = []
    ExcludeDir = DirPath
    for DirPath, DirNames, FileNames in os.walk(DirPath):
        if Args.Exclude:
            DirNames[:] = [d for d in DirNames if d not in Args.Exclude]
            FileNames[:] = [f for f in FileNames if f not in Args.Exclude]
            Continue = False
            for Path in Args.Exclude:
                Path = Path.strip('\\').strip('/')
                if not os.path.isdir(Path) and not os.path.isfile(Path):
                    Path = os.path.join(ExcludeDir, Path)
                if os.path.isdir(Path) and Path.endswith(DirPath):
                    DirNames[:] = []
                    Continue = True
                elif os.path.isfile(Path):
                    FilePaths = FileNames
                    for ItemPath in FilePaths:
                        FilePath = os.path.join(DirPath, ItemPath)
                        if Path.endswith(FilePath):
                            FileNames.remove(ItemPath)
            if Continue:
                continue
        for FileName in [f for f in FileNames if any(f.endswith(ext) for ext in ExtList)]:
            FileList.append(os.path.join(DirPath, FileName))
    for File in FileList:
        FormatFile(File, Args)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog=__prog__,
                                     description=__description__ + __copyright__,
                                     conflict_handler='resolve')

    parser.add_argument('Path', nargs='+',
                        help='the path for files to be converted. It could be a directory or a file path.')
    parser.add_argument('--version', action='version', version=__version__)
    parser.add_argument('--append-extensions', dest='AppendExt', nargs='+',
                        help='append file extensions filter to default extensions. (Example: .txt .c .h)')
    parser.add_argument('--override-extensions', dest='OverrideExt', nargs='+',
                        help='override file extensions filter on default extensions. (Example: .txt .c .h)')
    parser.add_argument('-v', '--verbose', dest='Verbose', action='store_true',
                        help='increase output messages')
    parser.add_argument('-q', '--quiet', dest='Quiet', action='store_true',
                        help='reduce output messages')
    parser.add_argument('--debug', dest='Debug', type=int, metavar='[0-9]',
                        choices=range(0, 10), default=0,
                        help='set debug level')
    parser.add_argument('--exclude', dest='Exclude', nargs='+',
                        help="directory name or file name which will be excluded")
    args = parser.parse_args()
    DefaultExt = copy.copy(DEFAULT_EXT_LIST)

    if args.OverrideExt is not None:
        DefaultExt = args.OverrideExt
    if args.AppendExt is not None:
        DefaultExt = list(set(DefaultExt + args.AppendExt))

    for Path in args.Path:
        if not os.path.exists(Path):
            print("not exists path: {0}".format(Path))
            sys.exit(1)
        if os.path.isdir(Path):
            FormatFilesInDir(Path, DefaultExt, args)
        elif os.path.isfile(Path):
            FormatFile(Path, args)
edk2-master
BaseTools/Scripts/FormatDosFiles.py
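A minimal, self-contained sketch of the byte-level regex passes FormatFile applies, run against a small illustrative sample rather than a real source file.

import re

Content = b'line1\nline2 \t\r\nline3'
Content = re.sub(br'([^\r])\n', br'\1\r\n', Content)                     # LF -> CRLF
Content = re.sub(br'^\n', br'\r\n', Content, flags=re.MULTILINE)         # bare-LF empty lines
Content = re.sub(br'([^\r\n])$', br'\1\r\n', Content)                    # ensure final newline
Content = re.sub(br'[ \t]+(\r\n)', br'\1', Content, flags=re.MULTILINE)  # strip trailing blanks
Content = re.sub(b'\t', b'  ', Content)                                  # tabs -> two spaces
assert Content == b'line1\r\nline2\r\nline3\r\n'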
## @file
# Update build revisions of the tools when performing a developer build
#
# This script will modify the C/Include/Common/BuildVersion.h file and the two
# Python scripts, Python/Common/BuildVersion.py and Python/UPT/BuildVersion.py.
# If SVN is available, the tool will obtain the current checked out version of
# the source tree for including in the --version commands.
#
# Copyright (c) 2014 - 2015, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##

""" This program will update the BuildVersion.py and BuildVersion.h files
used to set a tool's version value """

from __future__ import absolute_import

import os
import shlex
import subprocess
import sys

from argparse import ArgumentParser, SUPPRESS
from tempfile import NamedTemporaryFile
from types import IntType, ListType

SYS_ENV_ERR = "ERROR : %s system environment variable must be set prior to running this tool.\n"

__execname__ = "UpdateBuildVersions.py"
SVN_REVISION = "$LastChangedRevision: 3 $"
SVN_REVISION = SVN_REVISION.replace("$LastChangedRevision:", "").replace("$", "").strip()
__copyright__ = "Copyright (c) 2014, Intel Corporation. All rights reserved."
VERSION_NUMBER = "0.7.0"
__version__ = "Version %s.%s" % (VERSION_NUMBER, SVN_REVISION)


def ParseOptions():
    """
    Parse the command-line options.
    The options for this tool will be passed along to the MkBinPkg tool.
    """
    parser = ArgumentParser(
        usage=("%s [options]" % __execname__),
        description=__copyright__,
        conflict_handler='resolve')

    # Standard Tool Options
    parser.add_argument("--version", action="version",
                        version=__execname__ + " " + __version__)
    parser.add_argument("-s", "--silent", action="store_true",
                        dest="silent",
                        help="All output will be disabled, pass/fail determined by the exit code")
    parser.add_argument("-v", "--verbose", action="store_true",
                        dest="verbose",
                        help="Enable verbose output")
    # Tool specific options
    parser.add_argument("--revert", action="store_true",
                        dest="REVERT", default=False,
                        help="Revert the BuildVersion files only")
    parser.add_argument("--svn-test", action="store_true",
                        dest="TEST_SVN", default=False,
                        help="Test if the svn command is available")
    parser.add_argument("--svnFlag", action="store_true",
                        dest="HAVE_SVN", default=False,
                        help=SUPPRESS)

    return(parser.parse_args())


def ShellCommandResults(CmdLine, Opt):
    """ Execute the command, returning the output content """
    file_list = NamedTemporaryFile(delete=False)
    filename = file_list.name
    Results = []

    returnValue = 0
    try:
        subprocess.check_call(args=shlex.split(CmdLine), stderr=subprocess.STDOUT, stdout=file_list)
    except subprocess.CalledProcessError as err_val:
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("ERROR : %d : %s\n" % (err_val.returncode, err_val.__str__()))
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = err_val.returncode

    except IOError as err_val:
        (errno, strerror) = err_val.args
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("I/O ERROR : %s : %s\n" % (str(errno), strerror))
            sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = errno

    except OSError as err_val:
        (errno, strerror) = err_val.args
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("OS ERROR : %s : %s\n" % (str(errno), strerror))
            sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = errno

    except KeyboardInterrupt:
        file_list.close()
        if not Opt.silent:
            sys.stderr.write("ERROR : Command terminated by user : %s\n" % CmdLine)
            if os.path.exists(filename):
                sys.stderr.write("      : Partial results may be in this file: %s\n" % filename)
            sys.stderr.flush()
        returnValue = 1

    finally:
        if not file_list.closed:
            file_list.flush()
            os.fsync(file_list.fileno())
            file_list.close()

    if os.path.exists(filename):
        fd_ = open(filename, 'r')
        Results = fd_.readlines()
        fd_.close()
        os.unlink(filename)

    if returnValue > 0:
        return returnValue

    return Results


def UpdateBuildVersionPython(Rev, UserModified, opts):
    """ This routine will update the BuildVersion.py files in the Python source tree """
    for SubDir in ["Common", "UPT"]:
        PyPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "Python", SubDir)
        BuildVersionPy = os.path.join(PyPath, "BuildVersion.py")
        fd_ = open(os.path.normpath(BuildVersionPy), 'r')
        contents = fd_.readlines()
        fd_.close()
        if opts.HAVE_SVN is False:
            BuildVersionOrig = os.path.join(PyPath, "orig_BuildVersion.py")
            fd_ = open(BuildVersionOrig, 'w')
            for line in contents:
                fd_.write(line)
            fd_.flush()
            fd_.close()
        new_content = []
        for line in contents:
            if line.strip().startswith("gBUILD_VERSION"):
                new_line = "gBUILD_VERSION = \"Developer Build based on Revision: %s\"" % Rev
                if UserModified:
                    new_line = "gBUILD_VERSION = \"Developer Build based on Revision: %s with Modified Sources\"" % Rev
                new_content.append(new_line)
                continue
            new_content.append(line)

        fd_ = open(os.path.normpath(BuildVersionPy), 'w')
        for line in new_content:
            fd_.write(line)
        fd_.close()


def UpdateBuildVersionH(Rev, UserModified, opts):
    """ This routine will update the BuildVersion.h files in the C source tree """
    CPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "C", "Include", "Common")
    BuildVersionH = os.path.join(CPath, "BuildVersion.h")
    fd_ = open(os.path.normpath(BuildVersionH), 'r')
    contents = fd_.readlines()
    fd_.close()
    if opts.HAVE_SVN is False:
        BuildVersionOrig = os.path.join(CPath, "orig_BuildVersion.h")
        fd_ = open(BuildVersionOrig, 'w')
        for line in contents:
            fd_.write(line)
        fd_.flush()
        fd_.close()

    new_content = []
    for line in contents:
        if line.strip().startswith("#define"):
            new_line = "#define __BUILD_VERSION \"Developer Build based on Revision: %s\"" % Rev
            if UserModified:
                new_line = "#define __BUILD_VERSION \"Developer Build based on Revision: %s with Modified Sources\"" % \
                    Rev
            new_content.append(new_line)
            continue
        new_content.append(line)

    fd_ = open(os.path.normpath(BuildVersionH), 'w')
    for line in new_content:
        fd_.write(line)
    fd_.close()


def RevertCmd(Filename, Opt):
    """ This is the shell command that does the SVN revert """
    CmdLine = "svn revert %s" % Filename.replace("\\", "/").strip()
    try:
        subprocess.check_output(args=shlex.split(CmdLine))
    except subprocess.CalledProcessError as err_val:
        if not Opt.silent:
            sys.stderr.write("Subprocess ERROR : %s\n" % err_val)
            sys.stderr.flush()

    except IOError as err_val:
        (errno, strerror) = err_val.args
        if not Opt.silent:
            sys.stderr.write("I/O ERROR : %s : %s\n" % (str(errno), strerror))
            sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
            sys.stderr.flush()

    except OSError as err_val:
        (errno, strerror) = err_val.args
        if not Opt.silent:
            sys.stderr.write("OS ERROR : %s : %s\n" % (str(errno), strerror))
            sys.stderr.write("ERROR : this command failed : %s\n" % CmdLine)
            sys.stderr.flush()

    except KeyboardInterrupt:
        if not Opt.silent:
            sys.stderr.write("ERROR : Command terminated by user : %s\n" % CmdLine)
            sys.stderr.flush()

    if Opt.verbose:
        sys.stdout.write("Reverted this file: %s\n" % Filename)
        sys.stdout.flush()


def GetSvnRevision(opts):
    """ Get the current revision of the BaseTools/Source tree, and check if
    any of the files have been modified """
    Revision = "Unknown"
    Modified = False

    if opts.HAVE_SVN is False:
        sys.stderr.write("WARNING: the svn command-line tool is not available.\n")
        return (Revision, Modified)

    SrcPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source")
    # Check if there are modified files.
    Cwd = os.getcwd()
    os.chdir(SrcPath)

    StatusCmd = "svn st -v --depth infinity --non-interactive"
    contents = ShellCommandResults(StatusCmd, opts)
    os.chdir(Cwd)
    if isinstance(contents, ListType):
        for line in contents:
            if line.startswith("M "):
                Modified = True
                break

    # Get the repository revision of BaseTools/Source
    InfoCmd = "svn info %s" % SrcPath.replace("\\", "/").strip()
    Revision = 0
    contents = ShellCommandResults(InfoCmd, opts)
    if isinstance(contents, IntType):
        return 0, Modified
    for line in contents:
        line = line.strip()
        if line.startswith("Revision:"):
            Revision = line.replace("Revision:", "").strip()
            break

    return (Revision, Modified)


def CheckSvn(opts):
    """
    This routine will return True if an svn --version command succeeds, or
    False if it fails.  If it failed, SVN is not available.
    """
    OriginalSilent = opts.silent
    opts.silent = True
    VerCmd = "svn --version"
    contents = ShellCommandResults(VerCmd, opts)
    opts.silent = OriginalSilent
    if isinstance(contents, IntType):
        if opts.verbose:
            sys.stdout.write("SVN does not appear to be available.\n")
            sys.stdout.flush()
        return False

    if opts.verbose:
        sys.stdout.write("Found %s" % contents[0])
        sys.stdout.flush()
    return True


def CopyOrig(Src, Dest, Opt):
    """ Overwrite the Dest File with the Src File content """
    try:
        fd_ = open(Src, 'r')
        contents = fd_.readlines()
        fd_.close()
        fd_ = open(Dest, 'w')
        for line in contents:
            fd_.write(line)
        fd_.flush()
        fd_.close()
    except IOError:
        if not Opt.silent:
            sys.stderr.write("Unable to restore this file: %s\n" % Dest)
            sys.stderr.flush()
        return 1

    os.remove(Src)
    if Opt.verbose:
        sys.stdout.write("Restored this file: %s\n" % Src)
        sys.stdout.flush()

    return 0


def CheckOriginals(Opts):
    """
    If SVN was not available, then the tools may have made copies of the
    original BuildVersion.* files using orig_BuildVersion.* for the name.
    If they exist, replace the existing BuildVersion.* file with the
    corresponding orig_BuildVersion.* file.
    Returns 0 if this succeeds, or 1 if the copy function fails.  It will
    also return 0 if the orig_BuildVersion.* file does not exist.
    """
    CPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "C", "Include", "Common")
    BuildVersionH = os.path.join(CPath, "BuildVersion.h")
    OrigBuildVersionH = os.path.join(CPath, "orig_BuildVersion.h")
    if not os.path.exists(OrigBuildVersionH):
        return 0
    if CopyOrig(OrigBuildVersionH, BuildVersionH, Opts):
        return 1
    for SubDir in ["Common", "UPT"]:
        PyPath = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "Python", SubDir)
        BuildVersionPy = os.path.join(PyPath, "BuildVersion.py")
        OrigBuildVersionPy = os.path.join(PyPath, "orig_BuildVersion.py")
        if not os.path.exists(OrigBuildVersionPy):
            return 0
        if CopyOrig(OrigBuildVersionPy, BuildVersionPy, Opts):
            return 1

    return 0


def RevertBuildVersionFiles(opts):
    """ This routine will attempt to perform an SVN --revert on each of the
    BuildVersion.* files """
    if not opts.HAVE_SVN:
        if CheckOriginals(opts):
            return 1
        return 0
    # SVN is available
    BuildVersionH = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "C", "Include", "Common", "BuildVersion.h")
    RevertCmd(BuildVersionH, opts)
    for SubDir in ["Common", "UPT"]:
        BuildVersionPy = os.path.join(os.environ['BASE_TOOLS_PATH'], "Source", "Python", SubDir, "BuildVersion.py")
        RevertCmd(BuildVersionPy, opts)


def UpdateRevisionFiles():
    """ Main routine that will update the BuildVersion.py and BuildVersion.h files."""
    options = ParseOptions()
    # Check the working environment
    if "WORKSPACE" not in os.environ.keys():
        sys.stderr.write(SYS_ENV_ERR % 'WORKSPACE')
        return 1
    if 'BASE_TOOLS_PATH' not in os.environ.keys():
        sys.stderr.write(SYS_ENV_ERR % 'BASE_TOOLS_PATH')
        return 1
    if not os.path.exists(os.environ['BASE_TOOLS_PATH']):
        sys.stderr.write("Unable to locate the %s directory." % os.environ['BASE_TOOLS_PATH'])
        return 1

    options.HAVE_SVN = CheckSvn(options)
    if options.TEST_SVN:
        return (not options.HAVE_SVN)
    # Done processing the option; now use options.HAVE_SVN as a flag.
    # True = Have it, False = Don't have it.
    if options.REVERT:
        # Just revert the tools and exit
        RevertBuildVersionFiles(options)
    else:
        # Revert any changes in the BuildVersion.* files before setting them
        # again.
        RevertBuildVersionFiles(options)
        Revision, Modified = GetSvnRevision(options)
        if options.verbose:
            sys.stdout.write("Revision: %s is Modified: %s\n" % (Revision, Modified))
            sys.stdout.flush()
        UpdateBuildVersionH(Revision, Modified, options)
        UpdateBuildVersionPython(Revision, Modified, options)

    return 0


if __name__ == "__main__":
    sys.exit(UpdateRevisionFiles())
edk2-master
BaseTools/Scripts/UpdateBuildVersions.py
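A minimal sketch of the "Revision:" scrape that GetSvnRevision performs on `svn info` output; the sample text below is fabricated for illustration, not captured from a real repository.

SAMPLE = """Path: Source
Repository Root: https://example.invalid/svn/edk2
Revision: 12345
Node Kind: directory
"""

Revision = "Unknown"
for line in SAMPLE.splitlines():
    line = line.strip()
    if line.startswith("Revision:"):
        # Same string surgery the script uses: drop the tag, keep the number
        Revision = line.replace("Revision:", "").strip()
        break
print(Revision)  # -> 12345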
## @file
#  Check a patch for various format issues
#
#  Copyright (c) 2015 - 2021, Intel Corporation. All rights reserved.<BR>
#  Copyright (C) 2020, Red Hat, Inc.<BR>
#  Copyright (c) 2020, ARM Ltd. All rights reserved.<BR>
#
#  SPDX-License-Identifier: BSD-2-Clause-Patent
#

from __future__ import print_function

VersionNumber = '0.1'
__copyright__ = "Copyright (c) 2015 - 2016, Intel Corporation  All rights reserved."

import email
import argparse
import os
import re
import subprocess
import sys

import email.header

class Verbose:
    SILENT, ONELINE, NORMAL = range(3)
    level = NORMAL

class EmailAddressCheck:
    """Checks an email address."""

    def __init__(self, email, description):
        self.ok = True

        if email is None:
            self.error('Email address is missing!')
            return
        if description is None:
            self.error('Email description is missing!')
            return

        self.description = "'" + description + "'"
        self.check_email_address(email)

    def error(self, *err):
        if self.ok and Verbose.level > Verbose.ONELINE:
            print('The ' + self.description + ' email address is not valid:')
        self.ok = False
        if Verbose.level < Verbose.NORMAL:
            return
        count = 0
        for line in err:
            prefix = (' *', '  ')[count > 0]
            print(prefix, line)
            count += 1

    email_re1 = re.compile(r'(?:\s*)(.*?)(\s*)<(.+)>\s*$',
                           re.MULTILINE|re.IGNORECASE)

    def check_email_address(self, email):
        email = email.strip()
        mo = self.email_re1.match(email)
        if mo is None:
            self.error("Email format is invalid: " + email.strip())
            return

        name = mo.group(1).strip()
        if name == '':
            self.error("Name is not provided with email address: " +
                       email)
        else:
            quoted = len(name) > 2 and name[0] == '"' and name[-1] == '"'
            if name.find(',') >= 0 and not quoted:
                self.error('Add quotes (") around name with a comma: ' +
                           name)

        if mo.group(2) == '':
            self.error("There should be a space between the name and " +
                       "email address: " + email)

        if mo.group(3).find(' ') >= 0:
            self.error("The email address cannot contain a space: " +
                       mo.group(3))

        if ' via Groups.Io' in name and mo.group(3).endswith('@groups.io'):
            self.error("Email rewritten by lists DMARC / DKIM / SPF: " +
                       email)

class CommitMessageCheck:
    """Checks the contents of a git commit message."""

    def __init__(self, subject, message, author_email):
        self.ok = True

        if subject is None and message is None:
            self.error('Commit message is missing!')
            return

        MergifyMerge = False
        if "mergify[bot]@users.noreply.github.com" in author_email:
            if "Merge branch" in subject:
                MergifyMerge = True

        self.subject = subject
        self.msg = message

        print(subject)

        self.check_contributed_under()
        if not MergifyMerge:
            self.check_signed_off_by()
            self.check_misc_signatures()
            self.check_overall_format()
        self.report_message_result()

    url = 'https://github.com/tianocore/tianocore.github.io/wiki/Commit-Message-Format'

    def report_message_result(self):
        if Verbose.level < Verbose.NORMAL:
            return
        if self.ok:
            # All checks passed
            return_code = 0
            print('The commit message format passed all checks.')
        else:
            return_code = 1
        if not self.ok:
            print(self.url)

    def error(self, *err):
        if self.ok and Verbose.level > Verbose.ONELINE:
            print('The commit message format is not valid:')
        self.ok = False
        if Verbose.level < Verbose.NORMAL:
            return
        count = 0
        for line in err:
            prefix = (' *', '  ')[count > 0]
            print(prefix, line)
            count += 1

    # Find 'contributed-under:' at the start of a line ignoring case and
    # requires ':' to be present.  Matches if there is white space before
    # the tag or between the tag and the ':'.
    contributed_under_re = \
        re.compile(r'^\s*contributed-under\s*:', re.MULTILINE|re.IGNORECASE)

    def check_contributed_under(self):
        match = self.contributed_under_re.search(self.msg)
        if match is not None:
            self.error('Contributed-under! (Note: this must be ' +
                       'removed by the code contributor!)')

    @staticmethod
    def make_signature_re(sig, re_input=False):
        if re_input:
            sub_re = sig
        else:
            sub_re = sig.replace('-', r'[-\s]+')
        re_str = (r'^(?P<tag>' + sub_re +
                  r')(\s*):(\s*)(?P<value>\S.*?)(?:\s*)$')
        try:
            return re.compile(re_str, re.MULTILINE|re.IGNORECASE)
        except Exception:
            print("Tried to compile re:", re_str)
            raise

    sig_block_re = \
        re.compile(r'''^
                        (?: (?P<tag>[^:]+) \s* : \s*
                            (?P<value>\S.*?) )
                        |
                        (?: \[ (?P<updater>[^:]+) \s* : \s*
                               (?P<note>.+?) \s* \] )
                   \s* $''',
                   re.VERBOSE | re.MULTILINE)

    def find_signatures(self, sig):
        if not sig.endswith('-by') and sig != 'Cc':
            sig += '-by'
        regex = self.make_signature_re(sig)

        sigs = regex.findall(self.msg)

        bad_case_sigs = filter(lambda m: m[0] != sig, sigs)
        for s in bad_case_sigs:
            self.error("'" + s[0] + "' should be '" + sig + "'")

        for s in sigs:
            if s[1] != '':
                self.error('There should be no spaces between ' + sig +
                           " and the ':'")
            if s[2] != ' ':
                self.error("There should be a space after '" + sig + ":'")

            EmailAddressCheck(s[3], sig)

        return sigs

    def check_signed_off_by(self):
        sob = 'Signed-off-by'
        if self.msg.find(sob) < 0:
            self.error('Missing Signed-off-by! (Note: this must be ' +
                       'added by the code contributor!)')
            return

        sobs = self.find_signatures('Signed-off')

        if len(sobs) == 0:
            self.error('Invalid Signed-off-by format!')
            return

    sig_types = (
        'Reviewed',
        'Reported',
        'Tested',
        'Suggested',
        'Acked',
        'Cc'
        )

    def check_misc_signatures(self):
        for sig in self.sig_types:
            self.find_signatures(sig)

    cve_re = re.compile('CVE-[0-9]{4}-[0-9]{5}[^0-9]')

    def check_overall_format(self):
        lines = self.msg.splitlines()

        if len(lines) >= 1 and lines[0].endswith('\r\n'):
            empty_line = '\r\n'
        else:
            empty_line = '\n'

        lines.insert(0, empty_line)
        lines.insert(0, self.subject + empty_line)

        count = len(lines)
        if count <= 0:
            self.error('Empty commit message!')
            return

        if count >= 1 and re.search(self.cve_re, lines[0]):
            #
            # If CVE-xxxx-xxxxx is present in subject line, then limit length
            # of subject line to 92 characters
            #
            if len(lines[0].rstrip()) >= 93:
                self.error(
                    'First line of commit message (subject line) is too long (%d >= 93).' %
                    (len(lines[0].rstrip()))
                    )
        else:
            #
            # If CVE-xxxx-xxxxx is not present in subject line, then limit
            # length of subject line to 75 characters
            #
            if len(lines[0].rstrip()) >= 76:
                self.error(
                    'First line of commit message (subject line) is too long (%d >= 76).' %
                    (len(lines[0].rstrip()))
                    )

        if count >= 1 and len(lines[0].strip()) == 0:
            self.error('First line of commit message (subject line) ' +
                       'is empty.')

        if count >= 2 and lines[1].strip() != '':
            self.error('Second line of commit message should be ' +
                       'empty.')

        for i in range(2, count):
            if (len(lines[i]) >= 76 and
                len(lines[i].split()) > 1 and
                not lines[i].startswith('git-svn-id:') and
                not lines[i].startswith('Reviewed-by') and
                not lines[i].startswith('Acked-by:') and
                not lines[i].startswith('Tested-by:') and
                not lines[i].startswith('Reported-by:') and
                not lines[i].startswith('Suggested-by:') and
                not lines[i].startswith('Signed-off-by:') and
                not lines[i].startswith('Cc:')):
                #
                # Print a warning if body line is longer than 75 characters
                #
                print(
                    'WARNING - Line %d of commit message is too long (%d >= 76).' %
                    (i + 1, len(lines[i]))
                    )
                print(lines[i])

        last_sig_line = None
        for i in range(count - 1, 0, -1):
            line = lines[i]
            mo = self.sig_block_re.match(line)
            if mo is None:
                if line.strip() == '':
                    break
                elif last_sig_line is not None:
                    err2 = 'Add empty line before "%s"?' % last_sig_line
                    self.error('The line before the signature block ' +
                               'should be empty', err2)
                else:
                    self.error('The signature block was not found')
                break
            last_sig_line = line.strip()

(START, PRE_PATCH, PATCH) = range(3)

class GitDiffCheck:
    """Checks the contents of a git diff."""

    def __init__(self, diff):
        self.ok = True
        self.format_ok = True
        self.lines = diff.splitlines(True)
        self.count = len(self.lines)
        self.line_num = 0
        self.state = START
        self.new_bin = []
        while self.line_num < self.count and self.format_ok:
            line_num = self.line_num
            self.run()
            assert(self.line_num > line_num)
        self.report_message_result()

    def report_message_result(self):
        if Verbose.level < Verbose.NORMAL:
            return
        if self.ok:
            print('The code passed all checks.')
        if self.new_bin:
            print('\nWARNING - The following binary files will be added ' +
                  'into the repository:')
            for binary in self.new_bin:
                print('  ' + binary)

    def run(self):
        line = self.lines[self.line_num]

        if self.state in (PRE_PATCH, PATCH):
            if line.startswith('diff --git'):
                self.state = START
        if self.state == PATCH:
            if line.startswith('@@ '):
                self.state = PRE_PATCH
            elif len(line) >= 1 and line[0] not in ' -+' and \
                 not line.startswith('\r\n') and \
                 not line.startswith(r'\ No newline ') and not self.binary:
                for line in self.lines[self.line_num + 1:]:
                    if line.startswith('diff --git'):
                        self.format_error('diff found after end of patch')
                        break
                self.line_num = self.count
                return

        if self.state == START:
            if line.startswith('diff --git'):
                self.state = PRE_PATCH
                self.filename = line[13:].split(' ', 1)[0]
                self.is_newfile = False
                self.force_crlf = True
                self.force_notabs = True
                if self.filename.endswith('.sh') or \
                    self.filename.startswith('BaseTools/BinWrappers/PosixLike/') or \
                    self.filename.startswith('BaseTools/BinPipWrappers/PosixLike/') or \
                    self.filename == 'BaseTools/BuildEnv':
                    #
                    # Do not enforce CR/LF line endings for linux shell scripts.
                    # Some linux shell scripts don't end with the ".sh" extension,
                    # they are identified by their path.
                    #
                    self.force_crlf = False
                if self.filename == '.gitmodules' or \
                   self.filename == 'BaseTools/Conf/diff.order':
                    #
                    # .gitmodules and diff orderfiles are used internally by git
                    # use tabs and LF line endings.  Do not enforce no tabs and
                    # do not enforce CR/LF line endings.
                    #
                    self.force_crlf = False
                    self.force_notabs = False
                if os.path.basename(self.filename) == 'GNUmakefile' or \
                   os.path.basename(self.filename).lower() == 'makefile' or \
                   os.path.splitext(self.filename)[1] == '.makefile' or \
                   self.filename.startswith(
                        'BaseTools/Source/C/VfrCompile/Pccts/'):
                    self.force_notabs = False
            elif len(line.rstrip()) != 0:
                self.format_error("didn't find diff command")
            self.line_num += 1
        elif self.state == PRE_PATCH:
            if line.startswith('@@ '):
                self.state = PATCH
                self.binary = False
            elif line.startswith('GIT binary patch') or \
                 line.startswith('Binary files'):
                self.state = PATCH
                self.binary = True
                if self.is_newfile:
                    self.new_bin.append(self.filename)
            elif line.startswith('new file mode 160000'):
                #
                # New submodule.  Do not enforce CR/LF line endings
                #
                self.force_crlf = False
            else:
                ok = False
                self.is_newfile = self.newfile_prefix_re.match(line)
                for pfx in self.pre_patch_prefixes:
                    if line.startswith(pfx):
                        ok = True
                if not ok:
                    self.format_error("didn't find diff hunk marker (@@)")
            self.line_num += 1
        elif self.state == PATCH:
            if self.binary:
                pass
            elif line.startswith('-'):
                pass
            elif line.startswith('+'):
                self.check_added_line(line[1:])
            elif line.startswith('\r\n'):
                pass
            elif line.startswith(r'\ No newline '):
                pass
            elif not line.startswith(' '):
                self.format_error("unexpected patch line")
            self.line_num += 1

    pre_patch_prefixes = (
        '--- ',
        '+++ ',
        'index ',
        'new file ',
        'deleted file ',
        'old mode ',
        'new mode ',
        'similarity index ',
        'copy from ',
        'copy to ',
        'rename ',
        )

    line_endings = ('\r\n', '\n\r', '\n', '\r')

    newfile_prefix_re = \
        re.compile(r'''^
                       index\ 0+\.\.
                   ''',
                   re.VERBOSE)

    def added_line_error(self, msg, line):
        lines = [ msg ]
        if self.filename is not None:
            lines.append('File: ' + self.filename)
        lines.append('Line: ' + line)

        self.error(*lines)

    old_debug_re = \
        re.compile(r'''
                        DEBUG \s* \( \s* \( \s*
                        (?: DEBUG_[A-Z_]+ \s* \| \s*)*
                        EFI_D_ ([A-Z_]+)
                   ''',
                   re.VERBOSE)

    def check_added_line(self, line):
        eol = ''
        for an_eol in self.line_endings:
            if line.endswith(an_eol):
                eol = an_eol
                line = line[:-len(eol)]

        stripped = line.rstrip()

        if self.force_crlf and eol != '\r\n' and (line.find('Subproject commit') == -1):
            self.added_line_error('Line ending (%s) is not CRLF' % repr(eol),
                                  line)
        if self.force_notabs and '\t' in line:
            self.added_line_error('Tab character used', line)
        if len(stripped) < len(line):
            self.added_line_error('Trailing whitespace found', line)

        mo = self.old_debug_re.search(line)
        if mo is not None:
            self.added_line_error('EFI_D_' + mo.group(1) + ' was used, '
                                  'but DEBUG_' + mo.group(1) +
                                  ' is now recommended', line)

        rp_file = os.path.realpath(self.filename)
        rp_script = os.path.realpath(__file__)
        if line.find('__FUNCTION__') != -1 and rp_file != rp_script:
            self.added_line_error('__FUNCTION__ was used, but __func__ '
                                  'is now recommended', line)

    split_diff_re = re.compile(r'''
                                   (?P<cmd>
                                       ^ diff \s+ --git \s+ a/.+ \s+ b/.+ $
                                   )
                                   (?P<index>
                                       ^ index \s+ .+ $
                                   )
                               ''',
                               re.IGNORECASE | re.VERBOSE | re.MULTILINE)

    def format_error(self, err):
        self.format_ok = False
        err = 'Patch format error: ' + err
        err2 = 'Line: ' + self.lines[self.line_num].rstrip()
        self.error(err, err2)

    def error(self, *err):
        if self.ok and Verbose.level > Verbose.ONELINE:
            print('Code format is not valid:')
        self.ok = False
        if Verbose.level < Verbose.NORMAL:
            return
        count = 0
        for line in err:
            prefix = (' *', '  ')[count > 0]
            print(prefix, line)
            count += 1

class CheckOnePatch:
    """Checks the contents of a git email formatted patch.

    Various checks are performed on both the commit message and the
    patch content.
    """

    def __init__(self, name, patch):
        self.patch = patch
        self.find_patch_pieces()

        email_check = EmailAddressCheck(self.author_email, 'Author')
        email_ok = email_check.ok

        msg_check = CommitMessageCheck(self.commit_subject, self.commit_msg, self.author_email)
        msg_ok = msg_check.ok

        diff_ok = True
        if self.diff is not None:
            diff_check = GitDiffCheck(self.diff)
            diff_ok = diff_check.ok

        self.ok = email_ok and msg_ok and diff_ok

        if Verbose.level == Verbose.ONELINE:
            if self.ok:
                result = 'ok'
            else:
                result = list()
                if not msg_ok:
                    result.append('commit message')
                if not diff_ok:
                    result.append('diff content')
                result = 'bad ' + ' and '.join(result)
            print(name, result)

    git_diff_re = re.compile(r'''
                                 ^ diff \s+ --git \s+ a/.+ \s+ b/.+ $
                             ''',
                             re.IGNORECASE | re.VERBOSE | re.MULTILINE)

    stat_re = \
        re.compile(r'''
                       (?P<commit_message> [\s\S\r\n]* )
                       (?P<stat>
                           ^ --- $ [\r\n]+
                           (?: ^ \s+ .+ \s+ \| \s+ \d+ \s+ \+* \-* $
                               [\r\n]+ )+
                           [\s\S\r\n]+
                       )
                   ''',
                   re.IGNORECASE | re.VERBOSE | re.MULTILINE)

    subject_prefix_re = \
        re.compile(r'''^
                       \s* (\[
                        [^\[\]]* # Allow all non-brackets
                       \])* \s*
                   ''',
                   re.VERBOSE)

    def find_patch_pieces(self):
        if sys.version_info < (3, 0):
            patch = self.patch.encode('ascii', 'ignore')
        else:
            patch = self.patch

        self.commit_msg = None
        self.stat = None
        self.commit_subject = None
        self.commit_prefix = None
        self.diff = None

        if patch.startswith('diff --git'):
            self.diff = patch
            return

        pmail = email.message_from_string(patch)
        parts = list(pmail.walk())
        assert(len(parts) == 1)
        assert(parts[0].get_content_type() == 'text/plain')
        content = parts[0].get_payload(decode=True).decode('utf-8', 'ignore')

        mo = self.git_diff_re.search(content)
        if mo is not None:
            self.diff = content[mo.start():]
            content = content[:mo.start()]

        mo = self.stat_re.search(content)
        if mo is None:
            self.commit_msg = content
        else:
            self.stat = mo.group('stat')
            self.commit_msg = mo.group('commit_message')
        #
        # Parse subject line from email header.  The subject line may be
        # composed of multiple parts with different encodings.  Decode and
        # combine all the parts to produce a single string with the contents
        # of the decoded subject line.
        #
        parts = email.header.decode_header(pmail.get('subject'))
        subject = ''
        for (part, encoding) in parts:
            if encoding:
                part = part.decode(encoding)
            else:
                try:
                    part = part.decode()
                except:
                    pass
            subject = subject + part

        self.commit_subject = subject.replace('\r\n', '')
        self.commit_subject = self.commit_subject.replace('\n', '')
        self.commit_subject = self.subject_prefix_re.sub('', self.commit_subject, 1)

        self.author_email = pmail['from']

class CheckGitCommits:
    """Reads patches from git based on the specified git revision range.

    The patches are read from git, and then checked.
    """

    def __init__(self, rev_spec, max_count):
        commits = self.read_commit_list_from_git(rev_spec, max_count)
        if len(commits) == 1 and Verbose.level > Verbose.ONELINE:
            commits = [ rev_spec ]
        self.ok = True
        blank_line = False
        for commit in commits:
            if Verbose.level > Verbose.ONELINE:
                if blank_line:
                    print()
                else:
                    blank_line = True
                print('Checking git commit:', commit)
            email = self.read_committer_email_address_from_git(commit)
            self.ok &= EmailAddressCheck(email, 'Committer').ok
            patch = self.read_patch_from_git(commit)
            self.ok &= CheckOnePatch(commit, patch).ok
        if not commits:
            print("Couldn't find commit matching: '{}'".format(rev_spec))

    def read_commit_list_from_git(self, rev_spec, max_count):
        # Run git to get the commit patch
        cmd = [ 'rev-list', '--abbrev-commit', '--no-walk' ]
        if max_count is not None:
            cmd.append('--max-count=' + str(max_count))
        cmd.append(rev_spec)
        out = self.run_git(*cmd)
        return out.split() if out else []

    def read_patch_from_git(self, commit):
        # Run git to get the commit patch
        return self.run_git('show', '--pretty=email', '--no-textconv',
                            '--no-use-mailmap', commit)

    def read_committer_email_address_from_git(self, commit):
        # Run git to get the committer email
        return self.run_git('show', '--pretty=%cn <%ce>', '--no-patch',
                            '--no-use-mailmap', commit)

    def run_git(self, *args):
        cmd = [ 'git' ]
        cmd += args
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        Result = p.communicate()
        return Result[0].decode('utf-8', 'ignore') if Result[0] and Result[0].find(b"fatal") != 0 else None

class CheckOnePatchFile:
    """Performs a patch check for a single file.

    stdin is used when the filename is '-'.
    """

    def __init__(self, patch_filename):
        if patch_filename == '-':
            patch = sys.stdin.read()
            patch_filename = 'stdin'
        else:
            f = open(patch_filename, 'rb')
            patch = f.read().decode('utf-8', 'ignore')
            f.close()
        if Verbose.level > Verbose.ONELINE:
            print('Checking patch file:', patch_filename)
        self.ok = CheckOnePatch(patch_filename, patch).ok

class CheckOneArg:
    """Performs a patch check for a single command line argument.

    The argument will be handed off to a file or git-commit based
    checker.
    """

    def __init__(self, param, max_count=None):
        self.ok = True
        if param == '-' or os.path.exists(param):
            checker = CheckOnePatchFile(param)
        else:
            checker = CheckGitCommits(param, max_count)
        self.ok = checker.ok

class PatchCheckApp:
    """Checks patches based on the command line arguments."""

    def __init__(self):
        self.parse_options()
        patches = self.args.patches

        if len(patches) == 0:
            patches = [ 'HEAD' ]

        self.ok = True
        self.count = None
        for patch in patches:
            self.process_one_arg(patch)

        if self.count is not None:
            self.process_one_arg('HEAD')

        if self.ok:
            self.retval = 0
        else:
            self.retval = -1

    def process_one_arg(self, arg):
        if len(arg) >= 2 and arg[0] == '-':
            try:
                self.count = int(arg[1:])
                return
            except ValueError:
                pass
        self.ok &= CheckOneArg(arg, self.count).ok
        self.count = None

    def parse_options(self):
        parser = argparse.ArgumentParser(description=__copyright__)
        parser.add_argument('--version', action='version',
                            version='%(prog)s ' + VersionNumber)
        parser.add_argument('patches', nargs='*',
                            help='[patch file | git rev list]')
        group = parser.add_mutually_exclusive_group()
        group.add_argument("--oneline",
                           action="store_true",
                           help="Print one result per line")
        group.add_argument("--silent",
                           action="store_true",
                           help="Print nothing")
        self.args = parser.parse_args()
        if self.args.oneline:
            Verbose.level = Verbose.ONELINE
        if self.args.silent:
            Verbose.level = Verbose.SILENT

if __name__ == "__main__":
    sys.exit(PatchCheckApp().retval)
edk2-master
BaseTools/Scripts/PatchCheck.py
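A minimal sketch of the name/address split that EmailAddressCheck builds its checks on; the address below is a made-up example, not taken from a real patch.

import re

email_re1 = re.compile(r'(?:\s*)(.*?)(\s*)<(.+)>\s*$', re.MULTILINE | re.IGNORECASE)
mo = email_re1.match('Jane Developer <jane@example.com>')
name, space, address = mo.group(1), mo.group(2), mo.group(3)
assert (name, space, address) == ('Jane Developer', ' ', 'jane@example.com')
# An empty group 2 (no space before '<') is one of the conditions the
# checker flags as an error.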
## @file
# Convert a binary file to a VOID* PCD value or DSC file VOID* PCD statement.
#
# Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

'''
BinToPcd
'''
from __future__ import print_function

import sys
import argparse
import re
import xdrlib
import io
import struct
import math

#
# Globals for help information
#
__prog__        = 'BinToPcd'
__copyright__   = 'Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.'
__description__ = 'Convert one or more binary files to a VOID* PCD value or DSC file VOID* PCD statement.\n'

if __name__ == '__main__':
    def ValidateUnsignedInteger (Argument):
        try:
            Value = int (Argument, 0)
        except:
            Message = '{Argument} is not a valid integer value.'.format (Argument = Argument)
            raise argparse.ArgumentTypeError (Message)
        if Value < 0:
            Message = '{Argument} is a negative value.'.format (Argument = Argument)
            raise argparse.ArgumentTypeError (Message)
        return Value

    def ValidatePcdName (Argument):
        if re.split ('[a-zA-Z\_][a-zA-Z0-9\_]*\.[a-zA-Z\_][a-zA-Z0-9\_]*', Argument) != ['', '']:
            Message = '{Argument} is not in the form <PcdTokenSpaceGuidCName>.<PcdCName>'.format (Argument = Argument)
            raise argparse.ArgumentTypeError (Message)
        return Argument

    def ValidateGuidName (Argument):
        if re.split ('[a-zA-Z\_][a-zA-Z0-9\_]*', Argument) != ['', '']:
            Message = '{Argument} is not a valid GUID C name'.format (Argument = Argument)
            raise argparse.ArgumentTypeError (Message)
        return Argument

    def XdrPackBuffer (buffer):
        packed_bytes = io.BytesIO()
        for unpacked_bytes in buffer:
            n = len(unpacked_bytes)
            packed_bytes.write(struct.pack('>L', n))
            data = unpacked_bytes[:n]
            n = math.ceil(n/4)*4
            data = data + (n - len(data)) * b'\0'
            packed_bytes.write(data)
        return packed_bytes.getvalue()

    def ByteArray (Buffer, Xdr = False):
        if Xdr:
            #
            # If Xdr flag is set then encode data using the Variable-Length
            # Opaque Data format of RFC 4506 External Data Representation
            # Standard (XDR).
            #
            Buffer = bytearray (XdrPackBuffer (Buffer))
        else:
            #
            # If Xdr flag is not set, then concatenate all the data
            #
            Buffer = bytearray (b''.join (Buffer))
        #
        # Return a PCD value of the form '{0x01, 0x02, ...}' along with the
        # PCD length in bytes
        #
        return '{' + (', '.join (['0x{Byte:02X}'.format (Byte = Item) for Item in Buffer])) + '}', len (Buffer)

    #
    # Create command line argument parser object
    #
    parser = argparse.ArgumentParser (prog = __prog__,
                                      description = __description__ + __copyright__,
                                      conflict_handler = 'resolve')
    parser.add_argument ("-i", "--input", dest = 'InputFile', type = argparse.FileType ('rb'),
                         action = 'append', required = True,
                         help = "Input binary filename.  Multiple input files are combined into a single PCD.")
    parser.add_argument ("-o", "--output", dest = 'OutputFile', type = argparse.FileType ('w'),
                         help = "Output filename for PCD value or PCD statement")
    parser.add_argument ("-p", "--pcd", dest = 'PcdName', type = ValidatePcdName,
                         help = "Name of the PCD in the form <PcdTokenSpaceGuidCName>.<PcdCName>")
    parser.add_argument ("-t", "--type", dest = 'PcdType', default = None, choices = ['VPD', 'HII'],
                         help = "PCD statement type (HII or VPD).  Default is standard.")
    parser.add_argument ("-m", "--max-size", dest = 'MaxSize', type = ValidateUnsignedInteger,
                         help = "Maximum size of the PCD.  Ignored with --type HII.")
    parser.add_argument ("-f", "--offset", dest = 'Offset', type = ValidateUnsignedInteger,
                         help = "VPD offset if --type is VPD.  UEFI Variable offset if --type is HII.  Must be 8-byte aligned.")
    parser.add_argument ("-n", "--variable-name", dest = 'VariableName',
                         help = "UEFI variable name.  Only used with --type HII.")
    parser.add_argument ("-g", "--variable-guid", type = ValidateGuidName, dest = 'VariableGuid',
                         help = "UEFI variable GUID C name.  Only used with --type HII.")
    parser.add_argument ("-x", "--xdr", dest = 'Xdr', action = "store_true",
                         help = "Encode PCD using the Variable-Length Opaque Data format of RFC 4506 External Data Representation Standard (XDR)")
    parser.add_argument ("-v", "--verbose", dest = 'Verbose', action = "store_true",
                         help = "Increase output messages")
    parser.add_argument ("-q", "--quiet", dest = 'Quiet', action = "store_true",
                         help = "Reduce output messages")
    parser.add_argument ("--debug", dest = 'Debug', type = int, metavar = '[0-9]',
                         choices = range (0, 10), default = 0,
                         help = "Set debug level")

    #
    # Parse command line arguments
    #
    args = parser.parse_args ()

    #
    # Read all binary input files
    #
    Buffer = []
    for File in args.InputFile:
        try:
            Buffer.append (File.read ())
            File.close ()
        except:
            print ('BinToPcd: error: can not read binary input file {File}'.format (File = File))
            sys.exit (1)

    #
    # Convert PCD to an encoded string of hex values and determine the size
    # of the encoded PCD in bytes.
    #
    PcdValue, PcdSize = ByteArray (Buffer, args.Xdr)

    #
    # Convert binary buffer to a DSC file PCD statement
    #
    if args.PcdName is None:
        #
        # If PcdName is None, then only a PCD value is being requested.
        #
        Pcd = PcdValue
        if args.Verbose:
            print ('BinToPcd: Convert binary file to PCD Value')
    elif args.PcdType is None:
        #
        # If --type is neither VPD nor HII, then use PCD statement syntax that
        # is compatible with [PcdsFixedAtBuild], [PcdsPatchableInModule],
        # [PcdsDynamicDefault], and [PcdsDynamicExDefault].
        #
        if args.MaxSize is None:
            #
            # If --max-size is not provided, then do not generate the syntax
            # that includes the maximum size.
            #
            Pcd = '  {Name}|{Value}'.format (Name = args.PcdName, Value = PcdValue)
        elif args.MaxSize < PcdSize:
            print ('BinToPcd: error: argument --max-size is smaller than input file.')
            sys.exit (1)
        else:
            Pcd = '  {Name}|{Value}|VOID*|{Size}'.format (Name = args.PcdName, Value = PcdValue, Size = args.MaxSize)
        if args.Verbose:
            print ('BinToPcd: Convert binary file to PCD statement compatible with PCD sections:')
            print ('    [PcdsFixedAtBuild]')
            print ('    [PcdsPatchableInModule]')
            print ('    [PcdsDynamicDefault]')
            print ('    [PcdsDynamicExDefault]')
    elif args.PcdType == 'VPD':
        if args.MaxSize is None:
            #
            # If --max-size is not provided, then set maximum size to the size
            # of the binary input file
            #
            args.MaxSize = PcdSize
        if args.MaxSize < PcdSize:
            print ('BinToPcd: error: argument --max-size is smaller than input file.')
            sys.exit (1)
        if args.Offset is None:
            #
            # If --offset is not provided, then set offset field to '*' so
            # build tools will compute offset of PCD in VPD region.
            #
            Pcd = '  {Name}|*|{Size}|{Value}'.format (Name = args.PcdName, Size = args.MaxSize, Value = PcdValue)
        else:
            #
            # --offset value must be 8-byte aligned
            #
            if (args.Offset % 8) != 0:
                print ('BinToPcd: error: argument --offset must be 8-byte aligned.')
                sys.exit (1)
            #
            # Use the --offset value provided.
            #
            Pcd = '  {Name}|{Offset}|{Size}|{Value}'.format (Name = args.PcdName, Offset = args.Offset, Size = args.MaxSize, Value = PcdValue)
        if args.Verbose:
            print ('BinToPcd: Convert binary file to PCD statement compatible with PCD sections')
            print ('    [PcdsDynamicVpd]')
            print ('    [PcdsDynamicExVpd]')
    elif args.PcdType == 'HII':
        if args.VariableGuid is None or args.VariableName is None:
            print ('BinToPcd: error: arguments --variable-guid and --variable-name are required for --type HII.')
            sys.exit (1)
        if args.Offset is None:
            #
            # Use UEFI Variable offset of 0 if --offset is not provided
            #
            args.Offset = 0
        #
        # --offset value must be 8-byte aligned
        #
        if (args.Offset % 8) != 0:
            print ('BinToPcd: error: argument --offset must be 8-byte aligned.')
            sys.exit (1)
        Pcd = '  {Name}|L"{VarName}"|{VarGuid}|{Offset}|{Value}'.format (Name = args.PcdName, VarName = args.VariableName, VarGuid = args.VariableGuid, Offset = args.Offset, Value = PcdValue)
        if args.Verbose:
            print ('BinToPcd: Convert binary file to PCD statement compatible with PCD sections')
            print ('    [PcdsDynamicHii]')
            print ('    [PcdsDynamicExHii]')

    #
    # Write PCD value or PCD statement to the output file
    #
    try:
        args.OutputFile.write (Pcd)
        args.OutputFile.close ()
    except:
        #
        # If the output file is not specified or it can not be written, then
        # write the PCD value or PCD statement to the console
        #
        print (Pcd)
edk2-master
BaseTools/Scripts/BinToPcd.py
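A minimal, self-contained sketch of the RFC 4506 variable-length opaque packing that XdrPackBuffer implements: each chunk gets a 4-byte big-endian length prefix and is zero-padded to a 4-byte boundary.

import io
import math
import struct

def XdrPack(Chunks):
    Packed = io.BytesIO()
    for Data in Chunks:
        n = len(Data)
        Packed.write(struct.pack('>L', n))         # 4-byte big-endian length
        Padded = math.ceil(n / 4) * 4              # round up to 4-byte boundary
        Packed.write(Data + b'\0' * (Padded - n))  # data plus zero padding
    return Packed.getvalue()

# 3 data bytes -> length 0x00000003, then 01 02 03 plus one pad byte
assert XdrPack([b'\x01\x02\x03']).hex() == '0000000301020300'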
#!/usr/bin/python
## @file
# Firmware Configuration Editor (FCE) from https://firmware.intel.com/develop
# can parse BIOS image and generate Firmware Configuration file.
# Based on the Firmware Configuration file, this script generates the
# structure PCD settings in DEC/DSC/INF files.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

'''
ConvertFceToStructurePcd
'''

import re
import os
import datetime
import argparse

#
# Globals for help information
#
__prog__        = 'ConvertFceToStructurePcd'
__version__     = '%s Version %s' % (__prog__, '0.1 ')
__copyright__   = 'Copyright (c) 2018, Intel Corporation. All rights reserved.'
__description__ = 'Generate Structure PCD in DEC/DSC/INF based on Firmware Configuration.\n'


dscstatement='''[Defines]
  VPD_TOOL_GUID                  = 8C3D856A-9BE6-468E-850A-24F7A8D38E08

[SkuIds]
  0|DEFAULT              # The entry: 0|DEFAULT is reserved and always required.

[DefaultStores]
  0|STANDARD             # UEFI Standard default  0|STANDARD is reserved.
  1|MANUFACTURING        # UEFI Manufacturing default 1|MANUFACTURING is reserved.

[PcdsDynamicExVpd.common.DEFAULT]
  gEfiMdeModulePkgTokenSpaceGuid.PcdNvStoreDefaultValueBuffer|*
'''

decstatement = '''[Guids]
  gStructPcdTokenSpaceGuid = {0x3f1406f4, 0x2b, 0x487a, {0x8b, 0x69, 0x74, 0x29, 0x1b, 0x36, 0x16, 0xf4}}

[PcdsFixedAtBuild,PcdsPatchableInModule,PcdsDynamic,PcdsDynamicEx]
'''

infstatement = '''[Pcd]
'''

SECTION='PcdsDynamicHii'
PCD_NAME='gStructPcdTokenSpaceGuid.Pcd'
Max_Pcd_Len = 100

WARNING=[]
ERRORMSG=[]

class parser_lst(object):

  def __init__(self,filelist):
    self._ignore=['BOOLEAN', 'UINT8', 'UINT16', 'UINT32', 'UINT64']
    self.file=filelist
    self.text=self.megre_lst()[0]
    self.content=self.megre_lst()[1]

  def megre_lst(self):
    alltext=''
    content={}
    for file in self.file:
      with open(file,'r') as f:
        read =f.read()
      alltext += read
      content[file]=read
    return alltext,content

  def struct_lst(self): #{struct:lst file}
    structs_file={}
    name_format = re.compile(r'(?<!typedef)\s+struct (\w+) {.*?;', re.S)
    for i in list(self.content.keys()):
      structs= name_format.findall(self.content[i])
      if structs:
        for j in structs:
          if j not in self._ignore:
            structs_file[j]=i
      else:
        print("%s"%structs)
    return structs_file

  def struct(self): #struct:{offset:name}
    unit_num = re.compile('(\d+)')
    offset1_re = re.compile('(\d+)\[')
    pcdname_num_re = re.compile('\w+\[(\S+)\]')
    pcdname_re = re.compile('\](.*)\<')
    pcdname2_re = re.compile('(\w+)\[')
    uint_re = re.compile('\<(\S+)\>')
    name_format = re.compile(r'(?<!typedef)\s+struct (\w+) {.*?;', re.S)
    name=name_format.findall(self.text)
    info={}
    unparse=[]
    if name:
      tmp_n = [n for n in name if n not in self._ignore]
      name = list(set(tmp_n))
      name.sort(key = tmp_n.index)
      name.reverse()
      #name=list(set(name).difference(set(self._ignore)))
      for struct in name:
        s_re = re.compile(r'struct %s :(.*?)};'% struct, re.S)
        content = s_re.search(self.text)
        if content:
          tmp_dict = {}
          text = content.group().split('+')
          for line in text[1:]:
            offset = offset1_re.findall(line)
            t_name = pcdname_re.findall(line)
            uint = uint_re.findall(line)
            if offset and uint:
              offset = offset[0]
              uint = uint[0]
              if t_name:
                t_name = t_name[0].strip()
                if (' ' in t_name) or ("=" in t_name) or (";" in t_name) or("\\" in name) or (t_name ==''):
                  WARNING.append("Warning:Invalid Pcd name '%s' for Offset %s in struct %s" % (t_name,offset, struct))
                else:
                  if '[' in t_name:
                    if uint in ['UINT8', 'UINT16', 'UINT32', 'UINT64']:
                      offset = int(offset, 10)
                      tmp_name = pcdname2_re.findall(t_name)[0] + '[0]'
                      tmp_dict[offset] = tmp_name
                      pcdname_num = int(pcdname_num_re.findall(t_name)[0],10)
                      uint = int(unit_num.findall(uint)[0],10)
                      bit = uint // 8
                      for i in range(1, pcdname_num):
                        offset += bit
                        tmp_name = pcdname2_re.findall(t_name)[0] + '[%s]' % i
                        tmp_dict[offset] = tmp_name
                    else:
                      tmp_name = pcdname2_re.findall(t_name)[0]
                      pcdname_num = pcdname_num_re.findall(t_name)[0]
                      line = [offset,tmp_name,pcdname_num,uint]
                      line.append(struct)
                      unparse.append(line)
                  else:
                    if uint not in ['UINT8', 'UINT16', 'UINT32', 'UINT64', 'BOOLEAN']:
                      line = [offset, t_name, 0, uint]
                      line.append(struct)
                      unparse.append(line)
                    else:
                      offset = int(offset,10)
                      tmp_dict[offset] = t_name
          info[struct] = tmp_dict
      if len(unparse) != 0:
        for u in unparse:
          if u[3] in list(info.keys()):
            unpar = self.nameISstruct(u,info[u[3]])
            info[u[4]]= dict(list(info[u[4]].items())+list(unpar[u[4]].items()))
    else:
      print("ERROR: No struct name found in %s" % self.file)
      ERRORMSG.append("ERROR: No struct name found in %s" % self.file)
    return info

  def nameISstruct(self,line,key_dict):
    dict={}
    dict2={}
    s_re = re.compile(r'struct %s :(.*?)};' % line[3], re.S)
    size_re = re.compile(r'mTotalSize \[(\S+)\]')
    content = s_re.search(self.text)
    if content:
      s_size = size_re.findall(content.group())[0]
    else:
      s_size = '0'
      print("ERROR: Struct %s not define mTotalSize in lst file" % line[3])
      ERRORMSG.append("ERROR: Struct %s not define mTotalSize in lst file" % line[3])
    size = int(line[0], 10)
    if line[2] != 0:
      for j in range(0, int(line[2], 10)):
        for k in list(key_dict.keys()):
          offset = size + k
          name ='%s.%s' %((line[1]+'[%s]'%j),key_dict[k])
          dict[offset] = name
        size = int(s_size,16)+size
    elif line[2] == 0:
      for k in list(key_dict.keys()):
        offset = size + k
        name = '%s.%s' % (line[1], key_dict[k])
        dict[offset] = name
    dict2[line[4]] = dict
    return dict2

  def efivarstore_parser(self):
    efivarstore_format = re.compile(r'efivarstore.*?;', re.S)
    struct_re = re.compile(r'efivarstore(.*?),',re.S)
    name_re = re.compile(r'name=(\w+)')
    efivarstore_dict={}
    efitxt = efivarstore_format.findall(self.text)
    for i in efitxt:
      struct = struct_re.findall(i.replace(' ',''))
      if struct[0] in self._ignore:
        continue
      name = name_re.findall(i.replace(' ',''))
      if struct and name:
        efivarstore_dict[name[0]]=struct[0]
      else:
        print("ERROR: Can't find Struct or name in lst file, please check have this format:efivarstore XXXX, name=xxxx")
        ERRORMSG.append("ERROR: Can't find Struct or name in lst file, please check have this format:efivarstore XXXX, name=xxxx")
    return efivarstore_dict

class Config(object):

  def __init__(self,Config):
    self.config=Config

  #Parser .config file,return list[offset,name,guid,value,help]
  def config_parser(self):
    ids_re =re.compile('_ID:(\d+)',re.S)
    id_re= re.compile('\s+')
    info = []
    info_dict={}
    with open(self.config, 'r') as text:
      read = text.read()
    if 'DEFAULT_ID:' in read:
      all_txt = read.split('FCEKEY DEFAULT')
      for i in all_txt[1:]:
        part = [] #save all information for DEFAULT_ID
        str_id=''
        ids = ids_re.findall(i.replace(' ',''))
        for m in ids:
          str_id +=m+'_'
        str_id=str_id[:-1]
        part.append(ids)
        section = i.split('\nQ') #split with '\nQ ' to get every block
        part +=self.section_parser(section)
        info_dict[str_id] = self.section_parser(section)
        info.append(part)
    else:
      part = []
      id=('0','0')
      str_id='0_0'
      part.append(id)
      section = read.split('\nQ')
      part +=self.section_parser(section)
      info_dict[str_id] = self.section_parser(section)
      info.append(part)
    return info_dict

  def eval_id(self,id):
    id = id.split("_")
    default_id=id[0:len(id)//2]
    platform_id=id[len(id)//2:]
    text=''
    for i in range(len(default_id)):
      text +="%s.common.%s.%s,"%(SECTION,self.id_name(platform_id[i],'PLATFORM'),self.id_name(default_id[i],'DEFAULT'))
    return '\n[%s]\n'%text[:-1]

  def id_name(self,ID, flag):
    platform_dict = {'0': 'DEFAULT'}
    default_dict = {'0': 'STANDARD', '1': 'MANUFACTURING'}
    if flag == "PLATFORM":
      try:
        value = platform_dict[ID]
      except KeyError:
        value = 'SKUID%s' % ID
    elif flag == 'DEFAULT':
      try:
        value = default_dict[ID]
      except KeyError:
        value = 'DEFAULTID%s' % ID
    else:
      value = None
    return value

  def section_parser(self,section):
    offset_re = re.compile(r'offset=(\w+)')
    name_re = re.compile(r'name=(\S+)')
    guid_re = re.compile(r'guid=(\S+)')
    # help_re = re.compile(r'help = (.*)')
    attribute_re=re.compile(r'attribute=(\w+)')
    value_re = re.compile(r'(//.*)')
    part = []
    part_without_comment = []
    for x in section[1:]:
      line=x.split('\n')[0]
      comment_list = value_re.findall(line) # the string \\... in "Q...." line
      comment_list[0] = comment_list[0].replace('//', '')
      comment_ori = comment_list[0].strip()
      comment = ""
      for each in comment_ori:
        if each != " " and "\x21" > each or each > "\x7E":
          if bytes(each, 'utf-16') == b'\xff\xfe\xae\x00':
            each = '(R)'
          else:
            each = ""
        comment += each
      line=value_re.sub('',line) #delete \\... in "Q...." line
      list1=line.split(' ')
      value=self.value_parser(list1)
      offset = offset_re.findall(x.replace(' ',''))
      name = name_re.findall(x.replace(' ',''))
      guid = guid_re.findall(x.replace(' ',''))
      attribute =attribute_re.findall(x.replace(' ',''))
      if offset and name and guid and value and attribute:
        if attribute[0] in ['0x3','0x7']:
          offset = int(offset[0], 16)
          #help = help_re.findall(x)
          text_without_comment = offset, name[0], guid[0], value, attribute[0]
          if text_without_comment in part_without_comment:
            # check if exists same Pcd with different comments, add different
            # comments in one line with "|".
            dupl_index = part_without_comment.index(text_without_comment)
            part[dupl_index] = list(part[dupl_index])
            if comment not in part[dupl_index][-1]:
              part[dupl_index][-1] += " | " + comment
            part[dupl_index] = tuple(part[dupl_index])
          else:
            text = offset, name[0], guid[0], value, attribute[0], comment
            part_without_comment.append(text_without_comment)
            part.append(text)
    return(part)

  def value_parser(self, list1):
    list1 = [t for t in list1 if t != '']  # remove '' from list
    first_num = int(list1[0], 16)
    if list1[first_num + 1] == 'STRING':  # parser STRING
      if list1[-1] == '""':
        value = "{0x0, 0x0}"
      else:
        value = 'L%s' % list1[-1]
    elif list1[first_num + 1] == 'ORDERED_LIST':  # parser ORDERED_LIST
      value_total = int(list1[first_num + 2])
      list2 = list1[-value_total:]
      tmp = []
      line = ''
      for i in list2:
        if len(i) % 2 == 0 and len(i) != 2:
          for m in range(0, len(i) // 2):
            tmp.append('0x%02x' % (int('0x%s' % i, 16) >> m * 8 & 0xff))
        else:
          tmp.append('0x%s' % i)
      for i in tmp:
        line += '%s,' % i
      value = '{%s}' % line[:-1]
    else:
      value = "0x%01x" % int(list1[-1], 16)
    return value

#parser Guid file, get guid name from guid value
class GUID(object):

  def __init__(self,path):
    self.path = path
    self.guidfile = self.gfile()
    self.guiddict = self.guid_dict()

  def gfile(self):
    for root, dir, file in os.walk(self.path, topdown=True, followlinks=False):
      if 'FV' in dir:
        gfile = os.path.join(root,'Fv','Guid.xref')
        if os.path.isfile(gfile):
          return gfile
        else:
          print("ERROR: Guid.xref file not found")
          ERRORMSG.append("ERROR: Guid.xref file not found")
          exit()

  def guid_dict(self):
    guiddict={}
    with open(self.guidfile,'r') as file:
      lines = file.readlines()
    guidinfo=lines
    for line in guidinfo:
      list=line.strip().split(' ')
      if list:
        if len(list)>1:
          guiddict[list[0].upper()]=list[1]
        elif list[0] != ''and len(list)==1:
          print("Error: line %s can't be parser in %s"%(line.strip(),self.guidfile))
          ERRORMSG.append("Error: line %s can't be parser in %s"%(line.strip(),self.guidfile))
      else:
        print("ERROR: No data in %s" %self.guidfile)
        ERRORMSG.append("ERROR: No data in %s" %self.guidfile)
    return guiddict

  def guid_parser(self,guid):
    if guid.upper() in self.guiddict:
      return self.guiddict[guid.upper()]
    else:
      print("ERROR: GUID %s not found in file %s"%(guid, self.guidfile))
      ERRORMSG.append("ERROR: GUID %s not found in file %s"%(guid, self.guidfile))
      return guid

class PATH(object):

  def __init__(self,path):
    self.path=path
    self.rootdir=self.get_root_dir()
    self.usefuldir=set()
    self.lstinf = {}
    for path in self.rootdir:
      for o_root, o_dir, o_file in os.walk(os.path.join(path, "OUTPUT"), topdown=True, followlinks=False):
        for INF in o_file:
          if os.path.splitext(INF)[1] == '.inf':
            for l_root, l_dir, l_file in os.walk(os.path.join(path, "DEBUG"), topdown=True, followlinks=False):
              for LST in l_file:
                if os.path.splitext(LST)[1] == '.lst':
                  self.lstinf[os.path.join(l_root, LST)] = os.path.join(o_root, INF)
                  self.usefuldir.add(path)

  def get_root_dir(self):
    rootdir=[]
    for root,dir,file in os.walk(self.path,topdown=True,followlinks=False):
      if "OUTPUT" in root:
        updir=root.split("OUTPUT",1)[0]
        rootdir.append(updir)
    rootdir=list(set(rootdir))
    return rootdir

  def lst_inf(self):
    return self.lstinf

  def package(self):
    package={}
    package_re=re.compile(r'Packages\.\w+]\n(.*)',re.S)
    for i in list(self.lstinf.values()):
      with open(i,'r') as inf:
        read=inf.read()
      section=read.split('[')
      for j in section:
        p=package_re.findall(j)
        if p:
          package[i]=p[0].rstrip()
    return package

  def header(self,struct):
    header={}
    head_re = re.compile('typedef.*} %s;[\n]+(.*)(?:typedef|formset)'%struct,re.M|re.S)
    head_re2 = re.compile(r'#line[\s\d]+"(\S+h)"')
    for i in list(self.lstinf.keys()):
      with open(i,'r') as lst:
        read = lst.read()
      h = head_re.findall(read)
      if h:
        head=head_re2.findall(h[0])
        if head:
          format = head[0].replace('\\\\','/').replace('\\','/')
          name =format.split('/')[-1]
          head = self.headerfileset.get(name)
          if head:
            head = head.replace('\\','/')
            header[struct] = head
    return header

  @property
  def headerfileset(self):
    headerset = dict()
    for root,dirs,files in os.walk(self.path):
      for file in files:
        if os.path.basename(file) == 'deps.txt':
          with open(os.path.join(root,file),"r") as fr:
            for line in fr.readlines():
              headerset[os.path.basename(line).strip()] = line.strip()
    return headerset

  def makefile(self,filename):
    re_format = re.compile(r'DEBUG_DIR.*(?:\S+Pkg)\\(.*\\%s)'%filename)
    for i in self.usefuldir:
      with open(os.path.join(i,'Makefile'),'r') as make:
        read = make.read()
      dir = re_format.findall(read)
      if dir:
        return dir[0]
    return None

class mainprocess(object):

  def __init__(self,InputPath,Config,OutputPath):
    self.init = 0xFCD00000
    self.inputpath = os.path.abspath(InputPath)
    self.outputpath = os.path.abspath(OutputPath)
    self.LST = PATH(self.inputpath)
    self.lst_dict = self.LST.lst_inf()
    self.Config = Config
    self.attribute_dict = {'0x3': 'NV, BS', '0x7': 'NV, BS, RT'}
    self.guid = GUID(self.inputpath)
    self.header={}

  def main(self):
    conf=Config(self.Config)
    config_dict=conf.config_parser() #get {'0_0':[offset,name,guid,value,attribute]...,'1_0':....}
    lst=parser_lst(list(self.lst_dict.keys()))
    efi_dict=lst.efivarstore_parser() #get {name:struct} from lst file
    keys=sorted(config_dict.keys())
    all_struct=lst.struct()
    stru_lst=lst.struct_lst()
    title_list=[]
    info_list=[]
    header_list=[]
    inf_list =[]
    for i in stru_lst:
      tmp = self.LST.header(i)
      self.header.update(tmp)
    for id_key in keys:
      tmp_id=[id_key] #['0_0',[(struct,[name...]),(struct,[name...])]]
      tmp_info={} #{name:struct}
      for section in config_dict[id_key]:
        c_offset,c_name,c_guid,c_value,c_attribute,c_comment = section
        if c_name in efi_dict:
          struct = efi_dict[c_name]
          title='%s%s|L"%s"|%s|0x00||%s\n'%(PCD_NAME,c_name,c_name,self.guid.guid_parser(c_guid),self.attribute_dict[c_attribute])
          if struct in all_struct:
            lstfile = stru_lst[struct]
            struct_dict=all_struct[struct]
            try:
              title2 = '%s%s|{0}|%s|0xFCD00000{\n <HeaderFiles>\n  %s\n <Packages>\n%s\n}\n' % (PCD_NAME, c_name, struct, self.header[struct], self.LST.package()[self.lst_dict[lstfile]])
            except KeyError:
              WARNING.append("Warning: No <HeaderFiles> for struct %s"%struct)
              title2 = '%s%s|{0}|%s|0xFCD00000{\n <HeaderFiles>\n  %s\n <Packages>\n%s\n}\n' % (PCD_NAME, c_name, struct, '', self.LST.package()[self.lst_dict[lstfile]])
            header_list.append(title2)
          elif struct not in lst._ignore:
            struct_dict ={}
            print("ERROR: Struct %s can't found in lst file" %struct)
            ERRORMSG.append("ERROR: Struct %s can't found in lst file" %struct)
          if c_offset in struct_dict:
            offset_name=struct_dict[c_offset]
            info = "%s%s.%s|%s\n"%(PCD_NAME,c_name,offset_name,c_value)
            blank_length = Max_Pcd_Len - len(info)
            if blank_length <= 0:
              info_comment = "%s%s.%s|%s%s# %s\n"%(PCD_NAME,c_name,offset_name,c_value," ",c_comment)
            else:
              info_comment = "%s%s.%s|%s%s# %s\n"%(PCD_NAME,c_name,offset_name,c_value,blank_length*" ",c_comment)
            inf = "%s%s\n"%(PCD_NAME,c_name)
            inf_list.append(inf)
            tmp_info[info_comment]=title
          else:
            print("ERROR: Can't find offset %s with struct name %s"%(c_offset,struct))
            ERRORMSG.append("ERROR: Can't find offset %s with name %s"%(c_offset,struct))
        else:
          print("ERROR: 
Can't find name %s in lst file"%(c_name)) ERRORMSG.append("ERROR: Can't find name %s in lst file"%(c_name)) tmp_id.append(list(self.reverse_dict(tmp_info).items())) id,tmp_title_list,tmp_info_list = self.read_list(tmp_id) title_list +=tmp_title_list info_list.append(tmp_info_list) inf_list = self.del_repeat(inf_list) header_list = self.plus(self.del_repeat(header_list)) title_all=list(set(title_list)) info_list = self.remove_bracket(self.del_repeat(info_list)) for i in range(len(info_list)-1,-1,-1): if len(info_list[i]) == 0: info_list.remove(info_list[i]) for i in (inf_list, title_all, header_list): i.sort() return keys,title_all,info_list,header_list,inf_list def correct_sort(self, PcdString): # sort the Pcd list with two rules: # First sort through Pcd name; # Second if the Pcd exists several elements, sort them through index value. if ("]|") in PcdString: Pcdname = PcdString.split("[")[0] Pcdindex = int(PcdString.split("[")[1].split("]")[0]) else: Pcdname = PcdString.split("|")[0] Pcdindex = 0 return Pcdname, Pcdindex def remove_bracket(self,List): for i in List: for j in i: tmp = j.split("|") if (('L"' in j) and ("[" in j)) or (tmp[1].split("#")[0].strip() == '{0x0, 0x0}'): tmp[0] = tmp[0][:tmp[0].index('[')] List[List.index(i)][i.index(j)] = "|".join(tmp) else: List[List.index(i)][i.index(j)] = j for i in List: if type(i) == type([0,0]): i.sort(key = lambda x:(self.correct_sort(x)[0], self.correct_sort(x)[1])) return List def write_all(self): title_flag=1 info_flag=1 if not os.path.isdir(self.outputpath): os.makedirs(self.outputpath) decwrite = write2file(os.path.join(self.outputpath,'StructurePcd.dec')) dscwrite = write2file(os.path.join(self.outputpath,'StructurePcd.dsc')) infwrite = write2file(os.path.join(self.outputpath, 'StructurePcd.inf')) conf = Config(self.Config) ids,title,info,header,inf=self.main() decwrite.add2file(decstatement) decwrite.add2file(header) infwrite.add2file(infstatement) infwrite.add2file(inf) dscwrite.add2file(dscstatement) for id in ids: dscwrite.add2file(conf.eval_id(id)) if title_flag: dscwrite.add2file(title) title_flag=0 if len(info) == 1: dscwrite.add2file(info) elif len(info) == 2: if info_flag: dscwrite.add2file(info[0]) info_flag =0 else: dscwrite.add2file(info[1]) def del_repeat(self,List): if len(List) == 1 or len(List) == 0: return List else: if type(List[0]) != type('xxx'): alist=[] for i in range(len(List)): if i == 0: alist.append(List[0]) else: plist = [] for j in range(i): plist += List[j] alist.append(self.__del(list(set(plist)), List[i])) return alist else: return list(set(List)) def __del(self,list1,list2): return list(set(list2).difference(set(list1))) def reverse_dict(self,dict): data={} for i in list(dict.items()): if i[1] not in list(data.keys()): data[i[1]]=[i[0]] else: data[i[1]].append(i[0]) return data def read_list(self,list): title_list=[] info_list=[] for i in list[1]: title_list.append(i[0]) for j in i[1]: info_list.append(j) return list[0],title_list,info_list def plus(self,list): nums=[] for i in list: if type(i) != type([0]): self.init += 1 num = "0x%01x" % self.init j=i.replace('0xFCD00000',num.upper()) nums.append(j) return nums class write2file(object): def __init__(self,Output): self.output=Output self.text='' if os.path.exists(self.output): os.remove(self.output) def add2file(self,content): self.text = '' with open(self.output,'a+') as file: file.write(self.__gen(content)) def __gen(self,content): if type(content) == type(''): return content elif type(content) == type([0,0])or type(content) == type((0,0)): return 
self.__readlist(content) elif type(content) == type({0:0}): return self.__readdict(content) def __readlist(self,list): for i in list: if type(i) == type([0,0])or type(i) == type((0,0)): self.__readlist(i) elif type(i) == type('') : self.text +=i return self.text def __readdict(self,dict): content=list(dict.items()) return self.__readlist(content) def stamp(): return datetime.datetime.now() def dtime(start,end,id=None): if id: pass print("%s time:%s" % (id,str(end - start))) else: print("Total time:%s" %str(end-start)[:-7]) def main(): start = stamp() parser = argparse.ArgumentParser(prog = __prog__, description = __description__ + __copyright__, conflict_handler = 'resolve') parser.add_argument('-v', '--version', action = 'version',version = __version__, help="show program's version number and exit") parser.add_argument('-p', '--path', metavar='PATH', dest='path', help="platform build output directory") parser.add_argument('-c', '--config',metavar='FILENAME', dest='config', help="firmware configuration file") parser.add_argument('-o', '--outputdir', metavar='PATH', dest='output', help="output directoy") options = parser.parse_args() if options.config: if options.path: if options.output: run = mainprocess(options.path, options.config, options.output) print("Running...") run.write_all() if WARNING: warning = list(set(WARNING)) for j in warning: print(j) if ERRORMSG: ERROR = list(set(ERRORMSG)) with open("ERROR.log", 'w+') as error: for i in ERROR: error.write(i + '\n') print("Some error find, error log in ERROR.log") print('Finished, Output files in directory %s'%os.path.abspath(options.output)) else: print('Error command, no output path, use -h for help') else: print('Error command, no build path input, use -h for help') else: print('Error command, no output file, use -h for help') end = stamp() dtime(start, end) if __name__ == '__main__': main()
edk2-master
BaseTools/Scripts/ConvertFceToStructurePcd.py
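For reference, the converter above is normally driven from its command line (-p platform build output directory, -c firmware configuration file, -o output directory), but the same flow can be exercised programmatically through the mainprocess class it defines. A minimal sketch, assuming an existing platform build tree and FCE config file; the three paths below are illustrative assumptions, not taken from a real build.

# Hypothetical driver for ConvertFceToStructurePcd; paths are assumptions.
from ConvertFceToStructurePcd import mainprocess

run = mainprocess(
    'Build/PlatformPkg/DEBUG_VS2019',  # -p: platform build output directory
    'Fce.config',                      # -c: firmware configuration file
    'StructurePcdOut')                 # -o: output directory
run.write_all()  # writes StructurePcd.dec, StructurePcd.dsc and StructurePcd.inf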
## # Generate symbal for SMI handler profile info. # # This tool depends on DIA2Dump.exe (VS) or nm (gcc) to parse debug entry. # # Copyright (c) 2017, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # ## from __future__ import print_function import os import re import sys from optparse import OptionParser from xml.dom.minidom import parse import xml.dom.minidom versionNumber = "1.1" __copyright__ = "Copyright (c) 2016, Intel Corporation. All rights reserved." class Symbols: def __init__(self): self.listLineAddress = [] self.pdbName = "" # Cache for function self.functionName = "" # Cache for line self.sourceName = "" def getSymbol (self, rva): index = 0 lineName = 0 sourceName = "??" while index + 1 < self.lineCount : if self.listLineAddress[index][0] <= rva and self.listLineAddress[index + 1][0] > rva : offset = rva - self.listLineAddress[index][0] functionName = self.listLineAddress[index][1] lineName = self.listLineAddress[index][2] sourceName = self.listLineAddress[index][3] if lineName == 0 : return [functionName] else : return [functionName, sourceName, lineName] index += 1 return [] def parse_debug_file(self, driverName, pdbName): if cmp (pdbName, "") == 0 : return self.pdbName = pdbName; try: nmCommand = "nm" nmLineOption = "-l" print("parsing (debug) - " + pdbName) os.system ('%s %s %s > nmDump.line.log' % (nmCommand, nmLineOption, pdbName)) except : print('ERROR: nm command not available. Please verify PATH') return # # parse line # linefile = open("nmDump.line.log") reportLines = linefile.readlines() linefile.close() # 000113ca T AllocatePool c:\home\edk-ii\MdePkg\Library\UefiMemoryAllocationLib\MemoryAllocationLib.c:399 patchLineFileMatchString = "([0-9a-fA-F]*)\s+[T|D|t|d]\s+(\w+)\s*((?:[a-zA-Z]:)?[\w+\-./_a-zA-Z0-9\\\\]*):?([0-9]*)" for reportLine in reportLines: match = re.match(patchLineFileMatchString, reportLine) if match is not None: rva = int (match.group(1), 16) functionName = match.group(2) sourceName = match.group(3) if cmp (match.group(4), "") != 0 : lineName = int (match.group(4)) else : lineName = 0 self.listLineAddress.append ([rva, functionName, lineName, sourceName]) self.lineCount = len (self.listLineAddress) self.listLineAddress = sorted(self.listLineAddress, key=lambda symbolAddress:symbolAddress[0]) def parse_pdb_file(self, driverName, pdbName): if cmp (pdbName, "") == 0 : return self.pdbName = pdbName; try: #DIA2DumpCommand = "\"C:\\Program Files (x86)\Microsoft Visual Studio 14.0\\DIA SDK\\Samples\\DIA2Dump\\x64\\Debug\\Dia2Dump.exe\"" DIA2DumpCommand = "Dia2Dump.exe" #DIA2SymbolOption = "-p" DIA2LinesOption = "-l" print("parsing (pdb) - " + pdbName) #os.system ('%s %s %s > DIA2Dump.symbol.log' % (DIA2DumpCommand, DIA2SymbolOption, pdbName)) os.system ('%s %s %s > DIA2Dump.line.log' % (DIA2DumpCommand, DIA2LinesOption, pdbName)) except : print('ERROR: DIA2Dump command not available. 
Please verify PATH') return # # parse line # linefile = open("DIA2Dump.line.log") reportLines = linefile.readlines() linefile.close() # ** GetDebugPrintErrorLevel # line 32 at [0000C790][0001:0000B790], len = 0x3 c:\home\edk-ii\mdepkg\library\basedebugprinterrorlevellib\basedebugprinterrorlevellib.c (MD5: 687C0AE564079D35D56ED5D84A6164CC) # line 36 at [0000C793][0001:0000B793], len = 0x5 # line 37 at [0000C798][0001:0000B798], len = 0x2 patchLineFileMatchString = "\s+line ([0-9]+) at \[([0-9a-fA-F]{8})\]\[[0-9a-fA-F]{4}\:[0-9a-fA-F]{8}\], len = 0x[0-9a-fA-F]+\s*([\w+\-\:./_a-zA-Z0-9\\\\]*)\s*" patchLineFileMatchStringFunc = "\*\*\s+(\w+)\s*" for reportLine in reportLines: match = re.match(patchLineFileMatchString, reportLine) if match is not None: if cmp (match.group(3), "") != 0 : self.sourceName = match.group(3) sourceName = self.sourceName functionName = self.functionName rva = int (match.group(2), 16) lineName = int (match.group(1)) self.listLineAddress.append ([rva, functionName, lineName, sourceName]) else : match = re.match(patchLineFileMatchStringFunc, reportLine) if match is not None: self.functionName = match.group(1) self.lineCount = len (self.listLineAddress) self.listLineAddress = sorted(self.listLineAddress, key=lambda symbolAddress:symbolAddress[0]) class SymbolsFile: def __init__(self): self.symbolsTable = {} symbolsFile = "" driverName = "" rvaName = "" symbolName = "" def getSymbolName(driverName, rva): global symbolsFile try : symbolList = symbolsFile.symbolsTable[driverName] if symbolList is not None: return symbolList.getSymbol (rva) else: return [] except Exception: return [] def myOptionParser(): usage = "%prog [--version] [-h] [--help] [-i inputfile [-o outputfile] [-g guidreffile]]" Parser = OptionParser(usage=usage, description=__copyright__, version="%prog " + str(versionNumber)) Parser.add_option("-i", "--inputfile", dest="inputfilename", type="string", help="The input memory profile info file output from MemoryProfileInfo application in MdeModulePkg") Parser.add_option("-o", "--outputfile", dest="outputfilename", type="string", help="The output memory profile info file with symbol, MemoryProfileInfoSymbol.txt will be used if it is not specified") Parser.add_option("-g", "--guidref", dest="guidreffilename", type="string", help="The input guid ref file output from build") (Options, args) = Parser.parse_args() if Options.inputfilename is None: Parser.error("no input file specified") if Options.outputfilename is None: Options.outputfilename = "SmiHandlerProfileInfoSymbol.xml" return Options dictGuid = { '00000000-0000-0000-0000-000000000000':'gZeroGuid', '2A571201-4966-47F6-8B86-F31E41F32F10':'gEfiEventLegacyBootGuid', '27ABF055-B1B8-4C26-8048-748F37BAA2DF':'gEfiEventExitBootServicesGuid', '7CE88FB3-4BD7-4679-87A8-A8D8DEE50D2B':'gEfiEventReadyToBootGuid', '02CE967A-DD7E-4FFC-9EE7-810CF0470880':'gEfiEndOfDxeEventGroupGuid', '60FF8964-E906-41D0-AFED-F241E974E08E':'gEfiDxeSmmReadyToLockProtocolGuid', '18A3C6DC-5EEA-48C8-A1C1-B53389F98999':'gEfiSmmSwDispatch2ProtocolGuid', '456D2859-A84B-4E47-A2EE-3276D886997D':'gEfiSmmSxDispatch2ProtocolGuid', '4CEC368E-8E8E-4D71-8BE1-958C45FC8A53':'gEfiSmmPeriodicTimerDispatch2ProtocolGuid', 'EE9B8D90-C5A6-40A2-BDE2-52558D33CCA1':'gEfiSmmUsbDispatch2ProtocolGuid', '25566B03-B577-4CBF-958C-ED663EA24380':'gEfiSmmGpiDispatch2ProtocolGuid', '7300C4A1-43F2-4017-A51B-C81A7F40585B':'gEfiSmmStandbyButtonDispatch2ProtocolGuid', '1B1183FA-1823-46A7-8872-9C578755409D':'gEfiSmmPowerButtonDispatch2ProtocolGuid', 
'58DC368D-7BFA-4E77-ABBC-0E29418DF930':'gEfiSmmIoTrapDispatch2ProtocolGuid', } def genGuidString(guidreffile): guidLines = guidreffile.readlines() for guidLine in guidLines: guidLineList = guidLine.split(" ") if len(guidLineList) == 2: guid = guidLineList[0] guidName = guidLineList[1] if guid not in dictGuid : dictGuid[guid] = guidName def createSym(symbolName): SymbolNode = xml.dom.minidom.Document().createElement("Symbol") SymbolFunction = xml.dom.minidom.Document().createElement("Function") SymbolFunctionData = xml.dom.minidom.Document().createTextNode(symbolName[0]) SymbolFunction.appendChild(SymbolFunctionData) SymbolNode.appendChild(SymbolFunction) if (len(symbolName)) >= 2: SymbolSourceFile = xml.dom.minidom.Document().createElement("SourceFile") SymbolSourceFileData = xml.dom.minidom.Document().createTextNode(symbolName[1]) SymbolSourceFile.appendChild(SymbolSourceFileData) SymbolNode.appendChild(SymbolSourceFile) if (len(symbolName)) >= 3: SymbolLineNumber = xml.dom.minidom.Document().createElement("LineNumber") SymbolLineNumberData = xml.dom.minidom.Document().createTextNode(str(symbolName[2])) SymbolLineNumber.appendChild(SymbolLineNumberData) SymbolNode.appendChild(SymbolLineNumber) return SymbolNode def main(): global symbolsFile global Options Options = myOptionParser() symbolsFile = SymbolsFile() try : DOMTree = xml.dom.minidom.parse(Options.inputfilename) except Exception: print("fail to open input " + Options.inputfilename) return 1 if Options.guidreffilename is not None: try : guidreffile = open(Options.guidreffilename) except Exception: print("fail to open guidref" + Options.guidreffilename) return 1 genGuidString(guidreffile) guidreffile.close() SmiHandlerProfile = DOMTree.documentElement SmiHandlerDatabase = SmiHandlerProfile.getElementsByTagName("SmiHandlerDatabase") SmiHandlerCategory = SmiHandlerDatabase[0].getElementsByTagName("SmiHandlerCategory") for smiHandlerCategory in SmiHandlerCategory: SmiEntry = smiHandlerCategory.getElementsByTagName("SmiEntry") for smiEntry in SmiEntry: if smiEntry.hasAttribute("HandlerType"): guidValue = smiEntry.getAttribute("HandlerType") if guidValue in dictGuid: smiEntry.setAttribute("HandlerType", dictGuid[guidValue]) SmiHandler = smiEntry.getElementsByTagName("SmiHandler") for smiHandler in SmiHandler: Module = smiHandler.getElementsByTagName("Module") Pdb = Module[0].getElementsByTagName("Pdb") if (len(Pdb)) >= 1: driverName = Module[0].getAttribute("Name") pdbName = Pdb[0].childNodes[0].data Module[0].removeChild(Pdb[0]) symbolsFile.symbolsTable[driverName] = Symbols() if cmp (pdbName[-3:], "pdb") == 0 : symbolsFile.symbolsTable[driverName].parse_pdb_file (driverName, pdbName) else : symbolsFile.symbolsTable[driverName].parse_debug_file (driverName, pdbName) Handler = smiHandler.getElementsByTagName("Handler") RVA = Handler[0].getElementsByTagName("RVA") print(" Handler RVA: %s" % RVA[0].childNodes[0].data) if (len(RVA)) >= 1: rvaName = RVA[0].childNodes[0].data symbolName = getSymbolName (driverName, int(rvaName, 16)) if (len(symbolName)) >= 1: SymbolNode = createSym(symbolName) Handler[0].appendChild(SymbolNode) Caller = smiHandler.getElementsByTagName("Caller") RVA = Caller[0].getElementsByTagName("RVA") print(" Caller RVA: %s" % RVA[0].childNodes[0].data) if (len(RVA)) >= 1: rvaName = RVA[0].childNodes[0].data symbolName = getSymbolName (driverName, int(rvaName, 16)) if (len(symbolName)) >= 1: SymbolNode = createSym(symbolName) Caller[0].appendChild(SymbolNode) try : newfile = open(Options.outputfilename, "w") except 
Exception: print("fail to open output" + Options.outputfilename) return 1 newfile.write(DOMTree.toprettyxml(indent = "\t", newl = "\n", encoding = "utf-8")) newfile.close() if __name__ == '__main__': sys.exit(main())
edk2-master
BaseTools/Scripts/SmiHandlerProfileSymbolGen.py
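Note that the symbol generator above leans on the Python 2 built-in cmp() (e.g. cmp(pdbName, "") and cmp(pdbName[-3:], "pdb")), which no longer exists in Python 3. A minimal shim, shown here as a sketch rather than a change to the script itself:

# Python 3 replacement for the Python 2 built-in cmp() used throughout
# SmiHandlerProfileSymbolGen.py: returns negative, zero, or positive.
def cmp(a, b):
    return (a > b) - (a < b)

# Mirrors the script's checks: cmp(x, y) == 0 means "equal".
assert cmp("", "") == 0
assert cmp("Driver.pdb"[-3:], "pdb") == 0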
#!/usr/bin/python3 ''' Copyright 2021 (c) Apple Inc. All rights reserved. SPDX-License-Identifier: BSD-2-Clause-Patent EFI gdb commands based on efi_debugging classes. Example usage: OvmfPkg/build.sh qemu -gdb tcp::9000 gdb -ex "target remote localhost:9000" -ex "source efi_gdb.py" (gdb) help efi Commands for debugging EFI. efi <cmd> List of efi subcommands: efi devicepath -- Display an EFI device path. efi guid -- Display info about EFI GUID's. efi hob -- Dump EFI HOBs. Type 'hob -h' for more info. efi symbols -- Load Symbols for EFI. Type 'efi_symbols -h' for more info. efi table -- Dump EFI System Tables. Type 'table -h' for more info. This module is coded against a generic gdb remote serial stub. It should work with QEMU, JTAG debugger, or a generic EFI gdb remote serial stub. If you are debugging with QEMU or a JTAG hardware debugger you can insert a CpuDeadLoop(); in your code, attach with gdb, and then `p Index=1` to step past. If you have a debug stub in EFI you can use CpuBreakpoint();. ''' from gdb.printing import RegexpCollectionPrettyPrinter from gdb.printing import register_pretty_printer import gdb import os import sys import uuid import optparse import shlex # gdb will not import from the same path as this script. # so lets fix that for gdb... sys.path.append(os.path.dirname(os.path.abspath(__file__))) from efi_debugging import PeTeImage, patch_ctypes # noqa: E402 from efi_debugging import EfiHob, GuidNames, EfiStatusClass # noqa: E402 from efi_debugging import EfiBootMode, EfiDevicePath # noqa: E402 from efi_debugging import EfiConfigurationTable, EfiTpl # noqa: E402 class GdbFileObject(object): '''Provide a file like object required by efi_debugging''' def __init__(self): self.inferior = gdb.selected_inferior() self.offset = 0 def tell(self): return self.offset def read(self, size=-1): if size == -1: # arbitrary default size size = 0x1000000 try: data = self.inferior.read_memory(self.offset, size) except MemoryError: data = bytearray(size) assert False if len(data) != size: raise MemoryError( f'gdb could not read memory 0x{size:x}' + f' bytes from 0x{self.offset:08x}') else: # convert memoryview object to a bytestring. 
return data.tobytes() def readable(self): return True def seek(self, offset, whence=0): if whence == 0: self.offset = offset elif whence == 1: self.offset += offset else: # whence == 2 is seek from end raise NotImplementedError def seekable(self): return True def write(self, data): self.inferior.write_memory(self.offset, data) return len(data) def writable(self): return True def truncate(self, size=None): raise NotImplementedError def flush(self): raise NotImplementedError def fileno(self): raise NotImplementedError class EfiSymbols: """Class to manage EFI Symbols""" loaded = {} stride = None range = None verbose = False def __init__(self, file=None): EfiSymbols.file = file if file else GdbFileObject() @ classmethod def __str__(cls): return ''.join(f'{value}\n' for value in cls.loaded.values()) @ classmethod def configure_search(cls, stride, range=None, verbose=False): cls.stride = stride cls.range = range cls.verbose = verbose @ classmethod def clear(cls): cls.loaded = {} @ classmethod def add_symbols_for_pecoff(cls, pecoff): '''Tell lldb the location of the .text and .data sections.''' if pecoff.TextAddress in cls.loaded: return 'Already Loaded: ' try: res = 'Loading Symbols Failed:' res = gdb.execute('add-symbol-file ' + pecoff.CodeViewPdb + ' ' + hex(pecoff.TextAddress) + ' -s .data ' + hex(pecoff.DataAddress), False, True) cls.loaded[pecoff.TextAddress] = pecoff if cls.verbose: print(f'\n{res:s}\n') return '' except gdb.error: return res @ classmethod def address_to_symbols(cls, address, reprobe=False): ''' Given an address search backwards for a PE/COFF (or TE) header and load symbols. Return a status string. ''' if not isinstance(address, int): address = int(address) pecoff = cls.address_in_loaded_pecoff(address) if not reprobe and pecoff is not None: # skip the probe of the remote return f'{pecoff} is already loaded' pecoff = PeTeImage(cls.file, None) if pecoff.pcToPeCoff(address, cls.stride, cls.range): res = cls.add_symbols_for_pecoff(pecoff) return f'{res}{pecoff}' else: return f'0x{address:08x} not in a PE/COFF (or TE) image' @ classmethod def address_in_loaded_pecoff(cls, address): if not isinstance(address, int): address = int(address) for value in cls.loaded.values(): if (address >= value.LoadAddress and address <= value.EndLoadAddress): return value return None @ classmethod def unload_symbols(cls, address): if not isinstance(address, int): address = int(address) pecoff = cls.address_in_loaded_pecoff(address) try: res = 'Unloading Symbols Failed:' res = gdb.execute( f'remove-symbol-file -a {hex(pecoff.TextAddress):s}', False, True) del cls.loaded[pecoff.LoadAddress] return res except gdb.error: return res class CHAR16_PrettyPrinter(object): def __init__(self, val): self.val = val def to_string(self): if int(self.val) < 0x20: return f"L'\\x{int(self.val):02x}'" else: return f"L'{chr(self.val):s}'" class EFI_TPL_PrettyPrinter(object): def __init__(self, val): self.val = val def to_string(self): return str(EfiTpl(int(self.val))) class EFI_STATUS_PrettyPrinter(object): def __init__(self, val): self.val = val def to_string(self): status = int(self.val) return f'{str(EfiStatusClass(status)):s} (0x{status:08x})' class EFI_BOOT_MODE_PrettyPrinter(object): def __init__(self, val): self.val = val def to_string(self): return str(EfiBootMode(int(self.val))) class EFI_GUID_PrettyPrinter(object): """Print 'EFI_GUID' as 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'""" def __init__(self, val): self.val = val def to_string(self): # if we could get a byte like object of *(unsigned char (*)[16]) # 
then we could just use uuid.UUID() to convert Data1 = int(self.val['Data1']) Data2 = int(self.val['Data2']) Data3 = int(self.val['Data3']) Data4 = self.val['Data4'] guid = f'{Data1:08X}-{Data2:04X}-' guid += f'{Data3:04X}-' guid += f'{int(Data4[0]):02X}{int(Data4[1]):02X}-' guid += f'{int(Data4[2]):02X}{int(Data4[3]):02X}' guid += f'{int(Data4[4]):02X}{int(Data4[5]):02X}' guid += f'{int(Data4[6]):02X}{int(Data4[7]):02X}' return str(GuidNames(guid)) def build_pretty_printer(): # Turn off via: disable pretty-printer global EFI pp = RegexpCollectionPrettyPrinter("EFI") # you can also tell gdb `x/sh <address>` to print CHAR16 string pp.add_printer('CHAR16', '^CHAR16$', CHAR16_PrettyPrinter) pp.add_printer('EFI_BOOT_MODE', '^EFI_BOOT_MODE$', EFI_BOOT_MODE_PrettyPrinter) pp.add_printer('EFI_GUID', '^EFI_GUID$', EFI_GUID_PrettyPrinter) pp.add_printer('EFI_STATUS', '^EFI_STATUS$', EFI_STATUS_PrettyPrinter) pp.add_printer('EFI_TPL', '^EFI_TPL$', EFI_TPL_PrettyPrinter) return pp class EfiDevicePathCmd (gdb.Command): """Display an EFI device path. Type 'efi devicepath -h' for more info""" def __init__(self): super(EfiDevicePathCmd, self).__init__( "efi devicepath", gdb.COMMAND_NONE) self.file = GdbFileObject() def create_options(self, arg, from_tty): usage = "usage: %prog [options] [arg]" description = ( "Command that can load EFI PE/COFF and TE image symbols. ") self.parser = optparse.OptionParser( description=description, prog='efi devicepath', usage=usage, add_help_option=False) self.parser.add_option( '-v', '--verbose', action='store_true', dest='verbose', help='hex dump extra data', default=False) self.parser.add_option( '-n', '--node', action='store_true', dest='node', help='dump a single device path node', default=False) self.parser.add_option( '-h', '--help', action='store_true', dest='help', help='Show help for the command', default=False) return self.parser.parse_args(shlex.split(arg)) def invoke(self, arg, from_tty): '''gdb command to dump EFI device paths''' try: (options, _) = self.create_options(arg, from_tty) if options.help: self.parser.print_help() return dev_addr = int(gdb.parse_and_eval(arg)) except ValueError: print("Invalid argument!") return if options.node: print(EfiDevicePath( self.file).device_path_node_str(dev_addr, options.verbose)) else: device_path = EfiDevicePath(self.file, dev_addr, options.verbose) if device_path.valid(): print(device_path) class EfiGuidCmd (gdb.Command): """Display info about EFI GUID's. Type 'efi guid -h' for more info""" def __init__(self): super(EfiGuidCmd, self).__init__("efi guid", gdb.COMMAND_NONE, gdb.COMPLETE_EXPRESSION) self.file = GdbFileObject() def create_options(self, arg, from_tty): usage = "usage: %prog [options] [arg]" description = ( "Show EFI_GUID values and the C name of the EFI_GUID variables" "in the C code. If symbols are loaded the Guid.xref file" "can be processed and the complete GUID database can be shown." 
"This command also suports generating new GUID's, and showing" "the value used to initialize the C variable.") self.parser = optparse.OptionParser( description=description, prog='efi guid', usage=usage, add_help_option=False) self.parser.add_option( '-n', '--new', action='store_true', dest='new', help='Generate a new GUID', default=False) self.parser.add_option( '-v', '--verbose', action='store_true', dest='verbose', help='Also display GUID C structure values', default=False) self.parser.add_option( '-h', '--help', action='store_true', dest='help', help='Show help for the command', default=False) return self.parser.parse_args(shlex.split(arg)) def invoke(self, arg, from_tty): '''gdb command to dump EFI System Tables''' try: (options, args) = self.create_options(arg, from_tty) if options.help: self.parser.print_help() return if len(args) >= 1: # guid { 0x414e6bdd, 0xe47b, 0x47cc, # { 0xb2, 0x44, 0xbb, 0x61, 0x02, 0x0c,0xf5, 0x16 }} # this generates multiple args guid = ' '.join(args) except ValueError: print('bad arguments!') return if options.new: guid = uuid.uuid4() print(str(guid).upper()) print(GuidNames.to_c_guid(guid)) return if len(args) > 0: if GuidNames.is_guid_str(arg): # guid 05AD34BA-6F02-4214-952E-4DA0398E2BB9 key = guid.upper() name = GuidNames.to_name(key) elif GuidNames.is_c_guid(arg): # guid { 0x414e6bdd, 0xe47b, 0x47cc, # { 0xb2, 0x44, 0xbb, 0x61, 0x02, 0x0c,0xf5, 0x16 }} key = GuidNames.from_c_guid(arg) name = GuidNames.to_name(key) else: # guid gEfiDxeServicesTableGuid name = guid try: key = GuidNames.to_guid(name) name = GuidNames.to_name(key) except ValueError: return extra = f'{GuidNames.to_c_guid(key)}: ' if options.verbose else '' print(f'{key}: {extra}{name}') else: for key, value in GuidNames._dict_.items(): if options.verbose: extra = f'{GuidNames.to_c_guid(key)}: ' else: extra = '' print(f'{key}: {extra}{value}') class EfiHobCmd (gdb.Command): """Dump EFI HOBs. Type 'hob -h' for more info.""" def __init__(self): super(EfiHobCmd, self).__init__("efi hob", gdb.COMMAND_NONE) self.file = GdbFileObject() def create_options(self, arg, from_tty): usage = "usage: %prog [options] [arg]" description = ( "Command that can load EFI PE/COFF and TE image symbols. ") self.parser = optparse.OptionParser( description=description, prog='efi hob', usage=usage, add_help_option=False) self.parser.add_option( '-a', '--address', type="int", dest='address', help='Parse HOBs from address', default=None) self.parser.add_option( '-t', '--type', type="int", dest='type', help='Only dump HOBS of his type', default=None) self.parser.add_option( '-v', '--verbose', action='store_true', dest='verbose', help='hex dump extra data', default=False) self.parser.add_option( '-h', '--help', action='store_true', dest='help', help='Show help for the command', default=False) return self.parser.parse_args(shlex.split(arg)) def invoke(self, arg, from_tty): '''gdb command to dump EFI System Tables''' try: (options, _) = self.create_options(arg, from_tty) if options.help: self.parser.print_help() return except ValueError: print('bad arguments!') return if options.address: try: value = gdb.parse_and_eval(options.address) address = int(value) except ValueError: address = None else: address = None hob = EfiHob(self.file, address, options.verbose).get_hob_by_type(options.type) print(hob) class EfiTablesCmd (gdb.Command): """Dump EFI System Tables. 
Type 'table -h' for more info.""" def __init__(self): super(EfiTablesCmd, self).__init__("efi table", gdb.COMMAND_NONE) self.file = GdbFileObject() def create_options(self, arg, from_tty): usage = "usage: %prog [options] [arg]" description = "Dump EFI System Tables. Requires symbols to be loaded" self.parser = optparse.OptionParser( description=description, prog='efi table', usage=usage, add_help_option=False) self.parser.add_option( '-h', '--help', action='store_true', dest='help', help='Show help for the command', default=False) return self.parser.parse_args(shlex.split(arg)) def invoke(self, arg, from_tty): '''gdb command to dump EFI System Tables''' try: (options, _) = self.create_options(arg, from_tty) if options.help: self.parser.print_help() return except ValueError: print('bad arguments!') return gST = gdb.lookup_global_symbol('gST') if gST is None: print('Error: This command requires symbols for gST to be loaded') return table = EfiConfigurationTable( self.file, int(gST.value(gdb.selected_frame()))) if table: print(table, '\n') class EfiSymbolsCmd (gdb.Command): """Load Symbols for EFI. Type 'efi symbols -h' for more info.""" def __init__(self): super(EfiSymbolsCmd, self).__init__("efi symbols", gdb.COMMAND_NONE, gdb.COMPLETE_EXPRESSION) self.file = GdbFileObject() self.gST = None self.efi_symbols = EfiSymbols(self.file) def create_options(self, arg, from_tty): usage = "usage: %prog [options]" description = ( "Command that can load EFI PE/COFF and TE image symbols. " "If you are having trouble in PEI try adding --pei. " "Given any address search backward for the PE/COFF (or TE header) " "and then parse the PE/COFF image to get debug info. " "The address can come from the current pc, pc values in the " "frame, or an address provided to the command" "") self.parser = optparse.OptionParser( description=description, prog='efi symbols', usage=usage, add_help_option=False) self.parser.add_option( '-a', '--address', type="str", dest='address', help='Load symbols for image that contains address', default=None) self.parser.add_option( '-c', '--clear', action='store_true', dest='clear', help='Clear the cache of loaded images', default=False) self.parser.add_option( '-f', '--frame', action='store_true', dest='frame', help='Load symbols for current stack frame', default=False) self.parser.add_option( '-p', '--pc', action='store_true', dest='pc', help='Load symbols for pc', default=False) self.parser.add_option( '--pei', action='store_true', dest='pei', help='Load symbols for PEI (searches every 4 bytes)', default=False) self.parser.add_option( '-e', '--extended', action='store_true', dest='extended', help='Try to load all symbols based on config tables', default=False) self.parser.add_option( '-r', '--range', type="long", dest='range', help='How far to search backward for start of PE/COFF Image', default=None) self.parser.add_option( '-s', '--stride', type="long", dest='stride', help='Boundary to search for PE/COFF header', default=None) self.parser.add_option( '-t', '--thread', action='store_true', dest='thread', help='Load symbols for the frames of all threads', default=False) self.parser.add_option( '-v', '--verbose', action='store_true', dest='verbose', help='Show more info on symbols loading in gdb', default=False) self.parser.add_option( '-h', '--help', action='store_true', dest='help', help='Show help for the command', default=False) return self.parser.parse_args(shlex.split(arg)) def save_user_state(self): self.pagination = gdb.parameter("pagination") if self.pagination: 
gdb.execute("set pagination off") self.user_selected_thread = gdb.selected_thread() self.user_selected_frame = gdb.selected_frame() def restore_user_state(self): self.user_selected_thread.switch() self.user_selected_frame.select() if self.pagination: gdb.execute("set pagination on") def canonical_address(self, address): ''' Scrub out 48-bit non canonical addresses Raw frames in gdb can have some funky values ''' # Skip lowest 256 bytes to avoid interrupt frames if address > 0xFF and address < 0x00007FFFFFFFFFFF: return True if address >= 0xFFFF800000000000: return True return False def pc_set_for_frames(self): '''Return a set for the PC's in the current frame''' pc_list = [] frame = gdb.newest_frame() while frame: pc = int(frame.read_register('pc')) if self.canonical_address(pc): pc_list.append(pc) frame = frame.older() return set(pc_list) def invoke(self, arg, from_tty): '''gdb command to symbolicate all the frames from all the threads''' try: (options, _) = self.create_options(arg, from_tty) if options.help: self.parser.print_help() return except ValueError: print('bad arguments!') return self.dont_repeat() self.save_user_state() if options.clear: self.efi_symbols.clear() return if options.pei: # XIP code can be 4 byte aligned in the FV options.stride = 4 options.range = 0x100000 self.efi_symbols.configure_search(options.stride, options.range, options.verbose) if options.thread: thread_list = gdb.selected_inferior().threads() else: thread_list = (gdb.selected_thread(),) address = None if options.address: value = gdb.parse_and_eval(options.address) address = int(value) elif options.pc: address = gdb.selected_frame().pc() if address: res = self.efi_symbols.address_to_symbols(address) print(res) else: for thread in thread_list: thread.switch() # You can not iterate over frames as you load symbols. Loading # symbols changes the frames gdb can see due to inlining and # boom. So we loop adding symbols for the current frame, and # we test to see if new frames have shown up. If new frames # show up we process those new frames. Thus 1st pass is the # raw frame, and other passes are only new PC values. NewPcSet = self.pc_set_for_frames() while NewPcSet: PcSet = self.pc_set_for_frames() for pc in NewPcSet: res = self.efi_symbols.address_to_symbols(pc) print(res) NewPcSet = PcSet.symmetric_difference( self.pc_set_for_frames()) # find the EFI System tables the 1st time if self.gST is None: gST = gdb.lookup_global_symbol('gST') if gST is not None: self.gST = int(gST.value(gdb.selected_frame())) table = EfiConfigurationTable(self.file, self.gST) else: table = None else: table = EfiConfigurationTable(self.file, self.gST) if options.extended and table: # load symbols from EFI System Table entry for address, _ in table.DebugImageInfo(): res = self.efi_symbols.address_to_symbols(address) print(res) # sync up the GUID database from the build output for m in gdb.objfiles(): if GuidNames.add_build_guid_file(str(m.filename)): break self.restore_user_state() class EfiCmd (gdb.Command): """Commands for debugging EFI. efi <cmd>""" def __init__(self): super(EfiCmd, self).__init__("efi", gdb.COMMAND_NONE, gdb.COMPLETE_NONE, True) def invoke(self, arg, from_tty): '''default to loading symbols''' if '-h' in arg or '--help' in arg: gdb.execute('help efi') else: # default to loading all symbols gdb.execute('efi symbols --extended') class LoadEmulatorEfiSymbols(gdb.Breakpoint): ''' breakpoint for EmulatorPkg to load symbols Note: make sure SecGdbScriptBreak is not optimized away! 
Also turn off the dlopen() flow like on macOS. ''' def stop(self): symbols = EfiSymbols() # Emulator adds SizeOfHeaders so we need file alignment to search symbols.configure_search(0x20) frame = gdb.newest_frame() try: # gdb was looking at spill address, pre spill :( LoadAddress = frame.read_register('rdx') AddSymbolFlag = frame.read_register('rcx') except gdb.error: LoadAddress = frame.read_var('LoadAddress') AddSymbolFlag = frame.read_var('AddSymbolFlag') if AddSymbolFlag == 1: res = symbols.address_to_symbols(LoadAddress) else: res = symbols.unload_symbols(LoadAddress) print(res) # keep running return False # Get python backtraces to debug errors in this script gdb.execute("set python print-stack full") # tell efi_debugging how to walk data structures with pointers try: pointer_width = gdb.lookup_type('int').pointer().sizeof except ValueError: pointer_width = 8 patch_ctypes(pointer_width) register_pretty_printer(None, build_pretty_printer(), replace=True) # gdb commands that we are adding # add `efi` prefix gdb command EfiCmd() # subcommands for `efi` EfiSymbolsCmd() EfiTablesCmd() EfiHobCmd() EfiDevicePathCmd() EfiGuidCmd() # bp = LoadEmulatorEfiSymbols('SecGdbScriptBreak', internal=True) if bp.pending: try: gdb.selected_frame() # Not the emulator so do this when you attach gdb.execute('efi symbols --frame --extended', True) gdb.execute('bt') # If you want to skip the above commands comment them out pass except gdb.error: # If you load the script and there is no target ignore the error. pass else: # start the emulator gdb.execute('run')
edk2-master
BaseTools/Scripts/efi_gdb.py
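The pretty-printer machinery above is extensible: build_pretty_printer() fills one RegexpCollectionPrettyPrinter keyed by type name, and register_pretty_printer() hands it to gdb. A sketch of adding a printer for one more type in the same style; EFI_PHYSICAL_ADDRESS is an assumed example type, and the snippet only runs inside a gdb session (it imports the gdb-provided module):

# Sketch only: extends the script's pretty-printer pattern to a new type.
# EFI_PHYSICAL_ADDRESS is an assumption used for illustration.
from gdb.printing import RegexpCollectionPrettyPrinter, register_pretty_printer


class EFI_PHYSICAL_ADDRESS_PrettyPrinter(object):
    def __init__(self, val):
        self.val = val

    def to_string(self):
        # render as fixed-width hex, matching the script's formatting style
        return f'0x{int(self.val):016x}'


pp = RegexpCollectionPrettyPrinter("EFI-extra")
pp.add_printer('EFI_PHYSICAL_ADDRESS', '^EFI_PHYSICAL_ADDRESS$',
               EFI_PHYSICAL_ADDRESS_PrettyPrinter)
register_pretty_printer(None, pp, replace=True)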
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
edk2-master
BaseTools/Scripts/PackageDocumentTools/__init__.py
## @file # This module provide command line entry for generating package document! # # Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR> # # SPDX-License-Identifier: BSD-2-Clause-Patent # from __future__ import print_function import os, sys, logging, traceback, subprocess from optparse import OptionParser from plugins.EdkPlugins.edk2.model import baseobject from plugins.EdkPlugins.edk2.model import doxygengen gArchMarcoDict = {'ALL' : 'MDE_CPU_IA32 MDE_CPU_X64 MDE_CPU_EBC MDE_CPU_IPF _MSC_EXTENSIONS __GNUC__ __INTEL_COMPILER', 'IA32_MSFT': 'MDE_CPU_IA32 _MSC_EXTENSIONS', 'IA32_GNU' : 'MDE_CPU_IA32 __GNUC__', 'X64_MSFT' : 'MDE_CPU_X64 _MSC_EXTENSIONS ASM_PFX= OPTIONAL= ', 'X64_GNU' : 'MDE_CPU_X64 __GNUC__ ASM_PFX= OPTIONAL= ', 'IPF_MSFT' : 'MDE_CPU_IPF _MSC_EXTENSIONS ASM_PFX= OPTIONAL= ', 'IPF_GNU' : 'MDE_CPU_IPF __GNUC__ ASM_PFX= OPTIONAL= ', 'EBC_INTEL': 'MDE_CPU_EBC __INTEL_COMPILER ASM_PFX= OPTIONAL= '} def parseCmdArgs(): parser = OptionParser(version="Package Document Generation Tools - Version 0.1") parser.add_option('-w', '--workspace', action='store', type='string', dest='WorkspacePath', help='Specify workspace absolute path. For example: c:\\tianocore') parser.add_option('-p', '--decfile', action='store', dest='PackagePath', help='Specify the absolute path for package DEC file. For example: c:\\tianocore\\MdePkg\\MdePkg.dec') parser.add_option('-x', '--doxygen', action='store', dest='DoxygenPath', help='Specify the absolute path of doxygen tools installation. For example: C:\\Program Files\\doxygen\bin\doxygen.exe') parser.add_option('-o', '--output', action='store', dest='OutputPath', help='Specify the document output path. For example: c:\\docoutput') parser.add_option('-a', '--arch', action='store', dest='Arch', choices=list(gArchMarcoDict.keys()), help='Specify the architecture used in preprocess package\'s source. For example: -a IA32_MSFT') parser.add_option('-m', '--mode', action='store', dest='DocumentMode', choices=['CHM', 'HTML'], help='Specify the document mode from : CHM or HTML') parser.add_option('-i', '--includeonly', action='store_true', dest='IncludeOnly', help='Only generate document for package\'s public interfaces produced by include folder. ') parser.add_option('-c', '--htmlworkshop', dest='HtmlWorkshopPath', help='Specify the absolute path for Microsoft HTML Workshop\'s hhc.exe file. For example: C:\\Program Files\\HTML Help Workshop\\hhc.exe') (options, args) = parser.parse_args() # validate the options errors = [] if options.WorkspacePath is None: errors.append('- Please specify workspace path via option -w!') elif not os.path.exists(options.WorkspacePath): errors.append("- Invalid workspace path %s! The workspace path should be exist in absolute path!" % options.WorkspacePath) if options.PackagePath is None: errors.append('- Please specify package DEC file path via option -p!') elif not os.path.exists(options.PackagePath): errors.append("- Invalid package's DEC file path %s! The DEC path should be exist in absolute path!" % options.PackagePath) default = "C:\\Program Files\\doxygen\\bin\\doxygen.exe" if options.DoxygenPath is None: if os.path.exists(default): print("Warning: Assume doxygen tool is installed at %s. If not, please specify via -x" % default) options.DoxygenPath = default else: errors.append('- Please specify the path of doxygen tool installation via option -x! or install it in default path %s' % default) elif not os.path.exists(options.DoxygenPath): errors.append("- Invalid doxygen tool path %s! 
The doxygen tool path should be exist in absolute path!" % options.DoxygenPath) if options.OutputPath is not None: if not os.path.exists(options.OutputPath): # create output try: os.makedirs(options.OutputPath) except: errors.append('- Fail to create the output directory %s' % options.OutputPath) else: if options.PackagePath is not None and os.path.exists(options.PackagePath): dirpath = os.path.dirname(options.PackagePath) default = os.path.join (dirpath, "Document") print('Warning: Assume document output at %s. If not, please specify via option -o' % default) options.OutputPath = default if not os.path.exists(default): try: os.makedirs(default) except: errors.append('- Fail to create default output directory %s! Please specify document output diretory via option -o' % default) else: errors.append('- Please specify document output path via option -o!') if options.Arch is None: options.Arch = 'ALL' print("Warning: Assume arch is \"ALL\". If not, specify via -a") if options.DocumentMode is None: options.DocumentMode = "HTML" print("Warning: Assume document mode is \"HTML\". If not, specify via -m") if options.IncludeOnly is None: options.IncludeOnly = False print("Warning: Assume generate package document for all package\'s source including publich interfaces and implementation libraries and modules.") if options.DocumentMode.lower() == 'chm': default = "C:\\Program Files\\HTML Help Workshop\\hhc.exe" if options.HtmlWorkshopPath is None: if os.path.exists(default): print('Warning: Assume the installation path of Microsoft HTML Workshop is %s. If not, specify via option -c.' % default) options.HtmlWorkshopPath = default else: errors.append('- Please specify the installation path of Microsoft HTML Workshop via option -c!') elif not os.path.exists(options.HtmlWorkshopPath): errors.append('- The installation path of Microsoft HTML Workshop %s does not exists. 
' % options.HtmlWorkshopPath) if len(errors) != 0: print('\n') parser.error('Fail to start due to following reasons: \n%s' %'\n'.join(errors)) return (options.WorkspacePath, options.PackagePath, options.DoxygenPath, options.OutputPath, options.Arch, options.DocumentMode, options.IncludeOnly, options.HtmlWorkshopPath) def createPackageObject(wsPath, pkgPath): try: pkgObj = baseobject.Package(None, wsPath) pkgObj.Load(pkgPath) except: logging.getLogger().error ('Fail to create package object!') return None return pkgObj def callbackLogMessage(msg, level): print(msg.strip()) def callbackCreateDoxygenProcess(doxPath, configPath): if sys.platform == 'win32': cmd = '"%s" %s' % (doxPath, configPath) else: cmd = '%s %s' % (doxPath, configPath) print(cmd) subprocess.call(cmd, shell=True) def DocumentFixup(outPath, arch): # find BASE_LIBRARY_JUMP_BUFFER structure reference page print('\n >>> Start fixup document \n') for root, dirs, files in os.walk(outPath): for dir in dirs: if dir.lower() in ['.svn', '_svn', 'cvs']: dirs.remove(dir) for file in files: if not file.lower().endswith('.html'): continue fullpath = os.path.join(outPath, root, file) try: f = open(fullpath, 'r') text = f.read() f.close() except: logging.getLogger().error('\nFail to open file %s\n' % fullpath) continue if arch.lower() == 'all': if text.find('BASE_LIBRARY_JUMP_BUFFER Struct Reference') != -1: FixPageBASE_LIBRARY_JUMP_BUFFER(fullpath, text) if text.find('MdePkg/Include/Library/BaseLib.h File Reference') != -1: FixPageBaseLib(fullpath, text) if text.find('IA32_IDT_GATE_DESCRIPTOR Union Reference') != -1: FixPageIA32_IDT_GATE_DESCRIPTOR(fullpath, text) if text.find('MdePkg/Include/Library/UefiDriverEntryPoint.h File Reference') != -1: FixPageUefiDriverEntryPoint(fullpath, text) if text.find('MdePkg/Include/Library/UefiApplicationEntryPoint.h File Reference') != -1: FixPageUefiApplicationEntryPoint(fullpath, text) print(' >>> Finish all document fixing up! 
\n') def FixPageBaseLib(path, text): print(' >>> Fixup BaseLib file page at file %s \n' % path) lines = text.split('\n') lastBaseJumpIndex = -1 lastIdtGateDescriptor = -1 for index in range(len(lines) - 1, -1, -1): line = lines[index] if line.strip() == '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT&nbsp;&nbsp;&nbsp;4 </td>': lines[index] = '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT&nbsp;&nbsp;&nbsp;4&nbsp;[IA32] </td>' if line.strip() == '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT&nbsp;&nbsp;&nbsp;0x10 </td>': lines[index] = '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT&nbsp;&nbsp;&nbsp;0x10&nbsp;[IPF] </td>' if line.strip() == '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT&nbsp;&nbsp;&nbsp;8 </td>': lines[index] = '<td class="memname">#define BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT&nbsp;&nbsp;&nbsp;9&nbsp;[EBC, x64] </td>' if line.find('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a>&nbsp;&nbsp;&nbsp;4') != -1: lines[index] = lines[index].replace('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a>&nbsp;&nbsp;&nbsp;4', 'BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a>&nbsp;&nbsp;&nbsp;4&nbsp;[IA32]') if line.find('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a>&nbsp;&nbsp;&nbsp;0x10') != -1: lines[index] = lines[index].replace('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a>&nbsp;&nbsp;&nbsp;0x10', 'BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a>&nbsp;&nbsp;&nbsp;0x10&nbsp;[IPF]') if line.find('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a>&nbsp;&nbsp;&nbsp;8') != -1: lines[index] = lines[index].replace('BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a>&nbsp;&nbsp;&nbsp;8', 'BASE_LIBRARY_JUMP_BUFFER_ALIGNMENT</a>&nbsp;&nbsp;&nbsp;8&nbsp;[x64, EBC]') if line.find('>BASE_LIBRARY_JUMP_BUFFER</a>') != -1: if lastBaseJumpIndex != -1: del lines[lastBaseJumpIndex] lastBaseJumpIndex = index if line.find('>IA32_IDT_GATE_DESCRIPTOR</a></td>') != -1: if lastIdtGateDescriptor != -1: del lines[lastIdtGateDescriptor] lastIdtGateDescriptor = index try: f = open(path, 'w') f.write('\n'.join(lines)) f.close() except: logging.getLogger().error(" <<< Fail to fixup file %s\n" % path) return print(" <<< Finish to fixup file %s\n" % path) def FixPageIA32_IDT_GATE_DESCRIPTOR(path, text): print(' >>> Fixup structure reference IA32_IDT_GATE_DESCRIPTOR at file %s \n' % path) lines = text.split('\n') for index in range(len(lines) - 1, -1, -1): line = lines[index].strip() if line.find('struct {</td>') != -1 and lines[index - 2].find('>Uint64</a></td>') != -1: lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For X64</h2></td></tr>') if line.find('struct {</td>') != -1 and lines[index - 1].find('Data Fields') != -1: lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For IA32</h2></td></tr>') try: f = open(path, 'w') f.write('\n'.join(lines)) f.close() except: logging.getLogger().error(" <<< Fail to fixup file %s\n" % path) return print(" <<< Finish to fixup file %s\n" % path) def FixPageBASE_LIBRARY_JUMP_BUFFER(path, text): print(' >>> Fixup structure reference BASE_LIBRARY_JUMP_BUFFER at file %s \n' % path) lines = text.split('\n') bInDetail = True bNeedRemove = False for index in range(len(lines) - 1, -1, -1): line = lines[index] if line.find('Detailed Description') != -1: bInDetail = False if line.startswith('EBC context buffer used by') and lines[index - 1].startswith('x64 context buffer'): lines[index] = "IA32/IPF/X64/" + line bNeedRemove = True if line.startswith("x64 context buffer") or line.startswith('IPF context buffer used by') or \ line.startswith('IA32 context buffer used 
by'): if bNeedRemove: lines.remove(line) if line.find('>R0</a>') != -1 and not bInDetail: if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For EBC</h2></td></tr>': lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For EBC</h2></td></tr>') if line.find('>Rbx</a>') != -1 and not bInDetail: if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For X64</h2></td></tr>': lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For X64</h2></td></tr>') if line.find('>F2</a>') != -1 and not bInDetail: if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For IPF</h2></td></tr>': lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For IPF</h2></td></tr>') if line.find('>Ebx</a>') != -1 and not bInDetail: if lines[index - 1] != '<tr><td colspan="2"><br><h2>Data Fields For IA32</h2></td></tr>': lines.insert(index, '<tr><td colspan="2"><br><h2>Data Fields For IA32</h2></td></tr>') try: f = open(path, 'w') f.write('\n'.join(lines)) f.close() except: logging.getLogger().error(" <<< Fail to fixup file %s" % path) return print(" <<< Finish to fixup file %s\n" % path) def FixPageUefiDriverEntryPoint(path, text): print(' >>> Fixup file reference MdePkg/Include/Library/UefiDriverEntryPoint.h at file %s \n' % path) lines = text.split('\n') bInModuleEntry = False bInEfiMain = False ModuleEntryDlCount = 0 ModuleEntryDelStart = 0 ModuleEntryDelEnd = 0 EfiMainDlCount = 0 EfiMainDelStart = 0 EfiMainDelEnd = 0 for index in range(len(lines)): line = lines[index].strip() if line.find('EFI_STATUS</a> EFIAPI _ModuleEntryPoint </td>') != -1: bInModuleEntry = True if line.find('EFI_STATUS</a> EFIAPI EfiMain </td>') != -1: bInEfiMain = True if line.startswith('<p>References <a'): if bInModuleEntry: ModuleEntryDelEnd = index - 1 bInModuleEntry = False elif bInEfiMain: EfiMainDelEnd = index - 1 bInEfiMain = False if bInModuleEntry: if line.startswith('</dl>'): ModuleEntryDlCount = ModuleEntryDlCount + 1 if ModuleEntryDlCount == 1: ModuleEntryDelStart = index + 1 if bInEfiMain: if line.startswith('</dl>'): EfiMainDlCount = EfiMainDlCount + 1 if EfiMainDlCount == 1: EfiMainDelStart = index + 1 if EfiMainDelEnd > EfiMainDelStart: for index in range(EfiMainDelEnd, EfiMainDelStart, -1): del lines[index] if ModuleEntryDelEnd > ModuleEntryDelStart: for index in range(ModuleEntryDelEnd, ModuleEntryDelStart, -1): del lines[index] try: f = open(path, 'w') f.write('\n'.join(lines)) f.close() except: logging.getLogger().error(" <<< Fail to fixup file %s" % path) return print(" <<< Finish to fixup file %s\n" % path) def FixPageUefiApplicationEntryPoint(path, text): print(' >>> Fixup file reference MdePkg/Include/Library/UefiApplicationEntryPoint.h at file %s \n' % path) lines = text.split('\n') bInModuleEntry = False bInEfiMain = False ModuleEntryDlCount = 0 ModuleEntryDelStart = 0 ModuleEntryDelEnd = 0 EfiMainDlCount = 0 EfiMainDelStart = 0 EfiMainDelEnd = 0 for index in range(len(lines)): line = lines[index].strip() if line.find('EFI_STATUS</a> EFIAPI _ModuleEntryPoint </td>') != -1: bInModuleEntry = True if line.find('EFI_STATUS</a> EFIAPI EfiMain </td>') != -1: bInEfiMain = True if line.startswith('<p>References <a'): if bInModuleEntry: ModuleEntryDelEnd = index - 1 bInModuleEntry = False elif bInEfiMain: EfiMainDelEnd = index - 1 bInEfiMain = False if bInModuleEntry: if line.startswith('</dl>'): ModuleEntryDlCount = ModuleEntryDlCount + 1 if ModuleEntryDlCount == 1: ModuleEntryDelStart = index + 1 if bInEfiMain: if line.startswith('</dl>'): EfiMainDlCount = EfiMainDlCount + 
1 if EfiMainDlCount == 1: EfiMainDelStart = index + 1 if EfiMainDelEnd > EfiMainDelStart: for index in range(EfiMainDelEnd, EfiMainDelStart, -1): del lines[index] if ModuleEntryDelEnd > ModuleEntryDelStart: for index in range(ModuleEntryDelEnd, ModuleEntryDelStart, -1): del lines[index] try: f = open(path, 'w') f.write('\n'.join(lines)) f.close() except: logging.getLogger().error(" <<< Fail to fixup file %s" % path) return print(" <<< Finish to fixup file %s\n" % path) if __name__ == '__main__': wspath, pkgpath, doxpath, outpath, archtag, docmode, isinc, hwpath = parseCmdArgs() # configure logging system logfilepath = os.path.join(outpath, 'log.txt') logging.basicConfig(format='%(levelname)-8s %(message)s', level=logging.DEBUG) # create package model object firstly pkgObj = createPackageObject(wspath, pkgpath) if pkgObj is None: sys.exit(-1) # create doxygen action model arch = None tooltag = None if archtag.lower() != 'all': arch = archtag.split('_')[0] tooltag = archtag.split('_')[1] else: arch = 'all' tooltag = 'all' # preprocess package and call doxygen try: action = doxygengen.PackageDocumentAction(doxpath, hwpath, outpath, pkgObj, docmode, callbackLogMessage, arch, tooltag, isinc, True) action.RegisterCallbackDoxygenProcess(callbackCreateDoxygenProcess) action.Generate() except: message = traceback.format_exception(*sys.exc_info()) logging.getLogger().error('Fail to create doxygen action! \n%s' % ''.join(message)) sys.exit(-1) DocumentFixup(outpath, arch) # generate CHM is necessary if docmode.lower() == 'chm': indexpath = os.path.join(outpath, 'html', 'index.hhp') if sys.platform == 'win32': cmd = '"%s" %s' % (hwpath, indexpath) else: cmd = '%s %s' % (hwpath, indexpath) subprocess.call(cmd) print('\nFinish to generate package document! Please open %s for review' % os.path.join(outpath, 'html', 'index.chm')) else: print('\nFinish to generate package document! Please open %s for review' % os.path.join(outpath, 'html', 'index.html'))
edk2-master
BaseTools/Scripts/PackageDocumentTools/packagedoc_cli.py
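A note on the CHM step in the driver above: the command is assembled as a single string, and on non-Windows hosts subprocess.call() then treats the whole string as the program name, which fails unless shell=True is used. A minimal sketch of the same step with list-style arguments, which sidesteps both the quoting and the shell issue (the hhc.exe path and output directory are illustrative assumptions, not taken from the tool):

import os
import subprocess

def compile_chm(hwpath, outpath):
    # Run HTML Help Workshop on the generated index.hhp project file.
    indexpath = os.path.join(outpath, 'html', 'index.hhp')
    # A list argv lets subprocess spawn the tool directly and handles
    # paths containing spaces on any platform.
    return subprocess.call([hwpath, indexpath])

# Example (hypothetical paths):
# compile_chm(r'C:\Program Files (x86)\HTML Help Workshop\hhc.exe', 'Build/Doc')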
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/__init__.py
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/__init__.py
## @file # # Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR> # # SPDX-License-Identifier: BSD-2-Clause-Patent # from __future__ import print_function import array import uuid import re import os import logging import core.pe as pe def GetLogger(): return logging.getLogger('EFI Binary File') class EFIBinaryError(Exception): def __init__(self, message): Exception.__init__(self) self._message = message def GetMessage(self): return self._message class EfiFd(object): EFI_FV_HEADER_SIZE = 0x48 def __init__(self): self._fvs = [] def Load(self, fd, size): index = fd.tell() while (index + self.EFI_FV_HEADER_SIZE < size): fv = EfiFv(self) fv.Load(fd) self._fvs.append(fv) index += fv.GetHeader().GetFvLength() index = align(index, 8) fd.seek(index) def GetFvs(self): return self._fvs class EfiFv(object): FILE_SYSTEM_GUID = uuid.UUID('{8c8ce578-8a3d-4f1c-9935-896185c32dd3}') def __init__(self, parent=None): self._size = 0 self._filename = None self._fvheader = None self._blockentries = [] self._ffs = [] # following field is for FV in FD self._parent = parent self._offset = 0 self._raw = array.array('B') def Load(self, fd): self._offset = fd.tell() self._filename = fd.name # get file header self._fvheader = EfiFirmwareVolumeHeader.Read(fd) #self._fvheader.Dump() self._size = self._fvheader.GetFvLength() if self._fvheader.GetFileSystemGuid() != self.FILE_SYSTEM_GUID: fd.seek(self._offset) self._raw.fromfile(fd, self.GetHeader().GetFvLength()) return # read block map blockentry = BlockMapEntry.Read(fd) self._blockentries.append(blockentry) while (blockentry.GetNumberBlocks() != 0 and blockentry.GetLength() != 0): self._blockentries.append(blockentry) blockentry = BlockMapEntry.Read(fd) if self._fvheader.GetSize() + (len(self._blockentries)) * 8 != \ self._fvheader.GetHeaderLength(): raise EFIBinaryError("Volume Header length not consistent with block map!") index = align(fd.tell(), 8) count = 0 while ((index + EfiFfs.FFS_HEADER_SIZE) < self._size): ffs = EfiFfs.Read(fd, self) if not isValidGuid(ffs.GetNameGuid()): break self._ffs.append(ffs) count += 1 index = align(fd.tell(), 8) fd.seek(self._offset) self._raw.fromfile(fd, self.GetHeader().GetFvLength()) def GetFfs(self): return self._ffs def GetHeader(self): return self._fvheader def GetBlockEntries(self): return self._blockentries def GetHeaderRawData(self): ret = [] ret += self._fvheader.GetRawData() for block in self._blockentries: ret += block.GetRawData() return ret def GetOffset(self): return 0 def GetRawData(self): return self._raw.tolist() class BinaryItem(object): def __init__(self, parent=None): self._size = 0 self._arr = array.array('B') self._parent = parent @classmethod def Read(cls, fd, parent=None): item = cls(parent) item.fromfile(fd) return item def Load(self, fd): self.fromfile(fd) def GetSize(self): """should be implemented by inherited class""" def fromfile(self, fd): self._arr.fromfile(fd, self.GetSize()) def GetParent(self): return self._parent class EfiFirmwareVolumeHeader(BinaryItem): def GetSize(self): return 56 def GetSigunature(self): list = self._arr.tolist() sig = '' for x in list[40:44]: sig += chr(x) return sig def GetAttribute(self): return list2int(self._arr.tolist()[44:48]) def GetErasePolarity(self): list = self.GetAttrStrings() if 'EFI_FVB2_ERASE_POLARITY' in list: return True return False def GetAttrStrings(self): list = [] value = self.GetAttribute() if (value & 0x01) != 0: list.append('EFI_FVB2_READ_DISABLED_CAP') if (value & 0x02) != 0: list.append('EFI_FVB2_READ_ENABLED_CAP') if (value & 
0x04) != 0: list.append('EFI_FVB2_READ_STATUS') if (value & 0x08) != 0: list.append('EFI_FVB2_WRITE_DISABLED_CAP') if (value & 0x10) != 0: list.append('EFI_FVB2_WRITE_ENABLED_CAP') if (value & 0x20) != 0: list.append('EFI_FVB2_WRITE_STATUS') if (value & 0x40) != 0: list.append('EFI_FVB2_LOCK_CAP') if (value & 0x80) != 0: list.append('EFI_FVB2_LOCK_STATUS') if (value & 0x200) != 0: list.append('EFI_FVB2_STICKY_WRITE') if (value & 0x400) != 0: list.append('EFI_FVB2_MEMORY_MAPPED') if (value & 0x800) != 0: list.append('EFI_FVB2_ERASE_POLARITY') if (value & 0x1000) != 0: list.append('EFI_FVB2_READ_LOCK_CAP') if (value & 0x00002000) != 0: list.append('EFI_FVB2_READ_LOCK_STATUS') if (value & 0x00004000) != 0: list.append('EFI_FVB2_WRITE_LOCK_CAP') if (value & 0x00008000) != 0: list.append('EFI_FVB2_WRITE_LOCK_STATUS') if (value == 0): list.append('EFI_FVB2_ALIGNMENT_1') if (value & 0x001F0000) == 0x00010000: list.append('EFI_FVB2_ALIGNMENT_2') if (value & 0x001F0000) == 0x00020000: list.append('EFI_FVB2_ALIGNMENT_4') if (value & 0x001F0000) == 0x00030000: list.append('EFI_FVB2_ALIGNMENT_8') if (value & 0x001F0000) == 0x00040000: list.append('EFI_FVB2_ALIGNMENT_16') if (value & 0x001F0000) == 0x00050000: list.append('EFI_FVB2_ALIGNMENT_32') if (value & 0x001F0000) == 0x00060000: list.append('EFI_FVB2_ALIGNMENT_64') if (value & 0x001F0000) == 0x00070000: list.append('EFI_FVB2_ALIGNMENT_128') if (value & 0x001F0000) == 0x00080000: list.append('EFI_FVB2_ALIGNMENT_256') if (value & 0x001F0000) == 0x00090000: list.append('EFI_FVB2_ALIGNMENT_512') if (value & 0x001F0000) == 0x000A0000: list.append('EFI_FVB2_ALIGNMENT_1K') if (value & 0x001F0000) == 0x000B0000: list.append('EFI_FVB2_ALIGNMENT_2K') if (value & 0x001F0000) == 0x000C0000: list.append('EFI_FVB2_ALIGNMENT_4K') if (value & 0x001F0000) == 0x000D0000: list.append('EFI_FVB2_ALIGNMENT_8K') if (value & 0x001F0000) == 0x000E0000: list.append('EFI_FVB2_ALIGNMENT_16K') if (value & 0x001F0000) == 0x000F0000: list.append('EFI_FVB2_ALIGNMENT_32K') if (value & 0x001F0000) == 0x00100000: list.append('EFI_FVB2_ALIGNMENT_64K') if (value & 0x001F0000) == 0x00110000: list.append('EFI_FVB2_ALIGNMENT_128K') if (value & 0x001F0000) == 0x00120000: list.append('EFI_FVB2_ALIGNMENT_256K') if (value & 0x001F0000) == 0x00130000: list.append('EFI_FVB2_ALIGNMENT_512K') return list def GetHeaderLength(self): return list2int(self._arr.tolist()[48:50]) def Dump(self): print('Signature: %s' % self.GetSigunature()) print('Attribute: 0x%X' % self.GetAttribute()) print('Header Length: 0x%X' % self.GetHeaderLength()) print('File system Guid: ', self.GetFileSystemGuid()) print('Revision: 0x%X' % self.GetRevision()) print('FvLength: 0x%X' % self.GetFvLength()) def GetFileSystemGuid(self): list = self._arr.tolist() return list2guid(list[16:32]) def GetRevision(self): list = self._arr.tolist() return int(list[55]) def GetFvLength(self): list = self._arr.tolist() return list2int(list[32:40]) def GetRawData(self): return self._arr.tolist() class BlockMapEntry(BinaryItem): def GetSize(self): return 8 def GetNumberBlocks(self): list = self._arr.tolist() return list2int(list[0:4]) def GetLength(self): list = self._arr.tolist() return list2int(list[4:8]) def GetRawData(self): return self._arr.tolist() def __str__(self): return '[BlockEntry] Number = 0x%X, length=0x%X' % (self.GetNumberBlocks(), self.GetLength()) class EfiFfs(object): FFS_HEADER_SIZE = 24 def __init__(self, parent=None): self._header = None # following field is for FFS in FV file. 
self._parent = parent self._offset = 0 self._sections = [] def Load(self, fd): self._offset = align(fd.tell(), 8) self._header = EfiFfsHeader.Read(fd, self) if not isValidGuid(self.GetNameGuid()): return index = self._offset fileend = self._offset + self.GetSize() while (index + EfiSection.EFI_SECTION_HEADER_SIZE < fileend): section = EfiSection(self) section.Load(fd) if section.GetSize() == 0 and section.GetHeader().GetType() == 0: break self._sections.append(section) index = fd.tell() # rebase file pointer to next ffs file index = self._offset + self._header.GetFfsSize() index = align(index, 8) fd.seek(index) def GetOffset(self): return self._offset def GetSize(self): return self._header.GetFfsSize() @classmethod def Read(cls, fd, parent=None): item = cls(parent) item.Load(fd) return item def GetNameGuid(self): return self._header.GetNameGuid() def DumpContent(self): list = self._content.tolist() line = [] count = 0 for item in list: if count < 32: line.append('0x%X' % int(item)) count += 1 else: print(' '.join(line)) count = 0 line = [] line.append('0x%X' % int(item)) count += 1 def GetHeader(self): return self._header def GetParent(self): return self._parent def GetSections(self): return self._sections class EfiFfsHeader(BinaryItem): ffs_state_map = {0x01:'EFI_FILE_HEADER_CONSTRUCTION', 0x02:'EFI_FILE_HEADER_VALID', 0x04:'EFI_FILE_DATA_VALID', 0x08:'EFI_FILE_MARKED_FOR_UPDATE', 0x10:'EFI_FILE_DELETED', 0x20:'EFI_FILE_HEADER_INVALID'} def GetSize(self): return 24 def GetNameGuid(self): list = self._arr.tolist() return list2guid(list[0:16]) def GetType(self): list = self._arr.tolist() return int(list[18]) def GetTypeString(self): value = self.GetType() if value == 0x01: return 'EFI_FV_FILETYPE_RAW' if value == 0x02: return 'EFI_FV_FILETYPE_FREEFORM' if value == 0x03: return 'EFI_FV_FILETYPE_SECURITY_CORE' if value == 0x04: return 'EFI_FV_FILETYPE_PEI_CORE' if value == 0x05: return 'EFI_FV_FILETYPE_DXE_CORE' if value == 0x06: return 'EFI_FV_FILETYPE_PEIM' if value == 0x07: return 'EFI_FV_FILETYPE_DRIVER' if value == 0x08: return 'EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER' if value == 0x09: return 'EFI_FV_FILETYPE_APPLICATION' if value == 0x0B: return 'EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE' if value == 0xc0: return 'EFI_FV_FILETYPE_OEM_MIN' if value == 0xdf: return 'EFI_FV_FILETYPE_OEM_MAX' if value == 0xe0: return 'EFI_FV_FILETYPE_DEBUG_MIN' if value == 0xef: return 'EFI_FV_FILETYPE_DEBUG_MAX' if value == 0xf0: return 'EFI_FV_FILETYPE_FFS_PAD' if value == 0xff: return 'EFI_FV_FILETYPE_FFS_MAX' return 'Unknown FFS Type' def GetAttributes(self): list = self._arr.tolist() return int(list[19]) def GetFfsSize(self): list = self._arr.tolist() return list2int(list[20:23]) def GetState(self): list = self._arr.tolist() state = int(list[23]) polarity = self.GetParent().GetParent().GetHeader().GetErasePolarity() if polarity: state = (~state) & 0xFF HighestBit = 0x80 while (HighestBit != 0) and (HighestBit & state) == 0: HighestBit = HighestBit >> 1 return HighestBit def GetStateString(self): state = self.GetState() if state in self.ffs_state_map.keys(): return self.ffs_state_map[state] return 'Unknown Ffs State' def Dump(self): print("FFS name: ", self.GetNameGuid()) print("FFS type: ", self.GetType()) print("FFS attr: 0x%X" % self.GetAttributes()) print("FFS size: 0x%X" % self.GetFfsSize()) print("FFS state: 0x%X" % self.GetState()) def GetRawData(self): return self._arr.tolist() class EfiSection(object): EFI_SECTION_HEADER_SIZE = 4 def __init__(self, parent=None): self._size = 0 self._parent = parent 
self._offset = 0 self._contents = array.array('B') def Load(self, fd): self._offset = align(fd.tell(), 4) self._header = EfiSectionHeader.Read(fd, self) if self._header.GetTypeString() == "EFI_SECTION_PE32": pefile = pe.PEFile(self) pefile.Load(fd, self.GetContentSize()) fd.seek(self._offset) self._contents.fromfile(fd, self.GetContentSize()) # rebase file pointer to next section index = self._offset + self.GetSize() index = align(index, 4) fd.seek(index) def GetContentSize(self): return self.GetSize() - self.EFI_SECTION_HEADER_SIZE def GetContent(self): return self._contents.tolist() def GetSize(self): return self._header.GetSectionSize() def GetHeader(self): return self._header def GetSectionOffset(self): return self._offset + self.EFI_SECTION_HEADER_SIZE class EfiSectionHeader(BinaryItem): section_type_map = {0x01: 'EFI_SECTION_COMPRESSION', 0x02: 'EFI_SECTION_GUID_DEFINED', 0x10: 'EFI_SECTION_PE32', 0x11: 'EFI_SECTION_PIC', 0x12: 'EFI_SECTION_TE', 0x13: 'EFI_SECTION_DXE_DEPEX', 0x14: 'EFI_SECTION_VERSION', 0x15: 'EFI_SECTION_USER_INTERFACE', 0x16: 'EFI_SECTION_COMPATIBILITY16', 0x17: 'EFI_SECTION_FIRMWARE_VOLUME_IMAGE', 0x18: 'EFI_SECTION_FREEFORM_SUBTYPE_GUID', 0x19: 'EFI_SECTION_RAW', 0x1B: 'EFI_SECTION_PEI_DEPEX'} def GetSize(self): return 4 def GetSectionSize(self): list = self._arr.tolist() return list2int(list[0:3]) def GetType(self): list = self._arr.tolist() return int(list[3]) def GetTypeString(self): type = self.GetType() if type not in self.section_type_map.keys(): return 'Unknown Section Type' return self.section_type_map[type] def Dump(self): print('size = 0x%X' % self.GetSectionSize()) print('type = 0x%X' % self.GetType()) rMapEntry = re.compile('^(\w+)[ \(\w\)]* \(BaseAddress=([0-9a-fA-F]+), EntryPoint=([0-9a-fA-F]+), GUID=([0-9a-fA-F\-]+)') class EfiFvMapFile(object): def __init__(self): self._mapentries = {} def Load(self, path): if not os.path.exists(path): return False try: file = open(path, 'r') lines = file.readlines() file.close() except: return False for line in lines: if line[0] != ' ': # new entry ret = rMapEntry.match(line) if ret is not None: name = ret.groups()[0] baseaddr = int(ret.groups()[1], 16) entry = int(ret.groups()[2], 16) guidstr = '{' + ret.groups()[3] + '}' guid = uuid.UUID(guidstr) self._mapentries[guid] = EfiFvMapFileEntry(name, baseaddr, entry, guid) return True def GetEntry(self, guid): if guid in self._mapentries.keys(): return self._mapentries[guid] return None class EfiFvMapFileEntry(object): def __init__(self, name, baseaddr, entry, guid): self._name = name self._baseaddr = baseaddr self._entry = entry self._guid = guid def GetName(self): return self._name def GetBaseAddress(self): return self._baseaddr def GetEntryPoint(self): return self._entry def list2guid(list): val1 = list2int(list[0:4]) val2 = list2int(list[4:6]) val3 = list2int(list[6:8]) val4 = 0 for item in list[8:16]: val4 = (val4 << 8) | int(item) val = val1 << 12 * 8 | val2 << 10 * 8 | val3 << 8 * 8 | val4 guid = uuid.UUID(int=val) return guid def list2int(list): val = 0 for index in range(len(list) - 1, -1, -1): val = (val << 8) | int(list[index]) return val def align(value, alignment): return (value + ((alignment - value) & (alignment - 1))) gInvalidGuid = uuid.UUID(int=0xffffffffffffffffffffffffffffffff) def isValidGuid(guid): if guid == gInvalidGuid: return False return True
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/efibinary.py
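The firmware volume parser above leans on two small pure helpers: list2int() decodes little-endian byte runs pulled out of the raw arrays, and align() rounds a file offset up to a power-of-two boundary (FFS files are 8-byte aligned, sections 4-byte aligned). A self-contained illustration, with the helper bodies copied from the module so the worked values can be checked directly:

def list2int(lst):
    # Decode a little-endian byte list into an integer.
    val = 0
    for index in range(len(lst) - 1, -1, -1):
        val = (val << 8) | int(lst[index])
    return val

def align(value, alignment):
    # Round value up to the next multiple of alignment (a power of two).
    return value + ((alignment - value) & (alignment - 1))

assert list2int([0x48, 0x00, 0x10, 0x00]) == 0x100048   # little-endian decode
assert align(0x29, 8) == 0x30                           # next 8-byte boundary
assert align(0x30, 8) == 0x30                           # already aligned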
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/__init__.py
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

def GetEdkLogger():
    import logging
    return logging.getLogger('edk')

class EdkException(Exception):
    def __init__(self, message, fName=None, fNo=None):
        self._message = message
        ErrorMsg(message, fName, fNo)

    def GetMessage(self):
        return '[EDK Failure]: %s' % self._message

def ErrorMsg(mess, fName=None, fNo=None):
    GetEdkLogger().error(NormalMessage('#ERR#', mess, fName, fNo))

def LogMsg(mess, fName=None, fNo=None):
    GetEdkLogger().info(NormalMessage('@LOG@', mess, fName, fNo))

def WarnMsg(mess, fName=None, fNo=None):
    GetEdkLogger().warning(NormalMessage('!WAR!', mess, fName, fNo))

def NormalMessage(type, mess, fName=None, fNo=None):
    strMsg = type

    if fName is not None:
        strMsg += ' %s' % fName.replace('/', '\\')
        if fNo is not None:
            strMsg += '(%d):' % fNo
        else:
            strMsg += ' :'

    if fName is None and fNo is None:
        strMsg += ' '

    strMsg += mess

    return strMsg
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/message.py
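A short usage sketch for the message helpers, assuming the module is importable under its repository path; the file name and line number are made up for illustration:

import logging
from plugins.EdkPlugins.basemodel.message import NormalMessage, ErrorMsg

logging.basicConfig(format='%(levelname)-8s %(message)s')   # give the 'edk' logger a handler

# NormalMessage composes the prefix, a Windows-style path, and the line number:
assert NormalMessage('#ERR#', 'missing section', 'pkg/foo.dec', 12) == \
       '#ERR# pkg\\foo.dec(12):missing section'

ErrorMsg('missing section', 'pkg/foo.dec', 12)   # ERROR    #ERR# pkg\foo.dec(12):missing section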
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

import core.editor

class INIDoc(core.editor.EditorDocument):
    def __init__(self):
        core.editor.EditorDocument.__init__(self)
        self._iniobj = None


class INIView(core.editor.EditorView):
    pass
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/inidocview.py
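inidocview.py is only a stub pairing an INI document with a view for the host editor framework (core.editor, which is not part of this package dump). A hypothetical specialization that attaches a parsed INI object to the document might look like this; the class and method names below are invented for illustration:

from plugins.EdkPlugins.basemodel.inidocview import INIDoc

class DecDocument(INIDoc):              # hypothetical subclass
    def SetINIObject(self, iniobj):
        # _iniobj is the slot that INIDoc.__init__ initializes to None.
        self._iniobj = iniobj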
## @file # # Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR> # # SPDX-License-Identifier: BSD-2-Clause-Patent # from __future__ import print_function from __future__ import absolute_import import os from .message import * class BaseDoxygeItem: def __init__(self, name, tag=''): self.mName = name self.mTag = tag self.mDescription = '' self.mText = [] def AddDescription(self, desc): self.mDescription = '%s%s' % (self.mDescription, desc) def __str__(self): return '\n'.join(self.mText) def Generate(self): """This interface need to be override""" class Section(BaseDoxygeItem): def Generate(self): """This interface need to be override""" if len(self.mTag) != 0: self.mText.append(' \section %s %s' % (self.mName, self.mTag)) else: self.mText.append(' \section %s' % self.mName) self.mText.append(self.mDescription) return self.mText class Page(BaseDoxygeItem): def __init__(self, name, tag=None, isSort=True): BaseDoxygeItem.__init__(self, name, tag) self.mSubPages = [] self.mIsMainPage = False self.mSections = [] self.mIsSort = isSort def GetSubpageCount(self): return len(self.mSubPages) def AddPage(self, subpage): self.mSubPages.append(subpage) return subpage def AddPages(self, pageArray): if pageArray is None: return for page in pageArray: self.AddPage(page) def AddSection(self, section): self.mSections.append(section) self.mSections.sort(key=lambda x: x.mName.lower()) def Generate(self): if self.mIsMainPage: self.mText.append('/** \mainpage %s' % self.mName) self.mIsSort = False else: self.mText.append('/** \page %s %s' % (self.mTag, self.mName)) if len(self.mDescription) != 0: self.mText.append(self.mDescription) endIndex = len(self.mText) self.mSections.sort(key=lambda x: x.mName.lower()) for sect in self.mSections: self.mText += sect.Generate() endIndex = len(self.mText) if len(self.mSubPages) != 0: self.mText.insert(endIndex, "<p> \section content_index INDEX") endIndex = len(self.mText) self.mText.insert(endIndex, '<ul>') endIndex += 1 if self.mIsSort: self.mSubPages.sort(key=lambda x: x.mName.lower()) for page in self.mSubPages: self.mText.insert(endIndex, '<li>\subpage %s \"%s\" </li>' % (page.mTag, page.mName)) endIndex += 1 self.mText += page.Generate() self.mText.insert(endIndex, '</ul>') endIndex += 1 self.mText.insert(endIndex, ' **/') return self.mText class DoxygenFile(Page): def __init__(self, name, file): Page.__init__(self, name) self.mFilename = file self.mIsMainPage = True def GetFilename(self): return self.mFilename.replace('/', '\\') def Save(self): str = self.Generate() try: f = open(self.mFilename, 'w') f.write('\n'.join(str)) f.close() except IOError as e: ErrorMsg ('Fail to write file %s' % self.mFilename) return False return True doxygenConfigTemplate = """ DOXYFILE_ENCODING = UTF-8 PROJECT_NAME = %(ProjectName)s PROJECT_NUMBER = %(ProjectVersion)s OUTPUT_DIRECTORY = %(OutputDir)s CREATE_SUBDIRS = YES OUTPUT_LANGUAGE = English BRIEF_MEMBER_DESC = YES REPEAT_BRIEF = YES ABBREVIATE_BRIEF = "The $name class " \\ "The $name widget " \\ "The $name file " \\ is \\ provides \\ specifies \\ contains \\ represents \\ a \\ an \\ the ALWAYS_DETAILED_SEC = NO INLINE_INHERITED_MEMB = NO FULL_PATH_NAMES = YES STRIP_FROM_PATH = %(StripPath)s STRIP_FROM_INC_PATH = SHORT_NAMES = YES JAVADOC_AUTOBRIEF = NO QT_AUTOBRIEF = NO MULTILINE_CPP_IS_BRIEF = NO DETAILS_AT_TOP = YES INHERIT_DOCS = YES SEPARATE_MEMBER_PAGES = NO TAB_SIZE = 1 ALIASES = OPTIMIZE_OUTPUT_FOR_C = YES OPTIMIZE_OUTPUT_JAVA = NO BUILTIN_STL_SUPPORT = NO CPP_CLI_SUPPORT = NO SIP_SUPPORT = NO 
DISTRIBUTE_GROUP_DOC = YES SUBGROUPING = YES TYPEDEF_HIDES_STRUCT = NO EXTRACT_ALL = YES EXTRACT_PRIVATE = NO EXTRACT_STATIC = NO EXTRACT_LOCAL_CLASSES = NO EXTRACT_LOCAL_METHODS = NO EXTRACT_ANON_NSPACES = NO HIDE_UNDOC_MEMBERS = NO HIDE_UNDOC_CLASSES = NO HIDE_FRIEND_COMPOUNDS = NO HIDE_IN_BODY_DOCS = NO INTERNAL_DOCS = NO CASE_SENSE_NAMES = NO HIDE_SCOPE_NAMES = NO SHOW_INCLUDE_FILES = NO INLINE_INFO = YES SORT_MEMBER_DOCS = YES SORT_BRIEF_DOCS = NO SORT_BY_SCOPE_NAME = YES GENERATE_TODOLIST = YES GENERATE_TESTLIST = YES GENERATE_BUGLIST = YES GENERATE_DEPRECATEDLIST= YES ENABLED_SECTIONS = MAX_INITIALIZER_LINES = 30 SHOW_USED_FILES = NO SHOW_DIRECTORIES = NO FILE_VERSION_FILTER = QUIET = NO WARNINGS = YES WARN_IF_UNDOCUMENTED = YES WARN_IF_DOC_ERROR = YES WARN_NO_PARAMDOC = YES WARN_FORMAT = "$file:$line: $text " WARN_LOGFILE = %(WarningFile)s INPUT = %(FileList)s INPUT_ENCODING = UTF-8 FILE_PATTERNS = %(Pattern)s RECURSIVE = NO EXCLUDE = *.svn EXCLUDE_SYMLINKS = NO EXCLUDE_PATTERNS = .svn EXCLUDE_SYMBOLS = EXAMPLE_PATH = %(ExamplePath)s EXAMPLE_PATTERNS = * EXAMPLE_RECURSIVE = NO IMAGE_PATH = INPUT_FILTER = FILTER_PATTERNS = FILTER_SOURCE_FILES = NO SOURCE_BROWSER = NO INLINE_SOURCES = NO STRIP_CODE_COMMENTS = YES REFERENCED_BY_RELATION = YES REFERENCES_RELATION = YES REFERENCES_LINK_SOURCE = NO USE_HTAGS = NO VERBATIM_HEADERS = NO ALPHABETICAL_INDEX = NO COLS_IN_ALPHA_INDEX = 5 IGNORE_PREFIX = GENERATE_HTML = YES HTML_OUTPUT = html HTML_FILE_EXTENSION = .html HTML_HEADER = HTML_FOOTER = HTML_STYLESHEET = HTML_ALIGN_MEMBERS = YES GENERATE_HTMLHELP = %(WhetherGenerateHtmlHelp)s HTML_DYNAMIC_SECTIONS = NO CHM_FILE = index.chm HHC_LOCATION = GENERATE_CHI = NO BINARY_TOC = NO TOC_EXPAND = NO DISABLE_INDEX = NO ENUM_VALUES_PER_LINE = 4 GENERATE_TREEVIEW = %(WhetherGenerateTreeView)s TREEVIEW_WIDTH = 250 GENERATE_LATEX = NO LATEX_OUTPUT = latex LATEX_CMD_NAME = latex MAKEINDEX_CMD_NAME = makeindex COMPACT_LATEX = NO PAPER_TYPE = a4wide EXTRA_PACKAGES = LATEX_HEADER = PDF_HYPERLINKS = YES USE_PDFLATEX = YES LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO GENERATE_RTF = NO RTF_OUTPUT = rtf COMPACT_RTF = NO RTF_HYPERLINKS = NO RTF_STYLESHEET_FILE = RTF_EXTENSIONS_FILE = GENERATE_MAN = NO MAN_OUTPUT = man MAN_EXTENSION = .3 MAN_LINKS = NO GENERATE_XML = NO XML_OUTPUT = xml XML_SCHEMA = XML_DTD = XML_PROGRAMLISTING = YES GENERATE_AUTOGEN_DEF = NO GENERATE_PERLMOD = NO PERLMOD_LATEX = NO PERLMOD_PRETTY = YES PERLMOD_MAKEVAR_PREFIX = ENABLE_PREPROCESSING = YES MACRO_EXPANSION = YES EXPAND_ONLY_PREDEF = YES SEARCH_INCLUDES = YES INCLUDE_PATH = %(IncludePath)s INCLUDE_FILE_PATTERNS = *.h PREDEFINED = %(PreDefined)s EXPAND_AS_DEFINED = SKIP_FUNCTION_MACROS = NO TAGFILES = GENERATE_TAGFILE = ALLEXTERNALS = NO EXTERNAL_GROUPS = YES PERL_PATH = /usr/bin/perl CLASS_DIAGRAMS = NO MSCGEN_PATH = HIDE_UNDOC_RELATIONS = YES HAVE_DOT = NO CLASS_GRAPH = YES COLLABORATION_GRAPH = YES GROUP_GRAPHS = YES UML_LOOK = NO TEMPLATE_RELATIONS = NO INCLUDE_GRAPH = YES INCLUDED_BY_GRAPH = YES CALL_GRAPH = NO CALLER_GRAPH = NO GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES DOT_IMAGE_FORMAT = png DOT_PATH = DOTFILE_DIRS = DOT_GRAPH_MAX_NODES = 50 MAX_DOT_GRAPH_DEPTH = 1000 DOT_TRANSPARENT = YES DOT_MULTI_TARGETS = NO GENERATE_LEGEND = YES DOT_CLEANUP = YES SEARCHENGINE = NO """ class DoxygenConfigFile: def __init__(self): self.mProjectName = '' self.mOutputDir = '' self.mFileList = [] self.mIncludeList = [] self.mStripPath = '' self.mExamplePath = '' self.mPattern = ['*.c', '*.h', '*.asm', '*.s', '.nasm', '*.html', '*.dox'] 
self.mMode = 'HTML' self.mWarningFile = '' self.mPreDefined = [] self.mProjectVersion = 0.1 def SetChmMode(self): self.mMode = 'CHM' def SetHtmlMode(self): self.mMode = 'HTML' def SetProjectName(self, str): self.mProjectName = str def SetProjectVersion(self, str): self.mProjectVersion = str def SetOutputDir(self, str): self.mOutputDir = str def SetStripPath(self, str): self.mStripPath = str def SetExamplePath(self, str): self.mExamplePath = str def SetWarningFilePath(self, str): self.mWarningFile = str.replace('\\', '/') def FileExists(self, path): if path is None: return False if len(path) == 0: return False for p in self.mFileList: if path.lower() == p.lower(): return True return False def AddFile(self, path): if path is None: return if len(path) == 0: return path = path.replace('\\', '/') if not self.FileExists(path): self.mFileList.append(path) def AddIncludePath(self, path): path = path.replace('\\', '/') if path not in self.mIncludeList: self.mIncludeList.append(path) def AddPattern(self, pattern): self.mPattern.append(pattern) def AddPreDefined(self, macro): self.mPreDefined.append(macro) def Generate(self, path): files = ' \\\n'.join(self.mFileList) includes = ' \\\n'.join(self.mIncludeList) patterns = ' \\\n'.join(self.mPattern) if self.mMode.lower() == 'html': sHtmlHelp = 'NO' sTreeView = 'YES' else: sHtmlHelp = 'YES' sTreeView = 'NO' text = doxygenConfigTemplate % {'ProjectName':self.mProjectName, 'OutputDir':self.mOutputDir, 'StripPath':self.mStripPath, 'ExamplePath':self.mExamplePath, 'FileList':files, 'Pattern':patterns, 'WhetherGenerateHtmlHelp':sHtmlHelp, 'WhetherGenerateTreeView':sTreeView, 'IncludePath':includes, 'WarningFile':self.mWarningFile, 'PreDefined':' '.join(self.mPreDefined), 'ProjectVersion':self.mProjectVersion} try: f = open(path, 'w') f.write(text) f.close() except IOError as e: ErrorMsg ('Fail to generate doxygen config file %s' % path) return False return True ######################################################################## # TEST CODE ######################################################################## if __name__== '__main__': df = DoxygenFile('Platform Document', 'm:\tree') df.AddPage(Page('Module', 'module')) p = df.AddPage(Page('Library', 'library')) p.AddDescription(desc) p.AddPage(Page('PCD', 'pcds')) df.Generate() print(df)
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/doxygen.py
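The TEST CODE block at the end of this module is stale: it references an undefined desc variable and never writes its output (Save() is the writer; Generate() only builds the text lines in memory). A corrected minimal sketch of driving the page and config models together; the package name and paths are illustrative, and the output directory is assumed to exist:

from plugins.EdkPlugins.basemodel.doxygen import DoxygenFile, Page, DoxygenConfigFile

df = DoxygenFile('MdePkg Document', 'MdePkg.decdoxygen')
lib = df.AddPage(Page('Library', 'library'))
lib.AddDescription('Library classes provided by this package.')
df.Save()                                   # generate and write the \mainpage source

cfg = DoxygenConfigFile()
cfg.SetProjectName('MdePkg')
cfg.SetOutputDir('Build/Doc')
cfg.AddFile(df.GetFilename())
cfg.Generate('Build/Doc/MdePkg.doxygen_config')   # expand the template to disk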
## @file # # Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR> # # SPDX-License-Identifier: BSD-2-Clause-Patent # from __future__ import absolute_import from .message import * import re import os section_re = re.compile(r'^\[([\w., "]+)\]') class BaseINIFile(object): _objs = {} def __new__(cls, *args, **kwargs): """Maintain only a single instance of this object @return: instance of this class """ if len(args) == 0: return object.__new__(cls) filename = args[0] parent = None if len(args) > 1: parent = args[1] key = os.path.normpath(filename) if key not in cls._objs.keys(): cls._objs[key] = object.__new__(cls) if parent is not None: cls._objs[key].AddParent(parent) return cls._objs[key] def __init__(self, filename=None, parent=None): self._lines = [] self._sections = {} self._filename = filename self._globals = [] self._isModify = True def AddParent(self, parent): if parent is None: return if not hasattr(self, "_parents"): self._parents = [] if parent in self._parents: ErrorMsg("Duplicate parent is found for INI file %s" % self._filename) return self._parents.append(parent) def GetFilename(self): return os.path.normpath(self._filename) def IsModified(self): return self._isModify def Modify(self, modify=True, obj=None): if modify == self._isModify: return self._isModify = modify if modify: for parent in self._parents: parent.Modify(True, self) def _ReadLines(self, filename): # # try to open file # if not os.path.exists(filename): return False try: handle = open(filename, 'r') self._lines = handle.readlines() handle.close() except: raise EdkException("Fail to open file %s" % filename) return True def GetSectionInstance(self, parent, name, isCombined=False): return BaseINISection(parent, name, isCombined) def GetSectionByName(self, name): arr = [] for key in self._sections.keys(): if '.private' in key: continue for item in self._sections[key]: if item.GetBaseName().lower().find(name.lower()) != -1: arr.append(item) return arr def GetSectionObjectsByName(self, name): arr = [] sects = self.GetSectionByName(name) for sect in sects: for obj in sect.GetObjects(): arr.append(obj) return arr def Parse(self): if not self._isModify: return True if not self._ReadLines(self._filename): return False sObjs = [] inGlobal = True # process line for index in range(len(self._lines)): templine = self._lines[index].strip() # skip comments if len(templine) == 0: continue if re.match("^\[=*\]", templine) or re.match("^#", templine) or \ re.match("\*+/", templine): continue m = section_re.match(templine) if m is not None: # found a section inGlobal = False # Finish the latest section first if len(sObjs) != 0: for sObj in sObjs: sObj._end = index - 1 if not sObj.Parse(): ErrorMsg("Fail to parse section %s" % sObj.GetBaseName(), self._filename, sObj._start) # start new section sname_arr = m.groups()[0].split(',') sObjs = [] for name in sname_arr: sObj = self.GetSectionInstance(self, name, (len(sname_arr) > 1)) sObj._start = index sObjs.append(sObj) if name.lower() not in self._sections: self._sections[name.lower()] = [sObj] else: self._sections[name.lower()].append(sObj) elif inGlobal: # not start any section and find global object gObj = BaseINIGlobalObject(self) gObj._start = index gObj.Parse() self._globals.append(gObj) # Finish the last section if len(sObjs) != 0: for sObj in sObjs: sObj._end = index if not sObj.Parse(): ErrorMsg("Fail to parse section %s" % sObj.GetBaseName(), self._filename, sObj._start) self._isModify = False return True def Destroy(self, parent): # check referenced parent 
if parent is not None: assert parent in self._parents, "when destory ini object, can not found parent reference!" self._parents.remove(parent) if len(self._parents) != 0: return for sects in self._sections.values(): for sect in sects: sect.Destroy() # dereference from _objs array assert self.GetFilename() in self._objs.keys(), "When destroy ini object, can not find obj reference!" assert self in self._objs.values(), "When destroy ini object, can not find obj reference!" del self._objs[self.GetFilename()] # dereference self self.Clear() def GetDefine(self, name): sects = self.GetSectionByName('Defines') for sect in sects: for obj in sect.GetObjects(): line = obj.GetLineByOffset(obj._start).split('#')[0].strip() arr = line.split('=') if arr[0].strip().lower() == name.strip().lower(): return arr[1].strip() return None def Clear(self): for sects in self._sections.values(): for sect in sects: del sect self._sections.clear() for gObj in self._globals: del gObj del self._globals[:] del self._lines[:] def Reload(self): self.Clear() ret = self.Parse() if ret: self._isModify = False return ret def AddNewSection(self, sectName): if sectName.lower() in self._sections.keys(): ErrorMsg('Section %s can not be created for conflict with existing section') return None sectionObj = self.GetSectionInstance(self, sectName) sectionObj._start = len(self._lines) sectionObj._end = len(self._lines) + 1 self._lines.append('[%s]\n' % sectName) self._lines.append('\n\n') self._sections[sectName.lower()] = sectionObj return sectionObj def CopySectionsByName(self, oldDscObj, nameStr): sects = oldDscObj.GetSectionByName(nameStr) for sect in sects: sectObj = self.AddNewSection(sect.GetName()) sectObj.Copy(sect) def __str__(self): return ''.join(self._lines) ## Get file header's comment from basic INI file. 
# The file comments has two style: # 1) #/** @file # 2) ## @file # def GetFileHeader(self): desc = [] lineArr = self._lines inHeader = False for num in range(len(self._lines)): line = lineArr[num].strip() if not inHeader and (line.startswith("#/**") or line.startswith("##")) and \ line.find("@file") != -1: inHeader = True continue if inHeader and (line.startswith("#**/") or line.startswith('##')): inHeader = False break if inHeader: prefixIndex = line.find('#') if prefixIndex == -1: desc.append(line) else: desc.append(line[prefixIndex + 1:]) return '<br>\n'.join(desc) class BaseINISection(object): def __init__(self, parent, name, isCombined=False): self._parent = parent self._name = name self._isCombined = isCombined self._start = 0 self._end = 0 self._objs = [] def __del__(self): for obj in self._objs: del obj del self._objs[:] def GetName(self): return self._name def GetObjects(self): return self._objs def GetParent(self): return self._parent def GetStartLinenumber(self): return self._start def GetEndLinenumber(self): return self._end def GetLine(self, linenumber): return self._parent._lines[linenumber] def GetFilename(self): return self._parent.GetFilename() def GetSectionINIObject(self, parent): return BaseINISectionObject(parent) def Parse(self): # skip first line in section, it is used by section name visit = self._start + 1 iniObj = None while (visit <= self._end): line = self.GetLine(visit).strip() if re.match("^\[=*\]", line) or re.match("^#", line) or len(line) == 0: visit += 1 continue line = line.split('#')[0].strip() if iniObj is not None: if line.endswith('}'): iniObj._end = visit - self._start if not iniObj.Parse(): ErrorMsg("Fail to parse ini object", self.GetFilename(), iniObj.GetStartLinenumber()) else: self._objs.append(iniObj) iniObj = None else: iniObj = self.GetSectionINIObject(self) iniObj._start = visit - self._start if not line.endswith('{'): iniObj._end = visit - self._start if not iniObj.Parse(): ErrorMsg("Fail to parse ini object", self.GetFilename(), iniObj.GetStartLinenumber()) else: self._objs.append(iniObj) iniObj = None visit += 1 return True def Destroy(self): for obj in self._objs: obj.Destroy() def GetBaseName(self): return self._name def AddLine(self, line): end = self.GetEndLinenumber() self._parent._lines.insert(end, line) self._end += 1 def Copy(self, sectObj): index = sectObj.GetStartLinenumber() + 1 while index < sectObj.GetEndLinenumber(): line = sectObj.GetLine(index) if not line.strip().startswith('#'): self.AddLine(line) index += 1 def AddObject(self, obj): lines = obj.GenerateLines() for line in lines: self.AddLine(line) def GetComment(self): comments = [] start = self._start - 1 bFound = False while (start > 0): line = self.GetLine(start).strip() if len(line) == 0: start -= 1 continue if line.startswith('##'): bFound = True index = line.rfind('#') if (index + 1) < len(line): comments.append(line[index + 1:]) break if line.startswith('#'): start -= 1 continue break if bFound: end = start + 1 while (end < self._start): line = self.GetLine(end).strip() if len(line) == 0: break if not line.startswith('#'): break index = line.rfind('#') if (index + 1) < len(line): comments.append(line[index + 1:]) end += 1 return comments class BaseINIGlobalObject(object): def __init__(self, parent): self._start = 0 self._end = 0 def Parse(self): return True def __str__(self): return parent._lines[self._start] def __del__(self): pass class BaseINISectionObject(object): def __init__(self, parent): self._start = 0 self._end = 0 self._parent = parent def 
__del__(self): self._parent = None def GetParent(self): return self._parent def GetFilename(self): return self.GetParent().GetFilename() def GetPackageName(self): return self.GetFilename() def GetFileObj(self): return self.GetParent().GetParent() def GetStartLinenumber(self): return self.GetParent()._start + self._start def GetLineByOffset(self, offset): sect_start = self._parent.GetStartLinenumber() linenumber = sect_start + offset return self._parent.GetLine(linenumber) def GetLinenumberByOffset(self, offset): return offset + self._parent.GetStartLinenumber() def Parse(self): return True def Destroy(self): pass def __str__(self): return self.GetLineByOffset(self._start).strip() def GenerateLines(self): return ['default setion object string\n'] def GetComment(self): comments = [] start = self.GetStartLinenumber() - 1 bFound = False while (start > 0): line = self.GetParent().GetLine(start).strip() if len(line) == 0: start -= 1 continue if line.startswith('##'): bFound = True index = line.rfind('#') if (index + 1) < len(line): comments.append(line[index + 1:]) break if line.startswith('#'): start -= 1 continue break if bFound: end = start + 1 while (end <= self.GetStartLinenumber() - 1): line = self.GetParent().GetLine(end).strip() if len(line) == 0: break if not line.startswith('#'): break index = line.rfind('#') if (index + 1) < len(line): comments.append(line[index + 1:]) end += 1 return comments
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/basemodel/ini.py
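A small usage sketch for the INI model above: BaseINIFile caches one instance per normalized path, Parse() splits the text into [section] objects, and GetDefine() looks a key up in the [Defines] section. The .dec path is illustrative:

from plugins.EdkPlugins.basemodel.ini import BaseINIFile

decfile = BaseINIFile('MdePkg/MdePkg.dec')
if decfile.Parse():
    print(decfile.GetDefine('PACKAGE_GUID'))         # value from [Defines]
    for sect in decfile.GetSectionByName('includes'):
        print(sect.GetName(), sect.GetStartLinenumber())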
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/__init__.py
## @file # # This file produce action class to generate doxygen document for edk2 codebase. # The action classes are shared by GUI and command line tools. # # Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR> # # SPDX-License-Identifier: BSD-2-Clause-Patent from plugins.EdkPlugins.basemodel import doxygen import os try: import wx gInGui = True except: gInGui = False import re from plugins.EdkPlugins.edk2.model import inf from plugins.EdkPlugins.edk2.model import dec from plugins.EdkPlugins.basemodel.message import * _ignore_dir = ['.svn', '_svn', 'cvs'] _inf_key_description_mapping_table = { 'INF_VERSION':'Version of INF file specification', #'BASE_NAME':'Module Name', 'FILE_GUID':'Module Guid', 'MODULE_TYPE': 'Module Type', 'VERSION_STRING': 'Module Version', 'LIBRARY_CLASS': 'Produced Library Class', 'EFI_SPECIFICATION_VERSION': 'UEFI Specification Version', 'PI_SPECIFICATION_VERSION': 'PI Specification Version', 'ENTRY_POINT': 'Module Entry Point Function', 'CONSTRUCTOR': 'Library Constructor Function' } _dec_key_description_mapping_table = { 'DEC_SPECIFICATION': 'Version of DEC file specification', 'PACKAGE_GUID': 'Package Guid' } class DoxygenAction: """This is base class for all doxygen action. """ def __init__(self, doxPath, chmPath, outputPath, projname, mode='html', log=None, verbose=False): """Constructor function. @param doxPath the obosolution path of doxygen execute file. @param outputPath the obosolution output path. @param log log function for output message """ self._doxPath = doxPath self._chmPath = chmPath self._outputPath = outputPath self._projname = projname self._configFile = None # doxygen config file is used by doxygen exe file self._indexPageFile = None # doxygen page file for index page. self._log = log self._mode = mode self._verbose = verbose self._doxygenCallback = None self._chmCallback = None def Log(self, message, level='info'): if self._log is not None: self._log(message, level) def IsVerbose(self): return self._verbose def Generate(self): """Generate interface called by outer directly""" self.Log(">>>>>> Start generate doxygen document for %s... Zzz....\n" % self._projname) # create doxygen config file at first self._configFile = doxygen.DoxygenConfigFile() self._configFile.SetOutputDir(self._outputPath) self._configFile.SetWarningFilePath(os.path.join(self._outputPath, 'warning.txt')) if self._mode.lower() == 'html': self._configFile.SetHtmlMode() else: self._configFile.SetChmMode() self.Log(" >>>>>> Initialize doxygen config file...Zzz...\n") self.InitializeConfigFile() self.Log(" >>>>>> Generate doxygen index page file...Zzz...\n") indexPagePath = self.GenerateIndexPage() if indexPagePath is None: self.Log("Fail to generate index page!\n", 'error') return False else: self.Log("Success to create doxygen index page file %s \n" % indexPagePath) # Add index page doxygen file to file list. self._configFile.AddFile(indexPagePath) # save config file to output path configFilePath = os.path.join(self._outputPath, self._projname + '.doxygen_config') self._configFile.Generate(configFilePath) self.Log(" <<<<<< Success Save doxygen config file to %s...\n" % configFilePath) # launch doxygen tool to generate document if self._doxygenCallback is not None: self.Log(" >>>>>> Start doxygen process...Zzz...\n") if not self._doxygenCallback(self._doxPath, configFilePath): return False else: self.Log("Fail to create doxygen process!", 'error') return False return True def InitializeConfigFile(self): """Initialize config setting for doxygen project. 
It will be invoked after config file object is created. Inherited class should implement it. """ def GenerateIndexPage(self): """Generate doxygen index page. Inherited class should implement it.""" return None def RegisterCallbackDoxygenProcess(self, callback): self._doxygenCallback = callback def RegisterCallbackCHMProcess(self, callback): self._chmCallback = callback class PlatformDocumentAction(DoxygenAction): """Generate platform doxygen document, will be implement at future.""" class PackageDocumentAction(DoxygenAction): """Generate package reference document""" def __init__(self, doxPath, chmPath, outputPath, pObj, mode='html', log=None, arch=None, tooltag=None, macros=[], onlyInclude=False, verbose=False): DoxygenAction.__init__(self, doxPath, chmPath, outputPath, pObj.GetName(), mode, log, verbose) self._pObj = pObj self._arch = arch self._tooltag = tooltag self._macros = macros self._onlyIncludeDocument = onlyInclude def InitializeConfigFile(self): if self._arch == 'IA32': self._configFile.AddPreDefined('MDE_CPU_IA32') elif self._arch == 'X64': self._configFile.AddPreDefined('MDE_CPU_X64') elif self._arch == 'IPF': self._configFile.AddPreDefined('MDE_CPU_IPF') elif self._arch == 'EBC': self._configFile.AddPreDefined('MDE_CPU_EBC') else: self._arch = None self._configFile.AddPreDefined('MDE_CPU_IA32') self._configFile.AddPreDefined('MDE_CPU_X64') self._configFile.AddPreDefined('MDE_CPU_IPF') self._configFile.AddPreDefined('MDE_CPU_EBC') self._configFile.AddPreDefined('MDE_CPU_ARM') for macro in self._macros: self._configFile.AddPreDefined(macro) namestr = self._pObj.GetName() if self._arch is not None: namestr += '[%s]' % self._arch if self._tooltag is not None: namestr += '[%s]' % self._tooltag self._configFile.SetProjectName(namestr) self._configFile.SetStripPath(self._pObj.GetWorkspace()) self._configFile.SetProjectVersion(self._pObj.GetFileObj().GetVersion()) self._configFile.AddPattern('*.decdoxygen') if self._tooltag.lower() == 'msft': self._configFile.AddPreDefined('_MSC_EXTENSIONS') elif self._tooltag.lower() == 'gnu': self._configFile.AddPreDefined('__GNUC__') elif self._tooltag.lower() == 'intel': self._configFile.AddPreDefined('__INTEL_COMPILER') else: self._tooltag = None self._configFile.AddPreDefined('_MSC_EXTENSIONS') self._configFile.AddPreDefined('__GNUC__') self._configFile.AddPreDefined('__INTEL_COMPILER') self._configFile.AddPreDefined('ASM_PFX= ') self._configFile.AddPreDefined('OPTIONAL= ') def GenerateIndexPage(self): """Generate doxygen index page. Inherited class should implement it.""" fObj = self._pObj.GetFileObj() pdObj = doxygen.DoxygenFile('%s Package Document' % self._pObj.GetName(), '%s.decdoxygen' % self._pObj.GetFilename()) self._configFile.AddFile(pdObj.GetFilename()) pdObj.AddDescription(fObj.GetFileHeader()) defSection = fObj.GetSectionByName('defines')[0] baseSection = doxygen.Section('PackageBasicInformation', 'Package Basic Information') descr = '<TABLE>' for obj in defSection.GetObjects(): if obj.GetKey() in _dec_key_description_mapping_table.keys(): descr += '<TR>' descr += '<TD><B>%s</B></TD>' % _dec_key_description_mapping_table[obj.GetKey()] descr += '<TD>%s</TD>' % obj.GetValue() descr += '</TR>' descr += '</TABLE><br>' baseSection.AddDescription(descr) pdObj.AddSection(baseSection) knownIssueSection = doxygen.Section('Known_Issue_section', 'Known Issue') knownIssueSection.AddDescription('<ul>') knownIssueSection.AddDescription('<li> OPTIONAL macro for function parameter can not be dealed with doxygen, so it disapear in this document! 
</li>') knownIssueSection.AddDescription('</ul>') pdObj.AddSection(knownIssueSection) self.AddAllIncludeFiles(self._pObj, self._configFile) pages = self.GenerateIncludesSubPage(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) pages = self.GenerateLibraryClassesSubPage(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) pages = self.GeneratePcdSubPages(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) pages = self.GenerateGuidSubPages(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) pages = self.GeneratePpiSubPages(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) pages = self.GenerateProtocolSubPages(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) if not self._onlyIncludeDocument: pdObj.AddPages(self.GenerateModulePages(self._pObj, self._configFile)) pdObj.Save() return pdObj.GetFilename() def GenerateIncludesSubPage(self, pObj, configFile): # by default add following path as include path to config file pkpath = pObj.GetFileObj().GetPackageRootPath() configFile.AddIncludePath(os.path.join(pkpath, 'Include')) configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Library')) configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Protocol')) configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Ppi')) configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Guid')) configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'IndustryStandard')) rootArray = [] pageRoot = doxygen.Page("Public Includes", "%s_public_includes" % pObj.GetName()) objs = pObj.GetFileObj().GetSectionObjectsByName('includes') if len(objs) == 0: return [] for obj in objs: # Add path to include path path = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetPath()) configFile.AddIncludePath(path) # only list common folder's include file if obj.GetArch().lower() != 'common': continue bNeedAddIncludePage = False topPage = doxygen.Page(self._ConvertPathToDoxygen(path, pObj), 'public_include_top') topPage.AddDescription('<ul>\n') for file in os.listdir(path): if file.lower() in _ignore_dir: continue fullpath = os.path.join(path, file) if os.path.isfile(fullpath): self.ProcessSourceFileForInclude(fullpath, pObj, configFile) topPage.AddDescription('<li> \link %s\endlink </li>\n' % self._ConvertPathToDoxygen(fullpath, pObj)) else: if file.lower() in ['library', 'protocol', 'guid', 'ppi', 'ia32', 'x64', 'ipf', 'ebc', 'arm', 'pi', 'uefi', 'aarch64']: continue bNeedAddSubPage = False subpage = doxygen.Page(self._ConvertPathToDoxygen(fullpath, pObj), 'public_include_%s' % file) subpage.AddDescription('<ul>\n') for subfile in os.listdir(fullpath): if subfile.lower() in _ignore_dir: continue bNeedAddSubPage = True subfullpath = os.path.join(fullpath, subfile) self.ProcessSourceFileForInclude(subfullpath, pObj, configFile) subpage.AddDescription('<li> \link %s \endlink </li>\n' % self._ConvertPathToDoxygen(subfullpath, pObj)) subpage.AddDescription('</ul>\n') if bNeedAddSubPage: bNeedAddIncludePage = True pageRoot.AddPage(subpage) topPage.AddDescription('</ul>\n') if bNeedAddIncludePage: pageRoot.AddPage(topPage) if pageRoot.GetSubpageCount() != 0: return [pageRoot] else: return [] def GenerateLibraryClassesSubPage(self, pObj, configFile): """ Generate sub page for library class for package. One DEC file maybe contains many library class sections for different architecture. @param fObj DEC file object. 
""" rootArray = [] pageRoot = doxygen.Page("Library Class", "%s_libraryclass" % pObj.GetName()) objs = pObj.GetFileObj().GetSectionObjectsByName('libraryclass', self._arch) if len(objs) == 0: return [] if self._arch is not None: for obj in objs: classPage = doxygen.Page(obj.GetClassName(), "lc_%s" % obj.GetClassName()) comments = obj.GetComment() if len(comments) != 0: classPage.AddDescription('<br>\n'.join(comments) + '<br>\n') pageRoot.AddPage(classPage) path = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile()) path = path[len(pObj.GetWorkspace()) + 1:] if len(comments) == 0: classPage.AddDescription('\copydoc %s<p>' % obj.GetHeaderFile()) section = doxygen.Section('ref', 'Refer to Header File') section.AddDescription('\link %s\n' % obj.GetHeaderFile()) section.AddDescription(' \endlink<p>\n') classPage.AddSection(section) fullPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile()) self.ProcessSourceFileForInclude(fullPath, pObj, configFile) else: archPageDict = {} for obj in objs: if obj.GetArch() not in archPageDict.keys(): archPageDict[obj.GetArch()] = doxygen.Page(obj.GetArch(), 'lc_%s' % obj.GetArch()) pageRoot.AddPage(archPageDict[obj.GetArch()]) subArchRoot = archPageDict[obj.GetArch()] classPage = doxygen.Page(obj.GetClassName(), "lc_%s" % obj.GetClassName()) comments = obj.GetComment() if len(comments) != 0: classPage.AddDescription('<br>\n'.join(comments) + '<br>\n') subArchRoot.AddPage(classPage) path = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile()) path = path[len(pObj.GetWorkspace()) + 1:] if len(comments) == 0: classPage.AddDescription('\copydoc %s<p>' % obj.GetHeaderFile()) section = doxygen.Section('ref', 'Refer to Header File') section.AddDescription('\link %s\n' % obj.GetHeaderFile()) section.AddDescription(' \endlink<p>\n') classPage.AddSection(section) fullPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile()) self.ProcessSourceFileForInclude(fullPath, pObj, configFile) rootArray.append(pageRoot) return rootArray def ProcessSourceFileForInclude(self, path, pObj, configFile, infObj=None): """ @param path the analysising file full path @param pObj package object @param configFile doxygen config file. """ if gInGui: wx.Yield() if not os.path.exists(path): ErrorMsg('Source file path %s does not exist!' % path) return if configFile.FileExists(path): return try: f = open(path, 'r') lines = f.readlines() f.close() except IOError: ErrorMsg('Fail to open file %s' % path) return configFile.AddFile(path) return no = 0 for no in range(len(lines)): if len(lines[no].strip()) == 0: continue if lines[no].strip()[:2] in ['##', '//', '/*', '*/']: continue index = lines[no].lower().find('include') #mo = IncludePattern.finditer(lines[no].lower()) mo = re.match(r"^#\s*include\s+[<\"]([\\/\w.]+)[>\"]$", lines[no].strip().lower()) if not mo: continue mo = re.match(r"^[#\w\s]+[<\"]([\\/\w.]+)[>\"]$", lines[no].strip()) filePath = mo.groups()[0] if filePath is None or len(filePath) == 0: continue # find header file in module's path firstly. 
fullPath = None if os.path.exists(os.path.join(os.path.dirname(path), filePath)): # Find the file in current directory fullPath = os.path.join(os.path.dirname(path), filePath).replace('\\', '/') else: # find in depedent package's include path incObjs = pObj.GetFileObj().GetSectionObjectsByName('includes') for incObj in incObjs: incPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), incObj.GetPath()).strip() incPath = os.path.realpath(os.path.join(incPath, filePath)) if os.path.exists(incPath): fullPath = incPath break if infObj is not None: pkgInfObjs = infObj.GetSectionObjectsByName('packages') for obj in pkgInfObjs: decObj = dec.DECFile(os.path.join(pObj.GetWorkspace(), obj.GetPath())) if not decObj: ErrorMsg ('Fail to create pacakge object for %s' % obj.GetPackageName()) continue if not decObj.Parse(): ErrorMsg ('Fail to load package object for %s' % obj.GetPackageName()) continue incObjs = decObj.GetSectionObjectsByName('includes') for incObj in incObjs: incPath = os.path.join(decObj.GetPackageRootPath(), incObj.GetPath()).replace('\\', '/') if os.path.exists(os.path.join(incPath, filePath)): fullPath = os.path.join(os.path.join(incPath, filePath)) break if fullPath is not None: break if fullPath is None and self.IsVerbose(): self.Log('Can not resolve header file %s for file %s in package %s\n' % (filePath, path, pObj.GetFileObj().GetFilename()), 'error') return else: fullPath = fullPath.replace('\\', '/') if self.IsVerbose(): self.Log('Preprocessing: Add include file %s for file %s\n' % (fullPath, path)) #LogMsg ('Preprocessing: Add include file %s for file %s' % (fullPath, path)) self.ProcessSourceFileForInclude(fullPath, pObj, configFile, infObj) def AddAllIncludeFiles(self, pObj, configFile): objs = pObj.GetFileObj().GetSectionObjectsByName('includes') for obj in objs: incPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetPath()) for root, dirs, files in os.walk(incPath): for dir in dirs: if dir.lower() in _ignore_dir: dirs.remove(dir) for file in files: path = os.path.normpath(os.path.join(root, file)) configFile.AddFile(path.replace('/', '\\')) def GeneratePcdSubPages(self, pObj, configFile): """ Generate sub pages for package's PCD definition. 
@param pObj package object @param configFile config file object """ rootArray = [] objs = pObj.GetFileObj().GetSectionObjectsByName('pcd') if len(objs) == 0: return [] pcdRootPage = doxygen.Page('PCD', 'pcd_root_page') typeRootPageDict = {} typeArchRootPageDict = {} for obj in objs: if obj.GetPcdType() not in typeRootPageDict.keys(): typeRootPageDict[obj.GetPcdType()] = doxygen.Page(obj.GetPcdType(), 'pcd_%s_root_page' % obj.GetPcdType()) pcdRootPage.AddPage(typeRootPageDict[obj.GetPcdType()]) typeRoot = typeRootPageDict[obj.GetPcdType()] if self._arch is not None: pcdPage = doxygen.Page('%s' % obj.GetPcdName(), 'pcd_%s_%s_%s' % (obj.GetPcdType(), obj.GetArch(), obj.GetPcdName().split('.')[1])) pcdPage.AddDescription('<br>\n'.join(obj.GetComment()) + '<br>\n') section = doxygen.Section('PCDinformation', 'PCD Information') desc = '<TABLE>' desc += '<TR>' desc += '<TD><CAPTION>Name</CAPTION></TD>' desc += '<TD><CAPTION>Token Space</CAPTION></TD>' desc += '<TD><CAPTION>Token number</CAPTION></TD>' desc += '<TD><CAPTION>Data Type</CAPTION></TD>' desc += '<TD><CAPTION>Default Value</CAPTION></TD>' desc += '</TR>' desc += '<TR>' desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[1] desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[0] desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdToken() desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdDataType() desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdValue() desc += '</TR>' desc += '</TABLE>' section.AddDescription(desc) pcdPage.AddSection(section) typeRoot.AddPage(pcdPage) else: keystr = obj.GetPcdType() + obj.GetArch() if keystr not in typeArchRootPageDict.keys(): typeArchRootPage = doxygen.Page(obj.GetArch(), 'pcd_%s_%s_root_page' % (obj.GetPcdType(), obj.GetArch())) typeArchRootPageDict[keystr] = typeArchRootPage typeRoot.AddPage(typeArchRootPage) typeArchRoot = typeArchRootPageDict[keystr] pcdPage = doxygen.Page('%s' % obj.GetPcdName(), 'pcd_%s_%s_%s' % (obj.GetPcdType(), obj.GetArch(), obj.GetPcdName().split('.')[1])) pcdPage.AddDescription('<br>\n'.join(obj.GetComment()) + '<br>\n') section = doxygen.Section('PCDinformation', 'PCD Information') desc = '<TABLE>' desc += '<TR>' desc += '<TD><CAPTION>Name</CAPTION></TD>' desc += '<TD><CAPTION>Token Space</CAPTION></TD>' desc += '<TD><CAPTION>Token number</CAPTION></TD>' desc += '<TD><CAPTION>Data Type</CAPTION></TD>' desc += '<TD><CAPTION>Default Value</CAPTION></TD>' desc += '</TR>' desc += '<TR>' desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[1] desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[0] desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdToken() desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdDataType() desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdValue() desc += '</TR>' desc += '</TABLE>' section.AddDescription(desc) pcdPage.AddSection(section) typeArchRoot.AddPage(pcdPage) return [pcdRootPage] def _GenerateGuidSubPage(self, pObj, obj, configFile): guidPage = doxygen.Page('%s' % obj.GetName(), 'guid_%s_%s' % (obj.GetArch(), obj.GetName())) comments = obj.GetComment() if len(comments) != 0: guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>') section = doxygen.Section('BasicGuidInfo', 'GUID Information') desc = '<TABLE>' desc += '<TR>' desc += '<TD><CAPTION>GUID\'s Guid Name</CAPTION></TD><TD><CAPTION>GUID\'s Guid</CAPTION></TD>' desc += '</TR>' desc += '<TR>' desc += '<TD>%s</TD>' % obj.GetName() desc += '<TD>%s</TD>' % obj.GetGuid() desc += '</TR>' desc += 
'</TABLE>' section.AddDescription(desc) guidPage.AddSection(section) refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile) if refFile: relPath = refFile[len(pObj.GetWorkspace()) + 1:] if len(comments) == 0: guidPage.AddDescription(' \\copydoc %s <br>' % relPath) section = doxygen.Section('ref', 'Refer to Header File') section.AddDescription('\link %s\n' % relPath) section.AddDescription('\endlink\n') self.ProcessSourceFileForInclude(refFile, pObj, configFile) guidPage.AddSection(section) return guidPage def GenerateGuidSubPages(self, pObj, configFile): """ Generate sub pages for package's GUID definition. @param pObj package object @param configFilf doxygen config file object """ pageRoot = doxygen.Page('GUID', 'guid_root_page') objs = pObj.GetFileObj().GetSectionObjectsByName('guids', self._arch) if len(objs) == 0: return [] if self._arch is not None: for obj in objs: pageRoot.AddPage(self._GenerateGuidSubPage(pObj, obj, configFile)) else: guidArchRootPageDict = {} for obj in objs: if obj.GetArch() not in guidArchRootPageDict.keys(): guidArchRoot = doxygen.Page(obj.GetArch(), 'guid_arch_root_%s' % obj.GetArch()) pageRoot.AddPage(guidArchRoot) guidArchRootPageDict[obj.GetArch()] = guidArchRoot guidArchRoot = guidArchRootPageDict[obj.GetArch()] guidArchRoot.AddPage(self._GenerateGuidSubPage(pObj, obj, configFile)) return [pageRoot] def _GeneratePpiSubPage(self, pObj, obj, configFile): guidPage = doxygen.Page(obj.GetName(), 'ppi_page_%s' % obj.GetName()) comments = obj.GetComment() if len(comments) != 0: guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>') section = doxygen.Section('BasicPpiInfo', 'PPI Information') desc = '<TABLE>' desc += '<TR>' desc += '<TD><CAPTION>PPI\'s Guid Name</CAPTION></TD><TD><CAPTION>PPI\'s Guid</CAPTION></TD>' desc += '</TR>' desc += '<TR>' desc += '<TD>%s</TD>' % obj.GetName() desc += '<TD>%s</TD>' % obj.GetGuid() desc += '</TR>' desc += '</TABLE>' section.AddDescription(desc) guidPage.AddSection(section) refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile) if refFile: relPath = refFile[len(pObj.GetWorkspace()) + 1:] if len(comments) == 0: guidPage.AddDescription(' \\copydoc %s <br>' % relPath) section = doxygen.Section('ref', 'Refer to Header File') section.AddDescription('\link %s\n' % relPath) section.AddDescription('\endlink\n') self.ProcessSourceFileForInclude(refFile, pObj, configFile) guidPage.AddSection(section) return guidPage def GeneratePpiSubPages(self, pObj, configFile): """ Generate sub pages for package's GUID definition. 
@param pObj package object @param configFilf doxygen config file object """ pageRoot = doxygen.Page('PPI', 'ppi_root_page') objs = pObj.GetFileObj().GetSectionObjectsByName('ppis', self._arch) if len(objs) == 0: return [] if self._arch is not None: for obj in objs: pageRoot.AddPage(self._GeneratePpiSubPage(pObj, obj, configFile)) else: guidArchRootPageDict = {} for obj in objs: if obj.GetArch() not in guidArchRootPageDict.keys(): guidArchRoot = doxygen.Page(obj.GetArch(), 'ppi_arch_root_%s' % obj.GetArch()) pageRoot.AddPage(guidArchRoot) guidArchRootPageDict[obj.GetArch()] = guidArchRoot guidArchRoot = guidArchRootPageDict[obj.GetArch()] guidArchRoot.AddPage(self._GeneratePpiSubPage(pObj, obj, configFile)) return [pageRoot] def _GenerateProtocolSubPage(self, pObj, obj, configFile): guidPage = doxygen.Page(obj.GetName(), 'protocol_page_%s' % obj.GetName()) comments = obj.GetComment() if len(comments) != 0: guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>') section = doxygen.Section('BasicProtocolInfo', 'PROTOCOL Information') desc = '<TABLE>' desc += '<TR>' desc += '<TD><CAPTION>PROTOCOL\'s Guid Name</CAPTION></TD><TD><CAPTION>PROTOCOL\'s Guid</CAPTION></TD>' desc += '</TR>' desc += '<TR>' desc += '<TD>%s</TD>' % obj.GetName() desc += '<TD>%s</TD>' % obj.GetGuid() desc += '</TR>' desc += '</TABLE>' section.AddDescription(desc) guidPage.AddSection(section) refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile) if refFile: relPath = refFile[len(pObj.GetWorkspace()) + 1:] if len(comments) == 0: guidPage.AddDescription(' \\copydoc %s <br>' % relPath) section = doxygen.Section('ref', 'Refer to Header File') section.AddDescription('\link %s\n' % relPath) section.AddDescription('\endlink\n') self.ProcessSourceFileForInclude(refFile, pObj, configFile) guidPage.AddSection(section) return guidPage def GenerateProtocolSubPages(self, pObj, configFile): """ Generate sub pages for package's GUID definition. @param pObj package object @param configFilf doxygen config file object """ pageRoot = doxygen.Page('PROTOCOL', 'protocol_root_page') objs = pObj.GetFileObj().GetSectionObjectsByName('protocols', self._arch) if len(objs) == 0: return [] if self._arch is not None: for obj in objs: pageRoot.AddPage(self._GenerateProtocolSubPage(pObj, obj, configFile)) else: guidArchRootPageDict = {} for obj in objs: if obj.GetArch() not in guidArchRootPageDict.keys(): guidArchRoot = doxygen.Page(obj.GetArch(), 'protocol_arch_root_%s' % obj.GetArch()) pageRoot.AddPage(guidArchRoot) guidArchRootPageDict[obj.GetArch()] = guidArchRoot guidArchRoot = guidArchRootPageDict[obj.GetArch()] guidArchRoot.AddPage(self._GenerateProtocolSubPage(pObj, obj, configFile)) return [pageRoot] def FindHeaderFileForGuid(self, pObj, name, configFile): """ For declaration header file for GUID/PPI/Protocol. @param pObj package object @param name guid/ppi/protocol's name @param configFile config file object @return full path of header file and None if not found. """ startPath = pObj.GetFileObj().GetPackageRootPath() incPath = os.path.join(startPath, 'Include').replace('\\', '/') # if <PackagePath>/include exist, then search header under it. 
if os.path.exists(incPath): startPath = incPath for root, dirs, files in os.walk(startPath): for dir in dirs: if dir.lower() in _ignore_dir: dirs.remove(dir) for file in files: fPath = os.path.join(root, file) if not IsCHeaderFile(fPath): continue try: f = open(fPath, 'r') lines = f.readlines() f.close() except IOError: self.Log('Fail to open file %s\n' % fPath) continue for line in lines: if line.find(name) != -1 and \ line.find('extern') != -1: return fPath.replace('\\', '/') return None def GetPackageModuleList(self, pObj): """ Get all module's INF path under package's root path @param pObj package object @return arrary of INF full path """ mArray = [] packPath = pObj.GetFileObj().GetPackageRootPath() if not os.path.exists: return None for root, dirs, files in os.walk(packPath): for dir in dirs: if dir.lower() in _ignore_dir: dirs.remove(dir) for file in files: if CheckPathPostfix(file, 'inf'): fPath = os.path.join(root, file).replace('\\', '/') mArray.append(fPath) return mArray def GenerateModulePages(self, pObj, configFile): """ Generate sub pages for package's module which is under the package root directory. @param pObj package object @param configFilf doxygen config file object """ infList = self.GetPackageModuleList(pObj) rootPages = [] libObjs = [] modObjs = [] for infpath in infList: infObj = inf.INFFile(infpath) #infObj = INFFileObject.INFFile (pObj.GetWorkspacePath(), # inf) if not infObj: self.Log('Fail create INF object for %s' % inf) continue if not infObj.Parse(): self.Log('Fail to load INF file %s' % inf) continue if infObj.GetProduceLibraryClass() is not None: libObjs.append(infObj) else: modObjs.append(infObj) if len(libObjs) != 0: libRootPage = doxygen.Page('Libraries', 'lib_root_page') rootPages.append(libRootPage) for libInf in libObjs: libRootPage.AddPage(self.GenerateModulePage(pObj, libInf, configFile, True)) if len(modObjs) != 0: modRootPage = doxygen.Page('Modules', 'module_root_page') rootPages.append(modRootPage) for modInf in modObjs: modRootPage.AddPage(self.GenerateModulePage(pObj, modInf, configFile, False)) return rootPages def GenerateModulePage(self, pObj, infObj, configFile, isLib): """ Generate page for a module/library. 
@param infObj INF file object for module/library @param configFile doxygen config file object @param isLib Whether this module is library @param module doxygen page object """ workspace = pObj.GetWorkspace() refDecObjs = [] for obj in infObj.GetSectionObjectsByName('packages'): decObj = dec.DECFile(os.path.join(workspace, obj.GetPath())) if not decObj: ErrorMsg ('Fail to create pacakge object for %s' % obj.GetPackageName()) continue if not decObj.Parse(): ErrorMsg ('Fail to load package object for %s' % obj.GetPackageName()) continue refDecObjs.append(decObj) modPage = doxygen.Page('%s' % infObj.GetBaseName(), 'module_%s' % infObj.GetBaseName()) modPage.AddDescription(infObj.GetFileHeader()) basicInfSection = doxygen.Section('BasicModuleInformation', 'Basic Module Information') desc = "<TABLE>" for obj in infObj.GetSectionObjectsByName('defines'): key = obj.GetKey() value = obj.GetValue() if key not in _inf_key_description_mapping_table.keys(): continue if key == 'LIBRARY_CLASS' and value.find('|') != -1: clsname, types = value.split('|') desc += '<TR>' desc += '<TD><B>%s</B></TD>' % _inf_key_description_mapping_table[key] desc += '<TD>%s</TD>' % clsname desc += '</TR>' desc += '<TR>' desc += '<TD><B>Supported Module Types</B></TD>' desc += '<TD>%s</TD>' % types desc += '</TR>' else: desc += '<TR>' desc += '<TD><B>%s</B></TD>' % _inf_key_description_mapping_table[key] if key == 'EFI_SPECIFICATION_VERSION' and value == '0x00020000': value = '2.0' desc += '<TD>%s</TD>' % value desc += '</TR>' desc += '</TABLE>' basicInfSection.AddDescription(desc) modPage.AddSection(basicInfSection) # Add protocol section data = [] for obj in infObj.GetSectionObjectsByName('pcd', self._arch): data.append(obj.GetPcdName().strip()) if len(data) != 0: s = doxygen.Section('Pcds', 'Pcds') desc = "<TABLE>" desc += '<TR><TD><B>PCD Name</B></TD><TD><B>TokenSpace</B></TD><TD><B>Package</B></TD></TR>' for item in data: desc += '<TR>' desc += '<TD>%s</TD>' % item.split('.')[1] desc += '<TD>%s</TD>' % item.split('.')[0] pkgbasename = self.SearchPcdPackage(item, workspace, refDecObjs) desc += '<TD>%s</TD>' % pkgbasename desc += '</TR>' desc += "</TABLE>" s.AddDescription(desc) modPage.AddSection(s) # Add protocol section #sects = infObj.GetSectionByString('protocol') data = [] #for sect in sects: for obj in infObj.GetSectionObjectsByName('protocol', self._arch): data.append(obj.GetName().strip()) if len(data) != 0: s = doxygen.Section('Protocols', 'Protocols') desc = "<TABLE>" desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>' for item in data: desc += '<TR>' desc += '<TD>%s</TD>' % item pkgbasename = self.SearchProtocolPackage(item, workspace, refDecObjs) desc += '<TD>%s</TD>' % pkgbasename desc += '</TR>' desc += "</TABLE>" s.AddDescription(desc) modPage.AddSection(s) # Add ppi section #sects = infObj.GetSectionByString('ppi') data = [] #for sect in sects: for obj in infObj.GetSectionObjectsByName('ppi', self._arch): data.append(obj.GetName().strip()) if len(data) != 0: s = doxygen.Section('Ppis', 'Ppis') desc = "<TABLE>" desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>' for item in data: desc += '<TR>' desc += '<TD>%s</TD>' % item pkgbasename = self.SearchPpiPackage(item, workspace, refDecObjs) desc += '<TD>%s</TD>' % pkgbasename desc += '</TR>' desc += "</TABLE>" s.AddDescription(desc) modPage.AddSection(s) # Add guid section #sects = infObj.GetSectionByString('guid') data = [] #for sect in sects: for obj in infObj.GetSectionObjectsByName('guid', self._arch): 
data.append(obj.GetName().strip()) if len(data) != 0: s = doxygen.Section('Guids', 'Guids') desc = "<TABLE>" desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>' for item in data: desc += '<TR>' desc += '<TD>%s</TD>' % item pkgbasename = self.SearchGuidPackage(item, workspace, refDecObjs) desc += '<TD>%s</TD>' % pkgbasename desc += '</TR>' desc += "</TABLE>" s.AddDescription(desc) modPage.AddSection(s) section = doxygen.Section('LibraryClasses', 'Library Classes') desc = "<TABLE>" desc += '<TR><TD><B>Name</B></TD><TD><B>Type</B></TD><TD><B>Package</B></TD><TD><B>Header File</B></TD></TR>' if isLib: desc += '<TR>' desc += '<TD>%s</TD>' % infObj.GetProduceLibraryClass() desc += '<TD>Produce</TD>' try: pkgname, hPath = self.SearchLibraryClassHeaderFile(infObj.GetProduceLibraryClass(), workspace, refDecObjs) except: self.Log ('fail to get package header file for lib class %s' % infObj.GetProduceLibraryClass()) pkgname = 'NULL' hPath = 'NULL' desc += '<TD>%s</TD>' % pkgname if hPath != "NULL": #desc += '<TD>\link %s \endlink</TD>' % hPath desc += '<TD>%s</TD>' % hPath else: desc += '<TD>%s</TD>' % hPath desc += '</TR>' for lcObj in infObj.GetSectionObjectsByName('libraryclasses', self._arch): desc += '<TR>' desc += '<TD>%s</TD>' % lcObj.GetClass() retarr = self.SearchLibraryClassHeaderFile(lcObj.GetClass(), workspace, refDecObjs) if retarr is not None: pkgname, hPath = retarr else: self.Log('Fail find the library class %s definition from module %s dependent package!' % (lcObj.GetClass(), infObj.GetFilename()), 'error') pkgname = 'NULL' hPath = 'NULL' desc += '<TD>Consume</TD>' desc += '<TD>%s</TD>' % pkgname desc += '<TD>%s</TD>' % hPath desc += '</TR>' desc += "</TABLE>" section.AddDescription(desc) modPage.AddSection(section) section = doxygen.Section('SourceFiles', 'Source Files') section.AddDescription('<ul>\n') for obj in infObj.GetSourceObjects(self._arch, self._tooltag): sPath = infObj.GetModuleRootPath() sPath = os.path.join(sPath, obj.GetSourcePath()).replace('\\', '/').strip() if sPath.lower().endswith('.uni') or sPath.lower().endswith('.s') or sPath.lower().endswith('.asm') or sPath.lower().endswith('.nasm'): newPath = self.TranslateUniFile(sPath) configFile.AddFile(newPath) newPath = newPath[len(pObj.GetWorkspace()) + 1:] section.AddDescription('<li> \link %s \endlink </li>' % newPath) else: self.ProcessSourceFileForInclude(sPath, pObj, configFile, infObj) sPath = sPath[len(pObj.GetWorkspace()) + 1:] section.AddDescription('<li>\link %s \endlink </li>' % sPath) section.AddDescription('</ul>\n') modPage.AddSection(section) #sects = infObj.GetSectionByString('depex') data = [] #for sect in sects: for obj in infObj.GetSectionObjectsByName('depex'): data.append(str(obj)) if len(data) != 0: s = doxygen.Section('DependentSection', 'Module Dependencies') s.AddDescription('<br>'.join(data)) modPage.AddSection(s) return modPage def TranslateUniFile(self, path): newpath = path + '.dox' #import core.textfile as textfile #file = textfile.TextFile(path) try: file = open(path, 'r') except (IOError, OSError) as msg: return None t = file.read() file.close() output = '/** @file \n' #output = '<html><body>' arr = t.split('\r\n') for line in arr: if line.find('@file') != -1: continue if line.find('*/') != -1: continue line = line.strip() if line.strip().startswith('/'): arr = line.split(' ') if len(arr) > 1: line = ' '.join(arr[1:]) else: continue output += '%s<br>\n' % line output += '**/' if os.path.exists(newpath): os.remove(newpath) file = open(newpath, "w") file.write(output) 
file.close() return newpath def SearchPcdPackage(self, pcdname, workspace, decObjs): for decObj in decObjs: for pcd in decObj.GetSectionObjectsByName('pcd'): if pcdname == pcd.GetPcdName(): return decObj.GetBaseName() return None def SearchProtocolPackage(self, protname, workspace, decObjs): for decObj in decObjs: for proto in decObj.GetSectionObjectsByName('protocol'): if protname == proto.GetName(): return decObj.GetBaseName() return None def SearchPpiPackage(self, ppiname, workspace, decObjs): for decObj in decObjs: for ppi in decObj.GetSectionObjectsByName('ppi'): if ppiname == ppi.GetName(): return decObj.GetBaseName() return None def SearchGuidPackage(self, guidname, workspace, decObjs): for decObj in decObjs: for guid in decObj.GetSectionObjectsByName('guid'): if guidname == guid.GetName(): return decObj.GetBaseName() return None def SearchLibraryClassHeaderFile(self, className, workspace, decObjs): for decObj in decObjs: for cls in decObj.GetSectionObjectsByName('libraryclasses'): if cls.GetClassName().strip() == className: path = cls.GetHeaderFile().strip() path = os.path.join(decObj.GetPackageRootPath(), path) path = path[len(workspace) + 1:] return decObj.GetBaseName(), path.replace('\\', '/') return None def _ConvertPathToDoxygen(self, path, pObj): pRootPath = pObj.GetWorkspace() path = path[len(pRootPath) + 1:] return path.replace('\\', '/') def IsCHeaderFile(path): return CheckPathPostfix(path, 'h') def CheckPathPostfix(path, str): index = path.rfind('.') if index == -1: return False if path[index + 1:].lower() == str.lower(): return True return False
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/doxygengen_spec.py
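# --- Usage sketch (not part of the source above) ----------------------------
# ProcessSourceFileForInclude() in the file above resolves an "#include" by
# checking the including file's own directory first, then every [Includes]
# path declared by the package and by the packages listed in the module's
# INF. The helper below is a minimal, hypothetical distillation of that
# search order; resolve_include and its parameters are illustrative names,
# not part of the module's API.
import os

def resolve_include(including_file, header_name, include_dirs):
    # 1) the directory of the file containing the #include wins
    local = os.path.join(os.path.dirname(including_file), header_name)
    if os.path.exists(local):
        return local.replace('\\', '/')
    # 2) otherwise the first match under the declared include paths, in order
    for inc_dir in include_dirs:
        candidate = os.path.realpath(os.path.join(inc_dir, header_name))
        if os.path.exists(candidate):
            return candidate.replace('\\', '/')
    return None  # the generator reports unresolved headers only in verbose mode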
## @file # # This file produce action class to generate doxygen document for edk2 codebase. # The action classes are shared by GUI and command line tools. # # Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR> # # SPDX-License-Identifier: BSD-2-Clause-Patent """This file produce action class to generate doxygen document for edk2 codebase. The action classes are shared by GUI and command line tools. """ from plugins.EdkPlugins.basemodel import doxygen import os try: import wx gInGui = True except: gInGui = False import re from plugins.EdkPlugins.edk2.model import inf from plugins.EdkPlugins.edk2.model import dec from plugins.EdkPlugins.basemodel.message import * _ignore_dir = ['.svn', '_svn', 'cvs'] _inf_key_description_mapping_table = { 'INF_VERSION':'Version of INF file specification', #'BASE_NAME':'Module Name', 'FILE_GUID':'Module Guid', 'MODULE_TYPE': 'Module Type', 'VERSION_STRING': 'Module Version', 'LIBRARY_CLASS': 'Produced Library Class', 'EFI_SPECIFICATION_VERSION': 'UEFI Specification Version', 'PI_SPECIFICATION_VERSION': 'PI Specification Version', 'ENTRY_POINT': 'Module Entry Point Function', 'CONSTRUCTOR': 'Library Constructor Function' } _dec_key_description_mapping_table = { 'DEC_SPECIFICATION': 'Version of DEC file specification', 'PACKAGE_GUID': 'Package Guid' } class DoxygenAction: """This is base class for all doxygen action. """ def __init__(self, doxPath, chmPath, outputPath, projname, mode='html', log=None, verbose=False): """Constructor function. @param doxPath the obosolution path of doxygen execute file. @param outputPath the obosolution output path. @param log log function for output message """ self._doxPath = doxPath self._chmPath = chmPath self._outputPath = outputPath self._projname = projname self._configFile = None # doxygen config file is used by doxygen exe file self._indexPageFile = None # doxygen page file for index page. self._log = log self._mode = mode self._verbose = verbose self._doxygenCallback = None self._chmCallback = None def Log(self, message, level='info'): if self._log is not None: self._log(message, level) def IsVerbose(self): return self._verbose def Generate(self): """Generate interface called by outer directly""" self.Log(">>>>>> Start generate doxygen document for %s... Zzz....\n" % self._projname) # create doxygen config file at first self._configFile = doxygen.DoxygenConfigFile() self._configFile.SetOutputDir(self._outputPath) self._configFile.SetWarningFilePath(os.path.join(self._outputPath, 'warning.txt')) if self._mode.lower() == 'html': self._configFile.SetHtmlMode() else: self._configFile.SetChmMode() self.Log(" >>>>>> Initialize doxygen config file...Zzz...\n") self.InitializeConfigFile() self.Log(" >>>>>> Generate doxygen index page file...Zzz...\n") indexPagePath = self.GenerateIndexPage() if indexPagePath is None: self.Log("Fail to generate index page!\n", 'error') return False else: self.Log("Success to create doxygen index page file %s \n" % indexPagePath) # Add index page doxygen file to file list. 
self._configFile.AddFile(indexPagePath) # save config file to output path configFilePath = os.path.join(self._outputPath, self._projname + '.doxygen_config') self._configFile.Generate(configFilePath) self.Log(" <<<<<< Success Save doxygen config file to %s...\n" % configFilePath) # launch doxygen tool to generate document if self._doxygenCallback is not None: self.Log(" >>>>>> Start doxygen process...Zzz...\n") if not self._doxygenCallback(self._doxPath, configFilePath): return False else: self.Log("Fail to create doxygen process!", 'error') return False return True def InitializeConfigFile(self): """Initialize config setting for doxygen project. It will be invoked after config file object is created. Inherited class should implement it. """ def GenerateIndexPage(self): """Generate doxygen index page. Inherited class should implement it.""" return None def RegisterCallbackDoxygenProcess(self, callback): self._doxygenCallback = callback def RegisterCallbackCHMProcess(self, callback): self._chmCallback = callback class PlatformDocumentAction(DoxygenAction): """Generate platform doxygen document, will be implement at future.""" class PackageDocumentAction(DoxygenAction): """Generate package reference document""" def __init__(self, doxPath, chmPath, outputPath, pObj, mode='html', log=None, arch=None, tooltag=None, onlyInclude=False, verbose=False): DoxygenAction.__init__(self, doxPath, chmPath, outputPath, pObj.GetName(), mode, log, verbose) self._pObj = pObj self._arch = arch self._tooltag = tooltag self._onlyIncludeDocument = onlyInclude def InitializeConfigFile(self): if self._arch == 'IA32': self._configFile.AddPreDefined('MDE_CPU_IA32') elif self._arch == 'X64': self._configFile.AddPreDefined('MDE_CPU_X64') elif self._arch == 'IPF': self._configFile.AddPreDefined('MDE_CPU_IPF') elif self._arch == 'EBC': self._configFile.AddPreDefined('MDE_CPU_EBC') else: self._arch = None self._configFile.AddPreDefined('MDE_CPU_IA32') self._configFile.AddPreDefined('MDE_CPU_X64') self._configFile.AddPreDefined('MDE_CPU_IPF') self._configFile.AddPreDefined('MDE_CPU_EBC') self._configFile.AddPreDefined('MDE_CPU_ARM') namestr = self._pObj.GetName() if self._arch is not None: namestr += '[%s]' % self._arch if self._tooltag is not None: namestr += '[%s]' % self._tooltag self._configFile.SetProjectName(namestr) self._configFile.SetStripPath(self._pObj.GetWorkspace()) self._configFile.SetProjectVersion(self._pObj.GetFileObj().GetVersion()) self._configFile.AddPattern('*.decdoxygen') if self._tooltag.lower() == 'msft': self._configFile.AddPreDefined('_MSC_EXTENSIONS') elif self._tooltag.lower() == 'gnu': self._configFile.AddPreDefined('__GNUC__') elif self._tooltag.lower() == 'intel': self._configFile.AddPreDefined('__INTEL_COMPILER') else: self._tooltag = None self._configFile.AddPreDefined('_MSC_EXTENSIONS') self._configFile.AddPreDefined('__GNUC__') self._configFile.AddPreDefined('__INTEL_COMPILER') self._configFile.AddPreDefined('ASM_PFX= ') self._configFile.AddPreDefined('OPTIONAL= ') def GenerateIndexPage(self): """Generate doxygen index page. 
Inherited class should implement it.""" fObj = self._pObj.GetFileObj() pdObj = doxygen.DoxygenFile('%s Package Document' % self._pObj.GetName(), '%s.decdoxygen' % self._pObj.GetFilename()) self._configFile.AddFile(pdObj.GetFilename()) pdObj.AddDescription(fObj.GetFileHeader()) defSection = fObj.GetSectionByName('defines')[0] baseSection = doxygen.Section('PackageBasicInformation', 'Package Basic Information') descr = '<TABLE>' for obj in defSection.GetObjects(): if obj.GetKey() in _dec_key_description_mapping_table.keys(): descr += '<TR>' descr += '<TD><B>%s</B></TD>' % _dec_key_description_mapping_table[obj.GetKey()] descr += '<TD>%s</TD>' % obj.GetValue() descr += '</TR>' descr += '</TABLE><br>' baseSection.AddDescription(descr) pdObj.AddSection(baseSection) knownIssueSection = doxygen.Section('Known_Issue_section', 'Known Issue') knownIssueSection.AddDescription('<ul>') knownIssueSection.AddDescription('<li> OPTIONAL macro for function parameter can not be dealed with doxygen, so it disapear in this document! </li>') knownIssueSection.AddDescription('</ul>') pdObj.AddSection(knownIssueSection) self.AddAllIncludeFiles(self._pObj, self._configFile) pages = self.GenerateIncludesSubPage(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) pages = self.GenerateLibraryClassesSubPage(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) pages = self.GeneratePcdSubPages(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) pages = self.GenerateGuidSubPages(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) pages = self.GeneratePpiSubPages(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) pages = self.GenerateProtocolSubPages(self._pObj, self._configFile) if len(pages) != 0: pdObj.AddPages(pages) if not self._onlyIncludeDocument: pdObj.AddPages(self.GenerateModulePages(self._pObj, self._configFile)) pdObj.Save() return pdObj.GetFilename() def GenerateIncludesSubPage(self, pObj, configFile): # by default add following path as include path to config file pkpath = pObj.GetFileObj().GetPackageRootPath() configFile.AddIncludePath(os.path.join(pkpath, 'Include')) configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Library')) configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Protocol')) configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Ppi')) configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'Guid')) configFile.AddIncludePath(os.path.join(pkpath, 'Include', 'IndustryStandard')) rootArray = [] pageRoot = doxygen.Page("Public Includes", "%s_public_includes" % pObj.GetName()) objs = pObj.GetFileObj().GetSectionObjectsByName('includes') if len(objs) == 0: return [] for obj in objs: # Add path to include path path = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetPath()) configFile.AddIncludePath(path) # only list common folder's include file if obj.GetArch().lower() != 'common': continue bNeedAddIncludePage = False topPage = doxygen.Page(self._ConvertPathToDoxygen(path, pObj), 'public_include_top') topPage.AddDescription('<ul>\n') for file in os.listdir(path): if file.lower() in _ignore_dir: continue fullpath = os.path.join(path, file) if os.path.isfile(fullpath): self.ProcessSourceFileForInclude(fullpath, pObj, configFile) topPage.AddDescription('<li> \link %s\endlink </li>\n' % self._ConvertPathToDoxygen(fullpath, pObj)) else: if file.lower() in ['library', 'protocol', 'guid', 'ppi', 'ia32', 'x64', 'ipf', 'ebc', 'arm', 'pi', 'uefi', 'aarch64']: continue 
bNeedAddSubPage = False subpage = doxygen.Page(self._ConvertPathToDoxygen(fullpath, pObj), 'public_include_%s' % file) subpage.AddDescription('<ul>\n') for subfile in os.listdir(fullpath): if subfile.lower() in _ignore_dir: continue bNeedAddSubPage = True subfullpath = os.path.join(fullpath, subfile) self.ProcessSourceFileForInclude(subfullpath, pObj, configFile) subpage.AddDescription('<li> \link %s \endlink </li>\n' % self._ConvertPathToDoxygen(subfullpath, pObj)) subpage.AddDescription('</ul>\n') if bNeedAddSubPage: bNeedAddIncludePage = True pageRoot.AddPage(subpage) topPage.AddDescription('</ul>\n') if bNeedAddIncludePage: pageRoot.AddPage(topPage) if pageRoot.GetSubpageCount() != 0: return [pageRoot] else: return [] def GenerateLibraryClassesSubPage(self, pObj, configFile): """ Generate sub page for library class for package. One DEC file maybe contains many library class sections for different architecture. @param fObj DEC file object. """ rootArray = [] pageRoot = doxygen.Page("Library Class", "%s_libraryclass" % pObj.GetName()) objs = pObj.GetFileObj().GetSectionObjectsByName('libraryclass', self._arch) if len(objs) == 0: return [] if self._arch is not None: for obj in objs: classPage = doxygen.Page(obj.GetClassName(), "lc_%s" % obj.GetClassName()) comments = obj.GetComment() if len(comments) != 0: classPage.AddDescription('<br>\n'.join(comments) + '<br>\n') pageRoot.AddPage(classPage) path = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile()) path = path[len(pObj.GetWorkspace()) + 1:] if len(comments) == 0: classPage.AddDescription('\copydoc %s<p>' % obj.GetHeaderFile()) section = doxygen.Section('ref', 'Refer to Header File') section.AddDescription('\link %s\n' % obj.GetHeaderFile()) section.AddDescription(' \endlink<p>\n') classPage.AddSection(section) fullPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile()) self.ProcessSourceFileForInclude(fullPath, pObj, configFile) else: archPageDict = {} for obj in objs: if obj.GetArch() not in archPageDict.keys(): archPageDict[obj.GetArch()] = doxygen.Page(obj.GetArch(), 'lc_%s' % obj.GetArch()) pageRoot.AddPage(archPageDict[obj.GetArch()]) subArchRoot = archPageDict[obj.GetArch()] classPage = doxygen.Page(obj.GetClassName(), "lc_%s" % obj.GetClassName()) comments = obj.GetComment() if len(comments) != 0: classPage.AddDescription('<br>\n'.join(comments) + '<br>\n') subArchRoot.AddPage(classPage) path = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile()) path = path[len(pObj.GetWorkspace()) + 1:] if len(comments) == 0: classPage.AddDescription('\copydoc %s<p>' % obj.GetHeaderFile()) section = doxygen.Section('ref', 'Refer to Header File') section.AddDescription('\link %s\n' % obj.GetHeaderFile()) section.AddDescription(' \endlink<p>\n') classPage.AddSection(section) fullPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetHeaderFile()) self.ProcessSourceFileForInclude(fullPath, pObj, configFile) rootArray.append(pageRoot) return rootArray def ProcessSourceFileForInclude(self, path, pObj, configFile, infObj=None): """ @param path the analysising file full path @param pObj package object @param configFile doxygen config file. """ if gInGui: wx.Yield() if not os.path.exists(path): ErrorMsg('Source file path %s does not exist!' 
% path) return if configFile.FileExists(path): return try: with open(path, 'r') as f: lines = f.readlines() except UnicodeDecodeError: return except IOError: ErrorMsg('Fail to open file %s' % path) return configFile.AddFile(path) no = 0 for no in range(len(lines)): if len(lines[no].strip()) == 0: continue if lines[no].strip()[:2] in ['##', '//', '/*', '*/']: continue index = lines[no].lower().find('include') #mo = IncludePattern.finditer(lines[no].lower()) mo = re.match(r"^#\s*include\s+[<\"]([\\/\w.]+)[>\"]$", lines[no].strip().lower()) if not mo: continue mo = re.match(r"^[#\w\s]+[<\"]([\\/\w.]+)[>\"]$", lines[no].strip()) filePath = mo.groups()[0] if filePath is None or len(filePath) == 0: continue # find header file in module's path firstly. fullPath = None if os.path.exists(os.path.join(os.path.dirname(path), filePath)): # Find the file in current directory fullPath = os.path.join(os.path.dirname(path), filePath).replace('\\', '/') else: # find in depedent package's include path incObjs = pObj.GetFileObj().GetSectionObjectsByName('includes') for incObj in incObjs: incPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), incObj.GetPath()).strip() incPath = os.path.realpath(os.path.join(incPath, filePath)) if os.path.exists(incPath): fullPath = incPath break if infObj is not None: pkgInfObjs = infObj.GetSectionObjectsByName('packages') for obj in pkgInfObjs: decObj = dec.DECFile(os.path.join(pObj.GetWorkspace(), obj.GetPath())) if not decObj: ErrorMsg ('Fail to create pacakge object for %s' % obj.GetPackageName()) continue if not decObj.Parse(): ErrorMsg ('Fail to load package object for %s' % obj.GetPackageName()) continue incObjs = decObj.GetSectionObjectsByName('includes') for incObj in incObjs: incPath = os.path.join(decObj.GetPackageRootPath(), incObj.GetPath()).replace('\\', '/') if os.path.exists(os.path.join(incPath, filePath)): fullPath = os.path.join(os.path.join(incPath, filePath)) break if fullPath is not None: break if fullPath is None and self.IsVerbose(): self.Log('Can not resolve header file %s for file %s in package %s\n' % (filePath, path, pObj.GetFileObj().GetFilename()), 'error') return else: fullPath = fullPath.replace('\\', '/') if self.IsVerbose(): self.Log('Preprocessing: Add include file %s for file %s\n' % (fullPath, path)) #LogMsg ('Preprocessing: Add include file %s for file %s' % (fullPath, path)) self.ProcessSourceFileForInclude(fullPath, pObj, configFile, infObj) def AddAllIncludeFiles(self, pObj, configFile): objs = pObj.GetFileObj().GetSectionObjectsByName('includes') for obj in objs: incPath = os.path.join(pObj.GetFileObj().GetPackageRootPath(), obj.GetPath()) for root, dirs, files in os.walk(incPath): for dir in dirs: if dir.lower() in _ignore_dir: dirs.remove(dir) for file in files: path = os.path.normpath(os.path.join(root, file)) configFile.AddFile(path.replace('/', '\\')) def GeneratePcdSubPages(self, pObj, configFile): """ Generate sub pages for package's PCD definition. 
@param pObj package object @param configFile config file object """ rootArray = [] objs = pObj.GetFileObj().GetSectionObjectsByName('pcd') if len(objs) == 0: return [] pcdRootPage = doxygen.Page('PCD', 'pcd_root_page') typeRootPageDict = {} typeArchRootPageDict = {} for obj in objs: if obj.GetPcdType() not in typeRootPageDict.keys(): typeRootPageDict[obj.GetPcdType()] = doxygen.Page(obj.GetPcdType(), 'pcd_%s_root_page' % obj.GetPcdType()) pcdRootPage.AddPage(typeRootPageDict[obj.GetPcdType()]) typeRoot = typeRootPageDict[obj.GetPcdType()] if self._arch is not None: pcdPage = doxygen.Page('%s' % obj.GetPcdName(), 'pcd_%s_%s_%s' % (obj.GetPcdType(), obj.GetArch(), obj.GetPcdName().split('.')[1])) pcdPage.AddDescription('<br>\n'.join(obj.GetComment()) + '<br>\n') section = doxygen.Section('PCDinformation', 'PCD Information') desc = '<TABLE>' desc += '<TR>' desc += '<TD><CAPTION>Name</CAPTION></TD>' desc += '<TD><CAPTION>Token Space</CAPTION></TD>' desc += '<TD><CAPTION>Token number</CAPTION></TD>' desc += '<TD><CAPTION>Data Type</CAPTION></TD>' desc += '<TD><CAPTION>Default Value</CAPTION></TD>' desc += '</TR>' desc += '<TR>' desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[1] desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[0] desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdToken() desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdDataType() desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdValue() desc += '</TR>' desc += '</TABLE>' section.AddDescription(desc) pcdPage.AddSection(section) typeRoot.AddPage(pcdPage) else: keystr = obj.GetPcdType() + obj.GetArch() if keystr not in typeArchRootPageDict.keys(): typeArchRootPage = doxygen.Page(obj.GetArch(), 'pcd_%s_%s_root_page' % (obj.GetPcdType(), obj.GetArch())) typeArchRootPageDict[keystr] = typeArchRootPage typeRoot.AddPage(typeArchRootPage) typeArchRoot = typeArchRootPageDict[keystr] pcdPage = doxygen.Page('%s' % obj.GetPcdName(), 'pcd_%s_%s_%s' % (obj.GetPcdType(), obj.GetArch(), obj.GetPcdName().split('.')[1])) pcdPage.AddDescription('<br>\n'.join(obj.GetComment()) + '<br>\n') section = doxygen.Section('PCDinformation', 'PCD Information') desc = '<TABLE>' desc += '<TR>' desc += '<TD><CAPTION>Name</CAPTION></TD>' desc += '<TD><CAPTION>Token Space</CAPTION></TD>' desc += '<TD><CAPTION>Token number</CAPTION></TD>' desc += '<TD><CAPTION>Data Type</CAPTION></TD>' desc += '<TD><CAPTION>Default Value</CAPTION></TD>' desc += '</TR>' desc += '<TR>' desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[1] desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdName().split('.')[0] desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdToken() desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdDataType() desc += '<TD><CAPTION>%s</CAPTION></TD>' % obj.GetPcdValue() desc += '</TR>' desc += '</TABLE>' section.AddDescription(desc) pcdPage.AddSection(section) typeArchRoot.AddPage(pcdPage) return [pcdRootPage] def _GenerateGuidSubPage(self, pObj, obj, configFile): guidPage = doxygen.Page('%s' % obj.GetName(), 'guid_%s_%s' % (obj.GetArch(), obj.GetName())) comments = obj.GetComment() if len(comments) != 0: guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>') section = doxygen.Section('BasicGuidInfo', 'GUID Information') desc = '<TABLE>' desc += '<TR>' desc += '<TD><CAPTION>GUID\'s Guid Name</CAPTION></TD><TD><CAPTION>GUID\'s Guid</CAPTION></TD>' desc += '</TR>' desc += '<TR>' desc += '<TD>%s</TD>' % obj.GetName() desc += '<TD>%s</TD>' % obj.GetGuid() desc += '</TR>' desc += 
'</TABLE>' section.AddDescription(desc) guidPage.AddSection(section) refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile) if refFile: relPath = refFile[len(pObj.GetWorkspace()) + 1:] if len(comments) == 0: guidPage.AddDescription(' \\copydoc %s <br>' % relPath) section = doxygen.Section('ref', 'Refer to Header File') section.AddDescription('\link %s\n' % relPath) section.AddDescription('\endlink\n') self.ProcessSourceFileForInclude(refFile, pObj, configFile) guidPage.AddSection(section) return guidPage def GenerateGuidSubPages(self, pObj, configFile): """ Generate sub pages for package's GUID definition. @param pObj package object @param configFilf doxygen config file object """ pageRoot = doxygen.Page('GUID', 'guid_root_page') objs = pObj.GetFileObj().GetSectionObjectsByName('guids', self._arch) if len(objs) == 0: return [] if self._arch is not None: for obj in objs: pageRoot.AddPage(self._GenerateGuidSubPage(pObj, obj, configFile)) else: guidArchRootPageDict = {} for obj in objs: if obj.GetArch() not in guidArchRootPageDict.keys(): guidArchRoot = doxygen.Page(obj.GetArch(), 'guid_arch_root_%s' % obj.GetArch()) pageRoot.AddPage(guidArchRoot) guidArchRootPageDict[obj.GetArch()] = guidArchRoot guidArchRoot = guidArchRootPageDict[obj.GetArch()] guidArchRoot.AddPage(self._GenerateGuidSubPage(pObj, obj, configFile)) return [pageRoot] def _GeneratePpiSubPage(self, pObj, obj, configFile): guidPage = doxygen.Page(obj.GetName(), 'ppi_page_%s' % obj.GetName()) comments = obj.GetComment() if len(comments) != 0: guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>') section = doxygen.Section('BasicPpiInfo', 'PPI Information') desc = '<TABLE>' desc += '<TR>' desc += '<TD><CAPTION>PPI\'s Guid Name</CAPTION></TD><TD><CAPTION>PPI\'s Guid</CAPTION></TD>' desc += '</TR>' desc += '<TR>' desc += '<TD>%s</TD>' % obj.GetName() desc += '<TD>%s</TD>' % obj.GetGuid() desc += '</TR>' desc += '</TABLE>' section.AddDescription(desc) guidPage.AddSection(section) refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile) if refFile: relPath = refFile[len(pObj.GetWorkspace()) + 1:] if len(comments) == 0: guidPage.AddDescription(' \\copydoc %s <br>' % relPath) section = doxygen.Section('ref', 'Refer to Header File') section.AddDescription('\link %s\n' % relPath) section.AddDescription('\endlink\n') self.ProcessSourceFileForInclude(refFile, pObj, configFile) guidPage.AddSection(section) return guidPage def GeneratePpiSubPages(self, pObj, configFile): """ Generate sub pages for package's GUID definition. 
@param pObj package object @param configFilf doxygen config file object """ pageRoot = doxygen.Page('PPI', 'ppi_root_page') objs = pObj.GetFileObj().GetSectionObjectsByName('ppis', self._arch) if len(objs) == 0: return [] if self._arch is not None: for obj in objs: pageRoot.AddPage(self._GeneratePpiSubPage(pObj, obj, configFile)) else: guidArchRootPageDict = {} for obj in objs: if obj.GetArch() not in guidArchRootPageDict.keys(): guidArchRoot = doxygen.Page(obj.GetArch(), 'ppi_arch_root_%s' % obj.GetArch()) pageRoot.AddPage(guidArchRoot) guidArchRootPageDict[obj.GetArch()] = guidArchRoot guidArchRoot = guidArchRootPageDict[obj.GetArch()] guidArchRoot.AddPage(self._GeneratePpiSubPage(pObj, obj, configFile)) return [pageRoot] def _GenerateProtocolSubPage(self, pObj, obj, configFile): guidPage = doxygen.Page(obj.GetName(), 'protocol_page_%s' % obj.GetName()) comments = obj.GetComment() if len(comments) != 0: guidPage.AddDescription('<br>'.join(obj.GetComment()) + '<br>') section = doxygen.Section('BasicProtocolInfo', 'PROTOCOL Information') desc = '<TABLE>' desc += '<TR>' desc += '<TD><CAPTION>PROTOCOL\'s Guid Name</CAPTION></TD><TD><CAPTION>PROTOCOL\'s Guid</CAPTION></TD>' desc += '</TR>' desc += '<TR>' desc += '<TD>%s</TD>' % obj.GetName() desc += '<TD>%s</TD>' % obj.GetGuid() desc += '</TR>' desc += '</TABLE>' section.AddDescription(desc) guidPage.AddSection(section) refFile = self.FindHeaderFileForGuid(pObj, obj.GetName(), configFile) if refFile: relPath = refFile[len(pObj.GetWorkspace()) + 1:] if len(comments) == 0: guidPage.AddDescription(' \\copydoc %s <br>' % relPath) section = doxygen.Section('ref', 'Refer to Header File') section.AddDescription('\link %s\n' % relPath) section.AddDescription('\endlink\n') self.ProcessSourceFileForInclude(refFile, pObj, configFile) guidPage.AddSection(section) return guidPage def GenerateProtocolSubPages(self, pObj, configFile): """ Generate sub pages for package's GUID definition. @param pObj package object @param configFilf doxygen config file object """ pageRoot = doxygen.Page('PROTOCOL', 'protocol_root_page') objs = pObj.GetFileObj().GetSectionObjectsByName('protocols', self._arch) if len(objs) == 0: return [] if self._arch is not None: for obj in objs: pageRoot.AddPage(self._GenerateProtocolSubPage(pObj, obj, configFile)) else: guidArchRootPageDict = {} for obj in objs: if obj.GetArch() not in guidArchRootPageDict.keys(): guidArchRoot = doxygen.Page(obj.GetArch(), 'protocol_arch_root_%s' % obj.GetArch()) pageRoot.AddPage(guidArchRoot) guidArchRootPageDict[obj.GetArch()] = guidArchRoot guidArchRoot = guidArchRootPageDict[obj.GetArch()] guidArchRoot.AddPage(self._GenerateProtocolSubPage(pObj, obj, configFile)) return [pageRoot] def FindHeaderFileForGuid(self, pObj, name, configFile): """ For declaration header file for GUID/PPI/Protocol. @param pObj package object @param name guid/ppi/protocol's name @param configFile config file object @return full path of header file and None if not found. """ startPath = pObj.GetFileObj().GetPackageRootPath() incPath = os.path.join(startPath, 'Include').replace('\\', '/') # if <PackagePath>/include exist, then search header under it. 
if os.path.exists(incPath): startPath = incPath for root, dirs, files in os.walk(startPath): for dir in dirs: if dir.lower() in _ignore_dir: dirs.remove(dir) for file in files: fPath = os.path.join(root, file) if not IsCHeaderFile(fPath): continue try: f = open(fPath, 'r') lines = f.readlines() f.close() except IOError: self.Log('Fail to open file %s\n' % fPath) continue for line in lines: if line.find(name) != -1 and \ line.find('extern') != -1: return fPath.replace('\\', '/') return None def GetPackageModuleList(self, pObj): """ Get all module's INF path under package's root path @param pObj package object @return arrary of INF full path """ mArray = [] packPath = pObj.GetFileObj().GetPackageRootPath() if not os.path.exists: return None for root, dirs, files in os.walk(packPath): for dir in dirs: if dir.lower() in _ignore_dir: dirs.remove(dir) for file in files: if CheckPathPostfix(file, 'inf'): fPath = os.path.join(root, file).replace('\\', '/') mArray.append(fPath) return mArray def GenerateModulePages(self, pObj, configFile): """ Generate sub pages for package's module which is under the package root directory. @param pObj package object @param configFilf doxygen config file object """ infList = self.GetPackageModuleList(pObj) rootPages = [] libObjs = [] modObjs = [] for infpath in infList: infObj = inf.INFFile(infpath) #infObj = INFFileObject.INFFile (pObj.GetWorkspacePath(), # inf) if not infObj: self.Log('Fail create INF object for %s' % inf) continue if not infObj.Parse(): self.Log('Fail to load INF file %s' % inf) continue if infObj.GetProduceLibraryClass() is not None: libObjs.append(infObj) else: modObjs.append(infObj) if len(libObjs) != 0: libRootPage = doxygen.Page('Libraries', 'lib_root_page') rootPages.append(libRootPage) for libInf in libObjs: libRootPage.AddPage(self.GenerateModulePage(pObj, libInf, configFile, True)) if len(modObjs) != 0: modRootPage = doxygen.Page('Modules', 'module_root_page') rootPages.append(modRootPage) for modInf in modObjs: modRootPage.AddPage(self.GenerateModulePage(pObj, modInf, configFile, False)) return rootPages def GenerateModulePage(self, pObj, infObj, configFile, isLib): """ Generate page for a module/library. 
@param infObj INF file object for module/library @param configFile doxygen config file object @param isLib Whether this module is library @param module doxygen page object """ workspace = pObj.GetWorkspace() refDecObjs = [] for obj in infObj.GetSectionObjectsByName('packages'): decObj = dec.DECFile(os.path.join(workspace, obj.GetPath())) if not decObj: ErrorMsg ('Fail to create pacakge object for %s' % obj.GetPackageName()) continue if not decObj.Parse(): ErrorMsg ('Fail to load package object for %s' % obj.GetPackageName()) continue refDecObjs.append(decObj) modPage = doxygen.Page('%s' % infObj.GetBaseName(), 'module_%s' % infObj.GetBaseName()) modPage.AddDescription(infObj.GetFileHeader()) basicInfSection = doxygen.Section('BasicModuleInformation', 'Basic Module Information') desc = "<TABLE>" for obj in infObj.GetSectionObjectsByName('defines'): key = obj.GetKey() value = obj.GetValue() if key not in _inf_key_description_mapping_table.keys(): continue if key == 'LIBRARY_CLASS' and value.find('|') != -1: clsname, types = value.split('|') desc += '<TR>' desc += '<TD><B>%s</B></TD>' % _inf_key_description_mapping_table[key] desc += '<TD>%s</TD>' % clsname desc += '</TR>' desc += '<TR>' desc += '<TD><B>Supported Module Types</B></TD>' desc += '<TD>%s</TD>' % types desc += '</TR>' else: desc += '<TR>' desc += '<TD><B>%s</B></TD>' % _inf_key_description_mapping_table[key] if key == 'EFI_SPECIFICATION_VERSION' and value == '0x00020000': value = '2.0' desc += '<TD>%s</TD>' % value desc += '</TR>' desc += '</TABLE>' basicInfSection.AddDescription(desc) modPage.AddSection(basicInfSection) # Add protocol section data = [] for obj in infObj.GetSectionObjectsByName('pcd', self._arch): data.append(obj.GetPcdName().strip()) if len(data) != 0: s = doxygen.Section('Pcds', 'Pcds') desc = "<TABLE>" desc += '<TR><TD><B>PCD Name</B></TD><TD><B>TokenSpace</B></TD><TD><B>Package</B></TD></TR>' for item in data: desc += '<TR>' desc += '<TD>%s</TD>' % item.split('.')[1] desc += '<TD>%s</TD>' % item.split('.')[0] pkgbasename = self.SearchPcdPackage(item, workspace, refDecObjs) desc += '<TD>%s</TD>' % pkgbasename desc += '</TR>' desc += "</TABLE>" s.AddDescription(desc) modPage.AddSection(s) # Add protocol section #sects = infObj.GetSectionByString('protocol') data = [] #for sect in sects: for obj in infObj.GetSectionObjectsByName('protocol', self._arch): data.append(obj.GetName().strip()) if len(data) != 0: s = doxygen.Section('Protocols', 'Protocols') desc = "<TABLE>" desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>' for item in data: desc += '<TR>' desc += '<TD>%s</TD>' % item pkgbasename = self.SearchProtocolPackage(item, workspace, refDecObjs) desc += '<TD>%s</TD>' % pkgbasename desc += '</TR>' desc += "</TABLE>" s.AddDescription(desc) modPage.AddSection(s) # Add ppi section #sects = infObj.GetSectionByString('ppi') data = [] #for sect in sects: for obj in infObj.GetSectionObjectsByName('ppi', self._arch): data.append(obj.GetName().strip()) if len(data) != 0: s = doxygen.Section('Ppis', 'Ppis') desc = "<TABLE>" desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>' for item in data: desc += '<TR>' desc += '<TD>%s</TD>' % item pkgbasename = self.SearchPpiPackage(item, workspace, refDecObjs) desc += '<TD>%s</TD>' % pkgbasename desc += '</TR>' desc += "</TABLE>" s.AddDescription(desc) modPage.AddSection(s) # Add guid section #sects = infObj.GetSectionByString('guid') data = [] #for sect in sects: for obj in infObj.GetSectionObjectsByName('guid', self._arch): 
data.append(obj.GetName().strip()) if len(data) != 0: s = doxygen.Section('Guids', 'Guids') desc = "<TABLE>" desc += '<TR><TD><B>Name</B></TD><TD><B>Package</B></TD></TR>' for item in data: desc += '<TR>' desc += '<TD>%s</TD>' % item pkgbasename = self.SearchGuidPackage(item, workspace, refDecObjs) desc += '<TD>%s</TD>' % pkgbasename desc += '</TR>' desc += "</TABLE>" s.AddDescription(desc) modPage.AddSection(s) section = doxygen.Section('LibraryClasses', 'Library Classes') desc = "<TABLE>" desc += '<TR><TD><B>Name</B></TD><TD><B>Type</B></TD><TD><B>Package</B></TD><TD><B>Header File</B></TD></TR>' if isLib: desc += '<TR>' desc += '<TD>%s</TD>' % infObj.GetProduceLibraryClass() desc += '<TD>Produce</TD>' try: pkgname, hPath = self.SearchLibraryClassHeaderFile(infObj.GetProduceLibraryClass(), workspace, refDecObjs) except: self.Log ('fail to get package header file for lib class %s' % infObj.GetProduceLibraryClass()) pkgname = 'NULL' hPath = 'NULL' desc += '<TD>%s</TD>' % pkgname if hPath != "NULL": desc += '<TD>\link %s \endlink</TD>' % hPath else: desc += '<TD>%s</TD>' % hPath desc += '</TR>' for lcObj in infObj.GetSectionObjectsByName('libraryclasses', self._arch): desc += '<TR>' desc += '<TD>%s</TD>' % lcObj.GetClass() retarr = self.SearchLibraryClassHeaderFile(lcObj.GetClass(), workspace, refDecObjs) if retarr is not None: pkgname, hPath = retarr else: self.Log('Fail find the library class %s definition from module %s dependent package!' % (lcObj.GetClass(), infObj.GetFilename()), 'error') pkgname = 'NULL' hPath = 'NULL' desc += '<TD>Consume</TD>' desc += '<TD>%s</TD>' % pkgname desc += '<TD>\link %s \endlink</TD>' % hPath desc += '</TR>' desc += "</TABLE>" section.AddDescription(desc) modPage.AddSection(section) section = doxygen.Section('SourceFiles', 'Source Files') section.AddDescription('<ul>\n') for obj in infObj.GetSourceObjects(self._arch, self._tooltag): sPath = infObj.GetModuleRootPath() sPath = os.path.join(sPath, obj.GetSourcePath()).replace('\\', '/').strip() if sPath.lower().endswith('.uni') or sPath.lower().endswith('.s') or sPath.lower().endswith('.asm') or sPath.lower().endswith('.nasm'): newPath = self.TranslateUniFile(sPath) configFile.AddFile(newPath) newPath = newPath[len(pObj.GetWorkspace()) + 1:] section.AddDescription('<li> \link %s \endlink </li>' % newPath) else: self.ProcessSourceFileForInclude(sPath, pObj, configFile, infObj) sPath = sPath[len(pObj.GetWorkspace()) + 1:] section.AddDescription('<li>\link %s \endlink </li>' % sPath) section.AddDescription('</ul>\n') modPage.AddSection(section) #sects = infObj.GetSectionByString('depex') data = [] #for sect in sects: for obj in infObj.GetSectionObjectsByName('depex'): data.append(str(obj)) if len(data) != 0: s = doxygen.Section('DependentSection', 'Module Dependencies') s.AddDescription('<br>'.join(data)) modPage.AddSection(s) return modPage def TranslateUniFile(self, path): newpath = path + '.dox' #import core.textfile as textfile #file = textfile.TextFile(path) try: file = open(path, 'r') except (IOError, OSError) as msg: return None t = file.read() file.close() output = '/** @file \n' #output = '<html><body>' arr = t.split('\r\n') for line in arr: if line.find('@file') != -1: continue if line.find('*/') != -1: continue line = line.strip() if line.strip().startswith('/'): arr = line.split(' ') if len(arr) > 1: line = ' '.join(arr[1:]) else: continue output += '%s<br>\n' % line output += '**/' if os.path.exists(newpath): os.remove(newpath) file = open(newpath, "w") file.write(output) file.close() return 
newpath def SearchPcdPackage(self, pcdname, workspace, decObjs): for decObj in decObjs: for pcd in decObj.GetSectionObjectsByName('pcd'): if pcdname == pcd.GetPcdName(): return decObj.GetBaseName() return None def SearchProtocolPackage(self, protname, workspace, decObjs): for decObj in decObjs: for proto in decObj.GetSectionObjectsByName('protocol'): if protname == proto.GetName(): return decObj.GetBaseName() return None def SearchPpiPackage(self, ppiname, workspace, decObjs): for decObj in decObjs: for ppi in decObj.GetSectionObjectsByName('ppi'): if ppiname == ppi.GetName(): return decObj.GetBaseName() return None def SearchGuidPackage(self, guidname, workspace, decObjs): for decObj in decObjs: for guid in decObj.GetSectionObjectsByName('guid'): if guidname == guid.GetName(): return decObj.GetBaseName() return None def SearchLibraryClassHeaderFile(self, className, workspace, decObjs): for decObj in decObjs: for cls in decObj.GetSectionObjectsByName('libraryclasses'): if cls.GetClassName().strip() == className: path = cls.GetHeaderFile().strip() path = os.path.join(decObj.GetPackageRootPath(), path) path = path[len(workspace) + 1:] return decObj.GetBaseName(), path.replace('\\', '/') return None def _ConvertPathToDoxygen(self, path, pObj): pRootPath = pObj.GetWorkspace() path = path[len(pRootPath) + 1:] return path.replace('\\', '/') def IsCHeaderFile(path): return CheckPathPostfix(path, 'h') def CheckPathPostfix(path, str): index = path.rfind('.') if index == -1: return False if path[index + 1:].lower() == str.lower(): return True return False
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/doxygengen.py
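# --- Usage sketch (not part of the source above) ----------------------------
# doxygengen.py is a near-verbatim twin of doxygengen_spec.py; the visible
# difference is that this variant renders library-class header files as
# "\link ... \endlink" cross references instead of bare paths. Either
# generator is driven the same way: construct a PackageDocumentAction,
# register a callback that runs the doxygen binary, then call Generate().
# The paths below are assumptions, and pkg_obj is a parameter here because
# building a package object is out of scope. Note that InitializeConfigFile()
# calls self._tooltag.lower() unconditionally, so pass a string such as
# 'GNU', not None.
import subprocess
from plugins.EdkPlugins.edk2.model.doxygengen import PackageDocumentAction

def run_doxygen(dox_path, config_path):
    # invoked by DoxygenAction.Generate(); must return a truthy value on success
    return subprocess.call([dox_path, config_path]) == 0

def generate_package_doc(pkg_obj, output_dir='/tmp/MdePkgDoc'):
    # pkg_obj: an already-parsed package object exposing GetName(),
    # GetWorkspace() and GetFileObj()
    action = PackageDocumentAction('/usr/bin/doxygen',  # doxygen executable
                                   None,                # chm compiler (html mode)
                                   output_dir,
                                   pkg_obj,
                                   mode='html', arch='X64', tooltag='GNU',
                                   verbose=True)
    action.RegisterCallbackDoxygenProcess(run_doxygen)
    return action.Generate()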
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/__init__.py
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

from plugins.EdkPlugins.basemodel import ini
import re, os
from plugins.EdkPlugins.basemodel.message import *

class DECFile(ini.BaseINIFile):

    def GetSectionInstance(self, parent, name, isCombined=False):
        return DECSection(parent, name, isCombined)

    def GetComponents(self):
        return self.GetSectionByName('Components')

    def GetPackageRootPath(self):
        return os.path.dirname(self.GetFilename()).strip()

    def GetBaseName(self):
        return self.GetDefine("PACKAGE_NAME").strip()

    def GetVersion(self):
        return self.GetDefine("PACKAGE_VERSION").strip()

    def GetSectionObjectsByName(self, name, arch=None):
        arr = []
        sects = self.GetSectionByName(name)
        for sect in sects:
            # skip sections whose architecture does not match
            if not sect.IsArchMatch(arch):
                continue
            for obj in sect.GetObjects():
                arr.append(obj)
        return arr

class DECSection(ini.BaseINISection):
    def GetSectionINIObject(self, parent):
        type = self.GetType()
        if type.lower().find('defines') != -1:
            return DECDefineSectionObject(self)
        if type.lower().find('includes') != -1:
            return DECIncludeObject(self)
        if type.lower().find('pcd') != -1:
            return DECPcdObject(self)
        if type.lower() == 'libraryclasses':
            return DECLibraryClassObject(self)
        if type.lower() == 'guids':
            return DECGuidObject(self)
        if type.lower() == 'ppis':
            return DECPpiObject(self)
        if type.lower() == 'protocols':
            return DECProtocolObject(self)
        return DECSectionObject(self)

    def GetType(self):
        arr = self._name.split('.')
        return arr[0].strip()

    def GetArch(self):
        arr = self._name.split('.')
        if len(arr) == 1:
            return 'common'
        return arr[1]

    def IsArchMatch(self, arch):
        if arch is None or self.GetArch() == 'common':
            return True
        if self.GetArch().lower() != arch.lower():
            return False
        return True

class DECSectionObject(ini.BaseINISectionObject):
    def GetArch(self):
        return self.GetParent().GetArch()

class DECDefineSectionObject(DECSectionObject):
    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._key = None
        self._value = None

    def Parse(self):
        assert (self._start == self._end), 'The object in define section must be in single line'

        line = self.GetLineByOffset(self._start).strip()
        line = line.split('#')[0]
        arr = line.split('=')
        if len(arr) != 2:
            ErrorMsg('Invalid define section object',
                     self.GetFilename(),
                     self.GetParent().GetName())
            return False

        self._key = arr[0].strip()
        self._value = arr[1].strip()
        return True

    def GetKey(self):
        return self._key

    def GetValue(self):
        return self._value

class DECGuidObject(DECSectionObject):
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._name = None

    def Parse(self):
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        self._name = line.split('=')[0].strip()
        self._guid = line.split('=')[1].strip()
        objdict = DECGuidObject._objs
        if self._name not in objdict.keys():
            objdict[self._name] = [self]
        else:
            objdict[self._name].append(self)
        return True

    def GetName(self):
        return self._name

    def GetGuid(self):
        return self._guid

    def Destroy(self):
        objdict = DECGuidObject._objs
        objdict[self._name].remove(self)
        if len(objdict[self._name]) == 0:
            del objdict[self._name]

    @staticmethod
    def GetObjectDict():
        return DECGuidObject._objs

class DECPpiObject(DECSectionObject):
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._name = None

    def Parse(self):
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        self._name = line.split('=')[0].strip()
        self._guid = line.split('=')[1].strip()
        objdict = DECPpiObject._objs
        if self._name not in objdict.keys():
            objdict[self._name] = [self]
        else:
            objdict[self._name].append(self)
        return True

    def GetName(self):
        return self._name

    def GetGuid(self):
        return self._guid

    def Destroy(self):
        objdict = DECPpiObject._objs
        objdict[self._name].remove(self)
        if len(objdict[self._name]) == 0:
            del objdict[self._name]

    @staticmethod
    def GetObjectDict():
        return DECPpiObject._objs

class DECProtocolObject(DECSectionObject):
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self._name = None

    def Parse(self):
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        self._name = line.split('=')[0].strip()
        self._guid = line.split('=')[1].strip()
        objdict = DECProtocolObject._objs
        if self._name not in objdict.keys():
            objdict[self._name] = [self]
        else:
            objdict[self._name].append(self)
        return True

    def GetName(self):
        return self._name

    def GetGuid(self):
        return self._guid

    def Destroy(self):
        objdict = DECProtocolObject._objs
        objdict[self._name].remove(self)
        if len(objdict[self._name]) == 0:
            del objdict[self._name]

    @staticmethod
    def GetObjectDict():
        return DECProtocolObject._objs

class DECLibraryClassObject(DECSectionObject):
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self.mClassName = None
        self.mHeaderFile = None

    def Parse(self):
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        self.mClassName, self.mHeaderFile = line.split('|')
        objdict = DECLibraryClassObject._objs
        if self.mClassName not in objdict.keys():
            objdict[self.mClassName] = [self]
        else:
            objdict[self.mClassName].append(self)
        return True

    def GetClassName(self):
        return self.mClassName

    def GetName(self):
        return self.mClassName

    def GetHeaderFile(self):
        return self.mHeaderFile

    def Destroy(self):
        objdict = DECLibraryClassObject._objs
        objdict[self.mClassName].remove(self)
        if len(objdict[self.mClassName]) == 0:
            del objdict[self.mClassName]

    @staticmethod
    def GetObjectDict():
        return DECLibraryClassObject._objs

class DECIncludeObject(DECSectionObject):
    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)

    def GetPath(self):
        return self.GetLineByOffset(self._start).split('#')[0].strip()

class DECPcdObject(DECSectionObject):
    _objs = {}

    def __init__(self, parent):
        DECSectionObject.__init__(self, parent)
        self.mPcdName = None
        self.mPcdDefaultValue = None
        self.mPcdDataType = None
        self.mPcdToken = None

    def Parse(self):
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        (self.mPcdName, self.mPcdDefaultValue, self.mPcdDataType, self.mPcdToken) = line.split('|')
        objdict = DECPcdObject._objs
        if self.mPcdName not in objdict.keys():
            objdict[self.mPcdName] = [self]
        else:
            objdict[self.mPcdName].append(self)
        return True

    def Destroy(self):
        objdict = DECPcdObject._objs
        objdict[self.mPcdName].remove(self)
        if len(objdict[self.mPcdName]) == 0:
            del objdict[self.mPcdName]

    def GetPcdType(self):
        return self.GetParent().GetType()

    def GetPcdName(self):
        return self.mPcdName

    def GetPcdValue(self):
        return self.mPcdDefaultValue

    def GetPcdDataType(self):
        return self.mPcdDataType

    def GetPcdToken(self):
        return self.mPcdToken

    def GetName(self):
        return self.GetPcdName().split('.')[1]

    @staticmethod
    def GetObjectDict():
        return DECPcdObject._objs
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/dec.py
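A brief usage sketch for the DEC model above, not part of the repository file: it assumes the `plugins` package is importable, that the `(filename, parent)` constructor signature inferred from how `SurfaceObject.Load` instantiates file objects is correct, and that `MdePkg/MdePkg.dec` exists under an assumed edk2 checkout path.

import os
from plugins.EdkPlugins.edk2.model.dec import DECFile

WORKSPACE = '/path/to/edk2'   # placeholder workspace root, an assumption
dec = DECFile(os.path.join(WORKSPACE, 'MdePkg/MdePkg.dec'), None)  # parent is unused in this sketch
if dec.Parse():
    print(dec.GetBaseName(), dec.GetVersion())
    # Walk the GUID declarations that apply to the common architecture
    for guid in dec.GetSectionObjectsByName('guids', arch='common'):
        print(guid.GetName(), guid.GetGuid())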
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

from plugins.EdkPlugins.basemodel import ini
import re, os
from plugins.EdkPlugins.basemodel.message import *

class DSCFile(ini.BaseINIFile):

    def GetSectionInstance(self, parent, name, isCombined=False):
        return DSCSection(parent, name, isCombined)

    def GetComponents(self):
        return self.GetSectionObjectsByName('Components')

class DSCSection(ini.BaseINISection):

    def GetSectionINIObject(self, parent):
        type = self.GetType()
        if type.lower() == 'components':
            return DSCComponentObject(self)
        if type.lower() == 'libraryclasses':
            return DSCLibraryClassObject(self)
        if type.lower() == 'defines':
            return ini.BaseINISectionObject(self)
        if type.lower() == 'pcdsfeatureflag' or \
           type.lower() == 'pcdsfixedatbuild' or \
           type.lower() == 'pcdspatchableinmodule' or \
           type.lower() == 'pcdsdynamicdefault' or \
           type.lower() == 'pcdsdynamicex' or \
           type.lower() == 'pcdsdynamichii' or \
           type.lower() == 'pcdsdynamicvpd':
            return DSCPcdObject(self)
        return DSCSectionObject(self)

    def GetType(self):
        arr = self._name.split('.')
        return arr[0].strip()

    def GetArch(self):
        arr = self._name.split('.')
        if len(arr) == 1:
            return 'common'
        return arr[1]

    def GetModuleType(self):
        arr = self._name.split('.')
        if len(arr) < 3:
            return 'common'
        return arr[2]

class DSCSectionObject(ini.BaseINISectionObject):

    def GetArch(self):
        return self.GetParent().GetArch()

class DSCPcdObject(DSCSectionObject):

    def __init__(self, parent):
        ini.BaseINISectionObject.__init__(self, parent)
        self._name = None

    def Parse(self):
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        self._name = line.split('|')[0]
        self._value = line.split('|')[1]
        return True

    def GetPcdName(self):
        return self._name

    def GetPcdType(self):
        return self.GetParent().GetType()

    def GetPcdValue(self):
        return self._value

class DSCLibraryClassObject(DSCSectionObject):

    def __init__(self, parent):
        ini.BaseINISectionObject.__init__(self, parent)

    def GetClass(self):
        line = self.GetLineByOffset(self._start)
        return line.split('#')[0].split('|')[0].strip()

    def GetInstance(self):
        line = self.GetLineByOffset(self._start)
        return line.split('#')[0].split('|')[1].strip()

    def GetArch(self):
        return self.GetParent().GetArch()

    def GetModuleType(self):
        return self.GetParent().GetModuleType()

class DSCComponentObject(DSCSectionObject):

    def __init__(self, parent):
        ini.BaseINISectionObject.__init__(self, parent)
        self._OveridePcds = {}
        self._OverideLibraries = {}
        self._Filename = ''

    def __del__(self):
        self._OverideLibraries.clear()
        self._OveridePcds.clear()
        ini.BaseINISectionObject.__del__(self)

    def AddOverideLib(self, libclass, libinstPath):
        if libclass not in self._OverideLibraries.keys():
            self._OverideLibraries[libclass] = libinstPath

    def AddOveridePcd(self, name, type, value=None):
        if type not in self._OveridePcds.keys():
            self._OveridePcds[type] = []
        self._OveridePcds[type].append((name, value))

    def GetOverideLibs(self):
        return self._OverideLibraries

    def GetArch(self):
        return self.GetParent().GetArch()

    def GetOveridePcds(self):
        return self._OveridePcds

    def GetFilename(self):
        return self.GetLineByOffset(self._start).split('#')[0].split('{')[0].strip()

    def SetFilename(self, fName):
        self._Filename = fName

    def Parse(self):
        if (self._start < self._end):
            #
            # The first line is the INF path and can be ignored;
            # the end line is '}' and can be ignored too.
            #
            curr = self._start + 1
            end = self._end - 1
            OverideName = ''
            while (curr <= end):
                line = self.GetLineByOffset(curr).strip()
                if len(line) > 0 and line[0] != '#':
                    line = line.split('#')[0].strip()
                    if line[0] == '<':
                        OverideName = line[1:len(line) - 1]
                    elif OverideName.lower() == 'libraryclasses':
                        arr = line.split('|')
                        self._OverideLibraries[arr[0].strip()] = arr[1].strip()
                    elif OverideName.lower() == 'pcds':
                        ErrorMsg('EDES does not support PCD override',
                                 self.GetFilename(),
                                 self.GetParent().GetLinenumberByOffset(curr))
                curr = curr + 1
        return True

    def GenerateLines(self):
        lines = []
        hasLib = False
        hasPcd = False
        if len(self._OverideLibraries) != 0:
            hasLib = True
        if len(self._OveridePcds) != 0:
            hasPcd = True

        if hasLib or hasPcd:
            lines.append(('  %s {\n' % self._Filename))
        else:
            lines.append(('  %s \n' % self._Filename))
            return lines

        if hasLib:
            lines.append('    <LibraryClasses>\n')
            for libKey in self._OverideLibraries.keys():
                lines.append('      %s|%s\n' % (libKey, self._OverideLibraries[libKey]))
        if hasPcd:
            for key in self._OveridePcds.keys():
                lines.append('    <%s>\n' % key)
                for name, value in self._OveridePcds[key]:
                    if value is not None:
                        lines.append('      %s|%s\n' % (name, value))
                    else:
                        lines.append('      %s\n' % name)
        if hasLib or hasPcd:
            lines.append('  }\n')
        return lines
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/dsc.py
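A minimal sketch of how DSCComponentObject assembles a component override block via GenerateLines. The component is built by hand rather than parsed, and the parent argument is stubbed with None on the assumption that the base class tolerates it and that GenerateLines itself never touches the parent; both points are assumptions about this particular call path, not guarantees from the file above.

from plugins.EdkPlugins.edk2.model.dsc import DSCComponentObject

comp = DSCComponentObject(None)   # parent assumed unused by GenerateLines
comp.SetFilename('MdeModulePkg/Application/HelloWorld/HelloWorld.inf')
comp.AddOverideLib('DebugLib', 'MdePkg/Library/BaseDebugLibNull/BaseDebugLibNull.inf')
comp.AddOveridePcd('gEfiMdePkgTokenSpaceGuid.PcdDebugPropertyMask', 'PcdsFixedAtBuild', '0x0f')
# Prints the INF path followed by <LibraryClasses> and <PcdsFixedAtBuild> override sections
print(''.join(comp.GenerateLines()))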
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

from plugins.EdkPlugins.basemodel import ini
import re, os
from plugins.EdkPlugins.basemodel.message import *

class INFFile(ini.BaseINIFile):
    _libobjs = {}

    def GetSectionInstance(self, parent, name, isCombined=False):
        return INFSection(parent, name, isCombined)

    def GetProduceLibraryClass(self):
        obj = self.GetDefine("LIBRARY_CLASS")
        if obj is None:
            return None
        return obj.split('|')[0].strip()

    def GetSectionObjectsByName(self, name, arch=None):
        arr = []
        sects = self.GetSectionByName(name)
        for sect in sects:
            # skip unmatched architecture content
            if not sect.IsArchMatch(arch):
                continue
            for obj in sect.GetObjects():
                arr.append(obj)
        return arr

    def GetSourceObjects(self, arch=None, tool=None):
        arr = []
        sects = self.GetSectionByName('sources')
        for sect in sects:
            # skip unmatched architecture content
            if not sect.IsArchMatch(arch):
                continue
            for obj in sect.GetObjects():
                if not obj.IsMatchFamily(tool):
                    continue
                arr.append(obj)
        return arr

    def Parse(self):
        if not ini.BaseINIFile.Parse(self):
            return False
        classname = self.GetProduceLibraryClass()
        if classname is not None:
            libobjdict = INFFile._libobjs
            if classname in libobjdict:
                if self not in libobjdict[classname]:
                    libobjdict[classname].append(self)
            else:
                libobjdict[classname] = [self]
        return True

    def GetBaseName(self):
        return self.GetDefine("BASE_NAME").strip()

    def GetModuleRootPath(self):
        return os.path.dirname(self.GetFilename())

    def Clear(self):
        classname = self.GetProduceLibraryClass()
        if classname is not None:
            libobjdict = INFFile._libobjs
            libobjdict[classname].remove(self)
            if len(libobjdict[classname]) == 0:
                del libobjdict[classname]
        ini.BaseINIFile.Clear(self)

class INFSection(ini.BaseINISection):

    def GetSectionINIObject(self, parent):
        type = self.GetType()
        if type.lower() == 'libraryclasses':
            return INFLibraryClassObject(self)
        if type.lower() == 'sources':
            return INFSourceObject(self)
        if type.lower().find('pcd') != -1:
            return INFPcdObject(self)
        if type.lower() == 'packages':
            return INFDependentPackageObject(self)
        if type.lower() in ['guids', 'protocols', 'ppis']:
            return INFGuidObject(self)
        if type.lower() == 'defines':
            return INFDefineSectionObject(self)
        return INFSectionObject(self)

    def GetType(self):
        arr = self._name.split('.')
        return arr[0].strip()

    def GetArch(self):
        arr = self._name.split('.')
        if len(arr) == 1:
            return 'common'
        return arr[1]

    def IsArchMatch(self, arch):
        if arch is None or self.GetArch() == 'common':
            return True
        if self.GetArch().lower() != arch.lower():
            return False
        return True

class INFSectionObject(ini.BaseINISectionObject):

    def GetArch(self):
        return self.GetParent().GetArch()

class INFDefineSectionObject(INFSectionObject):

    def __init__(self, parent):
        INFSectionObject.__init__(self, parent)
        self._key = None
        self._value = None

    def Parse(self):
        assert (self._start == self._end), 'The object in a define section must be a single line'

        line = self.GetLineByOffset(self._start).strip()
        line = line.split('#')[0]
        arr = line.split('=')
        if len(arr) != 2:
            ErrorMsg('Invalid define section object',
                     self.GetFilename(),
                     self._start)
            return False

        self._key = arr[0].strip()
        self._value = arr[1].strip()
        return True

    def GetKey(self):
        return self._key

    def GetValue(self):
        return self._value

class INFLibraryClassObject(INFSectionObject):
    _objs = {}

    def __init__(self, parent):
        INFSectionObject.__init__(self, parent)
        self._classname = None

    def GetClass(self):
        return self._classname

    def Parse(self):
        self._classname = self.GetLineByOffset(self._start).split('#')[0].strip()
        objdict = INFLibraryClassObject._objs
        if self._classname in objdict:
            objdict[self._classname].append(self)
        else:
            objdict[self._classname] = [self]
        return True

    def Destroy(self):
        objdict = INFLibraryClassObject._objs
        objdict[self._classname].remove(self)
        if len(objdict[self._classname]) == 0:
            del objdict[self._classname]

    def GetName(self):
        return self._classname

    @staticmethod
    def GetObjectDict():
        return INFLibraryClassObject._objs

class INFDependentPackageObject(INFSectionObject):

    def GetPath(self):
        return self.GetLineByOffset(self._start).split('#')[0].strip()

class INFSourceObject(INFSectionObject):
    _objs = {}

    def __init__(self, parent):
        INFSectionObject.__init__(self, parent)
        self.mSourcename = None
        self.mToolCode = None
        self.mFamily = None
        self.mTagName = None
        self.mFeaturePcd = None
        self.mFilename = None

    def GetSourcePath(self):
        return self.mSourcename

    def GetSourceFullPath(self):
        path = os.path.dirname(self.GetFilename())
        path = os.path.join(path, self.GetSourcePath())
        return os.path.normpath(path)

    def GetToolCode(self):
        return self.mToolCode

    def GetFamily(self):
        return self.mFamily

    def GetTagName(self):
        return self.mTagName

    def GetFeaturePcd(self):
        return self.mFeaturePcd

    def Parse(self):
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        arr = line.split('|')
        self.mSourcename = arr[0].strip()
        if len(arr) >= 2:
            self.mFamily = arr[1].strip()
        if len(arr) >= 3:
            self.mTagName = arr[2].strip()
        if len(arr) >= 4:
            self.mToolCode = arr[3].strip()
        if len(arr) >= 5:
            self.mFeaturePcd = arr[4].strip()

        self.mFilename = os.path.basename(self.GetSourceFullPath())
        objdict = INFSourceObject._objs
        if self.mFilename not in objdict:
            objdict[self.mFilename] = [self]
        else:
            objdict[self.mFilename].append(self)
        return True

    def GetName(self):
        return self.mFilename

    def Destroy(self):
        objdict = INFSourceObject._objs
        objdict[self.mFilename].remove(self)
        if len(objdict[self.mFilename]) == 0:
            del objdict[self.mFilename]

    def IsMatchFamily(self, family):
        if family is None:
            return True
        if self.mFamily is not None:
            if family.strip().lower() == self.mFamily.lower():
                return True
            else:
                return False
        else:
            fname = self.GetSourcePath()
            if fname.endswith('.S') and family.lower() != 'gcc':
                return False
            if fname.endswith('.s') and (self.GetArch().lower() != 'ipf' and self.GetArch().lower() != 'common'):
                return False
            if fname.lower().endswith('.asm') and (family.lower() != 'msft' and family.lower() != 'intel'):
                return False
        return True

    @staticmethod
    def GetObjectDict():
        return INFSourceObject._objs

class INFPcdObject(INFSectionObject):
    _objs = {}

    def __init__(self, parent):
        INFSectionObject.__init__(self, parent)
        self.mPcdType = None
        self.mDefaultValue = None
        self.mPcdName = None

    @staticmethod
    def GetObjectDict():
        return INFPcdObject._objs

    def Parse(self):
        line = self.GetLineByOffset(self._start).strip().split('#')[0]
        arr = line.split('|')
        self.mPcdName = arr[0].strip()
        if len(arr) >= 2:
            self.mDefaultValue = arr[1].strip()

        objdict = INFPcdObject._objs
        if self.GetName() in objdict:
            if self not in objdict[self.GetName()]:
                objdict[self.GetName()].append(self)
        else:
            objdict[self.GetName()] = [self]
        return True

    def GetPcdName(self):
        return self.mPcdName

    def GetPcdType(self):
        return self.GetParent().GetType()

    def GetName(self):
        return self.mPcdName.split('.')[1]

    def Destroy(self):
        objdict = INFPcdObject._objs
        objdict[self.GetName()].remove(self)
        if len(objdict[self.GetName()]) == 0:
            del objdict[self.GetName()]

class INFGuidObject(INFSectionObject):

    def __init__(self, parent):
        INFSectionObject.__init__(self, parent)
        self._name = None

    def Parse(self):
        line = self.GetLineByOffset(self._start).strip().split('#')[0].split("|")[0]
        self._name = line.strip()
        return True

    def GetName(self):
        return self._name
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/inf.py
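A short illustrative sketch of the source-filtering entry point above; as with the DEC example, the `(filename, parent)` constructor signature is an assumption inferred from SurfaceObject.Load, and the INF path is a placeholder for a real module in an edk2 checkout.

import os
from plugins.EdkPlugins.edk2.model.inf import INFFile

WORKSPACE = '/path/to/edk2'   # placeholder workspace root, an assumption
inf = INFFile(os.path.join(WORKSPACE, 'MdePkg/Library/BaseLib/BaseLib.inf'), None)
if inf.Parse():
    # Only sources whose section arch matches X64 and whose tool family
    # matches GCC (per IsMatchFamily's rules) are returned.
    for src in inf.GetSourceObjects(arch='X64', tool='GCC'):
        print(src.GetSourcePath(), src.GetFamily())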
## @file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent

from plugins.EdkPlugins.basemodel import ini
from plugins.EdkPlugins.edk2.model import dsc
from plugins.EdkPlugins.edk2.model import inf
from plugins.EdkPlugins.edk2.model import dec
import os
from plugins.EdkPlugins.basemodel.message import *

class SurfaceObject(object):
    _objs = {}

    def __new__(cls, *args, **kwargs):
        """Maintain only a single instance of this object
        @return: instance of this class
        """
        obj = object.__new__(cls)
        if "None" not in cls._objs:
            cls._objs["None"] = []
        cls._objs["None"].append(obj)
        return obj

    def __init__(self, parent, workspace):
        self._parent = parent
        self._fileObj = None
        self._workspace = workspace
        self._isModify = False
        self._modifiedObjs = []

    def __del__(self):
        pass

    def Destroy(self):
        key = self.GetRelativeFilename()
        self.GetFileObj().Destroy(self)
        del self._fileObj
        # dereference self from the _objs array
        assert key in self._objs, "on destroy, object is not in the object list"
        assert self in self._objs[key], "on destroy, object is not in the object list"
        self._objs[key].remove(self)
        if len(self._objs[key]) == 0:
            del self._objs[key]

    def GetParent(self):
        return self._parent

    def GetWorkspace(self):
        return self._workspace

    def GetFileObjectClass(self):
        return ini.BaseINIFile

    def GetFilename(self):
        return self.GetFileObj().GetFilename()

    def GetFileObj(self):
        return self._fileObj

    def GetRelativeFilename(self):
        fullPath = self.GetFilename()
        return fullPath[len(self._workspace) + 1:]

    def Load(self, relativePath):
        # if it has already been loaded, return directly
        if self._fileObj is not None:
            return True

        relativePath = os.path.normpath(relativePath)
        fullPath = os.path.join(self._workspace, relativePath)
        fullPath = os.path.normpath(fullPath)
        if not os.path.exists(fullPath):
            ErrorMsg("file does not exist!", fullPath)
            return False

        self._fileObj = self.GetFileObjectClass()(fullPath, self)
        if not self._fileObj.Parse():
            ErrorMsg("Fail to parse file!", fullPath)
            return False

        # move self from the None list to the list keyed by filename
        cls = self.__class__
        if self not in cls._objs["None"]:
            ErrorMsg("Surface object was not created in the None list")
        cls._objs["None"].remove(self)
        if relativePath not in cls._objs:
            cls._objs[relativePath] = []
        cls._objs[relativePath].append(self)
        return True

    def Reload(self, force=False):
        ret = True
        # check whether an update is required
        if force:
            ret = self.GetFileObj().Reload(True)
        else:
            if self.IsModified():
                if self.GetFileObj().IsModified():
                    ret = self.GetFileObj().Reload()
        return ret

    def Modify(self, modify=True, modifiedObj=None):
        if modify:
            #LogMsg("%s is modified, modified object is %s" % (self.GetFilename(), modifiedObj))
            if issubclass(modifiedObj.__class__, ini.BaseINIFile) and self._isModify:
                return
            self._isModify = modify
            self.GetParent().Modify(modify, self)
        else:
            self._isModify = modify

    def IsModified(self):
        return self._isModify

    def GetModifiedObjs(self):
        return self._modifiedObjs

    def FilterObjsByArch(self, objs, arch):
        arr = []
        for obj in objs:
            if obj.GetArch().lower() == 'common':
                arr.append(obj)
                continue
            if obj.GetArch().lower() == arch.lower():
                arr.append(obj)
                continue
        return arr

class Platform(SurfaceObject):

    def __init__(self, parent, workspace):
        SurfaceObject.__init__(self, parent, workspace)
        self._modules = []
        self._packages = []

    def Destroy(self):
        for module in self._modules:
            module.Destroy()
        del self._modules[:]
        del self._packages[:]
        SurfaceObject.Destroy(self)

    def GetName(self):
        return self.GetFileObj().GetDefine("PLATFORM_NAME")

    def GetFileObjectClass(self):
        return dsc.DSCFile

    def GetModuleCount(self):
        if self.GetFileObj() is None:
            ErrorMsg("Fail to get module count because the DSC file has not been loaded!")
        return len(self.GetFileObj().GetComponents())

    def GetSupportArchs(self):
        return self.GetFileObj().GetDefine("SUPPORTED_ARCHITECTURES").strip().split('#')[0].split('|')

    def LoadModules(self, precallback=None, postcallback=None):
        for obj in self.GetFileObj().GetComponents():
            mFilename = obj.GetFilename()
            if precallback is not None:
                precallback(self, mFilename)
            arch = obj.GetArch()
            if arch.lower() == 'common':
                archarr = self.GetSupportArchs()
            else:
                archarr = [arch]
            for arch in archarr:
                module = Module(self, self.GetWorkspace())
                if module.Load(mFilename, arch, obj.GetOveridePcds(), obj.GetOverideLibs()):
                    self._modules.append(module)
                    if postcallback is not None:
                        postcallback(self, module)
                else:
                    del module
                    ErrorMsg("Fail to load module %s" % mFilename)

    def GetModules(self):
        return self._modules

    def GetLibraryPath(self, classname, arch, type):
        objs = self.GetFileObj().GetSectionObjectsByName("libraryclasses")
        for obj in objs:
            if classname.lower() != obj.GetClass().lower():
                continue
            if obj.GetArch().lower() != 'common' and \
               obj.GetArch().lower() != arch.lower():
                continue
            if obj.GetModuleType().lower() != 'common' and \
               obj.GetModuleType().lower() != type.lower():
                continue
            return obj.GetInstance()

        ErrorMsg("Fail to get library class %s [%s][%s] from platform %s" %
                 (classname, arch, type, self.GetFilename()))
        return None

    def GetPackage(self, path):
        package = self.GetParent().GetPackage(path)
        if package not in self._packages:
            self._packages.append(package)
        return package

    def GetPcdBuildObjs(self, name, arch=None):
        arr = []
        objs = self.GetFileObj().GetSectionObjectsByName('pcds')
        for obj in objs:
            if obj.GetPcdName().lower() == name.lower():
                arr.append(obj)
        if arch is not None:
            arr = self.FilterObjsByArch(arr, arch)
        return arr

    def Reload(self, callback=None):
        # the force parameter does not matter for a platform object
        isFileChanged = self.GetFileObj().IsModified()
        ret = SurfaceObject.Reload(self, False)
        if not ret:
            return False

        if isFileChanged:
            # destroy all modules and reload them again
            for obj in self._modules:
                obj.Destroy()
            del self._modules[:]
            del self._packages[:]
            self.LoadModules(callback)
        else:
            for obj in self._modules:
                callback(self, obj.GetFilename())
                obj.Reload()

        self.Modify(False)
        return True

    def Modify(self, modify=True, modifiedObj=None):
        if modify:
            #LogMsg("%s is modified, modified object is %s" % (self.GetFilename(), modifiedObj))
            if issubclass(modifiedObj.__class__, ini.BaseINIFile) and self._isModify:
                return
            self._isModify = modify
            self.GetParent().Modify(modify, self)
        else:
            if self.GetFileObj().IsModified():
                return
            for obj in self._modules:
                if obj.IsModified():
                    return
            self._isModify = modify
            self.GetParent().Modify(modify, self)

    def GetModuleObject(self, relativePath, arch):
        path = os.path.normpath(relativePath)
        for obj in self._modules:
            if obj.GetRelativeFilename() == path:
                if arch.lower() == 'common':
                    return obj
                if obj.GetArch() == arch:
                    return obj
        return None

    def GenerateFullReferenceDsc(self):
        oldDsc = self.GetFileObj()
        newDsc = dsc.DSCFile()
        newDsc.CopySectionsByName(oldDsc, 'defines')
        newDsc.CopySectionsByName(oldDsc, 'SkuIds')
        #
        # Dynamic common sections should also be copied
        #
        newDsc.CopySectionsByName(oldDsc, 'PcdsDynamicDefault')
        newDsc.CopySectionsByName(oldDsc, 'PcdsDynamicHii')
        newDsc.CopySectionsByName(oldDsc, 'PcdsDynamicVpd')
        newDsc.CopySectionsByName(oldDsc, 'PcdsDynamicEx')

        sects = oldDsc.GetSectionByName('Components')
        for oldSect in sects:
            newSect = newDsc.AddNewSection(oldSect.GetName())
            for oldComObj in oldSect.GetObjects():
                module = self.GetModuleObject(oldComObj.GetFilename(), oldSect.GetArch())
                if module is None:
                    continue

                newComObj = dsc.DSCComponentObject(newSect)
                newComObj.SetFilename(oldComObj.GetFilename())

                # add all library instances to the override section
                libdict = module.GetLibraries()
                for libclass in libdict.keys():
                    if libdict[libclass] is not None:
                        newComObj.AddOverideLib(libclass,
                                                libdict[libclass].GetRelativeFilename().replace('\\', '/'))

                # add all PCDs to the override section
                pcddict = module.GetPcds()
                for pcd in pcddict.values():
                    buildPcd = pcd.GetBuildObj()
                    buildType = buildPcd.GetPcdType()
                    buildValue = None
                    if buildType.lower() == 'pcdsdynamichii' or \
                       buildType.lower() == 'pcdsdynamicvpd' or \
                       buildType.lower() == 'pcdsdynamicdefault':
                        buildType = 'PcdsDynamic'
                    if buildType != 'PcdsDynamic':
                        buildValue = buildPcd.GetPcdValue()
                    newComObj.AddOveridePcd(buildPcd.GetPcdName(),
                                            buildType,
                                            buildValue)
                newSect.AddObject(newComObj)
        return newDsc

class Module(SurfaceObject):

    def __init__(self, parent, workspace):
        SurfaceObject.__init__(self, parent, workspace)
        self._arch = 'common'
        self._parent = parent
        self._overidePcds = {}
        self._overideLibs = {}
        self._libs = {}
        self._pcds = {}
        self._ppis = []
        self._protocols = []
        self._depexs = []
        self._guids = []
        self._packages = []

    def Destroy(self):
        for lib in self._libs.values():
            if lib is not None:
                lib.Destroy()
        self._libs.clear()

        for pcd in self._pcds.values():
            pcd.Destroy()
        self._pcds.clear()

        for ppi in self._ppis:
            ppi.DeRef(self)
        del self._ppis[:]

        for protocol in self._protocols:
            if protocol is not None:
                protocol.DeRef(self)
        del self._protocols[:]

        for guid in self._guids:
            if guid is not None:
                guid.DeRef(self)
        del self._guids[:]

        del self._packages[:]
        del self._depexs[:]
        SurfaceObject.Destroy(self)

    def GetFileObjectClass(self):
        return inf.INFFile

    def GetLibraries(self):
        return self._libs

    def Load(self, filename, arch='common', overidePcds=None, overideLibs=None):
        if not SurfaceObject.Load(self, filename):
            return False
        self._arch = arch
        if overideLibs is not None:
            self._overideLibs = overideLibs
        if overidePcds is not None:
            self._overidePcds = overidePcds
        self._SearchLibraries()
        self._SearchPackage()
        self._SearchSurfaceItems()
        return True

    def GetArch(self):
        return self._arch

    def GetModuleName(self):
        return self.GetFileObj().GetDefine("BASE_NAME")

    def GetModuleType(self):
        return self.GetFileObj().GetDefine("MODULE_TYPE")

    def GetPlatform(self):
        return self.GetParent()

    def GetModuleObj(self):
        return self

    def GetPcds(self):
        pcds = self._pcds.copy()
        for lib in self._libs.values():
            if lib is None:
                continue
            for name in lib._pcds.keys():
                pcds[name] = lib._pcds[name]
        return pcds

    def GetPpis(self):
        ppis = []
        ppis += self._ppis
        for lib in self._libs.values():
            if lib is None:
                continue
            ppis += lib._ppis
        return ppis

    def GetProtocols(self):
        pros = []
        pros += self._protocols
        for lib in self._libs.values():
            if lib is None:
                continue
            pros += lib._protocols
        return pros

    def GetGuids(self):
        guids = []
        guids += self._guids
        for lib in self._libs.values():
            if lib is None:
                continue
            guids += lib._guids
        return guids

    def GetDepexs(self):
        deps = []
        deps += self._depexs
        for lib in self._libs.values():
            if lib is None:
                continue
            deps += lib._depexs
        return deps

    def IsLibrary(self):
        return self.GetFileObj().GetDefine("LIBRARY_CLASS") is not None

    def GetLibraryInstance(self, classname, arch, type):
        if classname not in self._libs.keys():
            # look in the override libraries first
            if classname in self._overideLibs.keys():
                self._libs[classname] = Library(self, self.GetWorkspace())
                self._libs[classname].Load(self._overideLibs[classname])
                return self._libs[classname]

            parent = self.GetParent()
            if issubclass(parent.__class__, Platform):
                path = parent.GetLibraryPath(classname, arch, type)
                if path is None:
                    ErrorMsg('Fail to get library instance for %s' % classname,
                             self.GetFilename())
                    return None
                self._libs[classname] = Library(self, self.GetWorkspace())
                if not self._libs[classname].Load(path, self.GetArch()):
                    self._libs[classname] = None
            else:
                self._libs[classname] = parent.GetLibraryInstance(classname, arch, type)
        return self._libs[classname]

    def GetSourceObjs(self):
        return self.GetFileObj().GetSectionObjectsByName('source')

    def _SearchLibraries(self):
        objs = self.GetFileObj().GetSectionObjectsByName('libraryclasses')
        arch = self.GetArch()
        type = self.GetModuleType()
        for obj in objs:
            if obj.GetArch().lower() != 'common' and \
               obj.GetArch().lower() not in self.GetPlatform().GetSupportArchs():
                continue
            classname = obj.GetClass()
            instance = self.GetLibraryInstance(classname, arch, type)
            if not self.IsLibrary() and instance is not None:
                instance._isInherit = False
            if classname not in self._libs.keys():
                self._libs[classname] = instance

    def _SearchSurfaceItems(self):
        # get surface items from this module's INF
        pcds = []
        ppis = []
        pros = []
        deps = []
        guids = []
        if self.GetFileObj() is not None:
            pcds = self.FilterObjsByArch(self.GetFileObj().GetSectionObjectsByName('pcd'),
                                         self.GetArch())
            for pcd in pcds:
                if pcd.GetPcdName() not in self._pcds.keys():
                    pcdItem = PcdItem(pcd.GetPcdName(), self, pcd)
                    self._pcds[pcd.GetPcdName()] = ModulePcd(self, pcd.GetPcdName(), pcd, pcdItem)

            ppis += self.FilterObjsByArch(self.GetFileObj().GetSectionObjectsByName('ppis'),
                                          self.GetArch())
            for ppi in ppis:
                item = PpiItem(ppi.GetName(), self, ppi)
                if item not in self._ppis:
                    self._ppis.append(item)

            pros += self.FilterObjsByArch(self.GetFileObj().GetSectionObjectsByName('protocols'),
                                          self.GetArch())
            for pro in pros:
                item = ProtocolItem(pro.GetName(), self, pro)
                if item not in self._protocols:
                    self._protocols.append(item)

            deps += self.FilterObjsByArch(self.GetFileObj().GetSectionObjectsByName('depex'),
                                          self.GetArch())
            for dep in deps:
                item = DepexItem(self, dep)
                self._depexs.append(item)

            guids += self.FilterObjsByArch(self.GetFileObj().GetSectionObjectsByName('guids'),
                                           self.GetArch())
            for guid in guids:
                item = GuidItem(guid.GetName(), self, guid)
                if item not in self._guids:
                    self._guids.append(item)

    def _SearchPackage(self):
        objs = self.GetFileObj().GetSectionObjectsByName('packages')
        for obj in objs:
            package = self.GetPlatform().GetPackage(obj.GetPath())
            if package is not None:
                self._packages.append(package)

    def GetPackages(self):
        return self._packages

    def GetPcdObjects(self):
        if self.GetFileObj() is None:
            return []
        return self.GetFileObj().GetSectionObjectsByName('pcd')

    def GetLibraryClassHeaderFilePath(self):
        lcname = self.GetFileObj().GetProduceLibraryClass()
        if lcname is None:
            return None

        pkgs = self.GetPackages()
        for package in pkgs:
            path = package.GetLibraryClassHeaderPathByName(lcname)
            if path is not None:
                return os.path.realpath(os.path.join(package.GetFileObj().GetPackageRootPath(), path))
        return None

    def Reload(self, force=False, callback=None):
        if callback is not None:
            callback(self, "Starting reload...")

        ret = SurfaceObject.Reload(self, force)
        if not ret:
            return False

        if not force and not self.IsModified():
            return True

        for lib in self._libs.values():
            if lib is not None:
                lib.Destroy()
        self._libs.clear()

        for pcd in self._pcds.values():
            pcd.Destroy()
        self._pcds.clear()

        for ppi in self._ppis:
            ppi.DeRef(self)
        del self._ppis[:]

        for protocol in self._protocols:
            protocol.DeRef(self)
        del self._protocols[:]

        for guid in self._guids:
            guid.DeRef(self)
        del self._guids[:]

        del self._packages[:]
        del self._depexs[:]

        if callback is not None:
            callback(self, "Searching libraries...")
        self._SearchLibraries()
        if callback is not None:
            callback(self, "Searching packages...")
        self._SearchPackage()
        if callback is not None:
            callback(self, "Searching surface items...")
        self._SearchSurfaceItems()

        self.Modify(False)
        return True

    def Modify(self, modify=True, modifiedObj=None):
        if modify:
            #LogMsg("%s is modified, modified object is %s" % (self.GetFilename(), modifiedObj))
            if issubclass(modifiedObj.__class__, ini.BaseINIFile) and self._isModify:
                return
            self._isModify = modify
            self.GetParent().Modify(modify, self)
        else:
            if self.GetFileObj().IsModified():
                return
            self._isModify = modify
            self.GetParent().Modify(modify, self)

class Library(Module):

    def __init__(self, parent, workspace):
        Module.__init__(self, parent, workspace)
        self._isInherit = True

    def IsInherit(self):
        return self._isInherit

    def GetModuleType(self):
        return self.GetParent().GetModuleType()

    def GetPlatform(self):
        return self.GetParent().GetParent()

    def GetModuleObj(self):
        return self.GetParent()

    def GetArch(self):
        return self.GetParent().GetArch()

    def Destroy(self):
        self._libs.clear()
        self._pcds.clear()
        SurfaceObject.Destroy(self)

class Package(SurfaceObject):

    def __init__(self, parent, workspace):
        SurfaceObject.__init__(self, parent, workspace)
        self._pcds = {}
        self._guids = {}
        self._protocols = {}
        self._ppis = {}

    def GetPcds(self):
        return self._pcds

    def GetPpis(self):
        return list(self._ppis.values())

    def GetProtocols(self):
        return list(self._protocols.values())

    def GetGuids(self):
        return list(self._guids.values())

    def Destroy(self):
        for pcd in self._pcds.values():
            if pcd is not None:
                pcd.Destroy()
        for guid in self._guids.values():
            if guid is not None:
                guid.Destroy()
        for protocol in self._protocols.values():
            if protocol is not None:
                protocol.Destroy()
        for ppi in self._ppis.values():
            if ppi is not None:
                ppi.Destroy()
        self._pcds.clear()
        self._guids.clear()
        self._protocols.clear()
        self._ppis.clear()
        SurfaceObject.Destroy(self)

    def Load(self, relativePath):
        ret = SurfaceObject.Load(self, relativePath)
        if not ret:
            return False

        pcds = self.GetFileObj().GetSectionObjectsByName('pcds')
        for pcd in pcds:
            if pcd.GetPcdName() in self._pcds.keys():
                if self._pcds[pcd.GetPcdName()] is not None:
                    self._pcds[pcd.GetPcdName()].AddDecObj(pcd)
            else:
                self._pcds[pcd.GetPcdName()] = PcdItem(pcd.GetPcdName(), self, pcd)

        guids = self.GetFileObj().GetSectionObjectsByName('guids')
        for guid in guids:
            if guid.GetName() not in self._guids.keys():
                self._guids[guid.GetName()] = GuidItem(guid.GetName(), self, guid)
            else:
                WarnMsg("Duplicate definition for %s" % guid.GetName())

        ppis = self.GetFileObj().GetSectionObjectsByName('ppis')
        for ppi in ppis:
            if ppi.GetName() not in self._ppis.keys():
                self._ppis[ppi.GetName()] = PpiItem(ppi.GetName(), self, ppi)
            else:
                WarnMsg("Duplicate definition for %s" % ppi.GetName())

        protocols = self.GetFileObj().GetSectionObjectsByName('protocols')
        for protocol in protocols:
            if protocol.GetName() not in self._protocols.keys():
                self._protocols[protocol.GetName()] = ProtocolItem(protocol.GetName(), self, protocol)
            else:
                WarnMsg("Duplicate definition for %s" % protocol.GetName())

        return True

    def GetFileObjectClass(self):
        return dec.DECFile

    def GetName(self):
        return self.GetFileObj().GetDefine("PACKAGE_NAME")

    def GetPcdDefineObjs(self, name=None):
        arr = []
        objs = self.GetFileObj().GetSectionObjectsByName('pcds')
        if name is None:
            return objs

        for obj in objs:
            if obj.GetPcdName().lower() == name.lower():
                arr.append(obj)
        return arr

    def GetLibraryClassObjs(self):
        return self.GetFileObj().GetSectionObjectsByName('libraryclasses')

    def Modify(self, modify=True, modifiedObj=None):
        if modify:
            self._isModify = modify
            self.GetParent().Modify(modify, self)
        else:
            if self.GetFileObj().IsModified():
                return
            self._isModify = modify
            self.GetParent().Modify(modify, self)

    def GetLibraryClassHeaderPathByName(self, clsname):
        objs = self.GetLibraryClassObjs()
        for obj in objs:
            if obj.GetClassName() == clsname:
                return obj.GetHeaderFile()
        return None

class DepexItem(object):

    def __init__(self, parent, infObj):
        self._parent = parent
        self._infObj = infObj

    def GetDepexString(self):
        return str(self._infObj)

    def GetInfObject(self):
        return self._infObj

class ModulePcd(object):
    _type_mapping = {'FeaturePcd': 'PcdsFeatureFlag',
                     'FixedPcd': 'PcdsFixedAtBuild',
                     'PatchPcd': 'PcdsPatchableInModule'}

    def __init__(self, parent, name, infObj, pcdItem):
        assert issubclass(parent.__class__, Module), "A module PCD's parent must be a module!"
        assert pcdItem is not None, 'Pcd %s is not defined in any package!' % name

        self._name = name
        self._parent = parent
        self._pcdItem = pcdItem
        self._infObj = infObj

    def GetName(self):
        return self._name

    def GetParent(self):
        return self._parent

    def GetArch(self):
        return self._parent.GetArch()

    def Destroy(self):
        self._pcdItem.DeRef(self._parent)
        self._infObj = None

    def GetBuildObj(self):
        platformInfos = self._parent.GetPlatform().GetPcdBuildObjs(self._name, self.GetArch())
        modulePcdType = self._infObj.GetPcdType()

        # if the platform does not give the PCD's value, get the default value from the package
        if len(platformInfos) == 0:
            if modulePcdType.lower() == 'pcd':
                return self._pcdItem.GetDecObject()
            else:
                for obj in self._pcdItem.GetDecObjects():
                    if modulePcdType not in self._type_mapping.keys():
                        ErrorMsg("Invalid PCD type %s" % modulePcdType)
                        return None
                    if self._type_mapping[modulePcdType] == obj.GetPcdType():
                        return obj
                ErrorMsg('Module PCD type %s is not in the valid range in the package!' % modulePcdType)
        else:
            if modulePcdType.lower() == 'pcd':
                if len(platformInfos) > 1:
                    WarnMsg("Found more than one value for PCD %s in platform %s" %
                            (self._name, self._parent.GetPlatform().GetFilename()))
                return platformInfos[0]
            else:
                for obj in platformInfos:
                    if modulePcdType not in self._type_mapping.keys():
                        ErrorMsg("Invalid PCD type %s" % modulePcdType)
                        return None
                    if self._type_mapping[modulePcdType] == obj.GetPcdType():
                        return obj
                ErrorMsg('Can not find value for pcd %s in pcd type %s' %
                         (self._name, modulePcdType))
        return None

class SurfaceItem(object):
    _objs = {}

    def __new__(cls, *args, **kwargs):
        """Maintain only a single instance of this object
        @return: instance of this class
        """
        name = args[0]
        parent = args[1]
        fileObj = args[2]
        if issubclass(parent.__class__, Package):
            if name in cls._objs.keys():
                ErrorMsg("%s item is defined in more than one package: %s and %s" %
                         (name, parent.GetFilename(), cls._objs[name].GetParent().GetFilename()))
                return None
            obj = object.__new__(cls)
            cls._objs[name] = obj
            return obj
        elif issubclass(parent.__class__, Module):
            if name not in cls._objs.keys():
                ErrorMsg("%s item is not defined in any package! It is used by module %s" %
                         (name, parent.GetFilename()))
                return None
            return cls._objs[name]
        return None

    def __init__(self, name, parent, fileObj):
        if issubclass(parent.__class__, Package):
            self._name = name
            self._parent = parent
            self._decObj = [fileObj]
            self._refMods = {}
        else:
            self.RefModule(parent, fileObj)

    @classmethod
    def GetObjectDict(cls):
        return cls._objs

    def GetParent(self):
        return self._parent

    def GetReference(self):
        return self._refMods

    def RefModule(self, mObj, infObj):
        if mObj in self._refMods.keys():
            return
        self._refMods[mObj] = infObj

    def DeRef(self, mObj):
        if mObj not in self._refMods.keys():
            WarnMsg("%s is not referenced by module %s" % (self._name, mObj.GetFilename()))
            return
        del self._refMods[mObj]

    def Destroy(self):
        self._refMods.clear()
        cls = self.__class__
        del cls._objs[self._name]

    def GetName(self):
        return self._name

    def GetDecObject(self):
        return self._decObj[0]

    def GetDecObjects(self):
        return self._decObj

class PcdItem(SurfaceItem):

    def AddDecObj(self, fileObj):
        for decObj in self._decObj:
            if decObj.GetFilename() != fileObj.GetFilename():
                ErrorMsg("Pcd %s is defined in more than one package: %s and %s" %
                         (self._name, decObj.GetFilename(), fileObj.GetFilename()))
                return
            if decObj.GetPcdType() == fileObj.GetPcdType() and \
               decObj.GetArch().lower() == fileObj.GetArch():
                ErrorMsg("Pcd %s is defined twice for pcd type %s in package %s" %
                         (self._name, decObj.GetPcdType(), decObj.GetFilename()))
                return
        self._decObj.append(fileObj)

    def GetValidPcdType(self):
        types = []
        for obj in self._decObj:
            if obj.GetPcdType() not in types:
                types.append(obj.GetPcdType())
        return types

class GuidItem(SurfaceItem):
    pass

class PpiItem(SurfaceItem):
    pass

class ProtocolItem(SurfaceItem):
    pass
edk2-master
BaseTools/Scripts/PackageDocumentTools/plugins/EdkPlugins/edk2/model/baseobject.py
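An illustrative driver for the surface model above, not part of the repository. The "parent" passed to Platform stands in for a workspace-level object; in the code paths exercised here it only needs Modify() and GetPackage(), so a tiny stub is used. The stub, the workspace path, and the DSC path are all assumptions.

from plugins.EdkPlugins.edk2.model import baseobject

WORKSPACE = '/path/to/edk2'   # placeholder workspace root, an assumption

class WorkspaceStub(object):
    def __init__(self, root):
        self._root = root
        self._packages = {}

    def Modify(self, modify=True, modifiedObj=None):
        pass   # a real workspace object would propagate the modified state

    def GetPackage(self, path):
        # cache packages by path, mirroring what a real workspace object would do
        if path not in self._packages:
            pkg = baseobject.Package(self, self._root)
            if not pkg.Load(path):
                return None
            self._packages[path] = pkg
        return self._packages[path]

platform = baseobject.Platform(WorkspaceStub(WORKSPACE), WORKSPACE)
if platform.Load('OvmfPkg/OvmfPkgX64.dsc'):   # assumed DSC path
    platform.LoadModules()
    print(platform.GetName(), len(platform.GetModules()))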
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder

    # ####################################################################################### #
    #                                Common Configuration                                     #
    # ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform.  Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))

    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.

        ArchCsv: csv string containing all architectures to build
        '''
        return "Bhyve/BhyveX64.dsc"

import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
edk2-master
OvmfPkg/PlatformCI/BhyveBuild.py
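Each of these thin platform scripts relies on the same late-binding trick: PlatformBuildLib reads its platform constants through a module-level CommonPlatform name, so assigning `PlatformBuildLib.CommonPlatform = CommonPlatform` after import rebinds every later lookup. A minimal single-file sketch of that pattern, with illustrative names that are not from the repository:

CommonPlatform = None   # stands in for PlatformBuildLib's module-level name

def get_active_dsc(arch_csv):
    # Resolved at call time, so rebinding CommonPlatform afterwards still takes effect.
    return CommonPlatform.GetDscName(arch_csv)

class MyPlatform:
    @classmethod
    def GetDscName(cls, arch_csv):
        return "Bhyve/BhyveX64.dsc"

CommonPlatform = MyPlatform          # the rebinding the platform scripts perform
print(get_active_dsc("X64"))         # -> Bhyve/BhyveX64.dsc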
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder

    # ####################################################################################### #
    #                                Common Configuration                                     #
    # ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform.  Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))

    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.

        ArchCsv: csv string containing all architectures to build
        '''
        return "IntelTdx/IntelTdxX64.dsc"

import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
edk2-master
OvmfPkg/PlatformCI/IntelTdxBuild.py
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder

    # ####################################################################################### #
    #                                Common Configuration                                     #
    # ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform.  Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))

    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.

        ArchCsv: csv string containing all architectures to build
        '''
        return "CloudHv/CloudHvX64.dsc"

import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
edk2-master
OvmfPkg/PlatformCI/CloudHvBuild.py
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder

    # ####################################################################################### #
    #                                Common Configuration                                     #
    # ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform.  Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))

    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.

        ArchCsv: csv string containing all architectures to build
        '''
        return "Microvm/MicrovmX64.dsc"

import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
edk2-master
OvmfPkg/PlatformCI/MicrovmBuild.py
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys
import subprocess

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder

    # ####################################################################################### #
    #                                Common Configuration                                     #
    # ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform.  Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))

    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.

        ArchCsv: csv string containing all architectures to build
        '''
        return "AmdSev/AmdSevX64.dsc"

import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform

# hack alert -- create dummy grub.efi
subprocess.run(['touch', 'OvmfPkg/AmdSev/Grub/grub.efi'])
subprocess.run(['ls', '-l', '--sort=time', 'OvmfPkg/AmdSev/Grub'])
edk2-master
OvmfPkg/PlatformCI/AmdSevBuild.py
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import logging
import io

from edk2toolext.environment import shell_environment
from edk2toolext.environment.uefi_build import UefiBuilder
from edk2toolext.invocables.edk2_platform_build import BuildSettingsManager
from edk2toolext.invocables.edk2_setup import SetupSettingsManager, RequiredSubmodule
from edk2toolext.invocables.edk2_update import UpdateSettingsManager
from edk2toolext.invocables.edk2_pr_eval import PrEvalSettingsManager
from edk2toollib.utility_functions import RunCmd

    # ####################################################################################### #
    #                         Configuration for Update & Setup                                #
    # ####################################################################################### #
class SettingsManager(UpdateSettingsManager, SetupSettingsManager, PrEvalSettingsManager):

    def GetPackagesSupported(self):
        ''' return iterable of edk2 packages supported by this build.
        These should be edk2 workspace relative paths '''
        return CommonPlatform.PackagesSupported

    def GetArchitecturesSupported(self):
        ''' return iterable of edk2 architectures supported by this build '''
        return CommonPlatform.ArchSupported

    def GetTargetsSupported(self):
        ''' return iterable of edk2 target tags supported by this build '''
        return CommonPlatform.TargetsSupported

    def GetRequiredSubmodules(self):
        ''' return iterable containing RequiredSubmodule objects.
        If no RequiredSubmodules return an empty iterable '''
        rs = []

        # intentionally declare this one with recursive false to avoid overhead
        rs.append(RequiredSubmodule(
            "CryptoPkg/Library/OpensslLib/openssl", False))

        # To avoid maintenance of this file for every new submodule
        # lets just parse the .gitmodules and add each if not already in list.
        # The GetRequiredSubmodules is designed to allow a build to optimize
        # the desired submodules but it isn't necessary for this repository.
        result = io.StringIO()
        ret = RunCmd("git", "config --file .gitmodules --get-regexp path",
                     workingdir=self.GetWorkspaceRoot(), outstream=result)
        # Cmd output is expected to look like:
        # submodule.CryptoPkg/Library/OpensslLib/openssl.path CryptoPkg/Library/OpensslLib/openssl
        # submodule.SoftFloat.path ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
        if ret == 0:
            for line in result.getvalue().splitlines():
                _, _, path = line.partition(" ")
                if path is not None:
                    if path not in [x.path for x in rs]:
                        # add it with recursive since we don't know
                        rs.append(RequiredSubmodule(path, True))
        return rs

    def SetArchitectures(self, list_of_requested_architectures):
        ''' Confirm the requested architecture list is valid and configure SettingsManager
        to run only the requested architectures.

        Raise an Exception if a requested architecture is not supported.
        '''
        unsupported = set(list_of_requested_architectures) - set(self.GetArchitecturesSupported())
        if (len(unsupported) > 0):
            errorString = ("Unsupported Architecture Requested: " + " ".join(unsupported))
            logging.critical(errorString)
            raise Exception(errorString)
        self.ActualArchitectures = list_of_requested_architectures

    def GetWorkspaceRoot(self):
        ''' get WorkspacePath '''
        return CommonPlatform.WorkspaceRoot

    def GetActiveScopes(self):
        ''' return tuple containing scopes that should be active for this process '''
        return CommonPlatform.Scopes

    def FilterPackagesToTest(self, changedFilesList: list, potentialPackagesList: list) -> list:
        ''' Filter other cases that this package should be built
        based on changed files. This should cover things that can't
        be detected as dependencies. '''
        build_these_packages = []
        possible_packages = potentialPackagesList.copy()
        for f in changedFilesList:
            # BaseTools files that might change the build
            if "BaseTools" in f:
                if os.path.splitext(f)[1] not in [".txt", ".md"]:
                    build_these_packages = possible_packages
                    break

            # if the azure pipeline platform template file changed
            if "platform-build-run-steps.yml" in f:
                build_these_packages = possible_packages
                break

        return build_these_packages

    def GetPlatformDscAndConfig(self) -> tuple:
        ''' If a platform desires to provide its DSC then Policy 4 will evaluate if
        any of the changes will be built in the dsc.

        The tuple should be (<workspace relative path to dsc file>, <input dictionary of dsc key value pairs>)
        '''
        dsc = CommonPlatform.GetDscName(",".join(self.ActualArchitectures))
        return (f"OvmfPkg/{dsc}", {})

    # ####################################################################################### #
    #                         Actual Configuration for Platform Build                         #
    # ####################################################################################### #
class PlatformBuilder(UefiBuilder, BuildSettingsManager):

    def __init__(self):
        UefiBuilder.__init__(self)

    def AddCommandLineOptions(self, parserObj):
        ''' Add command line options to the argparser '''
        parserObj.add_argument('-a', "--arch", dest="build_arch", type=str, default="IA32,X64",
                               help="Optional - CSV of architecture to build.  IA32 will use IA32 for Pei & Dxe. "
                                    "X64 will use X64 for both PEI and DXE.  IA32,X64 will use IA32 for PEI and "
                                    "X64 for DXE. default is IA32,X64")

    def RetrieveCommandLineOptions(self, args):
        '''  Retrieve command line options from the argparser '''
        shell_environment.GetBuildVars().SetValue("TARGET_ARCH", " ".join(args.build_arch.upper().split(",")), "From CmdLine")
        dsc = CommonPlatform.GetDscName(args.build_arch)
        shell_environment.GetBuildVars().SetValue("ACTIVE_PLATFORM", f"OvmfPkg/{dsc}", "From CmdLine")

    def GetWorkspaceRoot(self):
        ''' get WorkspacePath '''
        return CommonPlatform.WorkspaceRoot

    def GetPackagesPath(self):
        ''' Return a list of workspace relative paths that should be mapped as edk2 PackagesPath '''
        return ()

    def GetActiveScopes(self):
        ''' return tuple containing scopes that should be active for this process '''
        return CommonPlatform.Scopes

    def GetName(self):
        ''' Get the name of the repo, platform, or product being built.
        Used for naming the log file, among other things. '''
        # check the startup nsh flag and if set then rename the log file.
        # this helps in CI so we don't overwrite the build log since running
        # uses the stuart_build command.
        if (shell_environment.GetBuildVars().GetValue("MAKE_STARTUP_NSH", "FALSE") == "TRUE"):
            return "OvmfPkg_With_Run"
        return "OvmfPkg"

    def GetLoggingLevel(self, loggerType):
        ''' Get the logging level for a given type
        base == lowest logging level supported
        con  == Screen logging
        txt  == plain text file logging
        md   == markdown file logging
        '''
        return logging.DEBUG

    def SetPlatformEnv(self):
        logging.debug("PlatformBuilder SetPlatformEnv")
        self.env.SetValue("PRODUCT_NAME", "OVMF", "Platform Hardcoded")
        self.env.SetValue("MAKE_STARTUP_NSH", "FALSE", "Default to false")
        self.env.SetValue("QEMU_HEADLESS", "FALSE", "Default to false")
        return 0

    def PlatformPreBuild(self):
        return 0

    def PlatformPostBuild(self):
        return 0

    def FlashRomImage(self):
        VirtualDrive = os.path.join(self.env.GetValue("BUILD_OUTPUT_BASE"), "VirtualDrive")
        os.makedirs(VirtualDrive, exist_ok=True)
        OutputPath_FV = os.path.join(self.env.GetValue("BUILD_OUTPUT_BASE"), "FV")

        if (self.env.GetValue("QEMU_SKIP") and
            self.env.GetValue("QEMU_SKIP").upper() == "TRUE"):
            logging.info("skipping qemu boot test")
            return 0

        #
        # QEMU must be on the path
        #
        cmd = "qemu-system-x86_64"
        args = "-debugcon stdio"                                            # write messages to stdio
        args += " -global isa-debugcon.iobase=0x402"                        # debug messages out thru virtual io port
        args += " -net none"                                                # turn off network
        args += " -smp 4"
        args += f" -drive file=fat:rw:{VirtualDrive},format=raw,media=disk" # Mount disk with startup.nsh

        if (self.env.GetValue("QEMU_HEADLESS").upper() == "TRUE"):
            args += " -display none"  # no graphics

        if (self.env.GetBuildValue("SMM_REQUIRE") == "1"):
            args += " -machine q35,smm=on"  #,accel=(tcg|kvm)
            args += " --accel tcg,thread=single"
            #args += " -m ..."
            args += " -global driver=cfi.pflash01,property=secure,value=on"
            args += " -drive if=pflash,format=raw,unit=0,file=" + os.path.join(OutputPath_FV, "OVMF_CODE.fd") + ",readonly=on"
            args += " -drive if=pflash,format=raw,unit=1,file=" + os.path.join(OutputPath_FV, "OVMF_VARS.fd")
        else:
            args += " -pflash " + os.path.join(OutputPath_FV, "OVMF.fd")    # path to firmware

        if (self.env.GetValue("MAKE_STARTUP_NSH").upper() == "TRUE"):
            f = open(os.path.join(VirtualDrive, "startup.nsh"), "w")
            f.write("BOOT SUCCESS !!! \n")
            ## add commands here
            f.write("reset -s\n")
            f.close()

        ret = RunCmd(cmd, args)

        if ret == 0xc0000005:
            # for some reason getting a c0000005 on successful return
            return 0

        return ret
edk2-master
OvmfPkg/PlatformCI/PlatformBuildLib.py
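For reference, the QEMU invocation that FlashRomImage assembles in the non-SMM, headless case boils down to the sketch below. The flags are taken verbatim from the method above; the firmware path is a placeholder for BUILD_OUTPUT_BASE/FV, and running it of course assumes qemu-system-x86_64 is on the PATH and a built OVMF.fd exists.

import os
import subprocess

fv_dir = 'Build/OvmfX64/DEBUG_GCC5/FV'   # placeholder for BUILD_OUTPUT_BASE/FV
cmd = ['qemu-system-x86_64',
       '-debugcon', 'stdio',                        # firmware debug messages to stdio
       '-global', 'isa-debugcon.iobase=0x402',      # via the 0x402 virtual io port
       '-net', 'none',                              # no network
       '-smp', '4',
       '-display', 'none',                          # the QEMU_HEADLESS case
       '-pflash', os.path.join(fv_dir, 'OVMF.fd')]  # the non-SMM firmware image
subprocess.run(cmd, check=False)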
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder

    # ####################################################################################### #
    #                                Common Configuration                                     #
    # ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform.  Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("X64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))

    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.

        ArchCsv: csv string containing all architectures to build
        '''
        return "OvmfXen.dsc"

import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
edk2-master
OvmfPkg/PlatformCI/XenBuild.py
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder

    # ####################################################################################### #
    #                                Common Configuration                                     #
    # ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform.  Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("RISCV64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))

    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.

        ArchCsv: csv string containing all architectures to build
        '''
        return "RiscVVirt/RiscVVirtQemu.dsc"

import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
edk2-master
OvmfPkg/PlatformCI/QemuBuild.py
# @file
# Script to Build OVMF UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import sys

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from PlatformBuildLib import SettingsManager
from PlatformBuildLib import PlatformBuilder

    # ####################################################################################### #
    #                                Common Configuration                                     #
    # ####################################################################################### #
class CommonPlatform():
    ''' Common settings for this platform.  Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("OvmfPkg",)
    ArchSupported = ("IA32", "X64")
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('ovmf', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))

    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        ''' return the DSC given the architectures requested.

        ArchCsv: csv string containing all architectures to build
        '''
        dsc = "OvmfPkg"
        if "IA32" in ArchCsv.upper().split(","):
            dsc += "Ia32"
        if "X64" in ArchCsv.upper().split(","):
            dsc += "X64"
        dsc += ".dsc"
        return dsc

import PlatformBuildLib
PlatformBuildLib.CommonPlatform = CommonPlatform
edk2-master
OvmfPkg/PlatformCI/PlatformBuild.py
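The DSC-name selection in PlatformBuild.py composes the filename from the architecture CSV, so IA32, X64, and the combined build each map to a different platform description. A standalone restatement of that logic (the function name here is illustrative, not from the repository):

def get_dsc_name(arch_csv):
    # Mirrors CommonPlatform.GetDscName above: append a part per architecture.
    dsc = 'OvmfPkg'
    archs = arch_csv.upper().split(',')
    if 'IA32' in archs:
        dsc += 'Ia32'
    if 'X64' in archs:
        dsc += 'X64'
    return dsc + '.dsc'

assert get_dsc_name('IA32') == 'OvmfPkgIa32.dsc'
assert get_dsc_name('X64') == 'OvmfPkgX64.dsc'
assert get_dsc_name('IA32,X64') == 'OvmfPkgIa32X64.dsc'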
#
# Copyright (c) 2011-2013, ARM Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

import os
import struct

import firmware_volume
import build_report
import system_table

# Reload external classes
reload(firmware_volume)
reload(build_report)
reload(system_table)

def readMem32(executionContext, address):
    bytes = executionContext.getMemoryService().read(address, 4, 32)
    return struct.unpack('<I', bytes)[0]

def dump_fv(ec, fv_base, fv_size):
    fv = firmware_volume.FirmwareVolume(ec, fv_base, fv_size)
    ffs = fv.get_next_ffs()
    while ffs != None:
        print "# %s" % ffs

        section = ffs.get_next_section()
        while section != None:
            print "\t%s" % section
            try:
                print "\t\t- %s" % section.get_debug_filepath()
            except Exception:
                pass
            section = ffs.get_next_section(section)
        ffs = fv.get_next_ffs(ffs)

def dump_system_table(ec, mem_base, mem_size):
    st = system_table.SystemTable(ec, mem_base, mem_size)
    debug_info_table_base = st.get_configuration_table(system_table.DebugInfoTable.CONST_DEBUG_INFO_TABLE_GUID)

    debug_info_table = system_table.DebugInfoTable(ec, debug_info_table_base)
    debug_info_table.dump()

def load_symbol_from_file(ec, filename, address, verbose = False):
    if verbose:
        print "Add symbols of %s at 0x%x" % (filename, address)
    try:
        ec.getImageService().addSymbols(filename, address)
    except:
        try:
            # We could get an exception if the symbols are already loaded
            ec.getImageService().unloadSymbols(filename)
            ec.getImageService().addSymbols(filename, address)
        except:
            print "Warning: not possible to load symbols from %s at 0x%x" % (filename, address)

def is_aarch64(ec):
    success = True
    try:
        # Try to access an AArch64-specific register
        ec.getRegisterService().getValue('X0')
    except:
        success = False
    return success

class ArmPlatform:
    def __init__(self, sysmembase=None, sysmemsize=None, fvs={}):
        self.sysmembase = sysmembase
        self.sysmemsize = sysmemsize
        self.fvs = fvs

class ArmPlatformDebugger:
    system_table = None
    firmware_volumes = {}

    REGION_TYPE_SYSMEM = 1
    REGION_TYPE_ROM    = 2
    REGION_TYPE_FV     = 3

    def __init__(self, ec, report_log, regions, verbose = False):
        self.ec = ec
        self.verbose = verbose
        fvs = []
        sysmem_base = None
        sysmem_size = None

        if report_log and os.path.isfile(report_log):
            try:
                self.build = build_report.BuildReport(report_log)
            except IOError:
                raise IOError(2, 'Report \'%s\' is not valid' % report_log)

            # Generate list of supported Firmware Volumes
            if self.build.PCDs['gArmTokenSpaceGuid'].has_key('PcdFvSize') and int(self.build.PCDs['gArmTokenSpaceGuid']['PcdFvSize'][0], 16) != 0:
                fvs.append((int(self.build.PCDs['gArmTokenSpaceGuid']['PcdFvBaseAddress'][0], 16),
                            int(self.build.PCDs['gArmTokenSpaceGuid']['PcdFvSize'][0], 16)))
            if self.build.PCDs['gArmTokenSpaceGuid'].has_key('PcdSecureFvSize') and int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSecureFvSize'][0], 16) != 0:
                fvs.append((int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSecureFvBaseAddress'][0], 16),
                            int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSecureFvSize'][0], 16)))
            if self.build.PCDs['gArmTokenSpaceGuid'].has_key('PcdHypFvSize') and int(self.build.PCDs['gArmTokenSpaceGuid']['PcdHypFvSize'][0], 16) != 0:
                fvs.append((int(self.build.PCDs['gArmTokenSpaceGuid']['PcdHypFvBaseAddress'][0], 16),
                            int(self.build.PCDs['gArmTokenSpaceGuid']['PcdHypFvSize'][0], 16)))

            sysmem_base = int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSystemMemoryBase'][0], 16)
            sysmem_size = int(self.build.PCDs['gArmTokenSpaceGuid']['PcdSystemMemorySize'][0], 16)
        else:
            for region in regions:
                if region[0] == ArmPlatformDebugger.REGION_TYPE_SYSMEM:
                    sysmem_base = region[1]
                    sysmem_size = region[2]
                elif region[0] == ArmPlatformDebugger.REGION_TYPE_FV:
                    fvs.append((region[1], region[2]))
                elif region[0] == ArmPlatformDebugger.REGION_TYPE_ROM:
                    for base in xrange(region[1], region[1] + region[2], 0x400000):
                        signature = struct.unpack("cccc", self.ec.getMemoryService().read(base, 4, 32))
                        if signature == firmware_volume.FirmwareVolume.CONST_FV_SIGNATURE:
                            fvs.append((base, 0))
                else:
                    print "Region type '%d' Not Supported" % region[0]

        self.platform = ArmPlatform(sysmem_base, sysmem_size, fvs)

    def in_sysmem(self, addr):
        return (self.platform.sysmembase is not None) and \
               (self.platform.sysmembase <= addr) and \
               (addr < self.platform.sysmembase + self.platform.sysmemsize)

    def in_fv(self, addr):
        return (self.get_fv_at(addr) != None)

    def get_fv_at(self, addr):
        for fv in self.platform.fvs:
            if (fv[0] <= addr) and (addr < fv[0] + fv[1]):
                return fv
        return None

    def load_current_symbols(self):
        pc = int(self.ec.getRegisterService().getValue('PC')) & 0xFFFFFFFF
        if self.in_fv(pc):
            debug_infos = []

            (fv_base, fv_size) = self.get_fv_at(pc)
            if self.firmware_volumes.has_key(fv_base) == False:
                self.firmware_volumes[fv_base] = firmware_volume.FirmwareVolume(self.ec, fv_base, fv_size)

            stack_frame = self.ec.getTopLevelStackFrame()
            info = self.firmware_volumes[fv_base].load_symbols_at(int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF, self.verbose)
            debug_infos.append(info)
            while stack_frame.next() is not None:
                stack_frame = stack_frame.next()

                # Stack frame attached to 'PC'
                pc = int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF

                # Check if the symbols for this stack frame have already been loaded
                found = False
                for debug_info in debug_infos:
                    if (pc >= debug_info[0]) and (pc < debug_info[0] + debug_info[1]):
                        found = True
                if found == False:
                    info = self.firmware_volumes[fv_base].load_symbols_at(pc)
                    debug_infos.append(info)
            #self.firmware_volumes[fv_base].load_symbols_at(pc)
        elif self.in_sysmem(pc):
            debug_infos = []

            if self.system_table is None:
                # Find the System Table
                self.system_table = system_table.SystemTable(self.ec, self.platform.sysmembase, self.platform.sysmemsize)

                # Find the Debug Info Table
                debug_info_table_base = self.system_table.get_configuration_table(system_table.DebugInfoTable.CONST_DEBUG_INFO_TABLE_GUID)
                self.debug_info_table = system_table.DebugInfoTable(self.ec, debug_info_table_base)

            stack_frame = self.ec.getTopLevelStackFrame()
            info = self.debug_info_table.load_symbols_at(int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF, self.verbose)
            debug_infos.append(info)
            while stack_frame.next() is not None:
                stack_frame = stack_frame.next()

                # Stack frame attached to 'PC'
                pc = int(stack_frame.getRegisterService().getValue('PC')) & 0xFFFFFFFF

                # Check if the symbols for this stack frame have already been loaded
                found = False
                for debug_info in debug_infos:
                    if (pc >= debug_info[0]) and (pc < debug_info[0] + debug_info[1]):
                        found = True
                if found == False:
                    try:
                        info = self.debug_info_table.load_symbols_at(pc)
                        debug_infos.append(info)
                    except:
                        pass
            #self.debug_info_table.load_symbols_at(pc)
        else:
            raise Exception('ArmPlatformDebugger', "Not supported region")

    def load_all_symbols(self):
        # Load all the XIP symbols attached to the Firmware Volume
        for (fv_base, fv_size) in self.platform.fvs:
            if self.firmware_volumes.has_key(fv_base) == False:
                self.firmware_volumes[fv_base] = firmware_volume.FirmwareVolume(self.ec, fv_base, fv_size)
            self.firmware_volumes[fv_base].load_all_symbols(self.verbose)

            try:
                #
Load all symbols of modules loaded into System Memory if self.system_table is None: # Find the System Table self.system_table = system_table.SystemTable(self.ec, self.platform.sysmembase, self.platform.sysmemsize) # Find the Debug Info Table debug_info_table_base = self.system_table.get_configuration_table(system_table.DebugInfoTable.CONST_DEBUG_INFO_TABLE_GUID) self.debug_info_table = system_table.DebugInfoTable(self.ec, debug_info_table_base) self.debug_info_table.load_all_symbols(self.verbose) except: # A debugger exception is expected if DRAM has not been initialized or if we have not started running from DRAM yet print "Note: no symbols have been found in System Memory (possible cause: the UEFI permanent memory has not been installed yet)"
edk2-master
ArmPlatformPkg/Scripts/Ds5/edk2_debugger.py
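As published, dump_fv ignores its fv_base/fv_size parameters and re-derives the FV location from PCDs through an undefined global 'build'; readMem32 and the ROM signature scan also call struct.unpack without the module importing struct. A minimal corrected sketch (the dump_fv_fixed name is ours, and it assumes the Arm DS-5 Jython 2.x environment where ec is a live execution context):

import firmware_volume

def dump_fv_fixed(ec, fv_base, fv_size):
    # Walk every FFS file and section of the FV that was passed in,
    # instead of re-reading PcdFvBaseAddress/PcdFvSize from a build report.
    fv = firmware_volume.FirmwareVolume(ec, fv_base, fv_size)
    ffs = fv.get_next_ffs()
    while ffs is not None:
        print "# %s" % ffs
        section = ffs.get_next_section()
        while section is not None:
            print "\t%s" % section
            try:
                print "\t\t- %s" % section.get_debug_filepath()
            except Exception:
                # Not every section type carries debug information
                pass
            section = ffs.get_next_section(section)
        ffs = fv.get_next_ffs(ffs)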
#
# Copyright (c) 2021, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

from arm_ds.debugger_v1 import DebugException

import subprocess, os, edk2_debugger, re

def get_module_name(line):
    path = line.rsplit(' ')[1]
    return os.path.splitext(os.path.basename(path))[0]

def get_module_path(line):
    return line.rsplit(' ')[1]

def get_module_entrypoint(list, module_name):
    line = [i for i in list if module_name in i and re.search(r'\b'+module_name+r'\b', i)]
    if len(line) == 0:
        # Module was not loaded using DxeDispatcher or PeiDispatcher. It is a SEC module.
        # Symbols for these modules are loaded from FV, not from console log
        return None

    entrypoint_str = line[0].rsplit(' ')[4]
    return entrypoint_str.rsplit('=')[1]

def load_symbol_from_console(ec, console_file, objdump, verbose):
    if objdump is None:
        print "Error: A path to objdump tool is not specified, but -i parameter is provided"
    elif not os.path.exists(objdump):
        print "Error: Provided path to objdump is invalid: %s" % objdump
    elif not os.path.exists(console_file):
        print "Error: UEFI console file is not found: %s" % console_file
    else:
        full_list = open(console_file).read().splitlines()
        efi_list = [i for i in full_list if "EntryPoint=" in i]

        full_list = dict.fromkeys(full_list)
        full_list = [i for i in full_list if "add-symbol-file" in i]

        module_dict = {}
        for line in full_list:
            name = get_module_name(line)
            module_dict[name] = (get_module_path(line), get_module_entrypoint(efi_list, name))

        for module in module_dict:
            entrypoint_addr = module_dict[module][1]
            if entrypoint_addr is not None:
                path = module_dict[module][0]
                if not os.path.exists(path):
                    print "Module not found: " + path + ". Skipping..."
                    continue

                sp = subprocess.Popen([objdump, '-S', path], stdout=subprocess.PIPE)
                objdump_out = sp.stdout.readlines()
                entrypoint_record = [i for i in objdump_out if "<_ModuleEntryPoint>" in i]
                entrypoint_offset = entrypoint_record[0].split(' ')[0]

                load_addr = int(entrypoint_addr, 16) - int(entrypoint_offset, 16)
                edk2_debugger.load_symbol_from_file(ec, path, load_addr, verbose)
edk2-master
ArmPlatformPkg/Scripts/Ds5/console_loader.py
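load_symbol_from_console cross-references the 'add-symbol-file' lines with the 'EntryPoint=' lines from a captured UEFI console log, runs objdump -S on each module to locate _ModuleEntryPoint, and derives the load address from the difference. A hypothetical DS-5 invocation (both paths are placeholders):

from arm_ds.debugger_v1 import Debugger
from console_loader import load_symbol_from_console

ec = Debugger().getCurrentExecutionContext()
load_symbol_from_console(ec,
                         '/work/uefi-console.log',              # captured UEFI console output
                         '/usr/bin/aarch64-linux-gnu-objdump',  # objdump matching the target arch
                         True)                                  # verbose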
# # Copyright (c) 2011-2021, Arm Limited. All rights reserved. # # SPDX-License-Identifier: BSD-2-Clause-Patent # from arm_ds.debugger_v1 import Debugger from arm_ds.debugger_v1 import DebugException from console_loader import load_symbol_from_console import re, sys, getopt import edk2_debugger # Reload external classes reload(edk2_debugger) def usage(): print "-v,--verbose" print "-a,--all: Load all symbols" print "-l,--report=: Filename for the EDK2 report log" print "-m,--sysmem=(base,size): System Memory region" print "-f,--fv=(base,size): Firmware region" print "-r,--rom=(base,size): ROM region" print "-i,--input=: Filename for the EDK2 console output" print "-o,--objdump=: Path to the objdump tool" verbose = False load_all = False report_file = None input_file = None objdump = None regions = [] opts,args = getopt.getopt(sys.argv[1:], "hvar:i:o:vm:vr:vf:v", ["help","verbose","all","report=","sysmem=","rom=","fv=","input=","objdump="]) if (opts is None) or (not opts): report_file = '../../../report.log' else: region_reg = re.compile("\((.*),(.*)\)") base_reg = re.compile("(.*)") for o,a in opts: region_type = None regex = None m = None if o in ("-h","--help"): usage() sys.exit() elif o in ("-v","--verbose"): verbose = True elif o in ("-a","--all"): load_all = True elif o in ("-l","--report"): report_file = a elif o in ("-m","--sysmem"): region_type = edk2_debugger.ArmPlatformDebugger.REGION_TYPE_SYSMEM regex = region_reg elif o in ("-f","--fv"): region_type = edk2_debugger.ArmPlatformDebugger.REGION_TYPE_FV regex = region_reg elif o in ("-r","--rom"): region_type = edk2_debugger.ArmPlatformDebugger.REGION_TYPE_ROM regex = region_reg elif o in ("-i","--input"): input_file = a elif o in ("-o", "--objdump"): objdump = a else: assert False, "Unhandled option (%s)" % o if region_type: m = regex.match(a) if m: if regex.groups == 1: regions.append((region_type,int(m.group(1),0),0)) else: regions.append((region_type,int(m.group(1),0),int(m.group(2),0))) else: if regex.groups == 1: raise Exception('cmd_load_symbols', "Expect a base address") else: raise Exception('cmd_load_symbols', "Expect a region format as (base,size)") # Debugger object for accessing the debugger debugger = Debugger() # Initialisation commands ec = debugger.getCurrentExecutionContext() ec.getExecutionService().stop() # in case the execution context reference is out of date ec = debugger.getCurrentExecutionContext() try: armplatform_debugger = edk2_debugger.ArmPlatformDebugger(ec, report_file, regions, verbose) if load_all: armplatform_debugger.load_all_symbols() else: armplatform_debugger.load_current_symbols() except IOError, (ErrorNumber, ErrorMessage): print "Error: %s" % ErrorMessage except Exception, (ErrorClass, ErrorMessage): print "Error(%s): %s" % (ErrorClass, ErrorMessage) except DebugException, de: print "DebugError: %s" % (de.getMessage()) if input_file: load_symbol_from_console(ec, input_file, objdump, verbose)
edk2-master
ArmPlatformPkg/Scripts/Ds5/cmd_load_symbols.py
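cmd_load_symbols.py is the entry point, typically run from the DS-5 scripting console; with no options at all it falls back to '../../../report.log'. Note that 'l' is absent from the getopt short-option string, so the report log is only reachable through the long form --report=. Hypothetical invocations with placeholder values:

# Load every symbol described by the build report, verbosely:
#   cmd_load_symbols.py --report=/path/to/report.log --all --verbose
#
# No build report: describe the memory map by hand instead:
#   cmd_load_symbols.py --sysmem=(0x80000000,0x40000000) --fv=(0x0,0x400000)
#
# Also resolve modules recorded in a UEFI console capture:
#   cmd_load_symbols.py --report=report.log --input=uefi.log --objdump=/usr/bin/objdump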
#!/usr/bin/python # # Copyright (c) 2014, ARM Limited. All rights reserved. # # SPDX-License-Identifier: BSD-2-Clause-Patent # import getopt import operator import os import pickle import sys from sys import argv from cStringIO import StringIO modules = {} functions = {} functions_addr = {} def usage(): print "-t,--trace: Location of the Trace file" print "-s,--symbols: Location of the symbols and modules" def get_address_from_string(address): return int(address.strip("S:").strip("N:").strip("EL2:").strip("EL1:"), 16) def get_module_from_addr(modules, addr): for key,value in modules.items(): if (value['start'] <= addr) and (addr <= value['end']): return key return None def add_cycles_to_function(functions, func_name, addr, cycles): if func_name != "<Unknown>": # Check if we are still in the previous function if add_cycles_to_function.prev_func_name == func_name: add_cycles_to_function.prev_entry['cycles'] += cycles return (add_cycles_to_function.prev_func_name, add_cycles_to_function.prev_module_name) if func_name in functions.keys(): for module_name, module_value in functions[func_name].iteritems(): if (module_value['start'] <= addr) and (addr < module_value['end']): module_value['cycles'] += cycles add_cycles_to_function.prev_func_name = func_name add_cycles_to_function.prev_module_name = module_name add_cycles_to_function.prev_entry = module_value return (func_name, module_name) elif (module_value['end'] == 0): module_value['cycles'] += cycles add_cycles_to_function.prev_func_name = func_name add_cycles_to_function.prev_module_name = module_name add_cycles_to_function.prev_entry = module_value return (func_name, module_name) # Workaround to fix the 'info func' limitation that does not expose the 'static' function module_name = get_module_from_addr(modules, addr) functions[func_name] = {} functions[func_name][module_name] = {} functions[func_name][module_name]['start'] = 0 functions[func_name][module_name]['end'] = 0 functions[func_name][module_name]['cycles'] = cycles functions[func_name][module_name]['count'] = 0 add_cycles_to_function.prev_func_name = func_name add_cycles_to_function.prev_module_name = module_name add_cycles_to_function.prev_entry = functions[func_name][module_name] return (func_name, module_name) else: # Check if we are still in the previous function if (add_cycles_to_function.prev_entry is not None) and (add_cycles_to_function.prev_entry['start'] <= addr) and (addr < add_cycles_to_function.prev_entry['end']): add_cycles_to_function.prev_entry['cycles'] += cycles return (add_cycles_to_function.prev_func_name, add_cycles_to_function.prev_module_name) # Generate the key for the given address key = addr & ~0x0FFF if key not in functions_addr.keys(): if 'Unknown' not in functions.keys(): functions['Unknown'] = {} if 'Unknown' not in functions['Unknown'].keys(): functions['Unknown']['Unknown'] = {} functions['Unknown']['Unknown']['cycles'] = 0 functions['Unknown']['Unknown']['count'] = 0 functions['Unknown']['Unknown']['cycles'] += cycles add_cycles_to_function.prev_func_name = None return None for func_key, module in functions_addr[key].iteritems(): for module_key, module_value in module.iteritems(): if (module_value['start'] <= addr) and (addr < module_value['end']): module_value['cycles'] += cycles # In case o <Unknown> we prefer to fallback on the direct search add_cycles_to_function.prev_func_name = func_key add_cycles_to_function.prev_module_name = module_key add_cycles_to_function.prev_entry = module_value return (func_key, module_key) print "Warning: Function %s @ 
0x%x not found" % (func_name, addr) add_cycles_to_function.prev_func_name = None return None # Static variables for the previous function add_cycles_to_function.prev_func_name = None add_cycles_to_function.prev_entry = None def trace_read(): global trace_process line = trace.readline() trace_process += len(line) return line # # Parse arguments # trace_name = None symbols_file = None opts,args = getopt.getopt(sys.argv[1:], "ht:vs:v", ["help","trace=","symbols="]) if (opts is None) or (not opts): usage() sys.exit() for o,a in opts: if o in ("-h","--help"): usage() sys.exit() elif o in ("-t","--trace"): trace_name = a elif o in ("-s","--symbols"): symbols_file = a else: assert False, "Unhandled option (%s)" % o # # We try first to see if we run the script from DS-5 # try: from arm_ds.debugger_v1 import Debugger from arm_ds.debugger_v1 import DebugException # Debugger object for accessing the debugger debugger = Debugger() # Initialisation commands ec = debugger.getExecutionContext(0) ec.getExecutionService().stop() ec.getExecutionService().waitForStop() # in case the execution context reference is out of date ec = debugger.getExecutionContext(0) # # Get the module name and their memory range # info_file = ec.executeDSCommand("info file") info_file_str = StringIO(info_file) line = info_file_str.readline().strip('\n') while line != '': if ("Symbols from" in line): # Get the module name from the line 'Symbols from "/home/...."' module_name = line.split("\"")[1].split("/")[-1] modules[module_name] = {} # Look for the text section line = info_file_str.readline().strip('\n') while (line != '') and ("Symbols from" not in line): if ("ER_RO" in line): modules[module_name]['start'] = get_address_from_string(line.split()[0]) modules[module_name]['end'] = get_address_from_string(line.split()[2]) line = info_file_str.readline().strip('\n') break; if (".text" in line): modules[module_name]['start'] = get_address_from_string(line.split()[0]) modules[module_name]['end'] = get_address_from_string(line.split()[2]) line = info_file_str.readline().strip('\n') break; line = info_file_str.readline().strip('\n') line = info_file_str.readline().strip('\n') # # Get the function name and their memory range # info_func = ec.executeDSCommand("info func") info_func_str = StringIO(info_func) # Skip the first line 'Low-level symbols ...' 
line = info_func_str.readline().strip('\n') func_prev = None while line != '': # We ignore all the functions after 'Functions in' if ("Functions in " in line): line = info_func_str.readline().strip('\n') while line != '': line = info_func_str.readline().strip('\n') line = info_func_str.readline().strip('\n') continue if ("Low-level symbols" in line): # We need to fixup the last function of the module if func_prev is not None: func_prev['end'] = modules[module_name]['end'] func_prev = None line = info_func_str.readline().strip('\n') continue func_name = line.split()[1] func_start = get_address_from_string(line.split()[0]) module_name = get_module_from_addr(modules, func_start) if func_name not in functions.keys(): functions[func_name] = {} functions[func_name][module_name] = {} functions[func_name][module_name]['start'] = func_start functions[func_name][module_name]['cycles'] = 0 functions[func_name][module_name]['count'] = 0 # Set the end address of the previous function if func_prev is not None: func_prev['end'] = func_start func_prev = functions[func_name][module_name] line = info_func_str.readline().strip('\n') # Fixup the last function func_prev['end'] = modules[module_name]['end'] if symbols_file is not None: pickle.dump((modules, functions), open(symbols_file, "w")) except: if symbols_file is None: print "Error: Symbols file is required when run out of ARM DS-5" sys.exit() (modules, functions) = pickle.load(open(symbols_file, "r")) # # Build optimized table for the <Unknown> functions # functions_addr = {} for func_key, module in functions.iteritems(): for module_key, module_value in module.iteritems(): key = module_value['start'] & ~0x0FFF if key not in functions_addr.keys(): functions_addr[key] = {} if func_key not in functions_addr[key].keys(): functions_addr[key][func_key] = {} functions_addr[key][func_key][module_key] = module_value # # Process the trace file # if trace_name is None: sys.exit() trace = open(trace_name, "r") trace_size = os.path.getsize(trace_name) trace_process = 0 # Get the column names from the first line columns = trace_read().split() column_addr = columns.index('Address') column_cycles = columns.index('Cycles') column_function = columns.index('Function') line = trace_read() i = 0 prev_callee = None while line: try: func_name = line.split('\t')[column_function].strip() address = get_address_from_string(line.split('\t')[column_addr]) cycles = int(line.split('\t')[column_cycles]) callee = add_cycles_to_function(functions, func_name, address, cycles) if (prev_callee != None) and (prev_callee != callee): functions[prev_callee[0]][prev_callee[1]]['count'] += 1 prev_callee = callee except ValueError: pass line = trace_read() if ((i % 1000000) == 0) and (i != 0): percent = (trace_process * 100.00) / trace_size print "Processing file ... 
(%.2f %%)" % (percent) i = i + 1 # Fixup the last callee functions[prev_callee[0]][prev_callee[1]]['count'] += 1 # # Process results # functions_cycles = {} all_functions_cycles = {} total_cycles = 0 for func_key, module in functions.iteritems(): for module_key, module_value in module.iteritems(): key = "%s/%s" % (module_key, func_key) functions_cycles[key] = (module_value['cycles'], module_value['count']) total_cycles += module_value['cycles'] if func_key not in all_functions_cycles.keys(): all_functions_cycles[func_key] = (module_value['cycles'], module_value['count']) else: all_functions_cycles[func_key] = tuple(map(sum, zip(all_functions_cycles[func_key], (module_value['cycles'], module_value['count'])))) sorted_functions_cycles = sorted(functions_cycles.iteritems(), key=operator.itemgetter(1), reverse = True) sorted_all_functions_cycles = sorted(all_functions_cycles.items(), key=operator.itemgetter(1), reverse = True) print print "----" for (key,value) in sorted_functions_cycles[:20]: if value[0] != 0: print "%s (cycles: %d - %d%%, count: %d)" % (key, value[0], (value[0] * 100) / total_cycles, value[1]) else: break; print "----" for (key,value) in sorted_all_functions_cycles[:20]: if value[0] != 0: print "%s (cycles: %d - %d%%, count: %d)" % (key, value[0], (value[0] * 100) / total_cycles, value[1]) else: break;
edk2-master
ArmPlatformPkg/Scripts/Ds5/profile.py
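profile.py expects a tab-separated trace whose header row names at least the Address, Cycles and Function columns. Because the (modules, functions) map is pickled once gathered, the long trace pass can run outside DS-5 under stock Python 2; a hypothetical two-stage flow (file names are placeholders):

# Stage 1 - inside DS-5, where 'info file' and 'info func' are available;
# this run also writes the symbol pickle:
#   profile.py --trace=trace.txt --symbols=symbols.pkl
#
# Stage 2 - outside DS-5, reusing the pickled symbol map:
#   python2 profile.py --trace=trace.txt --symbols=symbols.pkl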
# # Copyright (c) 2011-2013, ARM Limited. All rights reserved. # # SPDX-License-Identifier: BSD-2-Clause-Patent # from arm_ds.debugger_v1 import DebugException import struct import string import edk2_debugger class EfiFileSection(object): EFI_SECTION_PE32 = 0x10 EFI_SECTION_PIC = 0x11 EFI_SECTION_TE = 0x12 EFI_IMAGE_DEBUG_TYPE_CODEVIEW = 0x2 SIZEOF_EFI_FFS_FILE_HEADER = 0x28 def __init__(self, ec, base): self.base = base self.ec = ec def __str__(self): return "FileSection(type:0x%X, size:0x%x)" % (self.get_type(), self.get_size()) def get_base(self): return self.base def get_type(self): return struct.unpack("B", self.ec.getMemoryService().read(self.base + 0x3, 1, 8))[0] def get_size(self): return (struct.unpack("<I", self.ec.getMemoryService().read(self.base, 4, 32))[0] & 0x00ffffff) def get_debug_filepath(self): type = self.get_type() if type == EfiFileSection.EFI_SECTION_TE: section = EfiSectionTE(self, ec, self.base + 0x4) elif type == EfiFileSection.EFI_SECTION_PE32: section = EfiSectionPE32(self, ec, self.base + 0x4) else: raise Exception("EfiFileSection", "No debug section") return section.get_debug_filepath() class EfiSectionTE: SIZEOF_EFI_TE_IMAGE_HEADER = 0x28 EFI_TE_IMAGE_SIGNATURE = ('V','Z') def __init__(self, ec, base_te): self.ec = ec self.base_te = int(base_te) te_sig = struct.unpack("cc", self.ec.getMemoryService().read(self.base_te, 2, 32)) if te_sig != EfiSectionTE.EFI_TE_IMAGE_SIGNATURE: raise Exception("EfiFileSectionTE","TE Signature incorrect") def get_debug_filepath(self): stripped_size = struct.unpack("<H", self.ec.getMemoryService().read(self.base_te + 0x6, 2, 32))[0] stripped_size -= EfiSectionTE.SIZEOF_EFI_TE_IMAGE_HEADER debug_dir_entry_rva = self.ec.getMemoryService().readMemory32(self.base_te + 0x20) if debug_dir_entry_rva == 0: raise Exception("EfiFileSectionTE","No debug directory for image") debug_dir_entry_rva -= stripped_size debug_type = self.ec.getMemoryService().readMemory32(self.base_te + debug_dir_entry_rva + 0xC) if (debug_type != 0xdf) and (debug_type != EfiFileSection.EFI_IMAGE_DEBUG_TYPE_CODEVIEW): raise Exception("EfiFileSectionTE","Debug type is not dwarf") debug_rva = self.ec.getMemoryService().readMemory32(self.base_te + debug_dir_entry_rva + 0x14) debug_rva -= stripped_size dwarf_sig = struct.unpack("cccc", self.ec.getMemoryService().read(self.base_te + debug_rva, 4, 32)) if (dwarf_sig != 0x66727764) and (dwarf_sig != FirmwareFile.CONST_NB10_SIGNATURE): raise Exception("EfiFileSectionTE","Dwarf debug signature not found") if dwarf_sig == 0x66727764: filename = self.base_te + debug_rva + 0xc else: filename = self.base_te + debug_rva + 0x10 filename = struct.unpack("400s", self.ec.getMemoryService().read(filename, 400, 32))[0] return filename[0:string.find(filename,'\0')] def get_debug_elfbase(self): stripped_size = struct.unpack("<H", self.ec.getMemoryService().read(self.base_te + 0x6, 2, 32))[0] stripped_size -= EfiSectionTE.SIZEOF_EFI_TE_IMAGE_HEADER return self.base_te - stripped_size class EfiSectionPE32: def __init__(self, ec, base_pe32): self.ec = ec self.base_pe32 = base_pe32 def get_debug_filepath(self): # Offset from dos hdr to PE file hdr file_header_offset = self.ec.getMemoryService().readMemory32(self.base_pe32 + 0x3C) # Offset to debug dir in PE hdrs debug_dir_entry_rva = self.ec.getMemoryService().readMemory32(self.base_pe32 + file_header_offset + 0xA8) if debug_dir_entry_rva == 0: raise Exception("EfiFileSectionPE32","No Debug Directory") debug_type = self.ec.getMemoryService().readMemory32(self.base_pe32 + debug_dir_entry_rva 
+ 0xC) if (debug_type != 0xdf) and (debug_type != EfiFileSection.EFI_IMAGE_DEBUG_TYPE_CODEVIEW): raise Exception("EfiFileSectionPE32","Debug type is not dwarf") debug_rva = self.ec.getMemoryService().readMemory32(self.base_pe32 + debug_dir_entry_rva + 0x14) dwarf_sig = struct.unpack("cccc", self.ec.getMemoryService().read(str(self.base_pe32 + debug_rva), 4, 32)) if (dwarf_sig != 0x66727764) and (dwarf_sig != FirmwareFile.CONST_NB10_SIGNATURE): raise Exception("EfiFileSectionPE32","Dwarf debug signature not found") if dwarf_sig == 0x66727764: filename = self.base_pe32 + debug_rva + 0xc else: filename = self.base_pe32 + debug_rva + 0x10 filename = struct.unpack("400s", self.ec.getMemoryService().read(str(filename), 400, 32))[0] return filename[0:string.find(filename,'\0')] def get_debug_elfbase(self): return self.base_pe32 class EfiSectionPE64: def __init__(self, ec, base_pe64): self.ec = ec self.base_pe64 = base_pe64 def get_debug_filepath(self): # Offset from dos hdr to PE file hdr (EFI_IMAGE_NT_HEADERS64) file_header_offset = self.ec.getMemoryService().readMemory32(self.base_pe64 + 0x3C) # Offset to debug dir in PE hdrs debug_dir_entry_rva = self.ec.getMemoryService().readMemory32(self.base_pe64 + file_header_offset + 0xB8) if debug_dir_entry_rva == 0: raise Exception("EfiFileSectionPE64","No Debug Directory") debug_type = self.ec.getMemoryService().readMemory32(self.base_pe64 + debug_dir_entry_rva + 0xC) if (debug_type != 0xdf) and (debug_type != EfiFileSection.EFI_IMAGE_DEBUG_TYPE_CODEVIEW): raise Exception("EfiFileSectionPE64","Debug type is not dwarf") debug_rva = self.ec.getMemoryService().readMemory32(self.base_pe64 + debug_dir_entry_rva + 0x14) dwarf_sig = struct.unpack("cccc", self.ec.getMemoryService().read(str(self.base_pe64 + debug_rva), 4, 32)) if (dwarf_sig != 0x66727764) and (dwarf_sig != FirmwareFile.CONST_NB10_SIGNATURE): raise Exception("EfiFileSectionPE64","Dwarf debug signature not found") if dwarf_sig == 0x66727764: filename = self.base_pe64 + debug_rva + 0xc else: filename = self.base_pe64 + debug_rva + 0x10 filename = struct.unpack("400s", self.ec.getMemoryService().read(str(filename), 400, 32))[0] return filename[0:string.find(filename,'\0')] def get_debug_elfbase(self): return self.base_pe64 class FirmwareFile: EFI_FV_FILETYPE_RAW = 0x01 EFI_FV_FILETYPE_FREEFORM = 0x02 EFI_FV_FILETYPE_SECURITY_CORE = 0x03 EFI_FV_FILETYPE_PEI_CORE = 0x04 EFI_FV_FILETYPE_DXE_CORE = 0x05 EFI_FV_FILETYPE_PEIM = 0x06 EFI_FV_FILETYPE_DRIVER = 0x07 EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER = 0x08 EFI_FV_FILETYPE_APPLICATION = 0x09 EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE = 0x0B EFI_FV_FILETYPE_FFS_MIN = 0xF0 CONST_NB10_SIGNATURE = ('N','B','1','0') def __init__(self, fv, base, ec): self.fv = fv self.base = base self.ec = ec def __str__(self): return "FFS(state:0x%x, type:0x%X, size:0x%x)" % (self.get_state(), self.get_type(), self.get_size()) def get_base(self): return self.base def get_size(self): size = (self.ec.getMemoryService().readMemory32(self.base + 0x14) & 0x00ffffff) # Occupied size is the size considering the alignment return size + ((0x8 - (size & 0x7)) & 0x7) def get_type(self): return self.ec.getMemoryService().readMemory8(self.base + 0x12) def get_state(self): state = self.ec.getMemoryService().readMemory8(self.base + 0x17) polarity = self.fv.get_polarity() if polarity: state = ~state highest_bit = 0x80; while (highest_bit != 0) and ((highest_bit & state) == 0): highest_bit >>= 1 return highest_bit def get_next_section(self, section=None): if section == None: if self.get_type() != 
FirmwareFile.EFI_FV_FILETYPE_FFS_MIN: section_base = self.get_base() + 0x18; else: return None else: section_base = int(section.get_base() + section.get_size()) # Align to next 4 byte boundary if (section_base & 0x3) != 0: section_base = section_base + 0x4 - (section_base & 0x3) if section_base < self.get_base() + self.get_size(): return EfiFileSection(self.ec, section_base) else: return None class FirmwareVolume: CONST_FV_SIGNATURE = ('_','F','V','H') EFI_FVB2_ERASE_POLARITY = 0x800 DebugInfos = [] def __init__(self, ec, fv_base, fv_size): self.ec = ec self.fv_base = fv_base self.fv_size = fv_size try: signature = struct.unpack("cccc", self.ec.getMemoryService().read(fv_base + 0x28, 4, 32)) except DebugException: raise Exception("FirmwareVolume", "Not possible to access the defined firmware volume at [0x%X,0x%X]. Could be the used build report does not correspond to your current debugging context." % (int(fv_base),int(fv_base+fv_size))) if signature != FirmwareVolume.CONST_FV_SIGNATURE: raise Exception("FirmwareVolume", "This is not a valid firmware volume") def get_size(self): return self.ec.getMemoryService().readMemory32(self.fv_base + 0x20) def get_attributes(self): return self.ec.getMemoryService().readMemory32(self.fv_base + 0x2C) def get_polarity(self): attributes = self.get_attributes() if attributes & FirmwareVolume.EFI_FVB2_ERASE_POLARITY: return 1 else: return 0 def get_next_ffs(self, ffs=None): if ffs == None: # Get the offset of the first FFS file from the FV header ffs_base = self.fv_base + self.ec.getMemoryService().readMemory16(self.fv_base + 0x30) else: # Goto the next FFS file ffs_base = int(ffs.get_base() + ffs.get_size()) # Align to next 8 byte boundary if (ffs_base & 0x7) != 0: ffs_base = ffs_base + 0x8 - (ffs_base & 0x7) if ffs_base < self.fv_base + self.get_size(): return FirmwareFile(self, ffs_base, self.ec) else: return None def get_debug_info(self): self.DebugInfos = [] ffs = self.get_next_ffs() while ffs != None: section = ffs.get_next_section() while section != None: type = section.get_type() if (type == EfiFileSection.EFI_SECTION_TE) or (type == EfiFileSection.EFI_SECTION_PE32): self.DebugInfos.append((section.get_base(), section.get_size(), section.get_type())) section = ffs.get_next_section(section) ffs = self.get_next_ffs(ffs) def load_symbols_at(self, addr, verbose = False): if self.DebugInfos == []: self.get_debug_info() for debug_info in self.DebugInfos: if (addr >= debug_info[0]) and (addr < debug_info[0] + debug_info[1]): if debug_info[2] == EfiFileSection.EFI_SECTION_TE: section = EfiSectionTE(self.ec, debug_info[0] + 0x4) elif debug_info[2] == EfiFileSection.EFI_SECTION_PE32: section = EfiSectionPE32(self.ec, debug_info[0] + 0x4) else: raise Exception('FirmwareVolume','Section Type not supported') try: edk2_debugger.load_symbol_from_file(self.ec, section.get_debug_filepath(), section.get_debug_elfbase(), verbose) except Exception, (ErrorClass, ErrorMessage): if verbose: print "Error while loading a symbol file (%s: %s)" % (ErrorClass, ErrorMessage) return debug_info def load_all_symbols(self, verbose = False): if self.DebugInfos == []: self.get_debug_info() for debug_info in self.DebugInfos: if debug_info[2] == EfiFileSection.EFI_SECTION_TE: section = EfiSectionTE(self.ec, debug_info[0] + 0x4) elif debug_info[2] == EfiFileSection.EFI_SECTION_PE32: section = EfiSectionPE32(self.ec, debug_info[0] + 0x4) else: continue try: edk2_debugger.load_symbol_from_file(self.ec, section.get_debug_filepath(), section.get_debug_elfbase(), verbose) except Exception, 
(ErrorClass, ErrorMessage): if verbose: print "Error while loading a symbol file (%s: %s)" % (ErrorClass, ErrorMessage)
edk2-master
ArmPlatformPkg/Scripts/Ds5/firmware_volume.py
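FirmwareVolume validates the _FVH signature at offset 0x28 of the volume, then walks the FFS files and their PE32/TE sections. A sketch listing every debuggable section of one FV under DS-5 (base and size are placeholder values):

from arm_ds.debugger_v1 import Debugger
import firmware_volume

ec = Debugger().getCurrentExecutionContext()
fv = firmware_volume.FirmwareVolume(ec, 0x00000000, 0x00400000)  # placeholder base/size
fv.get_debug_info()  # fills fv.DebugInfos with (base, size, section_type) tuples
for (base, size, section_type) in fv.DebugInfos:
    print "section @ 0x%08x, size 0x%x, type 0x%x" % (base, size, section_type)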
#
# Copyright (c) 2011-2012, ARM Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

import re

class BuildReport:
    PCDs = {}

    def parse_platform_summary(self, file):
        pass

    def parse_pcd_report(self, report_file):
        pcd_reg = re.compile(" (\*P|\*F|\*M| ) (\w+)(\ +)\: (.*) \((\w+)\) = (.*)\n")

        for line in report_file.xreadlines():
            stripped_line = line.strip()
            if re.match("\<=+\>", stripped_line):
                return
            elif re.match("g.*Guid", stripped_line):
                guid = stripped_line
                self.PCDs[guid] = {}
            else:
                m = pcd_reg.match(line)
                if m:
                    self.PCDs[guid][m.group(2)] = (m.group(6).strip(), m.group(5))

    def parse_firmware_device(self, file):
        pass

    def parse_module_summary(self, file):
        #print "Module Summary"
        pass

    CONST_SECTION_HEADERS = [('Platform Summary', parse_platform_summary),
                             ('Platform Configuration Database Report', parse_pcd_report),
                             ('Firmware Device (FD)', parse_firmware_device),
                             ('Module Summary', parse_module_summary)]

    def __init__(self, filename = 'report.log'):
        report_file = open(filename, 'r')
        for line in report_file.xreadlines():
            for section_header in BuildReport.CONST_SECTION_HEADERS:
                if line.strip() == section_header[0]:
                    section_header[1](self, report_file)
        #print self.PCDs
edk2-master
ArmPlatformPkg/Scripts/Ds5/build_report.py
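BuildReport scans an EDK2 build report for the 'Platform Configuration Database Report' section and stores each PCD as a (value, type) tuple, keyed by token-space GUID and PCD name. A sketch of the lookup the debugger scripts perform ('report.log' is a placeholder path):

from build_report import BuildReport

build = BuildReport('report.log')
arm_pcds = build.PCDs['gArmTokenSpaceGuid']
fv_base = int(arm_pcds['PcdFvBaseAddress'][0], 16)  # element [0] is the value, [1] its type
fv_size = int(arm_pcds['PcdFvSize'][0], 16)
print "FV at [0x%x, 0x%x]" % (fv_base, fv_base + fv_size)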
# # Copyright (c) 2011-2013, ARM Limited. All rights reserved. # # SPDX-License-Identifier: BSD-2-Clause-Patent # from arm_ds.debugger_v1 import DebugException import struct import edk2_debugger import firmware_volume class DebugInfoTable: CONST_DEBUG_INFO_TABLE_GUID = ( 0x49152E77L, 0x47641ADAL, 0xFE7AA2B7L, 0x8B5ED9FEL) DebugInfos = [] def __init__(self, ec, debug_info_table_header_offset): self.ec = ec self.base = debug_info_table_header_offset def get_debug_info(self): # Get the information from EFI_DEBUG_IMAGE_INFO_TABLE_HEADER count = self.ec.getMemoryService().readMemory32(self.base + 0x4) if edk2_debugger.is_aarch64(self.ec): debug_info_table_base = self.ec.getMemoryService().readMemory64(self.base + 0x8) else: debug_info_table_base = self.ec.getMemoryService().readMemory32(self.base + 0x8) self.DebugInfos = [] for i in range(0, count): # Get the address of the structure EFI_DEBUG_IMAGE_INFO if edk2_debugger.is_aarch64(self.ec): debug_info = self.ec.getMemoryService().readMemory64(debug_info_table_base + (i * 8)) else: debug_info = self.ec.getMemoryService().readMemory32(debug_info_table_base + (i * 4)) if debug_info: debug_info_type = self.ec.getMemoryService().readMemory32(debug_info) # Normal Debug Info Type if debug_info_type == 1: if edk2_debugger.is_aarch64(self.ec): # Get the base address of the structure EFI_LOADED_IMAGE_PROTOCOL loaded_image_protocol = self.ec.getMemoryService().readMemory64(debug_info + 0x8) image_base = self.ec.getMemoryService().readMemory64(loaded_image_protocol + 0x40) image_size = self.ec.getMemoryService().readMemory32(loaded_image_protocol + 0x48) else: # Get the base address of the structure EFI_LOADED_IMAGE_PROTOCOL loaded_image_protocol = self.ec.getMemoryService().readMemory32(debug_info + 0x4) image_base = self.ec.getMemoryService().readMemory32(loaded_image_protocol + 0x20) image_size = self.ec.getMemoryService().readMemory32(loaded_image_protocol + 0x28) self.DebugInfos.append((image_base,image_size)) # Return (base, size) def load_symbols_at(self, addr, verbose = False): if self.DebugInfos == []: self.get_debug_info() found = False for debug_info in self.DebugInfos: if (addr >= debug_info[0]) and (addr < debug_info[0] + debug_info[1]): if edk2_debugger.is_aarch64(self.ec): section = firmware_volume.EfiSectionPE64(self.ec, debug_info[0]) else: section = firmware_volume.EfiSectionPE32(self.ec, debug_info[0]) try: edk2_debugger.load_symbol_from_file(self.ec, section.get_debug_filepath(), section.get_debug_elfbase(), verbose) except Exception, (ErrorClass, ErrorMessage): if verbose: print "Error while loading a symbol file (%s: %s)" % (ErrorClass, ErrorMessage) found = True return debug_info if found == False: raise Exception('DebugInfoTable','No symbol found at 0x%x' % addr) def load_all_symbols(self, verbose = False): if self.DebugInfos == []: self.get_debug_info() for debug_info in self.DebugInfos: if edk2_debugger.is_aarch64(self.ec): section = firmware_volume.EfiSectionPE64(self.ec, debug_info[0]) else: section = firmware_volume.EfiSectionPE32(self.ec, debug_info[0]) try: edk2_debugger.load_symbol_from_file(self.ec, section.get_debug_filepath(), section.get_debug_elfbase(), verbose) except Exception, (ErrorClass, ErrorMessage): if verbose: print "Error while loading a symbol file (%s: %s)" % (ErrorClass, ErrorMessage) def dump(self): self.get_debug_info() for debug_info in self.DebugInfos: base_pe32 = debug_info[0] if edk2_debugger.is_aarch64(self.ec): section = firmware_volume.EfiSectionPE64(self.ec, base_pe32) else: section = 
firmware_volume.EfiSectionPE32(self.ec, base_pe32) print section.get_debug_filepath() class SystemTable: CONST_ST_SIGNATURE = ('I','B','I',' ','S','Y','S','T') def __init__(self, ec, membase, memsize): self.membase = membase self.memsize = memsize self.ec = ec found = False # Start from the top of the memory offset = self.membase + self.memsize # Align to highest 4MB boundary offset = offset & ~0x3FFFFF # We should not have a System Table at the top of the System Memory offset = offset - 0x400000 # Start at top and look on 4MB boundaries for system table ptr structure while offset > self.membase: try: signature = struct.unpack("cccccccc", self.ec.getMemoryService().read(str(offset), 8, 32)) except DebugException: raise Exception('SystemTable','Fail to access System Memory. Ensure all the memory in the region [0x%x;0x%X] is accessible.' % (membase,membase+memsize)) if signature == SystemTable.CONST_ST_SIGNATURE: found = True if edk2_debugger.is_aarch64(self.ec): self.system_table_base = self.ec.getMemoryService().readMemory64(offset + 0x8) else: self.system_table_base = self.ec.getMemoryService().readMemory32(offset + 0x8) break offset = offset - 0x400000 if not found: raise Exception('SystemTable','System Table not found in System Memory [0x%x;0x%X]' % (membase,membase+memsize)) def get_configuration_table(self, conf_table_guid): if edk2_debugger.is_aarch64(self.ec): # Number of configuration Table entry conf_table_entry_count = self.ec.getMemoryService().readMemory32(self.system_table_base + 0x68) # Get location of the Configuration Table entries conf_table_offset = self.ec.getMemoryService().readMemory64(self.system_table_base + 0x70) else: # Number of configuration Table entry conf_table_entry_count = self.ec.getMemoryService().readMemory32(self.system_table_base + 0x40) # Get location of the Configuration Table entries conf_table_offset = self.ec.getMemoryService().readMemory32(self.system_table_base + 0x44) for i in range(0, conf_table_entry_count): if edk2_debugger.is_aarch64(self.ec): offset = conf_table_offset + (i * 0x18) else: offset = conf_table_offset + (i * 0x14) guid = struct.unpack("<IIII", self.ec.getMemoryService().read(str(offset), 16, 32)) if guid == conf_table_guid: if edk2_debugger.is_aarch64(self.ec): return self.ec.getMemoryService().readMemory64(offset + 0x10) else: return self.ec.getMemoryService().readMemory32(offset + 0x10) raise Exception('SystemTable','Configuration Table not found')
edk2-master
ArmPlatformPkg/Scripts/Ds5/system_table.py
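SystemTable scans downwards from the top of System Memory on 4 MB boundaries for the 'IBI SYST' signature, then resolves configuration tables by GUID. A sketch mirroring dump_system_table() from edk2_debugger.py (the DRAM base and size are placeholders):

from arm_ds.debugger_v1 import Debugger
import system_table

ec = Debugger().getCurrentExecutionContext()
st = system_table.SystemTable(ec, 0x80000000, 0x40000000)  # placeholder DRAM map
dit_base = st.get_configuration_table(
    system_table.DebugInfoTable.CONST_DEBUG_INFO_TABLE_GUID)
system_table.DebugInfoTable(ec, dit_base).dump()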
## @file # This file contains the script to build UniversalPayload # # Copyright (c) 2021, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent ## import argparse import subprocess import os import shutil import sys from ctypes import * from Tools.ElfFv import ReplaceFv sys.dont_write_bytecode = True class UPLD_INFO_HEADER(LittleEndianStructure): _pack_ = 1 _fields_ = [ ('Identifier', ARRAY(c_char, 4)), ('HeaderLength', c_uint32), ('SpecRevision', c_uint16), ('Reserved', c_uint16), ('Revision', c_uint32), ('Attribute', c_uint32), ('Capability', c_uint32), ('ProducerId', ARRAY(c_char, 16)), ('ImageId', ARRAY(c_char, 16)), ] def __init__(self): self.Identifier = b'PLDH' self.HeaderLength = sizeof(UPLD_INFO_HEADER) self.SpecRevision = 0x0070 self.Revision = 0x0000010105 self.ImageId = b'UEFI' self.ProducerId = b'INTEL' def BuildUniversalPayload(Args): def RunCommand(cmd): print(cmd) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,cwd=os.environ['WORKSPACE']) while True: line = p.stdout.readline() if not line: break print(line.strip().decode(errors='ignore')) p.communicate() if p.returncode != 0: print("- Failed - error happened when run command: %s"%cmd) raise Exception("ERROR: when run command: %s"%cmd) BuildTarget = Args.Target ToolChain = Args.ToolChain Quiet = "--quiet" if Args.Quiet else "" ElfToolChain = 'CLANGDWARF' BuildDir = os.path.join(os.environ['WORKSPACE'], os.path.normpath("Build/UefiPayloadPkgX64")) BuildModule = "" BuildArch = "" if Args.Arch == 'X64': BuildArch = "X64" EntryOutputDir = os.path.join(BuildDir, "{}_{}".format (BuildTarget, ElfToolChain), os.path.normpath("X64/UefiPayloadPkg/UefiPayloadEntry/UniversalPayloadEntry/DEBUG/UniversalPayloadEntry.dll")) else: BuildArch = "IA32 -a X64" EntryOutputDir = os.path.join(BuildDir, "{}_{}".format (BuildTarget, ElfToolChain), os.path.normpath("IA32/UefiPayloadPkg/UefiPayloadEntry/UniversalPayloadEntry/DEBUG/UniversalPayloadEntry.dll")) DscPath = os.path.normpath("UefiPayloadPkg/UefiPayloadPkg.dsc") ModuleReportPath = os.path.join(BuildDir, "UefiUniversalPayloadEntry.txt") UpldInfoFile = os.path.join(BuildDir, "UniversalPayloadInfo.bin") Pcds = "" if (Args.pcd != None): for PcdItem in Args.pcd: Pcds += " --pcd {}".format (PcdItem) Defines = "" if (Args.Macro != None): for MacroItem in Args.Macro: Defines += " -D {}".format (MacroItem) # # Building DXE core and DXE drivers as DXEFV. # if Args.BuildEntryOnly == False: PayloadReportPath = os.path.join(BuildDir, "UefiUniversalPayload.txt") BuildPayload = "build -p {} -b {} -a X64 -t {} -y {} {}".format (DscPath, BuildTarget, ToolChain, PayloadReportPath, Quiet) BuildPayload += Pcds BuildPayload += Defines RunCommand(BuildPayload) # # Building Universal Payload entry. 
# if Args.PreBuildUplBinary is None: EntryModuleInf = os.path.normpath("UefiPayloadPkg/UefiPayloadEntry/UniversalPayloadEntry.inf") BuildModule = "build -p {} -b {} -a {} -m {} -t {} -y {} {}".format (DscPath, BuildTarget, BuildArch, EntryModuleInf, ElfToolChain, ModuleReportPath, Quiet) BuildModule += Pcds BuildModule += Defines RunCommand(BuildModule) if Args.PreBuildUplBinary is not None: EntryOutputDir = os.path.join(BuildDir, "UniversalPayload.elf") shutil.copy (os.path.abspath(Args.PreBuildUplBinary), EntryOutputDir) # # Buid Universal Payload Information Section ".upld_info" # upld_info_hdr = UPLD_INFO_HEADER() upld_info_hdr.SpecRevision = Args.SpecRevision upld_info_hdr.Revision = Args.Revision upld_info_hdr.ProducerId = Args.ProducerId.encode()[:16] upld_info_hdr.ImageId = Args.ImageId.encode()[:16] upld_info_hdr.Attribute |= 1 if BuildTarget == "DEBUG" else 0 fp = open(UpldInfoFile, 'wb') fp.write(bytearray(upld_info_hdr)) fp.close() MultiFvList = [] if Args.BuildEntryOnly == False: MultiFvList = [ ['uefi_fv', os.path.join(BuildDir, "{}_{}".format (BuildTarget, ToolChain), os.path.normpath("FV/DXEFV.Fv")) ], ['bds_fv', os.path.join(BuildDir, "{}_{}".format (BuildTarget, ToolChain), os.path.normpath("FV/BDSFV.Fv")) ], ['network_fv', os.path.join(BuildDir, "{}_{}".format (BuildTarget, ToolChain), os.path.normpath("FV/NETWORKFV.Fv")) ], ] AddSectionName = '.upld_info' ReplaceFv (EntryOutputDir, UpldInfoFile, AddSectionName, Alignment = 4) if Args.PreBuildUplBinary is None: shutil.copy (EntryOutputDir, os.path.join(BuildDir, 'UniversalPayload.elf')) return MultiFvList, os.path.join(BuildDir, 'UniversalPayload.elf') def main(): def ValidateSpecRevision (Argument): try: (MajorStr, MinorStr) = Argument.split('.') except: raise argparse.ArgumentTypeError ('{} is not a valid SpecRevision format (Major[8-bits].Minor[8-bits]).'.format (Argument)) # # Spec Revision Bits 15 : 8 - Major Version. Bits 7 : 0 - Minor Version. 
# if len(MinorStr) > 0 and len(MinorStr) < 3: try: Minor = int(MinorStr, 16) if len(MinorStr) == 2 else (int(MinorStr, 16) << 4) except: raise argparse.ArgumentTypeError ('{} Minor version of SpecRevision is not a valid integer value.'.format (Argument)) else: raise argparse.ArgumentTypeError ('{} is not a valid SpecRevision format (Major[8-bits].Minor[8-bits]).'.format (Argument)) if len(MajorStr) > 0 and len(MajorStr) < 3: try: Major = int(MajorStr, 16) except: raise argparse.ArgumentTypeError ('{} Major version of SpecRevision is not a valid integer value.'.format (Argument)) else: raise argparse.ArgumentTypeError ('{} is not a valid SpecRevision format (Major[8-bits].Minor[8-bits]).'.format (Argument)) return int('0x{0:02x}{1:02x}'.format(Major, Minor), 0) def Validate32BitInteger (Argument): try: Value = int (Argument, 0) except: raise argparse.ArgumentTypeError ('{} is not a valid integer value.'.format (Argument)) if Value < 0: raise argparse.ArgumentTypeError ('{} is a negative value.'.format (Argument)) if Value > 0xffffffff: raise argparse.ArgumentTypeError ('{} is larger than 32-bits.'.format (Argument)) return Value def ValidateAddFv (Argument): Value = Argument.split ("=") if len (Value) != 2: raise argparse.ArgumentTypeError ('{} is incorrect format with "xxx_fv=xxx.fv"'.format (Argument)) if Value[0][-3:] != "_fv": raise argparse.ArgumentTypeError ('{} is incorrect format with "xxx_fv=xxx.fv"'.format (Argument)) if Value[1][-3:].lower () != ".fv": raise argparse.ArgumentTypeError ('{} is incorrect format with "xxx_fv=xxx.fv"'.format (Argument)) if os.path.exists (Value[1]) == False: raise argparse.ArgumentTypeError ('File {} is not found.'.format (Value[1])) return Value parser = argparse.ArgumentParser(description='For building Universal Payload') parser.add_argument('-t', '--ToolChain') parser.add_argument('-b', '--Target', default='DEBUG') parser.add_argument('-a', '--Arch', choices=['IA32', 'X64'], help='Specify the ARCH for payload entry module. Default build X64 image.', default ='X64') parser.add_argument("-D", "--Macro", action="append", default=["UNIVERSAL_PAYLOAD=TRUE"]) parser.add_argument('-i', '--ImageId', type=str, help='Specify payload ID (16 bytes maximal).', default ='UEFI') parser.add_argument('-q', '--Quiet', action='store_true', help='Disable all build messages except FATAL ERRORS.') parser.add_argument("-p", "--pcd", action="append") parser.add_argument("-s", "--SpecRevision", type=ValidateSpecRevision, default ='0.7', help='Indicates compliance with a revision of this specification in the BCD format.') parser.add_argument("-r", "--Revision", type=Validate32BitInteger, default ='0x0000010105', help='Revision of the Payload binary. 
Major.Minor.Revision.Build') parser.add_argument("-o", "--ProducerId", default ='INTEL', help='A null-terminated OEM-supplied string that identifies the payload producer (16 bytes maximal).') parser.add_argument("-sk", "--SkipBuild", action='store_true', help='Skip UniversalPayload build') parser.add_argument("-af", "--AddFv", type=ValidateAddFv, action='append', help='Add or replace specific FV into payload, Ex: uefi_fv=XXX.fv') command_group = parser.add_mutually_exclusive_group() command_group.add_argument("-e", "--BuildEntryOnly", action='store_true', help='Build UniversalPayload Entry file') command_group.add_argument("-pb", "--PreBuildUplBinary", default=None, help='Specify the UniversalPayload file') args = parser.parse_args() MultiFvList = [] UniversalPayloadBinary = args.PreBuildUplBinary if (args.SkipBuild == False): MultiFvList, UniversalPayloadBinary = BuildUniversalPayload(args) if (args.AddFv != None): for (SectionName, SectionFvFile) in args.AddFv: MultiFvList.append ([SectionName, SectionFvFile]) if (UniversalPayloadBinary != None): for (SectionName, SectionFvFile) in MultiFvList: if os.path.exists (SectionFvFile) == False: continue print ("Patch {}={} into {}".format (SectionName, SectionFvFile, UniversalPayloadBinary)) ReplaceFv (UniversalPayloadBinary, SectionFvFile, '.upld.{}'.format (SectionName)) print ("\nSuccessfully built Universal Payload") if __name__ == '__main__': main()
edk2-master
UefiPayloadPkg/UniversalPayloadBuild.py
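UniversalPayloadBuild.py drives two edk2 builds (the DXE firmware volumes with the user's tool chain, and the payload entry with the hard-coded CLANGDWARF ELF tool chain), then writes the .upld_info header and patches each FV into the ELF via ReplaceFv. Hypothetical invocations ('GCC5' and the .fv path are placeholders):

# Full payload: entry module plus the DXEFV/BDSFV/NETWORKFV sections:
#   python UniversalPayloadBuild.py -t GCC5 -b RELEASE -a X64
#
# Rebuild only the entry, then graft an extra FV section into the ELF:
#   python UniversalPayloadBuild.py -t GCC5 -e -af custom_fv=Custom.fv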
## @file
# This file is used to replace FV.
#
# Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
edk2-master
UefiPayloadPkg/Tools/__init__.py
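The otherwise empty __init__.py marks UefiPayloadPkg/Tools as a package, which is what lets the build script's import resolve:

from Tools.ElfFv import ReplaceFv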
## @file # OBJCOPY parser, it's used to replace FV # # Copyright (c) 2023, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent ## import argparse from ctypes import * import struct class ElfSectionHeader64: def __init__(self, sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size, sh_link, sh_info, sh_addralign, sh_entsize): self.sh_name = sh_name self.sh_type = sh_type self.sh_flags = sh_flags self.sh_addr = sh_addr self.sh_offset = sh_offset self.sh_size = sh_size self.sh_link = sh_link self.sh_info = sh_info self.sh_addralign = sh_addralign self.sh_entsize = sh_entsize def pack(self): return struct.pack('<IIQQQQIIQQ', self.sh_name, self.sh_type, self.sh_flags, self.sh_addr, self.sh_offset, self.sh_size, self.sh_link, self.sh_info, self.sh_addralign, self.sh_entsize) @classmethod def unpack(cls, data): unpacked_data = struct.unpack('<IIQQQQIIQQ', data) return cls(*unpacked_data) class ElfHeader64: def __init__(self, data): # Parse the ELF identification bytes self.e_ident = struct.unpack('16s', data[:16])[0] self.e_type = struct.unpack('H', data[16:18])[0] self.e_machine = struct.unpack('H', data[18:20])[0] self.e_version = struct.unpack('I', data[20:24])[0] self.e_entry = struct.unpack('Q', data[24:32])[0] self.e_phoff = struct.unpack('Q', data[32:40])[0] self.e_shoff = struct.unpack('Q', data[40:48])[0] self.e_flags = struct.unpack('I', data[48:52])[0] self.e_ehsize = struct.unpack('H', data[52:54])[0] self.e_phentsize = struct.unpack('H', data[54:56])[0] self.e_phnum = struct.unpack('H', data[56:58])[0] self.e_shentsize = struct.unpack('H', data[58:60])[0] self.e_shnum = struct.unpack('H', data[60:62])[0] self.e_shstrndx = struct.unpack('H', data[62:64])[0] def pack(self): # Pack the ELF header data into a binary string data = b'' data += struct.pack('16s', self.e_ident) data += struct.pack('H', self.e_type) data += struct.pack('H', self.e_machine) data += struct.pack('I', self.e_version) data += struct.pack('Q', self.e_entry) data += struct.pack('Q', self.e_phoff) data += struct.pack('Q', self.e_shoff) data += struct.pack('I', self.e_flags) data += struct.pack('H', self.e_ehsize) data += struct.pack('H', self.e_phentsize) data += struct.pack('H', self.e_phnum) data += struct.pack('H', self.e_shentsize) data += struct.pack('H', self.e_shnum) data += struct.pack('H', self.e_shstrndx) return data class Elf64_Phdr: def __init__(self, data): self.p_type = struct.unpack("<L", data[0:4])[0] self.p_flags = struct.unpack("<L", data[4:8])[0] self.p_offset = struct.unpack("<Q", data[8:16])[0] self.p_vaddr = struct.unpack("<Q", data[16:24])[0] self.p_paddr = struct.unpack("<Q", data[24:32])[0] self.p_filesz = struct.unpack("<Q", data[32:40])[0] self.p_memsz = struct.unpack("<Q", data[40:48])[0] self.p_align = struct.unpack("<Q", data[48:56])[0] def pack(self): # Pack the Program header table into a binary string data = b'' data += struct.pack('<L', self.p_type) data += struct.pack('<L', self.p_flags) data += struct.pack('<Q', self.p_offset) data += struct.pack('<Q', self.p_vaddr) data += struct.pack('<Q', self.p_paddr) data += struct.pack('<Q', self.p_filesz) data += struct.pack('<Q', self.p_memsz) data += struct.pack('<Q', self.p_align) return data class ElfSectionHeader32: def __init__(self, sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size, sh_link, sh_info, sh_addralign, sh_entsize): self.sh_name = sh_name self.sh_type = sh_type self.sh_flags = sh_flags self.sh_addr = sh_addr self.sh_offset = sh_offset self.sh_size = sh_size self.sh_link = 
sh_link self.sh_info = sh_info self.sh_addralign = sh_addralign self.sh_entsize = sh_entsize def pack(self): return struct.pack('<IIIIIIIIII', self.sh_name, self.sh_type, self.sh_flags, self.sh_addr, self.sh_offset, self.sh_size, self.sh_link, self.sh_info, self.sh_addralign, self.sh_entsize) @classmethod def unpack(cls, data): unpacked_data = struct.unpack('<IIIIIIIIII', data) return cls(*unpacked_data) class ElfHeader32: def __init__(self, data): # Parse the ELF identification bytes self.e_ident = struct.unpack('16s', data[:16])[0] self.e_type = struct.unpack('H', data[16:18])[0] self.e_machine = struct.unpack('H', data[18:20])[0] self.e_version = struct.unpack('I', data[20:24])[0] self.e_entry = struct.unpack('I', data[24:28])[0] self.e_phoff = struct.unpack('I', data[28:32])[0] self.e_shoff = struct.unpack('I', data[32:36])[0] self.e_flags = struct.unpack('I', data[36:40])[0] self.e_ehsize = struct.unpack('H', data[40:42])[0] self.e_phentsize = struct.unpack('H', data[42:44])[0] self.e_phnum = struct.unpack('H', data[44:46])[0] self.e_shentsize = struct.unpack('H', data[46:48])[0] self.e_shnum = struct.unpack('H', data[48:50])[0] self.e_shstrndx = struct.unpack('H', data[50:52])[0] def pack(self): # Pack the ELF header data into a binary string data = b'' data += struct.pack('16s', self.e_ident) data += struct.pack('H', self.e_type) data += struct.pack('H', self.e_machine) data += struct.pack('I', self.e_version) data += struct.pack('I', self.e_entry) data += struct.pack('I', self.e_phoff) data += struct.pack('I', self.e_shoff) data += struct.pack('I', self.e_flags) data += struct.pack('H', self.e_ehsize) data += struct.pack('H', self.e_phentsize) data += struct.pack('H', self.e_phnum) data += struct.pack('H', self.e_shentsize) data += struct.pack('H', self.e_shnum) data += struct.pack('H', self.e_shstrndx) return data class Elf32_Phdr: def __init__(self, data): self.p_type = struct.unpack("<L", data[0:4])[0] self.p_offset = struct.unpack("<L", data[4:8])[0] self.p_vaddr = struct.unpack("<L", data[8:12])[0] self.p_paddr = struct.unpack("<L", data[12:16])[0] self.p_filesz = struct.unpack("<L", data[16:20])[0] self.p_memsz = struct.unpack("<L", data[20:24])[0] self.p_flags = struct.unpack("<L", data[24:28])[0] self.p_align = struct.unpack("<L", data[28:32])[0] def pack(self): # Pack the Program header table into a binary string data = b'' data += struct.pack('<L', self.p_type) data += struct.pack('<L', self.p_offset) data += struct.pack('<L', self.p_vaddr) data += struct.pack('<L', self.p_paddr) data += struct.pack('<L', self.p_filesz) data += struct.pack('<L', self.p_memsz) data += struct.pack('<L', self.p_flags) data += struct.pack('<L', self.p_align) return data def SectionAlignment(NewUPLEntry, AlignmentIndex): # Section entry Alignment # Alignment is transfer to integer if AlignmentIndex is string. 
if isinstance(AlignmentIndex, str): int_num = int(AlignmentIndex, 16) int_num = 10 * (int_num//16) + int_num % 16 else: int_num = AlignmentIndex if (int_num != 0 or int_num != 1): if ((len(NewUPLEntry) % int_num) != 0): AlignNumber = int_num - (len(NewUPLEntry) % int_num) if (AlignNumber != 0): for x in range(AlignNumber): NewUPLEntry = NewUPLEntry + bytearray(b'\0') return NewUPLEntry def SectionEntryFill(SectionEntry, Alignment, Value, Offset): # Alignment n = 0 if (len (Value) < Alignment): Value = Value.zfill(Alignment) for x in range(0, (Alignment//2)): Index = '0x' + Value[n] + Value[n + 1] SectionEntry[Offset - x] = int(Index,16) n += 2 return SectionEntry def ElfHeaderParser(UPLEntry): # Read EI_CLASS, it stores information that elf with 32-bit or 64-bit architectures. EI_CLASS = UPLEntry[4] # If Elf is 64-bit objects. if (EI_CLASS == 2): # Elf header is stored at 0x0-0x40 in 64-bits objects ElfHeaderData = UPLEntry[:64] # If Elf is 32-bit objects. else: # Elf header is stored at 0x0-0x34 in 32-bits objects ElfHeaderData = UPLEntry[:53] # If Elf is 64-bit objects. if (EI_CLASS == 2): elf_header = ElfHeader64(ElfHeaderData) ElfHeaderOffset = elf_header.e_shoff SectionHeaderEntryNumber = elf_header.e_shnum StringIndexNumber = elf_header.e_shstrndx SectionHeaderEntrySize = elf_header.e_shentsize StringIndexEntryOffset = ElfHeaderOffset + (StringIndexNumber * SectionHeaderEntrySize) unpacked_header = ElfSectionHeader64.unpack(UPLEntry[StringIndexEntryOffset: (StringIndexEntryOffset + SectionHeaderEntrySize)]) StringIndexSize = unpacked_header.sh_size StringIndexOffset = unpacked_header.sh_offset # If elf is 32-bit objects. else: elf_header = ElfHeader32(ElfHeaderData) ElfHeaderOffset = elf_header.e_shoff SectionHeaderEntryNumber = elf_header.e_shnum StringIndexNumber = elf_header.e_shstrndx SectionHeaderEntrySize = elf_header.e_shentsize StringIndexEntryOffset = ElfHeaderOffset + (StringIndexNumber * SectionHeaderEntrySize) unpacked_header = ElfSectionHeader32.unpack(UPLEntry[StringIndexEntryOffset: (StringIndexEntryOffset + SectionHeaderEntrySize)]) StringIndexSize = unpacked_header.sh_size StringIndexOffset = unpacked_header.sh_offset return ElfHeaderOffset, SectionHeaderEntryNumber, StringIndexNumber, StringIndexEntryOffset, StringIndexSize, SectionHeaderEntrySize, StringIndexOffset, EI_CLASS def FindSection(UPLEntry, SectionName): ElfHeaderOffset, SectionHeaderEntryNumber, StringIndexNumber, _, StringIndexSize, SectionHeaderEntrySize, StringIndexOffset, EI_CLASS = ElfHeaderParser(UPLEntry) # StringIndex is String Index section StringIndex = UPLEntry[StringIndexOffset:StringIndexOffset+StringIndexSize] # Section header isn't exist if SectionNameOffset = -1. StringIndex = StringIndex.decode('utf-8', errors='ignore') SectionNameOffset = StringIndex.find(SectionName) return SectionNameOffset, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, StringIndexOffset, StringIndexNumber, EI_CLASS def AddNewSectionEntry64(LastUPLEntrylen, StringIndexValue, SectionSize, Alignment): # If elf is 64-bit objects. NewSectionEntry = ElfSectionHeader64 (StringIndexValue, 1, 0, 0, LastUPLEntrylen, SectionSize, 0, 0, Alignment, 0) sh_bytes = NewSectionEntry.pack() return sh_bytes def AddNewSectionEntry32(LastUPLEntrylen, StringIndexValue, SectionSize, Alignment): # If elf is 32-bit objects. 
NewSectionEntry = ElfSectionHeader32 (StringIndexValue, 1, 0, 0, LastUPLEntrylen, SectionSize, 0, 0, Alignment, 0) sh_bytes = NewSectionEntry.pack() return sh_bytes def AddSectionHeader64(SHentry, NewUPLEntrylen, SectionHeaderEntrySize, Index, RemoveNameOffset, SectionName, StringIndexNumber): SHentry = bytearray(SHentry) unpacked_header = ElfSectionHeader64.unpack(SHentry[(Index * SectionHeaderEntrySize):((Index * SectionHeaderEntrySize) + SectionHeaderEntrySize)]) # Section header of section 0 shows 0. It don't modify any offset. if (Index != 0): # read section offset. unpacked_header.sh_offset = NewUPLEntrylen # Modify offset of name in section entry # if RemoveNameOffset != 0 that is remove function. if (RemoveNameOffset != 0): if (unpacked_header.sh_name > RemoveNameOffset): unpacked_header.sh_name -= len (SectionName) # Modify size of name string section entry in section entry. if (Index == StringIndexNumber): unpacked_header.sh_size -= len (SectionName) # added section else : if (Index == StringIndexNumber): unpacked_header.sh_size += len (SectionName) NewSHentry = ElfSectionHeader64 ( unpacked_header.sh_name, unpacked_header.sh_type, unpacked_header.sh_flags, unpacked_header.sh_addr, unpacked_header.sh_offset, unpacked_header.sh_size, unpacked_header.sh_link, unpacked_header.sh_info, unpacked_header.sh_addralign, unpacked_header.sh_entsize).pack() return NewSHentry def AddSectionHeader32(SHentry, NewUPLEntrylen, SectionHeaderEntrySize, Index, RemoveNameOffset, SectionName, StringIndexNumber): SHentry = bytearray(SHentry) unpacked_header = ElfSectionHeader32.unpack(SHentry[(Index * SectionHeaderEntrySize):((Index * SectionHeaderEntrySize) + SectionHeaderEntrySize)]) if (Index != 0): NewSHentry = SHentry[(Index * SectionHeaderEntrySize):((Index * SectionHeaderEntrySize) + SectionHeaderEntrySize)] unpacked_header.sh_offset = NewUPLEntrylen # Modify offset of name in section entry # if RemoveNameOffset != 0 that is remove function. if (RemoveNameOffset != 0): if (unpacked_header.sh_name > RemoveNameOffset): unpacked_header.sh_name -= len (SectionName) # Modify size of name string section entry in section entry. if (Index == StringIndexNumber): unpacked_header.sh_size -= len (SectionName) # added section else : if (Index == StringIndexNumber): unpacked_header.sh_size += len (SectionName) NewSHentry = ElfSectionHeader32 ( unpacked_header.sh_name, unpacked_header.sh_type, unpacked_header.sh_flags, unpacked_header.sh_addr, unpacked_header.sh_offset, unpacked_header.sh_size, unpacked_header.sh_link, unpacked_header.sh_info, unpacked_header.sh_addralign, unpacked_header.sh_entsize).pack() return NewSHentry def ModifyPHSegmentOffset64(NewUPLEntry, ElfHeaderOffset, PHSegmentName): # Modify offset and address of program header tables. 
    elf_header = ElfHeader64(NewUPLEntry[:64])
    SHentry = NewUPLEntry[ElfHeaderOffset:]
    # The ELF program header table starts at 0x40 in 64-bit objects.
    PHentry = NewUPLEntry[64: 64 + (elf_header.e_phnum * elf_header.e_phentsize)]
    PHdrs = []
    SHdrs = []
    for i in range(elf_header.e_shnum):
        SHData = SHentry[(i * elf_header.e_shentsize): (i * elf_header.e_shentsize) + elf_header.e_shentsize]
        unpacked_SectionHeader = ElfSectionHeader64.unpack(SHData)
        SHdrs.append(unpacked_SectionHeader)
    for i in range(elf_header.e_phnum):
        PHData = PHentry[(i * elf_header.e_phentsize): (i * elf_header.e_phentsize) + elf_header.e_phentsize]
        unpacked_ProgramHeader = Elf64_Phdr(PHData)
        PHdrs.append(unpacked_ProgramHeader)
    # A fixed program header layout is assumed: segments 0/4 map .text,
    # segments 1/3 map .dynamic and segment 2 maps .data.
    if (PHSegmentName == '.text'):
        PHdrs[0].p_offset = SHdrs[1].sh_offset
        PHdrs[0].p_paddr = SHdrs[1].sh_addr
        PHdrs[4].p_offset = SHdrs[1].sh_offset
        PHdrs[4].p_paddr = SHdrs[1].sh_addr
    elif (PHSegmentName == '.dynamic'):
        PHdrs[1].p_offset = SHdrs[2].sh_offset
        PHdrs[1].p_paddr = SHdrs[2].sh_addr
        PHdrs[3].p_offset = SHdrs[2].sh_offset
        PHdrs[3].p_paddr = SHdrs[2].sh_addr
    elif (PHSegmentName == '.data'):
        PHdrs[2].p_offset = SHdrs[3].sh_offset
        PHdrs[2].p_paddr = SHdrs[3].sh_addr
    packed_PHData = b''
    for phdr in PHdrs:
        packed_PHData += phdr.pack()
    NewUPLEntry = bytearray(NewUPLEntry)
    NewUPLEntry[64: 64 + (elf_header.e_phnum * elf_header.e_phentsize)] = packed_PHData
    return NewUPLEntry

def ModifyPHSegmentOffset32(NewUPLEntry, ElfHeaderOffset, PHSegmentName):
    # Modify the offset and address fields of the program header table.
    # The ELF header is stored at 0x0-0x34 in 32-bit objects.
    elf_header = ElfHeader32(NewUPLEntry[:52])
    SHentry = NewUPLEntry[ElfHeaderOffset:]
    # The ELF program header table starts at 0x34 in 32-bit objects.
    PHentry = NewUPLEntry[52: 52 + (elf_header.e_phnum * elf_header.e_phentsize)]
    PHdrs = []
    SHdrs = []
    for i in range(elf_header.e_shnum):
        SHData = SHentry[(i * elf_header.e_shentsize): (i * elf_header.e_shentsize) + elf_header.e_shentsize]
        unpacked_SectionHeader = ElfSectionHeader32.unpack(SHData)
        SHdrs.append(unpacked_SectionHeader)
    for i in range(elf_header.e_phnum):
        PHData = PHentry[(i * elf_header.e_phentsize): (i * elf_header.e_phentsize) + elf_header.e_phentsize]
        unpacked_ProgramHeader = Elf32_Phdr(PHData)
        PHdrs.append(unpacked_ProgramHeader)
    if (PHSegmentName == '.text'):
        PHdrs[0].p_offset = SHdrs[1].sh_offset
        PHdrs[0].p_paddr = SHdrs[1].sh_addr
        PHdrs[0].p_vaddr = SHdrs[1].sh_addr
        PHdrs[2].p_offset = SHdrs[1].sh_offset
        PHdrs[2].p_paddr = SHdrs[1].sh_addr
        PHdrs[2].p_vaddr = SHdrs[1].sh_addr
    elif (PHSegmentName == '.data'):
        PHdrs[1].p_offset = SHdrs[2].sh_offset
        PHdrs[1].p_paddr = SHdrs[2].sh_addr
        PHdrs[1].p_vaddr = SHdrs[2].sh_addr
    packed_PHData = b''
    for phdr in PHdrs:
        packed_PHData += phdr.pack()
    NewUPLEntry = bytearray(NewUPLEntry)
    NewUPLEntry[52: 52 + (elf_header.e_phnum * elf_header.e_phentsize)] = packed_PHData
    return NewUPLEntry

def RemoveSection64(UniversalPayloadEntry, RemoveSectionName):
    # If the ELF is a 64-bit object.
    # Collect the following offsets:
    # 1. Offset of the section name inside the section name string table.
    # 2. Offset of the section to remove.
    # 3. Offset of the section header to remove.
    with open(UniversalPayloadEntry, 'rb') as f:
        UPLEntry = f.read()
    RemoveSectionNameOffset, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, _, StringIndexNumber, _ = FindSection(UPLEntry, RemoveSectionName)
    if (RemoveSectionNameOffset == -1):
        raise argparse.ArgumentTypeError('Section: {} not found.'.format(RemoveSectionName))
    # Read the section header entries.
    SHentry = UPLEntry[ElfHeaderOffset:]
    # Find the offset of the removed fv section.
    # The ELF header is stored at 0x0-0x40 in 64-bit objects.
    elf_header = ElfHeader64(UPLEntry[:64])
    Counter = 0
    RemoveIndex = 0
    RemoveNameOffset = 0
    for Index in range(0, elf_header.e_shnum):
        # Read the section header at Index.
        unpacked_SectionHeader = ElfSectionHeader64.unpack(SHentry[(Index * elf_header.e_shentsize):((Index * elf_header.e_shentsize) + elf_header.e_shentsize)])
        # Find the index of the section whose name is being removed.
        if (unpacked_SectionHeader.sh_name == RemoveSectionNameOffset):
            RemoveIndex = Counter
            Counter += 1
        else:
            Counter += 1
    # The ELF is recombined.
    # The ELF header and the program header table in front of the first section are preserved.
    # The ELF header size is 0x40 for a 64-bit object.
    ElfHeaderSize = 64
    ElfHandPH = ElfHeaderSize + (elf_header.e_phnum * elf_header.e_phentsize)
    NewUPLEntry = UPLEntry[:ElfHandPH]
    # Keep the ELF header and program header table; RemoveSection64() only recombines sections and section headers.
    NewUPLEntry = bytearray(NewUPLEntry)
    # Sections are recombined as follows:
    # 1. The name of the deleted section is removed from the name string section.
    # 2. The deleted section is removed from the dll file.
    # 3. Sections before and after the deleted section are re-aligned.
    NewUPLEntrylen = []
    for Index in range(0, (SectionHeaderEntryNumber)):
        unpacked_SectionHeader = ElfSectionHeader64.unpack(SHentry[(Index * SectionHeaderEntrySize):((Index * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
        NewUPLEntrylen.append(len(NewUPLEntry))
        if (Index == 0):
            # Address alignment: the section aligns to the alignment of the next section.
            AlignmentIndex = 8
            if (SectionHeaderEntryNumber > 2):
                unpacked_NextSectionHeader = ElfSectionHeader64.unpack(SHentry[((Index + 1) * SectionHeaderEntrySize):(((Index + 1) * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
                NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_NextSectionHeader.sh_addralign)
        # The section in front of the removed section.
        elif (Index + 1 == RemoveIndex):
            NewUPLEntry += UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Read the section address alignment.
            # If the section removed from the .dll is neither the first nor the last one,
            # this section aligns to the alignment of the section after the deleted one.
            # Check that the next section and the one after it are not the end of the section list.
            if ((Index + 2) < (SectionHeaderEntryNumber - 1)):
                unpacked_Next2SectionHeader = ElfSectionHeader64.unpack(SHentry[((Index + 2) * SectionHeaderEntrySize):(((Index + 2) * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
                NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_Next2SectionHeader.sh_addralign)
            else:
                # Align to 8 bytes if the next section or the one after it is the last one.
                AlignmentIndex = 8
                NewUPLEntry = SectionAlignment(NewUPLEntry, AlignmentIndex)
        # The deleted section itself.
        elif (Index == RemoveIndex):
            # Do not add the removed section to the ELF; only record the offset of its name.
            RemoveNameOffset = unpacked_SectionHeader.sh_name
        # The name string section.
        elif (Index == StringIndexNumber):
            # StringIndex is the section name string table.
            StringIndex = UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Remove the name of the removed section from the name string section.
            # The section header does not exist if RemoveSectionNameOffset equals -1.
            StringIndex = bytearray(StringIndex)
            RemoveSectionName = bytearray(RemoveSectionName, encoding='utf-8')
            RemoveSectionName = RemoveSectionName + bytes('\0', encoding='utf-8')
            StringIndex = StringIndex.replace(RemoveSectionName, b'')
            NewUPLEntry += StringIndex
        # All other sections.
        else:
            NewUPLEntry += UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Address alignment: the section aligns to the alignment of the next section.
            if (Index < (SectionHeaderEntryNumber - 1)):
                NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_NextSectionHeader.sh_addralign)
            else:
                # The last section aligns to 8 bytes.
                AlignmentIndex = 8
                NewUPLEntry = SectionAlignment(NewUPLEntry, AlignmentIndex)
    SectionHeaderOffset = len(NewUPLEntry)
    # Add the section headers.
    for Number in range(0, (SectionHeaderEntryNumber)):
        if (Number != RemoveIndex):
            NewSHentry = AddSectionHeader64(SHentry, NewUPLEntrylen[Number], SectionHeaderEntrySize, Number, RemoveNameOffset, RemoveSectionName, StringIndexNumber)
            NewUPLEntry += NewSHentry
    # Update the number of sections and the section header offset in the ELF header.
    elf_header.e_shoff = SectionHeaderOffset
    elf_header.e_shnum -= 1
    NewUPLEntry = elf_header.pack() + NewUPLEntry[64:]
    # Write back to the ELF.
    with open(UniversalPayloadEntry, 'wb') as f:
        f.write(NewUPLEntry)

def RemoveSection32(UniversalPayloadEntry, RemoveSectionName):
    # If the ELF is a 32-bit object.
    # Collect the following offsets:
    # 1. Offset of the section name inside the section name string table.
    # 2. Offset of the section to remove.
    # 3. Offset of the section header to remove.
    with open(UniversalPayloadEntry, 'rb') as f:
        UPLEntry = f.read()
    RemoveSectionNameOffset, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, _, StringIndexNumber, EI_CLASS = FindSection(UPLEntry, RemoveSectionName)
    if (RemoveSectionNameOffset == -1):
        raise argparse.ArgumentTypeError('Section: {} not found.'.format(RemoveSectionName))
    # Read the section header entries.
    SHentry = UPLEntry[ElfHeaderOffset:]
    # Find the offset of the removed fv section.
    # The ELF header is stored at 0x0-0x34 in 32-bit objects.
    elf_header = ElfHeader32(UPLEntry[:52])
    Counter = 0
    RemoveIndex = 0
    RemoveNameOffset = 0
    for Index in range(0, elf_header.e_shnum):
        # Read the section header at Index.
        unpacked_SectionHeader = ElfSectionHeader32.unpack(SHentry[(Index * elf_header.e_shentsize):((Index * elf_header.e_shentsize) + elf_header.e_shentsize)])
        # Find the index of the section whose name is being removed.
        if (unpacked_SectionHeader.sh_name == RemoveSectionNameOffset):
            RemoveIndex = Counter
            Counter += 1
        else:
            Counter += 1
    # The ELF is recombined.
    # The ELF header and the program header table in front of the first section are preserved.
    # The ELF header size is 0x34 for a 32-bit object.
    ElfHeaderSize = 52
    ElfHandPH = ElfHeaderSize + (elf_header.e_phnum * elf_header.e_phentsize)
    NewUPLEntry = UPLEntry[:ElfHandPH]
    # Keep the ELF header and program header table; RemoveSection32() only recombines sections and section headers.
    NewUPLEntry = bytearray(NewUPLEntry)
    # Sections are recombined as follows:
    # 1. The name of the deleted section is removed from the name string section.
    # 2. The deleted section is removed from the dll file.
    # 3. Sections before and after the deleted section are re-aligned.
    NewUPLEntrylen = []
    for Index in range(0, (SectionHeaderEntryNumber)):
        unpacked_SectionHeader = ElfSectionHeader32.unpack(SHentry[(Index * SectionHeaderEntrySize):((Index * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
        NewUPLEntrylen.append(len(NewUPLEntry))
        if (Index == 0):
            # Address alignment: the section aligns to the alignment of the next section.
            AlignmentIndex = 8
            if (SectionHeaderEntryNumber > 2):
                unpacked_NextSectionHeader = ElfSectionHeader32.unpack(SHentry[((Index + 1) * SectionHeaderEntrySize):(((Index + 1) * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
                NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_NextSectionHeader.sh_addralign)
        # The section in front of the removed section.
        elif (Index + 1 == RemoveIndex):
            NewUPLEntry += UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Read the section address alignment.
            # If the section removed from the .dll is neither the first nor the last one,
            # this section aligns to the alignment of the section after the deleted one.
            # Check that the next section and the one after it are not the end of the section list.
            if ((Index + 2) < (SectionHeaderEntryNumber - 1)):
                unpacked_Next2SectionHeader = ElfSectionHeader32.unpack(SHentry[((Index + 2) * SectionHeaderEntrySize):(((Index + 2) * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
                NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_Next2SectionHeader.sh_addralign)
            else:
                # Align to 8 bytes if the next section or the one after it is the last one.
                AlignmentIndex = 8
                NewUPLEntry = SectionAlignment(NewUPLEntry, AlignmentIndex)
        # The deleted section itself.
        elif (Index == RemoveIndex):
            # Do not add the removed section to the ELF; only record the offset of its name.
            RemoveNameOffset = unpacked_SectionHeader.sh_name
        # The name string section.
        elif (Index == StringIndexNumber):
            # StringIndex is the section name string table.
            StringIndex = UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Remove the name of the removed section from the name string section.
            # The section header does not exist if RemoveSectionNameOffset equals -1.
            StringIndex = bytearray(StringIndex)
            RemoveSectionName = bytearray(RemoveSectionName, encoding='utf-8')
            RemoveSectionName = RemoveSectionName + bytes('\0', encoding='utf-8')
            StringIndex = StringIndex.replace(RemoveSectionName, b'')
            NewUPLEntry += StringIndex
        # All other sections.
        else:
            NewUPLEntry += UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Address alignment: the section aligns to the alignment of the next section.
            if (Index < (SectionHeaderEntryNumber - 1)):
                NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_NextSectionHeader.sh_addralign)
            else:
                # The last section aligns to 8 bytes.
                AlignmentIndex = 8
                NewUPLEntry = SectionAlignment(NewUPLEntry, AlignmentIndex)
    SectionHeaderOffset = len(NewUPLEntry)
    # Add the section headers.
    for Number in range(0, (SectionHeaderEntryNumber)):
        if (Number != RemoveIndex):
            NewSHentry = AddSectionHeader32(SHentry, NewUPLEntrylen[Number], SectionHeaderEntrySize, Number, RemoveNameOffset, RemoveSectionName, StringIndexNumber)
            NewUPLEntry += NewSHentry
    # Update the number of sections and the section header offset in the ELF header.
    elf_header.e_shoff = SectionHeaderOffset
    elf_header.e_shnum -= 1
    NewUPLEntry = elf_header.pack() + NewUPLEntry[52:]
    # Write back to the ELF.
    with open(UniversalPayloadEntry, 'wb') as f:
        f.write(NewUPLEntry)

def AddSection64(UniversalPayloadEntry, AddSectionName, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, StringIndexNumber, FileBinary, Alignment):
    # Read the ELF and the binary that will be written into it.
    with open(UniversalPayloadEntry, 'rb+') as f:
        UPLEntry = f.read()
    fFileBinary = open(FileBinary, 'rb')
    Binary_File = fFileBinary.read()
    ElfHeaderOffset, SectionHeaderEntryNumber, StringIndexNumber, _, _, SectionHeaderEntrySize, _, _ = ElfHeaderParser(UPLEntry)
    # Read the section header entries.
    SHentry = UPLEntry[ElfHeaderOffset:]
    # The ELF is recombined.
    # The ELF header and the program header table in front of the first section are preserved.
    # The ELF header is stored at 0x0-0x40 in 64-bit objects.
    elf_header = ElfHeader64(UPLEntry[:64])
    # The ELF header size is 0x40 for a 64-bit object.
    ElfHeaderSize = 64
    ElfHandPH = ElfHeaderSize + (elf_header.e_phnum * elf_header.e_phentsize)
    NewUPLEntry = UPLEntry[:ElfHandPH]
    # Keep the ELF header and program header table; AddSection64() only recombines sections and section headers.
    NewUPLEntry = bytearray(NewUPLEntry)
    # Sections are recombined as follows:
    # 1. The name of the added section is appended to the name string section.
    # 2. The added section is appended to the dll file.
    # 3. Sections before and after the added section are re-aligned.
    NewUPLEntrylen = []
    StringIndexValue = 0
    for Index in range(0, SectionHeaderEntryNumber):
        NewUPLEntrylen.append(len(NewUPLEntry))
        unpacked_SectionHeader = ElfSectionHeader64.unpack(SHentry[(Index * SectionHeaderEntrySize):((Index * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
        if (Index == 0):
            # Address alignment: the section aligns to the alignment of the next section.
            AlignmentIndex = 8
            if (SectionHeaderEntryNumber > 2):
                unpacked_NextSectionHeader = ElfSectionHeader64.unpack(SHentry[((Index + 1) * SectionHeaderEntrySize):(((Index + 1) * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
                NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_NextSectionHeader.sh_addralign)
        # The last section.
        elif (Index == (SectionHeaderEntryNumber - 1)):
            # Add the new section at the end.
            NewUPLEntry += UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            NewUPLEntry = SectionAlignment(NewUPLEntry, Alignment)
            LastUPLEntrylen = len(NewUPLEntry)
            NewUPLEntry += Binary_File
            # Address alignment: align to 8 bytes after the new section.
            AlignmentIndex = 8
            NewUPLEntry = SectionAlignment(NewUPLEntry, AlignmentIndex)
        # The name string section.
        elif (Index == StringIndexNumber):
            # StringIndex is the section name string table.
            StringIndex = UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Append the name of the added section to the string table.
            StringIndex = bytearray(StringIndex)
            StringIndexValue = len(StringIndex)
            AddSectionName = bytearray(AddSectionName, encoding='utf-8') + bytes('\0', encoding='utf-8')
            StringIndex += AddSectionName
            NewUPLEntry += StringIndex
        # Sections after the name string section but not the last one.
        elif ((Index > StringIndexNumber) and (Index < (SectionHeaderEntryNumber - 1))):
            NewUPLEntry += UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Address alignment: the section aligns to the alignment of the next section.
            unpacked_NextSectionHeader = ElfSectionHeader64.unpack(SHentry[((Index + 1) * SectionHeaderEntrySize):(((Index + 1) * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
            NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_NextSectionHeader.sh_addralign)
        # Sections before the name string section.
        else:
            NewUPLEntry += UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Address alignment: the section aligns to the alignment of the next section.
            if (Index < (SectionHeaderEntryNumber - 1)):
                unpacked_NextSectionHeader = ElfSectionHeader64.unpack(SHentry[((Index + 1) * SectionHeaderEntrySize):(((Index + 1) * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
                NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_NextSectionHeader.sh_addralign)
    SectionHeaderOffset = len(NewUPLEntry)
    RemoveNameOffset = 0
    # Add the section headers.
    for Number in range(0, (SectionHeaderEntryNumber)):
        NewSHentry = AddSectionHeader64(SHentry, NewUPLEntrylen[Number], SectionHeaderEntrySize, Number, RemoveNameOffset, AddSectionName, StringIndexNumber)
        NewUPLEntry += NewSHentry
    NewUPLEntry += bytearray(AddNewSectionEntry64(LastUPLEntrylen, StringIndexValue, len(Binary_File), Alignment))
    # Update the number of sections and the section header offset in the ELF header.
    elf_header.e_shoff = SectionHeaderOffset
    elf_header.e_shnum += 1
    elf_header = elf_header.pack()
    UPLEntryBin = elf_header + NewUPLEntry[64:]
    # Update the offsets and addresses recorded in the program header table.
    PHSegmentName = '.text'
    _, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, _, StringIndexNumber, _ = FindSection(UPLEntryBin, PHSegmentName)
    UPLEntryBin = ModifyPHSegmentOffset64(UPLEntryBin, ElfHeaderOffset, PHSegmentName)
    PHSegmentName = '.dynamic'
    _, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, _, StringIndexNumber, _ = FindSection(UPLEntryBin, PHSegmentName)
    UPLEntryBin = ModifyPHSegmentOffset64(UPLEntryBin, ElfHeaderOffset, PHSegmentName)
    PHSegmentName = '.data'
    _, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, _, StringIndexNumber, _ = FindSection(UPLEntryBin, PHSegmentName)
    UPLEntryBin = ModifyPHSegmentOffset64(UPLEntryBin, ElfHeaderOffset, PHSegmentName)
    fFileBinary.close()
    return UPLEntryBin

def AddSection32(UniversalPayloadEntry, AddSectionName, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, StringIndexNumber, FileBinary, Alignment):
    # Read the ELF and the binary that will be written into it.
    with open(UniversalPayloadEntry, 'rb+') as f:
        UPLEntry = f.read()
    fFileBinary = open(FileBinary, 'rb')
    Binary_File = fFileBinary.read()
    ElfHeaderOffset, SectionHeaderEntryNumber, StringIndexNumber, _, _, SectionHeaderEntrySize, _, _ = ElfHeaderParser(UPLEntry)
    # Read the section header entries.
    SHentry = UPLEntry[ElfHeaderOffset:]
    # The ELF is recombined.
    # The ELF header and the program header table in front of the first section are preserved.
    # The ELF header is stored at 0x0-0x34 in 32-bit objects.
    elf_header = ElfHeader32(UPLEntry[:52])
    # The ELF header size is 0x34 for a 32-bit object.
    ElfHeaderSize = 52
    ElfHandPH = ElfHeaderSize + (elf_header.e_phnum * elf_header.e_phentsize)
    NewUPLEntry = UPLEntry[:ElfHandPH]
    # Keep the ELF header and program header table; AddSection32() only recombines sections and section headers.
    NewUPLEntry = bytearray(NewUPLEntry)
    # Sections are recombined as follows:
    # 1. The name of the added section is appended to the name string section.
    # 2. The added section is appended to the dll file.
    # 3. Sections before and after the added section are re-aligned.
    NewUPLEntrylen = []
    StringIndexValue = 0
    for Index in range(0, SectionHeaderEntryNumber):
        NewUPLEntrylen.append(len(NewUPLEntry))
        unpacked_SectionHeader = ElfSectionHeader32.unpack(SHentry[(Index * SectionHeaderEntrySize):((Index * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
        if (Index == 0):
            # Address alignment: the section aligns to the alignment of the next section.
            AlignmentIndex = 8
            if (SectionHeaderEntryNumber > 2):
                unpacked_NextSectionHeader = ElfSectionHeader32.unpack(SHentry[((Index + 1) * SectionHeaderEntrySize):(((Index + 1) * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
                NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_NextSectionHeader.sh_addralign)
        # The last section.
        elif (Index == (SectionHeaderEntryNumber - 1)):
            # Add the new section at the end.
            NewUPLEntry += UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            NewUPLEntry = SectionAlignment(NewUPLEntry, Alignment)
            LastUPLEntrylen = len(NewUPLEntry)
            NewUPLEntry += Binary_File
            # Address alignment: align to 8 bytes after the new section.
            AlignmentIndex = 8
            NewUPLEntry = SectionAlignment(NewUPLEntry, AlignmentIndex)
        # The name string section.
        elif (Index == StringIndexNumber):
            # StringIndex is the section name string table.
            StringIndex = UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Append the name of the added section to the string table.
            StringIndex = bytearray(StringIndex)
            StringIndexValue = len(StringIndex)
            AddSectionName = bytearray(AddSectionName, encoding='utf-8') + bytes('\0', encoding='utf-8')
            StringIndex += AddSectionName
            NewUPLEntry += StringIndex
        # Sections after the name string section but not the last one.
        elif ((Index > StringIndexNumber) and (Index < (SectionHeaderEntryNumber - 1))):
            NewUPLEntry += UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Address alignment: the section aligns to the alignment of the next section.
            unpacked_NextSectionHeader = ElfSectionHeader32.unpack(SHentry[((Index + 1) * SectionHeaderEntrySize):(((Index + 1) * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
            NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_NextSectionHeader.sh_addralign)
        # Sections before the name string section.
        else:
            NewUPLEntry += UPLEntry[unpacked_SectionHeader.sh_offset:(unpacked_SectionHeader.sh_offset + unpacked_SectionHeader.sh_size)]
            # Address alignment: the section aligns to the alignment of the next section.
            if (Index < (SectionHeaderEntryNumber - 1)):
                unpacked_NextSectionHeader = ElfSectionHeader32.unpack(SHentry[((Index + 1) * SectionHeaderEntrySize):(((Index + 1) * SectionHeaderEntrySize) + SectionHeaderEntrySize)])
                NewUPLEntry = SectionAlignment(NewUPLEntry, unpacked_NextSectionHeader.sh_addralign)
    SectionHeaderOffset = len(NewUPLEntry)
    RemoveNameOffset = 0
    # Add the section headers.
    for Number in range(0, (SectionHeaderEntryNumber)):
        NewSHentry = AddSectionHeader32(SHentry, NewUPLEntrylen[Number], SectionHeaderEntrySize, Number, RemoveNameOffset, AddSectionName, StringIndexNumber)
        NewUPLEntry += NewSHentry
    NewUPLEntry += bytearray(AddNewSectionEntry32(LastUPLEntrylen, StringIndexValue, len(Binary_File), Alignment))
    # Update the number of sections and the section header offset in the ELF header.
    elf_header.e_shoff = SectionHeaderOffset
    elf_header.e_shnum += 1
    PHTableSize = elf_header.e_phentsize
    elf_header = elf_header.pack()
    UPLEntryBin = elf_header + NewUPLEntry[52:]
    # Update the offsets and addresses recorded in the program header table.
    # They are stored at 0x08-0x0F and 0x10-0x17 of each entry.
    PHSegmentName = '.text'
    _, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, _, StringIndexNumber, _ = FindSection(UPLEntryBin, PHSegmentName)
    UPLEntryBin = ModifyPHSegmentOffset32(UPLEntryBin, ElfHeaderOffset, PHSegmentName)
    PHSegmentName = '.data'
    _, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, _, StringIndexNumber, _ = FindSection(UPLEntryBin, PHSegmentName)
    UPLEntryBin = ModifyPHSegmentOffset32(UPLEntryBin, ElfHeaderOffset, PHSegmentName)
    fFileBinary.close()
    return UPLEntryBin

def ReplaceFv(UniversalPayloadEntry, FileBinary, AddSectionName, Alignment = 16):
    with open(UniversalPayloadEntry, 'rb+') as f:
        UPLEntry = f.read()
    SectionNameOffset, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, _, StringIndexNumber, EI_CLASS = FindSection(UPLEntry, AddSectionName)
    # If the ELF is a 64-bit object.
    if (EI_CLASS == 2):
        # Remove the section if it already exists.
        if (SectionNameOffset != -1):
            RemoveSection64(UniversalPayloadEntry, AddSectionName)
        # Add the section.
        NewUPLEntry = AddSection64(UniversalPayloadEntry, AddSectionName, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, StringIndexNumber, FileBinary, Alignment)
    # If the ELF is a 32-bit object.
    else:
        # Remove the section if it already exists.
        if (SectionNameOffset != -1):
            RemoveSection32(UniversalPayloadEntry, AddSectionName)
        # Add the section.
        NewUPLEntry = AddSection32(UniversalPayloadEntry, AddSectionName, ElfHeaderOffset, SectionHeaderEntrySize, SectionHeaderEntryNumber, StringIndexNumber, FileBinary, Alignment)
    with open(UniversalPayloadEntry, 'wb') as f:
        f.write(NewUPLEntry)
    return 0
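# A minimal usage sketch (not part of the original tool): ReplaceFv() above is
# the entry point used to embed a firmware volume into a universal payload ELF.
# The file names and the section name here are illustrative assumptions only.
def _ExampleReplaceFvUsage():
    # Replace (or create) the '.upld.uefi_fv' section in UniversalPayload.elf
    # with the raw contents of UEFIFV.Fv, padded to a 16-byte boundary.
    ReplaceFv('UniversalPayload.elf', 'UEFIFV.Fv', '.upld.uefi_fv', Alignment = 16)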
edk2-master
UefiPayloadPkg/Tools/ElfFv.py
## @file
# This python script updates content from mipi_syst.h.in in the mipi sys-T submodule
# and generates it as mipi_syst.h. mipi_syst.h includes necessary data structures and
# definitions that are consumed by MipiSysTLib itself, the mipi sys-T submodule
# and other libraries.
#
# This script needs to be run once by a developer when adding a
# project-related definition or when a new version of mipi_syst.h.in is released.
# Normal users do not need to do this, since the resulting file is stored
# in the EDK2 git repository.
#
# Customize the structures mentioned below to generate an updated mipi_syst.h file:
# 1. ExistingValueToBeReplaced
#    -> Replaces an existing value in mipi_syst.h.in with a newer one.
# 2. ExistingDefinitionToBeRemoved
#    -> #undef's an existing definition in mipi_syst.h.in.
# 3. NewItemToBeAdded
#    -> Items in this structure are placed at the end of mipi_syst.h as a customized section.
#
# Run GenMipiSystH.py without any parameters as a normal python script after customizing.
#
# Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re

#
# An existing value to be customized should be placed in this structure.
# Definitions in this customizable structure are processed by ReplaceOldValue().
# e.g:
# Before: @SYST_CFG_VERSION_MAJOR@
# After: 1
#
ExistingValueToBeReplaced = [
    ["@SYST_CFG_VERSION_MAJOR@", "1"],       # Major version
    ["@SYST_CFG_VERSION_MINOR@", "0"],       # Minor version
    ["@SYST_CFG_VERSION_PATCH@", "0"],       # Patch version
    ["@SYST_CFG_CONFORMANCE_LEVEL@", "30"],  # Feature level of mipi sys-T submodule
    ["mipi_syst/platform.h", "Platform.h"],
]

#
# An existing definition to be removed should be placed in this structure.
# Definitions in this customizable structure are processed by RemoveDefinition().
# e.g:
# Before:
# #define MIPI_SYST_PCFG_ENABLE_PLATFORM_STATE_DATA
# After:
# #define MIPI_SYST_PCFG_ENABLE_PLATFORM_STATE_DATA
# #undef MIPI_SYST_PCFG_ENABLE_PLATFORM_STATE_DATA
#
ExistingDefinitionToBeRemoved = [
    "MIPI_SYST_PCFG_ENABLE_PLATFORM_STATE_DATA",
    "MIPI_SYST_PCFG_ENABLE_HEAP_MEMORY",
    "MIPI_SYST_PCFG_ENABLE_PRINTF_API",
    "MIPI_SYST_PCFG_ENABLE_LOCATION_RECORD",
    "MIPI_SYST_PCFG_ENABLE_LOCATION_ADDRESS",
]

#
# Items in this structure are placed at the end of mipi_syst.h as a customized section.
#
NewItemToBeAdded = [
    "typedef struct mipi_syst_handle_flags MIPI_SYST_HANDLE_FLAGS;",
    "typedef struct mipi_syst_msg_tag MIPI_SYST_MSG_TAG;",
    "typedef struct mipi_syst_guid MIPI_SYST_GUID;",
    "typedef enum mipi_syst_severity MIPI_SYST_SEVERITY;",
    "typedef struct mipi_syst_handle MIPI_SYST_HANDLE;",
    "typedef struct mipi_syst_header MIPI_SYST_HEADER;",
]

def ProcessSpecialCharacter(Str):
    Str = Str.rstrip(" \n")
    Str = Str.replace("\t", " ")
    Str += "\n"
    return Str

def ReplaceOldValue(Str):
    for i in range(len(ExistingValueToBeReplaced)):
        Result = re.search(ExistingValueToBeReplaced[i][0], Str)
        if Result is not None:
            Str = Str.replace(ExistingValueToBeReplaced[i][0], ExistingValueToBeReplaced[i][1])
            break
    return Str

def RemoveDefinition(Str):
    Result = re.search(r"\*", Str)
    if Result is None:
        for i in range(len(ExistingDefinitionToBeRemoved)):
            Result = re.search(ExistingDefinitionToBeRemoved[i], Str)
            if Result is not None:
                Result = re.search("defined", Str)
                if Result is None:
                    # Keep the appended #undef on its own line in the output.
                    Str = Str + "#undef " + ExistingDefinitionToBeRemoved[i] + "\n"
                break
    return Str

def main():
    MipiSystHSrcDir = "mipisyst/library/include/mipi_syst.h.in"
    MipiSystHRealSrcDir = os.path.join(os.getcwd(), os.path.normpath(MipiSystHSrcDir))
    MipiSystHRealDstDir = os.path.join(os.getcwd(), "mipi_syst.h")

    #
    # Read content from mipi_syst.h.in and process each line as needed.
    #
    with open(MipiSystHRealSrcDir, "r") as rfObj:
        SrcFile = rfObj.readlines()
        for lineIndex in range(len(SrcFile)):
            SrcFile[lineIndex] = ProcessSpecialCharacter(SrcFile[lineIndex])
            SrcFile[lineIndex] = ReplaceOldValue(SrcFile[lineIndex])
            SrcFile[lineIndex] = RemoveDefinition(SrcFile[lineIndex])

    #
    # Typedef a structure or enum type
    #
    i = -1
    for struct in NewItemToBeAdded:
        struct += "\n"
        SrcFile.insert(i, struct)
        i -= 1

    #
    # Save edited content to mipi_syst.h
    #
    with open(MipiSystHRealDstDir, "w") as wfObj:
        wfObj.writelines(SrcFile)

if __name__ == '__main__':
    main()
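# A quick self-check sketch (not part of the original script): it shows how a
# single line of mipi_syst.h.in flows through the helpers above. The sample
# input line is an illustrative assumption, not content from this repository.
def _ExampleLineTransform():
    Line = "#define MIPI_SYST_VERSION_MAJOR @SYST_CFG_VERSION_MAJOR@\n"
    Line = ProcessSpecialCharacter(Line)
    # '@SYST_CFG_VERSION_MAJOR@' is replaced with '1' per ExistingValueToBeReplaced.
    print(ReplaceOldValue(Line))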
edk2-master
MdePkg/Library/MipiSysTLib/GenMipiSystH.py
#!/usr/bin/python
#
# Copyright 2014 Apple Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

import lldb
import os
import uuid
import string
import optparse
import shlex

guid_dict = {}


def EFI_GUID_TypeSummary (valobj, internal_dict):
    """ Type summary for EFI GUID, print C Name if known
    """
    # typedef struct {
    #   UINT32  Data1;
    #   UINT16  Data2;
    #   UINT16  Data3;
    #   UINT8   Data4[8];
    # } EFI_GUID;
    SBError = lldb.SBError()
    data1_val = valobj.GetChildMemberWithName('Data1')
    data1 = data1_val.GetValueAsUnsigned(0)
    data2_val = valobj.GetChildMemberWithName('Data2')
    data2 = data2_val.GetValueAsUnsigned(0)
    data3_val = valobj.GetChildMemberWithName('Data3')
    data3 = data3_val.GetValueAsUnsigned(0)
    # Zero-pad each field so the string matches the canonical form in Guid.xref.
    Str = "%08x-%04x-%04x-" % (data1, data2, data3)
    data4_val = valobj.GetChildMemberWithName('Data4')
    for i in range (data4_val.num_children):
        if i == 2:
            Str += '-'
        Str += "%02x" % data4_val.GetChildAtIndex(i).data.GetUnsignedInt8(SBError, 0)
    return guid_dict.get (Str.upper(), '')


EFI_STATUS_Dict = {
    # 64-bit EFI_STATUS error codes.
    (0x8000000000000000 | 1): "Load Error",
    (0x8000000000000000 | 2): "Invalid Parameter",
    (0x8000000000000000 | 3): "Unsupported",
    (0x8000000000000000 | 4): "Bad Buffer Size",
    (0x8000000000000000 | 5): "Buffer Too Small",
    (0x8000000000000000 | 6): "Not Ready",
    (0x8000000000000000 | 7): "Device Error",
    (0x8000000000000000 | 8): "Write Protected",
    (0x8000000000000000 | 9): "Out of Resources",
    (0x8000000000000000 | 10): "Volume Corrupt",
    (0x8000000000000000 | 11): "Volume Full",
    (0x8000000000000000 | 12): "No Media",
    (0x8000000000000000 | 13): "Media changed",
    (0x8000000000000000 | 14): "Not Found",
    (0x8000000000000000 | 15): "Access Denied",
    (0x8000000000000000 | 16): "No Response",
    (0x8000000000000000 | 17): "No mapping",
    (0x8000000000000000 | 18): "Time out",
    (0x8000000000000000 | 19): "Not started",
    (0x8000000000000000 | 20): "Already started",
    (0x8000000000000000 | 21): "Aborted",
    (0x8000000000000000 | 22): "ICMP Error",
    (0x8000000000000000 | 23): "TFTP Error",
    (0x8000000000000000 | 24): "Protocol Error",

    0: "Success",
    1: "Warning Unknown Glyph",
    2: "Warning Delete Failure",
    3: "Warning Write Failure",
    4: "Warning Buffer Too Small",

    # 32-bit EFI_STATUS error codes.
    (0x80000000 | 1): "Load Error",
    (0x80000000 | 2): "Invalid Parameter",
    (0x80000000 | 3): "Unsupported",
    (0x80000000 | 4): "Bad Buffer Size",
    (0x80000000 | 5): "Buffer Too Small",
    (0x80000000 | 6): "Not Ready",
    (0x80000000 | 7): "Device Error",
    (0x80000000 | 8): "Write Protected",
    (0x80000000 | 9): "Out of Resources",
    (0x80000000 | 10): "Volume Corrupt",
    (0x80000000 | 11): "Volume Full",
    (0x80000000 | 12): "No Media",
    (0x80000000 | 13): "Media changed",
    (0x80000000 | 14): "Not Found",
    (0x80000000 | 15): "Access Denied",
    (0x80000000 | 16): "No Response",
    (0x80000000 | 17): "No mapping",
    (0x80000000 | 18): "Time out",
    (0x80000000 | 19): "Not started",
    (0x80000000 | 20): "Already started",
    (0x80000000 | 21): "Aborted",
    (0x80000000 | 22): "ICMP Error",
    (0x80000000 | 23): "TFTP Error",
    (0x80000000 | 24): "Protocol Error",
}


def EFI_STATUS_TypeSummary (valobj, internal_dict):
    #
    # Return summary string for EFI_STATUS from dictionary
    #
    Status = valobj.GetValueAsUnsigned(0)
    return EFI_STATUS_Dict.get (Status, '')


def EFI_TPL_TypeSummary (valobj, internal_dict):
    #
    # Return TPL values
    #
    if valobj.TypeIsPointerType():
        return ""
    Tpl = valobj.GetValueAsUnsigned(0)
    if Tpl < 4:
        Str = "%d" % Tpl
    elif Tpl == 6:
        Str = "TPL_DRIVER (Obsolete Concept in edk2)"
    elif Tpl < 8:
        Str = "TPL_APPLICATION"
        if Tpl - 4 > 0:
            Str += " + " + "%d" % (Tpl - 4)
    elif Tpl < 16:
        Str = "TPL_CALLBACK"
        if Tpl - 8 > 0:
0: Str += " + " + "%d" % (Tpl - 4) elif Tpl < 31: Str = "TPL_NOTIFY" if Tpl - 16 > 0: Str += " + " + "%d" % (Tpl - 4) elif Tpl == 31: Str = "TPL_HIGH_LEVEL" else: Str = "Invalid TPL" return Str def CHAR16_TypeSummary (valobj,internal_dict): # # Display EFI CHAR16 'unsigned short' as string # SBError = lldb.SBError() Str = '' if valobj.TypeIsPointerType(): if valobj.GetValueAsUnsigned () == 0: return "NULL" # CHAR16 * max string size 1024 for i in range (1024): Char = valobj.GetPointeeData(i,1).GetUnsignedInt16(SBError, 0) if SBError.fail or Char == 0: break Str += unichr (Char) Str = 'L"' + Str + '"' return Str.encode ('utf-8', 'replace') if valobj.num_children == 0: # CHAR16 if chr (valobj.unsigned) in string.printable: Str = "L'" + unichr (valobj.unsigned) + "'" return Str.encode ('utf-8', 'replace') else: # CHAR16 [] for i in range (valobj.num_children): Char = valobj.GetChildAtIndex(i).data.GetUnsignedInt16(SBError, 0) if Char == 0: break Str += unichr (Char) Str = 'L"' + Str + '"' return Str.encode ('utf-8', 'replace') return Str def CHAR8_TypeSummary (valobj,internal_dict): # # Display EFI CHAR8 'signed char' as string # unichr() is used as a junk string can produce an error message like this: # UnicodeEncodeError: 'ascii' codec can't encode character u'\x90' in position 1: ordinal not in range(128) # SBError = lldb.SBError() Str = '' if valobj.TypeIsPointerType(): if valobj.GetValueAsUnsigned () == 0: return "NULL" # CHAR8 * max string size 1024 for i in range (1024): Char = valobj.GetPointeeData(i,1).GetUnsignedInt8(SBError, 0) if SBError.fail or Char == 0: break Str += unichr (Char) Str = '"' + Str + '"' return Str.encode ('utf-8', 'replace') if valobj.num_children == 0: # CHAR8 if chr (valobj.unsigned) in string.printable: Str = '"' + unichr (valobj.unsigned) + '"' return Str.encode ('utf-8', 'replace') else: # CHAR8 [] for i in range (valobj.num_children): Char = valobj.GetChildAtIndex(i).data.GetUnsignedInt8(SBError, 0) if Char == 0: break Str += unichr (Char) Str = '"' + Str + '"' return Str.encode ('utf-8', 'replace') return Str device_path_dict = { (0x01, 0x01): "PCI_DEVICE_PATH", (0x01, 0x02): "PCCARD_DEVICE_PATH", (0x01, 0x03): "MEMMAP_DEVICE_PATH", (0x01, 0x04): "VENDOR_DEVICE_PATH", (0x01, 0x05): "CONTROLLER_DEVICE_PATH", (0x02, 0x01): "ACPI_HID_DEVICE_PATH", (0x02, 0x02): "ACPI_EXTENDED_HID_DEVICE_PATH", (0x02, 0x03): "ACPI_ADR_DEVICE_PATH", (0x03, 0x01): "ATAPI_DEVICE_PATH", (0x03, 0x12): "SATA_DEVICE_PATH", (0x03, 0x02): "SCSI_DEVICE_PATH", (0x03, 0x03): "FIBRECHANNEL_DEVICE_PATH", (0x03, 0x04): "F1394_DEVICE_PATH", (0x03, 0x05): "USB_DEVICE_PATH", (0x03, 0x0f): "USB_CLASS_DEVICE_PATH", (0x03, 0x10): "FW_SBP2_UNIT_LUN_DEVICE_PATH", (0x03, 0x11): "DEVICE_LOGICAL_UNIT_DEVICE_PATH", (0x03, 0x06): "I2O_DEVICE_PATH", (0x03, 0x0b): "MAC_ADDR_DEVICE_PATH", (0x03, 0x0c): "IPv4_DEVICE_PATH", (0x03, 0x09): "INFINIBAND_DEVICE_PATH", (0x03, 0x0e): "UART_DEVICE_PATH", (0x03, 0x0a): "VENDOR_DEVICE_PATH", (0x03, 0x13): "ISCSI_DEVICE_PATH", (0x04, 0x01): "HARDDRIVE_DEVICE_PATH", (0x04, 0x02): "CDROM_DEVICE_PATH", (0x04, 0x03): "VENDOR_DEVICE_PATH", (0x04, 0x04): "FILEPATH_DEVICE_PATH", (0x04, 0x05): "MEDIA_PROTOCOL_DEVICE_PATH", (0x05, 0x01): "BBS_BBS_DEVICE_PATH", (0x7F, 0xFF): "EFI_DEVICE_PATH_PROTOCOL", (0xFF, 0xFF): "EFI_DEVICE_PATH_PROTOCOL", } def EFI_DEVICE_PATH_PROTOCOL_TypeSummary (valobj,internal_dict): # # # if valobj.TypeIsPointerType(): # EFI_DEVICE_PATH_PROTOCOL * return "" Str = "" if valobj.num_children == 3: # EFI_DEVICE_PATH_PROTOCOL Type = 
        Type = valobj.GetChildMemberWithName('Type').unsigned
        SubType = valobj.GetChildMemberWithName('SubType').unsigned
        if (Type, SubType) in device_path_dict:
            TypeStr = device_path_dict[Type, SubType]
        else:
            TypeStr = ""
        LenLow = valobj.GetChildMemberWithName('Length').GetChildAtIndex(0).unsigned
        LenHigh = valobj.GetChildMemberWithName('Length').GetChildAtIndex(1).unsigned
        # Length is a little-endian UINT8[2]; the second byte is the upper 8 bits.
        Len = LenLow + (LenHigh << 8)
        Address = long ("%d" % valobj.addr)
        if (Address == lldb.LLDB_INVALID_ADDRESS):
            # Need to research this, it seems to be the nested struct case
            ExprStr = ""
        elif (Type & 0x7f == 0x7f):
            ExprStr = "End Device Path" if SubType == 0xff else "End This Instance"
        else:
            ExprStr = "expr *(%s *)0x%08x" % (TypeStr, Address)
        Str = " {\n"
        Str += " (UINT8) Type = 0x%02x // %s\n" % (Type, "END" if (Type & 0x7f == 0x7f) else "")
        Str += " (UINT8) SubType = 0x%02x // %s\n" % (SubType, ExprStr)
        Str += " (UINT8 [2]) Length = { // 0x%04x (%d) bytes\n" % (Len, Len)
        Str += " (UINT8) [0] = 0x%02x\n" % LenLow
        Str += " (UINT8) [1] = 0x%02x\n" % LenHigh
        Str += " }\n"
        if (Type & 0x7f == 0x7f) and (SubType == 0xff):
            pass
        elif ExprStr != "":
            NextNode = Address + Len
            Str += "// Next node 'expr *(EFI_DEVICE_PATH_PROTOCOL *)0x%08x'\n" % NextNode
    return Str


def TypePrintFormating(debugger):
    #
    # Set the default print formatting for EFI types in lldb.
    # lldb seems to default to decimal.
    #
    category = debugger.GetDefaultCategory()
    FormatBool = lldb.SBTypeFormat(lldb.eFormatBoolean)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("BOOLEAN"), FormatBool)
    FormatHex = lldb.SBTypeFormat(lldb.eFormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("UINT64"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("INT64"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("UINT32"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("INT32"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("UINT16"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("INT16"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("UINT8"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("INT8"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("UINTN"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("INTN"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("CHAR8"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("CHAR16"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("EFI_PHYSICAL_ADDRESS"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("PHYSICAL_ADDRESS"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("EFI_STATUS"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("EFI_TPL"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("EFI_LBA"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("EFI_BOOT_MODE"), FormatHex)
    category.AddTypeFormat(lldb.SBTypeNameSpecifier("EFI_FV_FILETYPE"), FormatHex)
    #
    # Smart type printing for EFI
    #
    debugger.HandleCommand("type summary add EFI_GUID --python-function lldbefi.EFI_GUID_TypeSummary")
    debugger.HandleCommand("type summary add EFI_STATUS --python-function lldbefi.EFI_STATUS_TypeSummary")
    debugger.HandleCommand("type summary add EFI_TPL --python-function lldbefi.EFI_TPL_TypeSummary")
    debugger.HandleCommand("type summary add EFI_DEVICE_PATH_PROTOCOL --python-function lldbefi.EFI_DEVICE_PATH_PROTOCOL_TypeSummary")
    debugger.HandleCommand("type summary add CHAR16 --python-function lldbefi.CHAR16_TypeSummary")
    debugger.HandleCommand('type summary add --regex "CHAR16 \[[0-9]+\]" --python-function lldbefi.CHAR16_TypeSummary')
\[[0-9]+\]" --python-function lldbefi.CHAR16_TypeSummary') debugger.HandleCommand("type summary add CHAR8 --python-function lldbefi.CHAR8_TypeSummary") debugger.HandleCommand('type summary add --regex "CHAR8 \[[0-9]+\]" --python-function lldbefi.CHAR8_TypeSummary') debugger.HandleCommand( 'setting set frame-format "frame #${frame.index}: ${frame.pc}' '{ ${module.file.basename}{:${function.name}()${function.pc-offset}}}' '{ at ${line.file.fullpath}:${line.number}}\n"' ) gEmulatorBreakWorkaroundNeeded = True def LoadEmulatorEfiSymbols(frame, bp_loc , internal_dict): # # This is an lldb breakpoint script, and assumes the breakpoint is on a # function with the same prototype as SecGdbScriptBreak(). The # argument names are important as lldb looks them up. # # VOID # SecGdbScriptBreak ( # char *FileName, # int FileNameLength, # long unsigned int LoadAddress, # int AddSymbolFlag # ) # { # return; # } # # When the emulator loads a PE/COFF image, it calls the stub function with # the filename of the symbol file, the length of the FileName, the # load address and a flag to indicate if this is a load or unload operation # global gEmulatorBreakWorkaroundNeeded if gEmulatorBreakWorkaroundNeeded: # turn off lldb debug prints on SIGALRM (EFI timer tick) frame.thread.process.target.debugger.HandleCommand("process handle SIGALRM -n false") gEmulatorBreakWorkaroundNeeded = False # Convert C string to Python string Error = lldb.SBError() FileNamePtr = frame.FindVariable ("FileName").GetValueAsUnsigned() FileNameLen = frame.FindVariable ("FileNameLength").GetValueAsUnsigned() FileName = frame.thread.process.ReadCStringFromMemory (FileNamePtr, FileNameLen, Error) if not Error.Success(): print("!ReadCStringFromMemory() did not find a %d byte C string at %x" % (FileNameLen, FileNamePtr)) # make breakpoint command continue return False debugger = frame.thread.process.target.debugger if frame.FindVariable ("AddSymbolFlag").GetValueAsUnsigned() == 1: LoadAddress = frame.FindVariable ("LoadAddress").GetValueAsUnsigned() - 0x240 debugger.HandleCommand ("target modules add %s" % FileName) print("target modules load --slid 0x%x %s" % (LoadAddress, FileName)) debugger.HandleCommand ("target modules load --slide 0x%x --file %s" % (LoadAddress, FileName)) else: target = debugger.GetSelectedTarget() for SBModule in target.module_iter(): ModuleName = SBModule.GetFileSpec().GetDirectory() + '/' ModuleName += SBModule.GetFileSpec().GetFilename() if FileName == ModuleName or FileName == SBModule.GetFileSpec().GetFilename(): target.ClearModuleLoadAddress (SBModule) if not target.RemoveModule (SBModule): print("!lldb.target.RemoveModule (%s) FAILED" % SBModule) # make breakpoint command continue return False def GuidToCStructStr (guid, Name=False): # # Convert a 16-byte bytesarray (or bytearray compat object) to C guid string # { 0xB402621F, 0xA940, 0x1E4A, { 0x86, 0x6B, 0x4D, 0xC9, 0x16, 0x2B, 0x34, 0x7C } } # # Name=True means lookup name in GuidNameDict and us it if you find it # if not isinstance (guid, bytearray): # convert guid object to UUID, and UUID to bytearray Uuid = uuid.UUID(guid) guid = bytearray (Uuid.bytes_le) return "{ 0x%02.2X%02.2X%02.2X%02.2X, 0x%02.2X%02.2X, 0x%02.2X%02.2X, { 0x%02.2X, 0x%02.2X, 0x%02.2X, 0x%02.2X, 0x%02.2X, 0x%02.2X, 0x%02.2X, 0x%02.2X } }" % \ (guid[3], guid[2], guid[1], guid[0], guid[5], guid[4], guid[7], guid[6], guid[8], guid[9], guid[10], guid[11], guid[12], guid[13], guid[14], guid[15]) def ParseGuidString(GuidStr): # # Error check and convert C Guid init to string # 
ParseGuidString("49152E77-1ADA-4764-B7A2-7AFEFED95E8B") # ParseGuidString("{ 0xBA24B391, 0x73FD, 0xC54C, { 0x9E, 0xAF, 0x0C, 0xA7, 0x8A, 0x35, 0x46, 0xD1 } }") # if "{" in GuidStr : # convert C form "{ 0xBA24B391, 0x73FD, 0xC54C, { 0x9E, 0xAF, 0x0C, 0xA7, 0x8A, 0x35, 0x46, 0xD1 } }" # to string form BA24B391-73FD-C54C-9EAF-0CA78A3546D1 # make a list of Hex numbers like: ['0xBA24B391', '0x73FD', '0xC54C', '0x9E', '0xAF', '0x0C', '0xA7', '0x8A', '0x35', '0x46', '0xD1'] Hex = ''.join(x for x in GuidStr if x not in '{,}').split() Str = "%08X-%04X-%04X-%02.2X%02.2X-%02.2X%02.2X%02.2X%02.2X%02.2X%02.2X" % \ (int(Hex[0], 0), int(Hex[1], 0), int(Hex[2], 0), int(Hex[3], 0), int(Hex[4], 0), \ int(Hex[5], 0), int(Hex[6], 0), int(Hex[7], 0), int(Hex[8], 0), int(Hex[9], 0), int(Hex[10], 0)) elif GuidStr.count('-') == 4: # validate "49152E77-1ADA-4764-B7A2-7AFEFED95E8B" form Check = "%s" % str(uuid.UUID(GuidStr)).upper() if GuidStr.upper() == Check: Str = GuidStr.upper() else: Ste = "" else: Str = "" return Str def create_guid_options(): usage = "usage: %prog [data]" description='''lookup EFI_GUID by CName, C struct, or GUID string and print out all three. ''' parser = optparse.OptionParser(description=description, prog='guid',usage=usage) return parser def efi_guid_command(debugger, command, result, dict): # Use the Shell Lexer to properly parse up command options just like a # shell would command_args = shlex.split(command) parser = create_guid_options() try: (options, args) = parser.parse_args(command_args) if len(args) >= 1: if args[0] == "{": # caller forgot to quote the string" # mark arg[0] a string containing all args[n] args[0] = ' '.join(args) GuidStr = ParseGuidString (args[0]) if GuidStr == "": # return Key of GuidNameDict for value args[0] GuidStr = [Key for Key, Value in guid_dict.iteritems() if Value == args[0]][0] GuidStr = GuidStr.upper() except: # if you don't handle exceptions, passing an incorrect argument to the OptionParser will cause LLDB to exit # (courtesy of OptParse dealing with argument errors by throwing SystemExit) result.SetError ("option parsing failed") return if len(args) >= 1: if GuidStr in guid_dict: print("%s = %s" % (guid_dict[GuidStr], GuidStr)) print("%s = %s" % (guid_dict[GuidStr], GuidToCStructStr (GuidStr))) else: print(GuidStr) else: # dump entire dictionary width = max(len(v) for k,v in guid_dict.iteritems()) for value in sorted(guid_dict, key=guid_dict.get): print('%-*s %s %s' % (width, guid_dict[value], value, GuidToCStructStr(value))) return # ########## Code that runs when this script is imported into LLDB ########### # def __lldb_init_module (debugger, internal_dict): # This initializer is being run from LLDB in the embedded command interpreter # Make the options so we can generate the help text for the new LLDB # command line command prior to registering it with LLDB below global guid_dict # Source Guid.xref file if we can find it inputfile = os.getcwd() inputfile += os.sep + os.pardir + os.sep + 'FV' + os.sep + 'Guid.xref' with open(inputfile) as f: for line in f: data = line.split(' ') if len(data) >= 2: guid_dict[data[0].upper()] = data[1].strip('\n') # init EFI specific type formatters TypePrintFormating (debugger) # add guid command parser = create_guid_options() efi_guid_command.__doc__ = parser.format_help() debugger.HandleCommand('command script add -f lldbefi.efi_guid_command guid') Target = debugger.GetTargetAtIndex(0) if Target: Breakpoint = Target.BreakpointCreateByName('SecGdbScriptBreak') if Breakpoint.GetNumLocations() == 1: # Set the 
            # Set the emulator breakpoints, if we are in the emulator
            debugger.HandleCommand("breakpoint command add -s python -F lldbefi.LoadEmulatorEfiSymbols {id}".format(id=Breakpoint.GetID()))
            print('Type r to run emulator. SecGdbScriptBreak armed. EFI modules should now get source level debugging in the emulator.')
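# Usage sketch (not part of the original file): import this script into lldb
# before launching the EmulatorPkg host binary. The paths are illustrative
# assumptions; only 'command script import' and 'target create' are stock
# lldb commands.
#   (lldb) command script import /path/to/EmulatorPkg/Unix/lldbefi.py
#   (lldb) target create Host
#   (lldb) run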
edk2-master
EmulatorPkg/Unix/lldbefi.py
# @file
# Script to Build EmulatorPkg UEFI firmware
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import logging
import io
from edk2toolext.environment import shell_environment
from edk2toolext.environment.uefi_build import UefiBuilder
from edk2toolext.invocables.edk2_platform_build import BuildSettingsManager
from edk2toolext.invocables.edk2_setup import SetupSettingsManager, RequiredSubmodule
from edk2toolext.invocables.edk2_update import UpdateSettingsManager
from edk2toolext.invocables.edk2_pr_eval import PrEvalSettingsManager
from edk2toollib.utility_functions import RunCmd
from edk2toollib.utility_functions import GetHostInfo

# ####################################################################################### #
#                                Common Configuration                                     #
# ####################################################################################### #


class CommonPlatform():
    ''' Common settings for this platform.  Define static data here and use
        for the different parts of stuart
    '''
    PackagesSupported = ("EmulatorPkg",)
    ArchSupported = ("X64", "IA32")
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('emulatorpkg', 'edk2-build')
    WorkspaceRoot = os.path.realpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "..", ".."))

# ####################################################################################### #
#                         Configuration for Update & Setup                                #
# ####################################################################################### #


class SettingsManager(UpdateSettingsManager, SetupSettingsManager, PrEvalSettingsManager):

    def GetPackagesSupported(self):
        ''' return iterable of edk2 packages supported by this build.
        These should be edk2 workspace relative paths '''
        return CommonPlatform.PackagesSupported

    def GetArchitecturesSupported(self):
        ''' return iterable of edk2 architectures supported by this build '''
        return CommonPlatform.ArchSupported

    def GetTargetsSupported(self):
        ''' return iterable of edk2 target tags supported by this build '''
        return CommonPlatform.TargetsSupported

    def GetRequiredSubmodules(self):
        ''' return iterable containing RequiredSubmodule objects.
        If no RequiredSubmodules return an empty iterable
        '''
        rs = []

        # intentionally declare this one with recursive false to avoid overhead
        rs.append(RequiredSubmodule(
            "CryptoPkg/Library/OpensslLib/openssl", False))

        # To avoid maintenance of this file for every new submodule
        # lets just parse the .gitmodules and add each if not already in list.
        # The GetRequiredSubmodules is designed to allow a build to optimize
        # the desired submodules but it isn't necessary for this repository.
        result = io.StringIO()
        ret = RunCmd("git", "config --file .gitmodules --get-regexp path",
                     workingdir=self.GetWorkspaceRoot(), outstream=result)
        # Cmd output is expected to look like:
        # submodule.CryptoPkg/Library/OpensslLib/openssl.path CryptoPkg/Library/OpensslLib/openssl
        # submodule.SoftFloat.path ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3
        if ret == 0:
            for line in result.getvalue().splitlines():
                _, _, path = line.partition(" ")
                if path is not None:
                    if path not in [x.path for x in rs]:
                        # add it with recursive since we don't know
                        rs.append(RequiredSubmodule(path, True))
        return rs

    def SetArchitectures(self, list_of_requested_architectures):
        ''' Confirm the requested architecture list is valid and configure SettingsManager
        to run only the requested architectures.
        Raise Exception if a list_of_requested_architectures is not supported
        '''
        unsupported = set(list_of_requested_architectures) - \
            set(self.GetArchitecturesSupported())
        if(len(unsupported) > 0):
            errorString = (
                "Unsupported Architecture Requested: " + " ".join(unsupported))
            logging.critical(errorString)
            raise Exception(errorString)
        self.ActualArchitectures = list_of_requested_architectures

    def GetWorkspaceRoot(self):
        ''' get WorkspacePath '''
        return CommonPlatform.WorkspaceRoot

    def GetActiveScopes(self):
        ''' return tuple containing scopes that should be active for this process '''
        return CommonPlatform.Scopes

    def FilterPackagesToTest(self, changedFilesList: list, potentialPackagesList: list) -> list:
        ''' Filter other cases that this package should be built
        based on changed files. This should cover things that can't
        be detected as dependencies. '''
        build_these_packages = []
        possible_packages = potentialPackagesList.copy()
        for f in changedFilesList:
            # BaseTools files that might change the build
            if "BaseTools" in f:
                # Compare the file extension, not the (root, ext) tuple.
                if os.path.splitext(f)[1] not in [".txt", ".md"]:
                    build_these_packages = possible_packages
                    break
            # if the azure pipeline platform template file changed
            if "platform-build-run-steps.yml" in f:
                build_these_packages = possible_packages
                break
        return build_these_packages

    def GetPlatformDscAndConfig(self) -> tuple:
        ''' If a platform desires to provide its DSC then Policy 4 will evaluate if
        any of the changes will be built in the dsc.

        The tuple should be (<workspace relative path to dsc file>, <input dictionary of dsc key value pairs>)
        '''
        return (os.path.join("EmulatorPkg", "EmulatorPkg.dsc"), {})

# ####################################################################################### #
#                         Actual Configuration for Platform Build                         #
# ####################################################################################### #


class PlatformBuilder(UefiBuilder, BuildSettingsManager):
    def __init__(self):
        UefiBuilder.__init__(self)

    def AddCommandLineOptions(self, parserObj):
        ''' Add command line options to the argparser '''
        parserObj.add_argument('-a', "--arch", dest="build_arch", type=str, default="X64",
                               help="Optional - architecture to build.  IA32 will use IA32 for Pei & Dxe. "
                                    "X64 will use X64 for both PEI and DXE.")

    def RetrieveCommandLineOptions(self, args):
        ''' Retrieve command line options from the argparser '''
        shell_environment.GetBuildVars().SetValue(
            "TARGET_ARCH", args.build_arch.upper(), "From CmdLine")
        shell_environment.GetBuildVars().SetValue(
            "ACTIVE_PLATFORM", "EmulatorPkg/EmulatorPkg.dsc", "From CmdLine")

    def GetWorkspaceRoot(self):
        ''' get WorkspacePath '''
        return CommonPlatform.WorkspaceRoot

    def GetPackagesPath(self):
        ''' Return a list of workspace relative paths that should be mapped as edk2 PackagesPath '''
        return ()

    def GetActiveScopes(self):
        ''' return tuple containing scopes that should be active for this process '''
        return CommonPlatform.Scopes

    def GetName(self):
        ''' Get the name of the repo, platform, or product being built.
        Used for naming the log file, among others. '''
        # check the startup nsh flag and if set then rename the log file.
        # this helps in CI so we don't overwrite the build log since running
        # uses the stuart_build command.
        if(shell_environment.GetBuildVars().GetValue("MAKE_STARTUP_NSH", "FALSE") == "TRUE"):
            return "EmulatorPkg_With_Run"
        return "EmulatorPkg"

    def GetLoggingLevel(self, loggerType):
        ''' Get the logging level for a given type
        base == lowest logging level supported
        con  == Screen logging
        txt  == plain text file logging
        md   == markdown file logging
        '''
        return logging.DEBUG

    def SetPlatformEnv(self):
        logging.debug("PlatformBuilder SetPlatformEnv")
        self.env.SetValue("PRODUCT_NAME", "EmulatorPkg", "Platform Hardcoded")
        self.env.SetValue("TOOL_CHAIN_TAG", "VS2019", "Default Toolchain")

        # Add support for using the correct Platform Headers, tools, and Libs based on emulator architecture
        # requested to be built when building VS2019 or VS2017
        if self.env.GetValue("TOOL_CHAIN_TAG") == "VS2019" or self.env.GetValue("TOOL_CHAIN_TAG") == "VS2017":
            key = self.env.GetValue("TOOL_CHAIN_TAG") + "_HOST"
            if self.env.GetValue("TARGET_ARCH") == "IA32":
                shell_environment.ShellEnvironment().set_shell_var(key, "x86")
            elif self.env.GetValue("TARGET_ARCH") == "X64":
                shell_environment.ShellEnvironment().set_shell_var(key, "x64")

        # Add support for using the correct Platform Headers, tools, and Libs based on emulator architecture
        # requested to be built when building on linux.
        if GetHostInfo().os.upper() == "LINUX":
            self.ConfigureLinuxDLinkPath()

        if GetHostInfo().os.upper() == "WINDOWS":
            self.env.SetValue("BLD_*_WIN_HOST_BUILD", "TRUE", "Trigger Windows host build")

        self.env.SetValue("MAKE_STARTUP_NSH", "FALSE", "Default to false")

        # I don't see what this does but it is in build.sh
        key = "BLD_*_BUILD_" + self.env.GetValue("TARGET_ARCH")
        self.env.SetValue(key, "TRUE", "match script in build.sh")
        return 0

    def PlatformPreBuild(self):
        return 0

    def PlatformPostBuild(self):
        return 0

    def FlashRomImage(self):
        ''' Use the FlashRom Function to run the emulator.  This gives an easy stuart command line to activate the emulator. '''
        OutputPath = os.path.join(self.env.GetValue(
            "BUILD_OUTPUT_BASE"), self.env.GetValue("TARGET_ARCH"))

        if (self.env.GetValue("MAKE_STARTUP_NSH") == "TRUE"):
            f = open(os.path.join(OutputPath, "startup.nsh"), "w")
            f.write("BOOT SUCCESS !!! \n")
            # add commands here
            f.write("reset\n")
            f.close()

        if GetHostInfo().os.upper() == "WINDOWS":
            cmd = "WinHost.exe"
        elif GetHostInfo().os.upper() == "LINUX":
            cmd = "./Host"
        else:
            logging.critical("Unsupported Host")
            return -1
        return RunCmd(cmd, "", workingdir=OutputPath)

    def ConfigureLinuxDLinkPath(self):
        ''' logic copied from build.sh to setup the correct libraries '''
        if self.env.GetValue("TARGET_ARCH") == "IA32":
            # Each loader object and startup file is listed separately.
            LIB_NAMES = ["ld-linux.so.2", "libdl.so.2", "crt1.o", "crti.o", "crtn.o"]
            LIB_SEARCH_PATHS = ["/usr/lib/i386-linux-gnu", "/usr/lib32", "/lib32", "/usr/lib", "/lib"]
        elif self.env.GetValue("TARGET_ARCH") == "X64":
            LIB_NAMES = ["ld-linux-x86-64.so.2", "libdl.so.2", "crt1.o", "crti.o", "crtn.o"]
            LIB_SEARCH_PATHS = ["/usr/lib/x86_64-linux-gnu", "/usr/lib64", "/lib64", "/usr/lib", "/lib"]

        HOST_DLINK_PATHS = ""
        for lname in LIB_NAMES:
            logging.debug(f"Looking for {lname}")
            for dname in LIB_SEARCH_PATHS:
                logging.debug(f"In {dname}")
                if os.path.isfile(os.path.join(dname, lname)):
                    logging.debug(f"Found {lname} in {dname}")
                    HOST_DLINK_PATHS += os.path.join(dname, lname) + os.pathsep
                    break
        HOST_DLINK_PATHS = HOST_DLINK_PATHS.rstrip(os.pathsep)
        logging.critical(f"Setting HOST_DLINK_PATHS to {HOST_DLINK_PATHS}")
        shell_environment.ShellEnvironment().set_shell_var(
            "HOST_DLINK_PATHS", HOST_DLINK_PATHS)
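# Usage sketch (not part of the original file): typical stuart invocations that
# exercise the settings classes above. The tool chain tag and architecture are
# illustrative; --FlashOnly invokes FlashRomImage() to run the emulator.
#   stuart_setup  -c EmulatorPkg/PlatformCI/PlatformBuild.py
#   stuart_update -c EmulatorPkg/PlatformCI/PlatformBuild.py
#   stuart_build  -c EmulatorPkg/PlatformCI/PlatformBuild.py -a X64 TOOL_CHAIN_TAG=VS2019
#   stuart_build  -c EmulatorPkg/PlatformCI/PlatformBuild.py -a X64 --FlashOnly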
edk2-master
EmulatorPkg/PlatformCI/PlatformBuild.py
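A minimal, self-contained sketch of the host-library search performed by ConfigureLinuxDLinkPath() in the file above for an X64 build. The library names and search paths are copied from that method; the first-match-per-library loop mirrors how HOST_DLINK_PATHS is assembled.

import os

# Values taken from ConfigureLinuxDLinkPath() for TARGET_ARCH == "X64"
LIB_NAMES = ["ld-linux-x86-64.so.2", "libdl.so.2", "crt1.o", "crti.o", "crtn.o"]
LIB_SEARCH_PATHS = ["/usr/lib/x86_64-linux-gnu", "/usr/lib64", "/lib64", "/usr/lib", "/lib"]

found = []
for lname in LIB_NAMES:
    for dname in LIB_SEARCH_PATHS:
        if os.path.isfile(os.path.join(dname, lname)):
            found.append(os.path.join(dname, lname))
            break  # take the first directory that provides this library
print(os.pathsep.join(found))  # the equivalent of HOST_DLINK_PATHS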
# @file # # Copyright (c) Microsoft Corporation. # Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR> # Copyright (c) 2020 - 2021, ARM Limited. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent ## import os import logging from edk2toolext.environment import shell_environment from edk2toolext.invocables.edk2_ci_build import CiBuildSettingsManager from edk2toolext.invocables.edk2_setup import SetupSettingsManager, RequiredSubmodule from edk2toolext.invocables.edk2_update import UpdateSettingsManager from edk2toolext.invocables.edk2_pr_eval import PrEvalSettingsManager from edk2toollib.utility_functions import GetHostInfo class Settings(CiBuildSettingsManager, UpdateSettingsManager, SetupSettingsManager, PrEvalSettingsManager): def __init__(self): self.ActualPackages = [] self.ActualTargets = [] self.ActualArchitectures = [] self.ActualToolChainTag = "" self.UseBuiltInBaseTools = None self.ActualScopes = None # ####################################################################################### # # Extra CmdLine configuration # # ####################################################################################### # def AddCommandLineOptions(self, parserObj): group = parserObj.add_mutually_exclusive_group() group.add_argument("-force_piptools", "--fpt", dest="force_piptools", action="store_true", default=False, help="Force the system to use pip tools") group.add_argument("-no_piptools", "--npt", dest="no_piptools", action="store_true", default=False, help="Force the system to not use pip tools") def RetrieveCommandLineOptions(self, args): super().RetrieveCommandLineOptions(args) if args.force_piptools: self.UseBuiltInBaseTools = True if args.no_piptools: self.UseBuiltInBaseTools = False # ####################################################################################### # # Default Support for this Ci Build # # ####################################################################################### # def GetPackagesSupported(self): ''' return iterable of edk2 packages supported by this build. These should be edk2 workspace relative paths ''' return ("ArmPkg", "ArmPlatformPkg", "ArmVirtPkg", "DynamicTablesPkg", "EmbeddedPkg", "EmulatorPkg", "IntelFsp2Pkg", "IntelFsp2WrapperPkg", "MdePkg", "MdeModulePkg", "NetworkPkg", "PcAtChipsetPkg", "SecurityPkg", "UefiCpuPkg", "FmpDevicePkg", "ShellPkg", "SignedCapsulePkg", "StandaloneMmPkg", "FatPkg", "CryptoPkg", "PrmPkg", "UnitTestFrameworkPkg", "OvmfPkg", "RedfishPkg", "SourceLevelDebugPkg", "UefiPayloadPkg" ) def GetArchitecturesSupported(self): ''' return iterable of edk2 architectures supported by this build ''' return ( "IA32", "X64", "ARM", "AARCH64", "RISCV64", "LOONGARCH64") def GetTargetsSupported(self): ''' return iterable of edk2 target tags supported by this build ''' return ("DEBUG", "RELEASE", "NO-TARGET", "NOOPT") # ####################################################################################### # # Verify and Save requested Ci Build Config # # ####################################################################################### # def SetPackages(self, list_of_requested_packages): ''' Confirm the requested package list is valid and configure SettingsManager to build the requested packages. 
Raise UnsupportedException if a requested_package is not supported ''' unsupported = set(list_of_requested_packages) - \ set(self.GetPackagesSupported()) if(len(unsupported) > 0): logging.critical( "Unsupported Package Requested: " + " ".join(unsupported)) raise Exception("Unsupported Package Requested: " + " ".join(unsupported)) self.ActualPackages = list_of_requested_packages def SetArchitectures(self, list_of_requested_architectures): ''' Confirm the requested architecture list is valid and configure SettingsManager to run only the requested architectures. Raise Exception if a list_of_requested_architectures is not supported ''' unsupported = set(list_of_requested_architectures) - \ set(self.GetArchitecturesSupported()) if(len(unsupported) > 0): logging.critical( "Unsupported Architecture Requested: " + " ".join(unsupported)) raise Exception( "Unsupported Architecture Requested: " + " ".join(unsupported)) self.ActualArchitectures = list_of_requested_architectures def SetTargets(self, list_of_requested_target): ''' Confirm the requested target list is valid and configure SettingsManager to run only the requested targets. Raise UnsupportedException if a requested_target is not supported ''' unsupported = set(list_of_requested_target) - \ set(self.GetTargetsSupported()) if(len(unsupported) > 0): logging.critical( "Unsupported Targets Requested: " + " ".join(unsupported)) raise Exception("Unsupported Targets Requested: " + " ".join(unsupported)) self.ActualTargets = list_of_requested_target # ####################################################################################### # # Actual Configuration for Ci Build # # ####################################################################################### # def GetActiveScopes(self): ''' return tuple containing scopes that should be active for this process ''' if self.ActualScopes is None: scopes = ("cibuild", "edk2-build", "host-based-test") self.ActualToolChainTag = shell_environment.GetBuildVars().GetValue("TOOL_CHAIN_TAG", "") is_linux = GetHostInfo().os.upper() == "LINUX" if self.UseBuiltInBaseTools is None: # try and import the pip module for basetools try: import edk2basetools self.UseBuiltInBaseTools = True except ImportError: self.UseBuiltInBaseTools = False if self.UseBuiltInBaseTools: scopes += ('pipbuild-unix',) if is_linux else ('pipbuild-win',) logging.warning("Using Pip Tools based BaseTools") else: logging.warning("Falling back to using in-tree BaseTools") self.ActualScopes = scopes return self.ActualScopes def GetRequiredSubmodules(self): ''' return iterable containing RequiredSubmodule objects.
If no RequiredSubmodules return an empty iterable ''' rs = [] rs.append(RequiredSubmodule( "ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3", False)) rs.append(RequiredSubmodule( "CryptoPkg/Library/OpensslLib/openssl", False)) rs.append(RequiredSubmodule( "UnitTestFrameworkPkg/Library/CmockaLib/cmocka", False)) rs.append(RequiredSubmodule( "UnitTestFrameworkPkg/Library/GoogleTestLib/googletest", False)) rs.append(RequiredSubmodule( "MdeModulePkg/Universal/RegularExpressionDxe/oniguruma", False)) rs.append(RequiredSubmodule( "MdeModulePkg/Library/BrotliCustomDecompressLib/brotli", False)) rs.append(RequiredSubmodule( "BaseTools/Source/C/BrotliCompress/brotli", False)) rs.append(RequiredSubmodule( "RedfishPkg/Library/JsonLib/jansson", False)) rs.append(RequiredSubmodule( "UnitTestFrameworkPkg/Library/SubhookLib/subhook", False)) rs.append(RequiredSubmodule( "MdePkg/Library/BaseFdtLib/libfdt", False)) rs.append(RequiredSubmodule( "MdePkg/Library/MipiSysTLib/mipisyst", False)) return rs def GetName(self): return "Edk2" def GetDependencies(self): return [ ] def GetPackagesPath(self): return () def GetWorkspaceRoot(self): ''' get WorkspacePath ''' return os.path.dirname(os.path.dirname(os.path.abspath(__file__))) def FilterPackagesToTest(self, changedFilesList: list, potentialPackagesList: list) -> list: ''' Filter potential packages to test based on changed files. ''' build_these_packages = [] possible_packages = potentialPackagesList.copy() for f in changedFilesList: # split each part of path for comparison later nodes = f.split("/") # python file change in .pytool folder causes building all if f.endswith(".py") and ".pytool" in nodes: build_these_packages = possible_packages break # BaseTools files that might change the build if "BaseTools" in nodes: if os.path.splitext(f)[1] not in [".txt", ".md"]: build_these_packages = possible_packages break return build_these_packages
edk2-master
.pytool/CISettings.py
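A small sketch of the validation pattern shared by SetPackages, SetArchitectures and SetTargets in the file above: a set difference against the supported list, complaining about anything unknown (the plugin logs and raises; this standalone version just prints). The package names are illustrative.

requested = ["MdePkg", "NotARealPkg"]
supported = ("MdePkg", "MdeModulePkg", "NetworkPkg")
unsupported = set(requested) - set(supported)
if unsupported:
    # CISettings.py would logging.critical() this message and raise Exception
    print("Unsupported Package Requested: " + " ".join(unsupported))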
# @file dependency_check.py # # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import logging import os from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin from edk2toollib.uefi.edk2.parsers.inf_parser import InfParser from edk2toolext.environment.var_dict import VarDict class DependencyCheck(ICiBuildPlugin): """ A CiBuildPlugin that finds all modules (inf files) in a package and reviews the packages used to confirm they are acceptable. This is to help enforce layering and identify improper dependencies between packages. Configuration options: "DependencyCheck": { "AcceptableDependencies": [], # Package dec files that are allowed in all INFs. Example: MdePkg/MdePkg.dec "AcceptableDependencies-<MODULE_TYPE>": [], # OPTIONAL Package dependencies for INFs of the named MODULE_TYPE "AcceptableDependencies-HOST_APPLICATION": [], # EXAMPLE Package dependencies for INFs that are HOST_APPLICATION "IgnoreInf": [] # Ignore INF if found in filesystem } """ def GetTestName(self, packagename: str, environment: VarDict) -> tuple: """ Provide the testcase name and classname for use in reporting Args: packagename: string containing name of package to build environment: The VarDict for the test to run in Returns: a tuple containing the testcase name and the classname (testcasename, classname) testclassname: a descriptive string for the testcase can include whitespace classname: should be patterned <packagename>.<plugin>.<optionally any unique condition> """ return ("Test Package Dependencies for modules in " + packagename, packagename + ".DependencyCheck") ## # External function of plugin. This function is used to perform the task of the MuBuild Plugin # # - package is the edk2 path to package. This means workspace/packagepath relative. # - edk2path object configured with workspace and packages path # - PkgConfig Object (dict) for the pkg # - EnvConfig Object # - Plugin Manager Instance # - Plugin Helper Obj Instance # - Junit Logger # - output_stream the StringIO output stream from this plugin via logging def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None): overall_status = 0 # Get absolute package path abs_pkg_path = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(packagename) # Get INF Files INFFiles = self.WalkDirectoryForExtension([".inf"], abs_pkg_path) INFFiles = [Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(x) for x in INFFiles] # make edk2relative path so can compare with Ignore List # Remove ignored INFs if "IgnoreInf" in pkgconfig: for a in pkgconfig["IgnoreInf"]: a = a.replace(os.sep, "/") ## convert path sep in case ignore list is bad. Can't change case try: INFFiles.remove(a) tc.LogStdOut("IgnoreInf {0}".format(a)) except: logging.info("DependencyConfig.IgnoreInf -> {0} not found in filesystem. Invalid ignore file".format(a)) tc.LogStdError("DependencyConfig.IgnoreInf -> {0} not found in filesystem. Invalid ignore file".format(a)) # Get the AcceptableDependencies list if "AcceptableDependencies" not in pkgconfig: logging.info("DependencyCheck Skipped. No Acceptable Dependencies defined.") tc.LogStdOut("DependencyCheck Skipped. 
No Acceptable Dependencies defined.") tc.SetSkipped() return -1 # Log dependencies for k in pkgconfig.keys(): if k.startswith("AcceptableDependencies"): pkgstring = "\n".join(pkgconfig[k]) if ("-" in k): _, _, mod_type = k.partition("-") tc.LogStdOut(f"Additional dependencies for MODULE_TYPE {mod_type}:\n {pkgstring}") else: tc.LogStdOut(f"Acceptable Dependencies:\n {pkgstring}") # For each INF file for file in INFFiles: ip = InfParser() logging.debug("Parsing " + file) ip.SetBaseAbsPath(Edk2pathObj.WorkspacePath).SetPackagePaths(Edk2pathObj.PackagePathList).ParseFile(file) if("MODULE_TYPE" not in ip.Dict): tc.LogStdOut("Ignoring INF. Missing key for MODULE_TYPE {0}".format(file)) continue mod_type = ip.Dict["MODULE_TYPE"].upper() for p in ip.PackagesUsed: if p not in pkgconfig["AcceptableDependencies"]: # If not in the main acceptable dependencies list then check module specific mod_specific_key = "AcceptableDependencies-" + mod_type if mod_specific_key in pkgconfig and p in pkgconfig[mod_specific_key]: continue logging.error("Dependency Check: Invalid Dependency INF: {0} depends on pkg {1}".format(file, p)) tc.LogStdError("Dependency Check: Invalid Dependency INF: {0} depends on pkg {1}".format(file, p)) overall_status += 1 # If XML object exists, add results if overall_status != 0: tc.SetFailed("Failed with {0} errors".format(overall_status), "DEPENDENCYCHECK_FAILED") else: tc.SetSuccess() return overall_status
edk2-master
.pytool/Plugin/DependencyCheck/DependencyCheck.py
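A sketch of the pkgconfig dictionary DependencyCheck receives (the plugin is handed the inner dict from ci.yaml), mirroring the "Configuration options" docstring above. The dec paths are examples, not a recommended policy.

pkgconfig = {
    "AcceptableDependencies": ["MdePkg/MdePkg.dec", "MdeModulePkg/MdeModulePkg.dec"],
    "AcceptableDependencies-HOST_APPLICATION": ["UnitTestFrameworkPkg/UnitTestFrameworkPkg.dec"],
    "IgnoreInf": [],  # edk2-relative INF paths to skip entirely
}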
# @file LibraryClassCheck.py # # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import logging import os from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin from edk2toollib.uefi.edk2.parsers.dec_parser import DecParser from edk2toollib.uefi.edk2.parsers.inf_parser import InfParser from edk2toolext.environment.var_dict import VarDict class LibraryClassCheck(ICiBuildPlugin): """ A CiBuildPlugin that scans the code tree and library classes for undeclared files Configuration options: "LibraryClassCheck": { IgnoreHeaderFile: [], # Ignore a file found on disk IgnoreLibraryClass: [] # Ignore a declaration found in dec file } """ def GetTestName(self, packagename: str, environment: VarDict) -> tuple: """ Provide the testcase name and classname for use in reporting testclassname: a descriptive string for the testcase can include whitespace classname: should be patterned <packagename>.<plugin>.<optionally any unique condition> Args: packagename: string containing name of package to build environment: The VarDict for the test to run in Returns: a tuple containing the testcase name and the classname (testcasename, classname) """ return ("Check library class declarations in " + packagename, packagename + ".LibraryClassCheck") def __GetPkgDec(self, rootpath): try: allEntries = os.listdir(rootpath) for entry in allEntries: if entry.lower().endswith(".dec"): return(os.path.join(rootpath, entry)) except Exception: logging.error("Unable to find DEC for package:{0}".format(rootpath)) return None ## # External function of plugin. This function is used to perform the task of the MuBuild Plugin # # - package is the edk2 path to package. This means workspace/packagepath relative. # - edk2path object configured with workspace and packages path # - PkgConfig Object (dict) for the pkg # - EnvConfig Object # - Plugin Manager Instance # - Plugin Helper Obj Instance # - Junit Logger # - output_stream the StringIO output stream from this plugin via logging def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None): overall_status = 0 LibraryClassIgnore = [] abs_pkg_path = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(packagename) abs_dec_path = self.__GetPkgDec(abs_pkg_path) wsr_dec_path = Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(abs_dec_path) if abs_dec_path is None or wsr_dec_path == "" or not os.path.isfile(abs_dec_path): tc.SetSkipped() tc.LogStdError("No DEC file {0} in package {1}".format(abs_dec_path, abs_pkg_path)) return -1 # Get all include folders dec = DecParser() dec.SetBaseAbsPath(Edk2pathObj.WorkspacePath).SetPackagePaths(Edk2pathObj.PackagePathList) dec.ParseFile(wsr_dec_path) AllHeaderFiles = [] for includepath in dec.IncludePaths: ## Get all header files in the library folder AbsLibraryIncludePath = os.path.join(abs_pkg_path, includepath, "Library") if(not os.path.isdir(AbsLibraryIncludePath)): continue hfiles = self.WalkDirectoryForExtension([".h"], AbsLibraryIncludePath) hfiles = [os.path.relpath(x,abs_pkg_path) for x in hfiles] # make package root relative path hfiles = [x.replace("\\", "/") for x in hfiles] # make package relative path AllHeaderFiles.extend(hfiles) if len(AllHeaderFiles) == 0: tc.SetSkipped() tc.LogStdError(f"No Library include folder in any Include path") return -1 # Remove ignored paths if "IgnoreHeaderFile" in pkgconfig: for a in pkgconfig["IgnoreHeaderFile"]: try: tc.LogStdOut("Ignoring Library Header File {0}".format(a)) 
AllHeaderFiles.remove(a) except: tc.LogStdError("LibraryClassCheck.IgnoreHeaderFile -> {0} not found. Invalid Header File".format(a)) logging.info("LibraryClassCheck.IgnoreHeaderFile -> {0} not found. Invalid Header File".format(a)) if "IgnoreLibraryClass" in pkgconfig: LibraryClassIgnore = pkgconfig["IgnoreLibraryClass"] ## Attempt to find library classes for lcd in dec.LibraryClasses: ## Check for correct file path separator if "\\" in lcd.path: tc.LogStdError("LibraryClassCheck.DecFilePathSeparator -> {0} invalid.".format(lcd.path)) logging.error("LibraryClassCheck.DecFilePathSeparator -> {0} invalid.".format(lcd.path)) overall_status += 1 continue if lcd.name in LibraryClassIgnore: tc.LogStdOut("Ignoring Library Class Name {0}".format(lcd.name)) LibraryClassIgnore.remove(lcd.name) continue logging.debug(f"Looking for Library Class {lcd.path}") try: AllHeaderFiles.remove(lcd.path) except ValueError: tc.LogStdError(f"Library {lcd.name} with path {lcd.path} not found in package filesystem") logging.error(f"Library {lcd.name} with path {lcd.path} not found in package filesystem") overall_status += 1 ## any remaining AllHeaderFiles are not described in DEC for h in AllHeaderFiles: tc.LogStdError(f"Library Header File {h} not declared in package DEC {wsr_dec_path}") logging.error(f"Library Header File {h} not declared in package DEC {wsr_dec_path}") overall_status += 1 ## Warn about any invalid library class names in the ignore list for r in LibraryClassIgnore: tc.LogStdError("LibraryClassCheck.IgnoreLibraryClass -> {0} not found. Library Class not found".format(r)) logging.info("LibraryClassCheck.IgnoreLibraryClass -> {0} not found. Library Class not found".format(r)) # If XML object exists, add result if overall_status != 0: tc.SetFailed("LibraryClassCheck {0} Failed. Errors {1}".format(wsr_dec_path, overall_status), "CHECK_FAILED") else: tc.SetSuccess() return overall_status
edk2-master
.pytool/Plugin/LibraryClassCheck/LibraryClassCheck.py
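A sketch of a LibraryClassCheck pkgconfig per its docstring above; the header path and class name are hypothetical. Header paths are package-root relative with forward slashes, matching how the plugin normalizes them.

pkgconfig = {
    "IgnoreHeaderFile": ["Include/Library/ExampleLib.h"],  # hypothetical header on disk
    "IgnoreLibraryClass": ["ExampleLib"],                  # hypothetical dec declaration
}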
# @file HostUnitTestDscCompleteCheck.py # # This is a copy of DscCompleteCheck with different filtering logic. # It should be discussed if this should be one plugin # # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import logging import os from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin from edk2toollib.uefi.edk2.parsers.dsc_parser import DscParser from edk2toollib.uefi.edk2.parsers.inf_parser import InfParser from edk2toolext.environment.var_dict import VarDict class HostUnitTestDscCompleteCheck(ICiBuildPlugin): """ A CiBuildPlugin that scans the package Host Unit Test dsc file and confirms all Host application modules (inf files) are listed in the components sections. Configuration options: "HostUnitTestDscCompleteCheck": { "DscPath": "", # Path to Host based unit test DSC file "IgnoreInf": [] # Ignore INF if found in filesystem but not dsc } """ def GetTestName(self, packagename: str, environment: VarDict) -> tuple: """ Provide the testcase name and classname for use in reporting Args: packagename: string containing name of package to build environment: The VarDict for the test to run in Returns: a tuple containing the testcase name and the classname (testcasename, classname) testclassname: a descriptive string for the testcase can include whitespace classname: should be patterned <packagename>.<plugin>.<optionally any unique condition> """ return ("Check the " + packagename + " Host Unit Test DSC for being complete", packagename + ".HostUnitTestDscCompleteCheck") ## # External function of plugin. This function is used to perform the task of the MuBuild Plugin # # - package is the edk2 path to package. This means workspace/packagepath relative. # - edk2path object configured with workspace and packages path # - PkgConfig Object (dict) for the pkg # - VarDict containing the shell environment Build Vars # - Plugin Manager Instance # - Plugin Helper Obj Instance # - Junit Logger # - output_stream the StringIO output stream from this plugin via logging def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None): overall_status = 0 # Parse the config for required DscPath element if "DscPath" not in pkgconfig: tc.SetSkipped() tc.LogStdError( "DscPath not found in config file. Nothing to check.") return -1 abs_pkg_path = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath( packagename) abs_dsc_path = os.path.join(abs_pkg_path, pkgconfig["DscPath"].strip()) wsr_dsc_path = Edk2pathObj.GetEdk2RelativePathFromAbsolutePath( abs_dsc_path) if abs_dsc_path is None or wsr_dsc_path == "" or not os.path.isfile(abs_dsc_path): tc.SetSkipped() tc.LogStdError("Package Host Unit Test Dsc not found") return 0 # Get INF Files INFFiles = self.WalkDirectoryForExtension([".inf"], abs_pkg_path) INFFiles = [Edk2pathObj.GetEdk2RelativePathFromAbsolutePath( x) for x in INFFiles] # make edk2relative path so can compare with DSC # remove ignores if "IgnoreInf" in pkgconfig: for a in pkgconfig["IgnoreInf"]: a = a.replace(os.sep, "/") try: tc.LogStdOut("Ignoring INF {0}".format(a)) INFFiles.remove(a) except: tc.LogStdError( "HostUnitTestDscCompleteCheck.IgnoreInf -> {0} not found in filesystem. Invalid ignore file".format(a)) logging.info( "HostUnitTestDscCompleteCheck.IgnoreInf -> {0} not found in filesystem. Invalid ignore file".format(a)) # DSC Parser dp = DscParser() dp.SetBaseAbsPath(Edk2pathObj.WorkspacePath) dp.SetPackagePaths(Edk2pathObj.PackagePathList) dp.SetInputVars(environment.GetAllBuildKeyValues()) dp.ParseFile(wsr_dsc_path) # Check if INF in component section for INF in INFFiles: if not any(INF.strip() in x for x in dp.ThreeMods) and \ not any(INF.strip() in x for x in dp.SixMods) and \ not any(INF.strip() in x for x in dp.OtherMods): infp = InfParser().SetBaseAbsPath(Edk2pathObj.WorkspacePath) infp.SetPackagePaths(Edk2pathObj.PackagePathList) infp.ParseFile(INF) if("MODULE_TYPE" not in infp.Dict): tc.LogStdOut( "Ignoring INF. Missing key for MODULE_TYPE {0}".format(INF)) continue if(infp.Dict["MODULE_TYPE"] == "HOST_APPLICATION"): # should compile test a library that is declared type HOST_APPLICATION pass elif len(infp.SupportedPhases) > 0 and \ "HOST_APPLICATION" in infp.SupportedPhases: # should compile test a library that supports HOST_APPLICATION but # require it to be an explicit opt-in pass else: tc.LogStdOut( "Ignoring INF. MODULE_TYPE or supported phases not HOST_APPLICATION {0}".format(INF)) continue logging.critical(INF + " not in " + wsr_dsc_path) tc.LogStdError("{0} not in {1}".format(INF, wsr_dsc_path)) overall_status = overall_status + 1 # If XML object exists, add result if overall_status != 0: tc.SetFailed("HostUnitTestDscCompleteCheck {0} Failed. Errors {1}".format( wsr_dsc_path, overall_status), "CHECK_FAILED") else: tc.SetSuccess() return overall_status
edk2-master
.pytool/Plugin/HostUnitTestDscCompleteCheck/HostUnitTestDscCompleteCheck.py
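A sketch of the pkgconfig HostUnitTestDscCompleteCheck expects, per its docstring above. "DscPath" is relative to the package root; the file name shown is a common convention, not a requirement.

pkgconfig = {
    "DscPath": "Test/ExamplePkgHostTest.dsc",  # hypothetical host-based test DSC
    "IgnoreInf": [],
}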
# @file GuidCheck.py # # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import logging from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin from edk2toollib.uefi.edk2.guid_list import GuidList from edk2toolext.environment.var_dict import VarDict class GuidCheck(ICiBuildPlugin): """ A CiBuildPlugin that scans the code tree and looks for duplicate guids from the package being tested. Configuration options: "GuidCheck": { "IgnoreGuidName": [], # provide in format guidname=guidvalue or just guidname "IgnoreGuidValue": [], "IgnoreFoldersAndFiles": [], "IgnoreDuplicates": [] # Provide in format guidname=guidname=guidname... } """ def GetTestName(self, packagename: str, environment: VarDict) -> tuple: """ Provide the testcase name and classname for use in reporting Args: packagename: string containing name of package to build environment: The VarDict for the test to run in Returns: a tuple containing the testcase name and the classname (testcasename, classname) testclassname: a descriptive string for the testcase can include whitespace classname: should be patterned <packagename>.<plugin>.<optionally any unique condition> """ return ("Confirm GUIDs are unique in " + packagename, packagename + ".GuidCheck") def _FindConflictingGuidValues(self, guidlist: list) -> list: """ Find all duplicate guids by guid value and report them as errors """ # Sort the list by guid guidsorted = sorted( guidlist, key=lambda x: x.guid.upper(), reverse=True) previous = None # Store previous entry for comparison error = None errors = [] for index in range(len(guidsorted)): i = guidsorted[index] if(previous is not None): if i.guid == previous.guid: # Error if(error is None): # Catch errors with more than 1 conflict error = ErrorEntry("guid") error.entries.append(previous) errors.append(error) error.entries.append(i) else: # no match. clear error error = None previous = i return errors def _FindConflictingGuidNames(self, guidlist: list) -> list: """ Find all duplicate guids by name and if they are not all from inf files report them as errors. It is ok to have BASE_NAME duplication. Is this useful? It would catch two same named guids in dec file that resolve to different values. """ # Sort the list by guid namesorted = sorted(guidlist, key=lambda x: x.name.upper()) previous = None # Store previous entry for comparison error = None errors = [] for index in range(len(namesorted)): i = namesorted[index] if(previous is not None): # If name matches if i.name == previous.name: if(error is None): # Catch errors with more than 1 conflict error = ErrorEntry("name") error.entries.append(previous) errors.append(error) error.entries.append(i) else: # no match. clear error error = None previous = i # Loop thru and remove any errors where all files are infs as it is ok if # they have the same inf base name. for e in errors[:]: if len( [en for en in e.entries if not en.absfilepath.lower().endswith(".inf")]) == 0: errors.remove(e) return errors ## # External function of plugin. This function is used to perform the task of the MuBuild Plugin # # - package is the edk2 path to package. This means workspace/packagepath relative. 
# - edk2path object configured with workspace and packages path # - PkgConfig Object (dict) for the pkg # - EnvConfig Object # - Plugin Manager Instance # - Plugin Helper Obj Instance # - Junit Logger # - output_stream the StringIO output stream from this plugin via logging def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None): Errors = [] abs_pkg_path = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath( packagename) if abs_pkg_path is None: tc.SetSkipped() tc.LogStdError("No package {0}".format(packagename)) return -1 All_Ignores = ["/Build", "/Conf"] # Parse the config for other ignores if "IgnoreFoldersAndFiles" in pkgconfig: All_Ignores.extend(pkgconfig["IgnoreFoldersAndFiles"]) # Parse the workspace for all GUIDs gs = GuidList.guidlist_from_filesystem( Edk2pathObj.WorkspacePath, ignore_lines=All_Ignores) # Remove ignored guidvalue if "IgnoreGuidValue" in pkgconfig: for a in pkgconfig["IgnoreGuidValue"]: try: tc.LogStdOut("Ignoring Guid {0}".format(a.upper())) for b in gs[:]: if b.guid == a.upper(): gs.remove(b) except: tc.LogStdError("GuidCheck.IgnoreGuidValue -> {0} not found. Invalid ignore guid".format(a.upper())) logging.info("GuidCheck.IgnoreGuidValue -> {0} not found. Invalid ignore guid".format(a.upper())) # Remove ignored guidname if "IgnoreGuidName" in pkgconfig: for a in pkgconfig["IgnoreGuidName"]: entry = a.split("=") if(len(entry) > 2): tc.LogStdError("GuidCheck.IgnoreGuidName -> {0} Invalid Format.".format(a)) logging.info("GuidCheck.IgnoreGuidName -> {0} Invalid Format.".format(a)) continue try: tc.LogStdOut("Ignoring Guid {0}".format(a)) for b in gs[:]: if b.name == entry[0]: if(len(entry) == 1): gs.remove(b) elif(len(entry) == 2 and b.guid.upper() == entry[1].upper()): gs.remove(b) else: tc.LogStdError("GuidCheck.IgnoreGuidName -> {0} incomplete match. Invalid ignore guid".format(a)) except: tc.LogStdError("GuidCheck.IgnoreGuidName -> {0} not found. Invalid ignore name".format(a)) logging.info("GuidCheck.IgnoreGuidName -> {0} not found. Invalid ignore name".format(a)) # Find conflicting Guid Values Errors.extend(self._FindConflictingGuidValues(gs)) # Check if there are expected duplicates and remove them from the error list if "IgnoreDuplicates" in pkgconfig: for a in pkgconfig["IgnoreDuplicates"]: names = a.split("=") if len(names) < 2: tc.LogStdError("GuidCheck.IgnoreDuplicates -> {0} invalid format".format(a)) logging.info("GuidCheck.IgnoreDuplicates -> {0} invalid format".format(a)) continue for b in Errors[:]: if b.type != "guid": continue ## Make a list of the names that are not in the names list. If there ## are any in the list then this error should not be ignored. 
t = [x for x in b.entries if x.name not in names] if(len(t) == len(b.entries)): ## did not apply to any entry continue elif(len(t) == 0): ## full match - ignore duplicate tc.LogStdOut("GuidCheck.IgnoreDuplicates -> {0}".format(a)) Errors.remove(b) elif(len(t) < len(b.entries)): ## partial match tc.LogStdOut("GuidCheck.IgnoreDuplicates -> {0} incomplete match".format(a)) logging.info("GuidCheck.IgnoreDuplicates -> {0} incomplete match".format(a)) else: tc.LogStdOut("GuidCheck.IgnoreDuplicates -> {0} unknown error.".format(a)) logging.info("GuidCheck.IgnoreDuplicates -> {0} unknown error".format(a)) # Find conflicting Guid Names Errors.extend(self._FindConflictingGuidNames(gs)) # Log errors for anything within the package under test for er in Errors[:]: InMyPackage = False for a in er.entries: if abs_pkg_path in a.absfilepath: InMyPackage = True break if(not InMyPackage): Errors.remove(er) else: logging.error(str(er)) tc.LogStdError(str(er)) # add result to test case overall_status = len(Errors) if overall_status != 0: tc.SetFailed("GuidCheck {0} Failed. Errors {1}".format( packagename, overall_status), "CHECK_FAILED") else: tc.SetSuccess() return overall_status class ErrorEntry(): """ Custom/private class for reporting errors in the GuidList """ def __init__(self, errortype): self.type = errortype # 'guid' or 'name' depending on error type self.entries = [] # GuidListEntry that are in error condition def __str__(self): a = f"Error Duplicate {self.type}: " if(self.type == "guid"): a += f" {self.entries[0].guid}" elif(self.type == "name"): a += f" {self.entries[0].name}" a += f" ({len(self.entries)})\n" for e in self.entries: a += "\t" + str(e) + "\n" return a
edk2-master
.pytool/Plugin/GuidCheck/GuidCheck.py
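A sketch of GuidCheck's ignore-list formats, following the docstring and the parsing code above; all GUID names and values here are invented examples.

pkgconfig = {
    "IgnoreGuidName": ["gExampleGuid", "gOtherGuid=11111111-2222-3333-4444-555555555555"],
    "IgnoreGuidValue": ["00000000-0000-0000-0000-000000000000"],
    "IgnoreFoldersAndFiles": ["/Build", "/Conf"],
    "IgnoreDuplicates": ["gSharedGuidA=gSharedGuidB"],  # name=name[=name...] groups
}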
# @file SpellCheck.py # # An edk2-pytool based plugin wrapper for cspell # # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import logging import json import yaml from io import StringIO import os from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin from edk2toollib.utility_functions import RunCmd from edk2toolext.environment.var_dict import VarDict from edk2toollib.gitignore_parser import parse_gitignore_lines from edk2toolext.environment import version_aggregator class SpellCheck(ICiBuildPlugin): """ A CiBuildPlugin that uses the cspell node module to scan the files from the package being tested for spelling errors. The plugin contains the base cspell.json file then thru the configuration options other settings can be changed or extended. Configuration options: "SpellCheck": { "AuditOnly": False, # Don't fail the build if there are errors. Just log them "IgnoreFiles": [], # use gitignore syntax to ignore errors in matching files "ExtendWords": [], # words to extend to the dictionary for this package "IgnoreStandardPaths": [], # Standard Plugin defined paths that should be ignore "AdditionalIncludePaths": [] # Additional paths to spell check (wildcards supported) } """ # # A package can remove any of these using IgnoreStandardPaths # STANDARD_PLUGIN_DEFINED_PATHS = ("*.c", "*.h", "*.nasm", "*.asm", "*.masm", "*.s", "*.asl", "*.dsc", "*.dec", "*.fdf", "*.inf", "*.md", "*.txt" ) def GetTestName(self, packagename: str, environment: VarDict) -> tuple: """ Provide the testcase name and classname for use in reporting Args: packagename: string containing name of package to build environment: The VarDict for the test to run in Returns: a tuple containing the testcase name and the classname (testcasename, classname) testclassname: a descriptive string for the testcase can include whitespace classname: should be patterned <packagename>.<plugin>.<optionally any unique condition> """ return ("Spell check files in " + packagename, packagename + ".SpellCheck") ## # External function of plugin. This function is used to perform the task of the CiBuild Plugin # # - package is the edk2 path to package. This means workspace/packagepath relative. # - edk2path object configured with workspace and packages path # - PkgConfig Object (dict) for the pkg # - EnvConfig Object # - Plugin Manager Instance # - Plugin Helper Obj Instance # - Junit Logger # - output_stream the StringIO output stream from this plugin via logging def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None): Errors = [] abs_pkg_path = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath( packagename) if abs_pkg_path is None: tc.SetSkipped() tc.LogStdError("No package {0}".format(packagename)) return -1 # check for node return_buffer = StringIO() ret = RunCmd("node", "--version", outstream=return_buffer) if (ret != 0): tc.SetSkipped() tc.LogStdError("NodeJs not installed. Test can't run") logging.warning("NodeJs not installed. Test can't run") return -1 node_version = return_buffer.getvalue().strip() # format vXX.XX.XX tc.LogStdOut(f"Node version: {node_version}") version_aggregator.GetVersionAggregator().ReportVersion( "NodeJs", node_version, version_aggregator.VersionTypes.INFO) # Check for cspell return_buffer = StringIO() ret = RunCmd("cspell", "--version", outstream=return_buffer) if (ret != 0): tc.SetSkipped() tc.LogStdError("cspell not installed. Test can't run") logging.warning("cspell not installed. 
Test can't run") return -1 cspell_version = return_buffer.getvalue().strip() # format XX.XX.XX tc.LogStdOut(f"CSpell version: {cspell_version}") version_aggregator.GetVersionAggregator().ReportVersion( "CSpell", cspell_version, version_aggregator.VersionTypes.INFO) # copy the default as a list package_relative_paths_to_spell_check = list(SpellCheck.STANDARD_PLUGIN_DEFINED_PATHS) # # Allow the ci.yaml to remove any of the above standard paths # if("IgnoreStandardPaths" in pkgconfig): for a in pkgconfig["IgnoreStandardPaths"]: if(a in package_relative_paths_to_spell_check): tc.LogStdOut( f"ignoring standard path due to ci.yaml ignore: {a}") package_relative_paths_to_spell_check.remove(a) else: tc.LogStdOut(f"Invalid IgnoreStandardPaths value: {a}") # # check for any additional include paths defined by package config # if("AdditionalIncludePaths" in pkgconfig): package_relative_paths_to_spell_check.extend( pkgconfig["AdditionalIncludePaths"]) # # Make the path string for cspell to check # relpath = os.path.relpath(abs_pkg_path) cpsell_paths = " ".join( # Double quote each path to defer expansion to cspell parameters [f'"{relpath}/**/{x}"' for x in package_relative_paths_to_spell_check]) # Make the config file config_file_path = os.path.join( Edk2pathObj.WorkspacePath, "Build", packagename, "cspell_actual_config.json") mydir = os.path.dirname(os.path.abspath(__file__)) # load as yaml so it can have comments base = os.path.join(mydir, "cspell.base.yaml") with open(base, "r") as i: config = yaml.safe_load(i) if("ExtendWords" in pkgconfig): config["words"].extend(pkgconfig["ExtendWords"]) with open(config_file_path, "w") as o: json.dump(config, o) # output as json so compat with cspell All_Ignores = [] # Parse the config for other ignores if "IgnoreFiles" in pkgconfig: All_Ignores.extend(pkgconfig["IgnoreFiles"]) # spell check all the files ignore = parse_gitignore_lines(All_Ignores, os.path.join( abs_pkg_path, "nofile.txt"), abs_pkg_path) # result is a list of strings like this # C:\src\sp-edk2\edk2\FmpDevicePkg\FmpDevicePkg.dec:53:9 - Unknown word (Capule) EasyFix = [] results = self._check_spelling(cpsell_paths, config_file_path) for r in results: path, _, word = r.partition(" - Unknown word ") if len(word) == 0: # didn't find pattern continue pathinfo = path.rsplit(":", 2) # remove the line no info if(ignore(pathinfo[0])): # check against ignore list tc.LogStdOut(f"ignoring error due to ci.yaml ignore: {r}") continue # real error EasyFix.append(word.strip().strip("()")) Errors.append(r) # Log all errors tc StdError for l in Errors: tc.LogStdError(l.strip()) # Helper - Log the syntax needed to add these words to dictionary if len(EasyFix) > 0: EasyFix = sorted(set(a.lower() for a in EasyFix)) tc.LogStdOut("\n Easy fix:") OneString = "If these are not errors add this to your ci.yaml file.\n" OneString += '"SpellCheck": {\n "ExtendWords": [' for a in EasyFix: tc.LogStdOut(f'\n"{a}",') OneString += f'\n "{a}",' logging.info(OneString.rstrip(",") + '\n ]\n}') # add result to test case overall_status = len(Errors) if overall_status != 0: if "AuditOnly" in pkgconfig and pkgconfig["AuditOnly"]: # set as skipped if AuditOnly tc.SetSkipped() return -1 else: tc.SetFailed("SpellCheck {0} Failed. 
Errors {1}".format( packagename, overall_status), "CHECK_FAILED") else: tc.SetSuccess() return overall_status def _check_spelling(self, abs_file_to_check: str, abs_config_file_to_use: str) -> []: output = StringIO() ret = RunCmd( "cspell", f"--config {abs_config_file_to_use} {abs_file_to_check}", outstream=output) if ret == 0: return [] else: return output.getvalue().strip().splitlines()
edk2-master
.pytool/Plugin/SpellCheck/SpellCheck.py
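A sketch of a SpellCheck pkgconfig using the options documented above; the words and globs are examples only.

pkgconfig = {
    "AuditOnly": False,                   # True logs errors without failing the build
    "IgnoreFiles": ["*.bin"],             # gitignore syntax
    "ExtendWords": ["exampleword"],       # added to the cspell dictionary
    "IgnoreStandardPaths": ["*.asm"],     # drop one of the default globs
    "AdditionalIncludePaths": ["*.json"], # spell check extra file types
}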
# @file HostUnitTestCompilerPlugin.py ## # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import logging import os import re from edk2toollib.uefi.edk2.parsers.dsc_parser import DscParser from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin from edk2toolext.environment.uefi_build import UefiBuilder from edk2toolext import edk2_logging from edk2toolext.environment.var_dict import VarDict from edk2toollib.utility_functions import GetHostInfo class HostUnitTestCompilerPlugin(ICiBuildPlugin): """ A CiBuildPlugin that compiles the dsc for host based unit test apps. An IUefiBuildPlugin may be attached to this plugin that will run the unit tests and collect the results after successful compilation. Configuration options: "HostUnitTestCompilerPlugin": { "DscPath": "<path to dsc from root of pkg>" } """ def GetTestName(self, packagename: str, environment: VarDict) -> tuple: """ Provide the testcase name and classname for use in reporting testclassname: a descriptive string for the testcase can include whitespace classname: should be patterned <packagename>.<plugin>.<optionally any unique condition> Args: packagename: string containing name of package to build environment: The VarDict for the test to run in Returns: a tuple containing the testcase name and the classname (testcasename, classname) """ num,types = self.__GetHostUnitTestArch(environment) types = types.replace(" ", "_") return ("Compile and Run Host-Based UnitTests for " + packagename + " on arch " + types, packagename + ".HostUnitTestCompiler." + types) def RunsOnTargetList(self): return ["NOOPT"] # # Find the intersection of application types that can run on this host # and the TARGET_ARCH being build in this request. # # return tuple with (number of UEFI arch types, space separated string) def __GetHostUnitTestArch(self, environment): requested = environment.GetValue("TARGET_ARCH").split(' ') host = [] if GetHostInfo().arch == 'x86': #assume 64bit can handle 64 and 32 #assume 32bit can only handle 32 ## change once IA32 issues resolved host.append("IA32") if GetHostInfo().bit == '64': host.append("X64") elif GetHostInfo().arch == 'ARM': if GetHostInfo().bit == '64': host.append("AARCH64") elif GetHostInfo().bit == '32': host.append("ARM") willrun = set(requested) & set(host) return (len(willrun), " ".join(willrun)) ## # External function of plugin. This function is used to perform the task of the ICiBuildPlugin Plugin # # - package is the edk2 path to package. This means workspace/packagepath relative. # - edk2path object configured with workspace and packages path # - PkgConfig Object (dict) for the pkg # - EnvConfig Object # - Plugin Manager Instance # - Plugin Helper Obj Instance # - Junit Logger # - output_stream the StringIO output stream from this plugin via logging def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None): self._env = environment environment.SetValue("CI_BUILD_TYPE", "host_unit_test", "Set in HostUnitTestCompilerPlugin") # Parse the config for required DscPath element if "DscPath" not in pkgconfig: tc.SetSkipped() tc.LogStdError("DscPath not found in config file. 
Nothing to compile for HostBasedUnitTests.") return -1 AP = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(packagename) APDSC = os.path.join(AP, pkgconfig["DscPath"].strip()) AP_Path = Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(APDSC) if AP is None or AP_Path is None or not os.path.isfile(APDSC): tc.SetSkipped() tc.LogStdError("Package HostBasedUnitTest Dsc not found.") return -1 logging.info("Building {0}".format(AP_Path)) self._env.SetValue("ACTIVE_PLATFORM", AP_Path, "Set in Compiler Plugin") num, RUNNABLE_ARCHITECTURES = self.__GetHostUnitTestArch(environment) if(num == 0): tc.SetSkipped() tc.LogStdError("No host architecture compatibility") return -1 if not environment.SetValue("TARGET_ARCH", RUNNABLE_ARCHITECTURES, "Update Target Arch based on Host Support"): #use AllowOverride function since this is a controlled attempt to change environment.AllowOverride("TARGET_ARCH") if not environment.SetValue("TARGET_ARCH", RUNNABLE_ARCHITECTURES, "Update Target Arch based on Host Support"): raise RuntimeError("Can't Change TARGET_ARCH as required") # Parse DSC to check for SUPPORTED_ARCHITECTURES dp = DscParser() dp.SetBaseAbsPath(Edk2pathObj.WorkspacePath) dp.SetPackagePaths(Edk2pathObj.PackagePathList) dp.ParseFile(AP_Path) if "SUPPORTED_ARCHITECTURES" in dp.LocalVars: SUPPORTED_ARCHITECTURES = dp.LocalVars["SUPPORTED_ARCHITECTURES"].split('|') TARGET_ARCHITECTURES = environment.GetValue("TARGET_ARCH").split(' ') # Skip if there is no intersection between SUPPORTED_ARCHITECTURES and TARGET_ARCHITECTURES if len(set(SUPPORTED_ARCHITECTURES) & set(TARGET_ARCHITECTURES)) == 0: tc.SetSkipped() tc.LogStdError("No supported architectures to build for host unit tests") return -1 uefiBuilder = UefiBuilder() # do all the steps # WorkSpace, PackagesPath, PInHelper, PInManager ret = uefiBuilder.Go(Edk2pathObj.WorkspacePath, os.pathsep.join(Edk2pathObj.PackagePathList), PLMHelper, PLM) if ret != 0: # failure: tc.SetFailed("Compile failed for {0}".format(packagename), "Compile_FAILED") tc.LogStdError("{0} Compile failed with error code {1} ".format(AP_Path, ret)) return 1 else: tc.SetSuccess() return 0
edk2-master
.pytool/Plugin/HostUnitTestCompilerPlugin/HostUnitTestCompilerPlugin.py
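A sketch of the host/target intersection computed by __GetHostUnitTestArch above: requested TARGET_ARCH values are kept only if the host can execute them. The values assume a 64-bit x86 host.

requested = "IA32 X64 AARCH64".split(' ')  # what the build request asked for
host = ["IA32", "X64"]                     # what a 64-bit x86 host can run
willrun = set(requested) & set(host)
print(len(willrun), " ".join(willrun))     # -> 2 and the runnable arch list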
# @file CharEncodingCheck.py # # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import os import logging from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin from edk2toolext.environment.var_dict import VarDict ## # Map of file extensions to their expected encoding ## EncodingMap = { ".md": 'utf-8', ".dsc": 'utf-8', ".dec": 'utf-8', ".c": 'utf-8', ".h": 'utf-8', ".asm": 'utf-8', ".masm": 'utf-8', ".nasm": 'utf-8', ".s": 'utf-8', ".inf": 'utf-8', ".asl": 'utf-8', ".uni": 'utf-8', ".py": 'utf-8' } class CharEncodingCheck(ICiBuildPlugin): """ A CiBuildPlugin that scans each file in the code tree and confirms the encoding is correct. Configuration options: "CharEncodingCheck": { "IgnoreFiles": [] } """ def GetTestName(self, packagename: str, environment: VarDict) -> tuple: """ Provide the testcase name and classname for use in reporting testclassname: a descriptive string for the testcase can include whitespace classname: should be patterned <packagename>.<plugin>.<optionally any unique condition> Args: packagename: string containing name of package to build environment: The VarDict for the test to run in Returns: a tuple containing the testcase name and the classname (testcasename, classname) """ return ("Check for valid file encoding for " + packagename, packagename + ".CharEncodingCheck") ## # External function of plugin. This function is used to perform the task of the ci_build_plugin Plugin # # - package is the edk2 path to package. This means workspace/packagepath relative. # - edk2path object configured with workspace and packages path # - PkgConfig Object (dict) for the pkg # - EnvConfig Object # - Plugin Manager Instance # - Plugin Helper Obj Instance # - Junit Logger # - output_stream the StringIO output stream from this plugin via logging def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None): overall_status = 0 files_tested = 0 abs_pkg_path = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(packagename) if abs_pkg_path is None: tc.SetSkipped() tc.LogStdError("No Package folder {0}".format(abs_pkg_path)) return 0 for (ext, enc) in EncodingMap.items(): files = self.WalkDirectoryForExtension([ext], abs_pkg_path) files = [Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(x) for x in files] # make edk2relative path so can process ignores if "IgnoreFiles" in pkgconfig: for a in pkgconfig["IgnoreFiles"]: a = a.replace(os.sep, "/") try: tc.LogStdOut("Ignoring File {0}".format(a)) files.remove(a) except: tc.LogStdError("CharEncodingCheck.IgnoreFiles -> {0} not found in filesystem. Invalid ignore file".format(a)) logging.info("CharEncodingCheck.IgnoreFiles -> {0} not found in filesystem. Invalid ignore file".format(a)) files = [Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(x) for x in files] for a in files: files_tested += 1 if not self.TestEncodingOk(a, enc): tc.LogStdError("Encoding Failure in {0}. Not {1}".format(a, enc)) overall_status += 1 tc.LogStdOut("Tested Encoding on {0} files".format(files_tested)) if overall_status != 0: tc.SetFailed("CharEncoding {0} Failed. 
Errors {1}".format(packagename, overall_status), "CHAR_ENCODING_CHECK_FAILED") else: tc.SetSuccess() return overall_status def TestEncodingOk(self, apath, encodingValue): try: with open(apath, "rb") as fobj: fobj.read().decode(encodingValue) except Exception as exp: logging.error("Encoding failure: file: {0} type: {1}".format(apath, encodingValue)) logging.debug("EXCEPTION: while processing {1} - {0}".format(exp, apath)) return False return True
edk2-master
.pytool/Plugin/CharEncodingCheck/CharEncodingCheck.py
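A sketch of the probe TestEncodingOk performs above: read the raw bytes and attempt a strict decode, treating any failure as a bad encoding. This standalone variant narrows the exception types for clarity.

def encoding_ok(path: str, encoding: str = "utf-8") -> bool:
    # Decode the whole file strictly; a UnicodeDecodeError means the file
    # does not conform to the expected encoding.
    try:
        with open(path, "rb") as fobj:
            fobj.read().decode(encoding)
    except (OSError, UnicodeDecodeError):
        return False
    return True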
# @file CompilerPlugin.py ## # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import logging import os import re from edk2toollib.uefi.edk2.parsers.dsc_parser import DscParser from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin from edk2toolext.environment.uefi_build import UefiBuilder from edk2toolext import edk2_logging from edk2toolext.environment.var_dict import VarDict class CompilerPlugin(ICiBuildPlugin): """ A CiBuildPlugin that compiles the package dsc from the package being tested. Configuration options: "CompilerPlugin": { "DscPath": "<path to dsc from root of pkg>" } """ def GetTestName(self, packagename: str, environment: VarDict) -> tuple: """ Provide the testcase name and classname for use in reporting Args: packagename: string containing name of package to build environment: The VarDict for the test to run in Returns: a tuple containing the testcase name and the classname (testcasename, classname) """ target = environment.GetValue("TARGET") return ("Compile " + packagename + " " + target, packagename + ".Compiler." + target) def RunsOnTargetList(self): return ["DEBUG", "RELEASE"] ## # External function of plugin. This function is used to perform the task of the ICiBuildPlugin Plugin # # - package is the edk2 path to package. This means workspace/packagepath relative. # - edk2path object configured with workspace and packages path # - PkgConfig Object (dict) for the pkg # - EnvConfig Object # - Plugin Manager Instance # - Plugin Helper Obj Instance # - Junit Logger # - output_stream the StringIO output stream from this plugin via logging def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None): self._env = environment # Parse the config for required DscPath element if "DscPath" not in pkgconfig: tc.SetSkipped() tc.LogStdError("DscPath not found in config file. Nothing to compile.") return -1 AP = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(packagename) APDSC = os.path.join(AP, pkgconfig["DscPath"].strip()) AP_Path = Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(APDSC) if AP is None or AP_Path is None or not os.path.isfile(APDSC): tc.SetSkipped() tc.LogStdError("Package Dsc not found.") return -1 logging.info("Building {0}".format(AP_Path)) self._env.SetValue("ACTIVE_PLATFORM", AP_Path, "Set in Compiler Plugin") # Parse DSC to check for SUPPORTED_ARCHITECTURES dp = DscParser() dp.SetBaseAbsPath(Edk2pathObj.WorkspacePath) dp.SetPackagePaths(Edk2pathObj.PackagePathList) dp.ParseFile(AP_Path) if "SUPPORTED_ARCHITECTURES" in dp.LocalVars: SUPPORTED_ARCHITECTURES = dp.LocalVars["SUPPORTED_ARCHITECTURES"].split('|') TARGET_ARCHITECTURES = environment.GetValue("TARGET_ARCH").split(' ') # Skip if there is no intersection between SUPPORTED_ARCHITECTURES and TARGET_ARCHITECTURES if len(set(SUPPORTED_ARCHITECTURES) & set(TARGET_ARCHITECTURES)) == 0: tc.SetSkipped() tc.LogStdError("No supported architectures to build") return -1 uefiBuilder = UefiBuilder() # do all the steps # WorkSpace, PackagesPath, PInHelper, PInManager ret = uefiBuilder.Go(Edk2pathObj.WorkspacePath, os.pathsep.join(Edk2pathObj.PackagePathList), PLMHelper, PLM) if ret != 0: # failure: tc.SetFailed("Compile failed for {0}".format(packagename), "Compile_FAILED") tc.LogStdError("{0} Compile failed with error code {1} ".format(AP_Path, ret)) return 1 else: tc.SetSuccess() return 0
edk2-master
.pytool/Plugin/CompilerPlugin/CompilerPlugin.py
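A sketch of the minimal pkgconfig CompilerPlugin requires, per its docstring above. Note the plugin receives the inner dictionary (it checks "DscPath" directly); the DSC name here is hypothetical.

pkgconfig = {"DscPath": "ExamplePkg.dsc"}  # hypothetical DSC at the package root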
# @file LicenseCheck.py # # Copyright (c) 2020, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent ## import os import shutil import logging import re from io import StringIO from typing import List, Tuple from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin from edk2toolext.environment.var_dict import VarDict from edk2toollib.utility_functions import RunCmd class LicenseCheck(ICiBuildPlugin): """ A CiBuildPlugin to check the license for newly added files. Configuration options: "LicenseCheck": { "IgnoreFiles": [] }, """ license_format_prefix = 'SPDX-License-Identifier' bsd2_patent = 'BSD-2-Clause-Patent' Readdedfileformat = re.compile(r'\+\+\+ b\/(.*)') file_extension_list = [".c", ".h", ".inf", ".dsc", ".dec", ".py", ".bat", ".sh", ".uni", ".yaml", ".fdf", ".inc", ".yml", ".asm", ".asm16", ".asl", ".vfr", ".s", ".S", ".aslc", ".nasm", ".nasmb", ".idf", ".Vfr", ".H"] def GetTestName(self, packagename: str, environment: VarDict) -> tuple: """ Provide the testcase name and classname for use in reporting testclassname: a descriptive string for the testcase can include whitespace classname: should be patterned <packagename>.<plugin>.<optionally any unique condition> Args: packagename: string containing name of package to build environment: The VarDict for the test to run in Returns: a tuple containing the testcase name and the classname (testcasename, classname) """ return ("Check for license for " + packagename, packagename + ".LicenseCheck") ## # External function of plugin. This function is used to perform the task of the ci_build_plugin Plugin # # - package is the edk2 path to package. This means workspace/packagepath relative. # - edk2path object configured with workspace and packages path # - PkgConfig Object (dict) for the pkg # - EnvConfig Object # - Plugin Manager Instance # - Plugin Helper Obj Instance # - Junit Logger # - output_stream the StringIO output stream from this plugin via logging def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None): # Create temp directory temp_path = os.path.join(Edk2pathObj.WorkspacePath, 'Build', '.pytool', 'Plugin', 'LicenseCheck') if not os.path.exists(temp_path): os.makedirs(temp_path) # Output file to use for git diff operations temp_diff_output = os.path.join(temp_path, 'diff.txt') params = "diff --output={} --unified=0 origin/master HEAD".format(temp_diff_output) RunCmd("git", params) with open(temp_diff_output) as file: patch = file.read().strip().split("\n") # Delete temp directory if os.path.exists(temp_path): shutil.rmtree(temp_path) ignore_files = [] if "IgnoreFiles" in pkgconfig: ignore_files = pkgconfig["IgnoreFiles"] self.ok = True self.startcheck = False self.license = True self.all_file_pass = True count = len(patch) line_index = 0 for line in patch: if line.startswith('--- /dev/null'): nextline = patch[line_index + 1] added_file = self.Readdedfileformat.search(nextline).group(1) added_file_extension = os.path.splitext(added_file)[1] if added_file_extension in self.file_extension_list and packagename in added_file: if (self.IsIgnoreFile(added_file, ignore_files)): line_index = line_index + 1 continue self.startcheck = True self.license = False if self.startcheck and self.license_format_prefix in line: if self.bsd2_patent in line: self.license = True if line_index + 1 == count or patch[line_index + 1].startswith('diff --') and self.startcheck: if not self.license: self.all_file_pass = False error_message = "Invalid license in: " + added_file + " Hint: Only BSD-2-Clause-Patent is accepted." logging.error(error_message) self.startcheck = False self.license = True line_index = line_index + 1 if self.all_file_pass: tc.SetSuccess() return 0 else: tc.SetFailed("License Check {0} Failed. ".format(packagename), "LICENSE_CHECK_FAILED") return 1 def IsIgnoreFile(self, file: str, ignore_files: List[str]) -> bool: for f in ignore_files: if f in file: return True return False
edk2-master
.pytool/Plugin/LicenseCheck/LicenseCheck.py
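A sketch showing how LicenseCheck's Readdedfileformat regex extracts the path of a newly added file from a unified git diff header; the diff line is illustrative.

import re

added = re.compile(r'\+\+\+ b\/(.*)')
match = added.search('+++ b/MdePkg/Example/NewFile.c')  # hypothetical diff header line
assert match.group(1) == 'MdePkg/Example/NewFile.c'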
# @file UncrustifyCheck.py
#
# An edk2-pytool based plugin wrapper for Uncrustify
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import configparser
import difflib
import errno
import logging
import os
import pathlib
import shutil
import stat
import timeit
from edk2toolext.environment import version_aggregator
from edk2toolext.environment.plugin_manager import PluginManager
from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin
from edk2toolext.environment.plugintypes.uefi_helper_plugin import HelperFunctions
from edk2toolext.environment.var_dict import VarDict
from edk2toollib.gitignore_parser import parse_gitignore_lines
from edk2toollib.log.junit_report_format import JunitReportTestCase
from edk2toollib.uefi.edk2.path_utilities import Edk2Path
from edk2toollib.utility_functions import RunCmd
from io import StringIO
from typing import Any, Dict, List, Tuple

#
# Provide more user friendly messages for certain scenarios
#
class UncrustifyException(Exception):
    def __init__(self, message, exit_code):
        super().__init__(message)
        self.exit_code = exit_code


class UncrustifyAppEnvVarNotFoundException(UncrustifyException):
    def __init__(self, message):
        super().__init__(message, -101)


class UncrustifyAppVersionErrorException(UncrustifyException):
    def __init__(self, message):
        super().__init__(message, -102)


class UncrustifyAppExecutionException(UncrustifyException):
    def __init__(self, message):
        super().__init__(message, -103)


class UncrustifyStalePluginFormattedFilesException(UncrustifyException):
    def __init__(self, message):
        super().__init__(message, -120)


class UncrustifyInputFileCreationErrorException(UncrustifyException):
    def __init__(self, message):
        super().__init__(message, -121)


class UncrustifyInvalidIgnoreStandardPathsException(UncrustifyException):
    def __init__(self, message):
        super().__init__(message, -122)


class UncrustifyGitIgnoreFileException(UncrustifyException):
    def __init__(self, message):
        super().__init__(message, -140)


class UncrustifyGitSubmoduleException(UncrustifyException):
    def __init__(self, message):
        super().__init__(message, -141)


class UncrustifyCheck(ICiBuildPlugin):
    """
    A CiBuildPlugin that uses Uncrustify to check the source files in the
    package being tested for coding standard issues.

    By default, the plugin runs against standard C source file extensions but
    its configuration can be modified through its configuration file.

    Configuration options:
    "UncrustifyCheck": {
        "AdditionalIncludePaths": [], # Additional paths to check formatting (wildcards supported).
        "AuditOnly": False,           # Don't fail the build if there are errors.  Just log them.
        "ConfigFilePath": "",         # Custom path to an Uncrustify config file.
        "IgnoreStandardPaths": [],    # Standard plugin-defined paths that should be ignored.
        "OutputFileDiffs": False,     # Output chunks of formatting diffs in the test case log.
                                      # This can significantly slow down the plugin on very large packages.
        "SkipGitExclusions": False    # Don't exclude git ignored files and files in git submodules.
    }
    """

    #
    # By default, use an "uncrustify.cfg" config file in the plugin directory.
    # A package can override this path via "ConfigFilePath".
    #
    # Note: Values specified via "ConfigFilePath" are relative to the package.
    #
    DEFAULT_CONFIG_FILE_PATH = os.path.join(
        pathlib.Path(__file__).parent.resolve(), "uncrustify.cfg")

    #
    # The extension used for formatted files produced by this plugin
    #
    FORMATTED_FILE_EXTENSION = ".uncrustify_plugin"

    #
    # A package can add any additional paths with "AdditionalIncludePaths".
    # A package can remove any of these paths with "IgnoreStandardPaths".
    #
    STANDARD_PLUGIN_DEFINED_PATHS = ("*.c", "*.h")

    #
    # The Uncrustify application path should be set in this environment variable
    #
    UNCRUSTIFY_PATH_ENV_KEY = "UNCRUSTIFY_CI_PATH"

    def GetTestName(self, packagename: str, environment: VarDict) -> Tuple:
        """ Provide the testcase name and classname for use in reporting

            Args:
              packagename: string containing name of package to build
              environment: The VarDict for the test to run in

            Returns:
                A tuple containing the testcase name and the classname
                (testcasename, classname)
                testclassname: a descriptive string for the testcase can include whitespace
                classname: should be patterned <packagename>.<plugin>.<optionally any unique condition>
        """
        return ("Check file coding standard compliance in " + packagename, packagename + ".UncrustifyCheck")

    def RunBuildPlugin(self, package_rel_path: str, edk2_path: Edk2Path, package_config: Dict[str, List[str]], environment_config: Any, plugin_manager: PluginManager, plugin_manager_helper: HelperFunctions, tc: JunitReportTestCase, output_stream=None) -> int:
        """
        External function of plugin.  This function is used to perform the task of the CiBuild Plugin.

        Args:
          - package_rel_path: edk2 workspace relative path to the package
          - edk2_path: Edk2Path object with workspace and packages paths
          - package_config: Dictionary with the package configuration
          - environment_config: Environment configuration
          - plugin_manager: Plugin Manager Instance
          - plugin_manager_helper: Plugin Manager Helper Instance
          - tc: JUnit test case
          - output_stream: The StringIO output stream from this plugin (logging)

        Returns:
          >0 : Number of errors found
           0 : Passed successfully
          -1 : Skipped for missing prereq
        """
        try:
            # Initialize plugin and check pre-requisites.
            self._initialize_environment_info(
                package_rel_path, edk2_path, package_config, tc)
            self._initialize_configuration()
            self._check_for_preexisting_formatted_files()

            # Log important context information.
            self._log_uncrustify_app_info()

            # Get template file contents if specified
            self._get_template_file_contents()

            # Create meta input files & directories
            self._create_temp_working_directory()
            self._create_uncrustify_file_list_file()

            self._run_uncrustify()

            # Post-execution actions.
            self._process_uncrustify_results()

        except UncrustifyException as e:
            self._tc.LogStdError(
                f"Uncrustify error {e.exit_code}. Details:\n\n{str(e)}")
            logging.warning(
                f"Uncrustify error {e.exit_code}. Details:\n\n{str(e)}")
            return -1
        else:
            if self._formatted_file_error_count > 0:
                if self._audit_only_mode:
                    logging.info(
                        "Setting test as skipped since AuditOnly is enabled")
                    self._tc.SetSkipped()
                    return -1
                else:
                    self._tc.SetFailed(
                        f"{self._plugin_name} failed due to {self._formatted_file_error_count} incorrectly formatted files.",
                        "CHECK_FAILED")
            else:
                self._tc.SetSuccess()
            return self._formatted_file_error_count
        finally:
            self._cleanup_temporary_formatted_files()
            self._cleanup_temporary_directory()

    def _initialize_configuration(self) -> None:
        """
        Initializes plugin configuration.
        """
        self._initialize_app_info()
        self._initialize_config_file_info()
        self._initialize_file_to_format_info()
        self._initialize_test_case_output_options()

    def _check_for_preexisting_formatted_files(self) -> None:
        """
        Checks if any formatted files from prior execution are present.

        Existence of such files is an unexpected condition.  This might result
        from an error that occurred during a previous run or a premature exit
        from a debug scenario.  In any case, the package should be clean before
        starting a new run.
        """
        pre_existing_formatted_file_count = len(
            [str(path.resolve()) for path in pathlib.Path(self._abs_package_path).rglob(f'*{UncrustifyCheck.FORMATTED_FILE_EXTENSION}')])

        if pre_existing_formatted_file_count > 0:
            raise UncrustifyStalePluginFormattedFilesException(
                f"{pre_existing_formatted_file_count} formatted files already exist. To prevent overwriting these files, please remove them before running this plugin.")

    def _cleanup_temporary_directory(self) -> None:
        """
        Cleans up the temporary directory used for this execution instance.

        This removes the directory and all files created during this instance.
        """
        if hasattr(self, '_working_dir'):
            self._remove_tree(self._working_dir)

    def _cleanup_temporary_formatted_files(self) -> None:
        """
        Cleans up the temporary formatted files produced by Uncrustify.

        This will recursively remove all formatted files generated by
        Uncrustify during this execution instance.
        """
        if hasattr(self, '_abs_package_path'):
            formatted_files = [str(path.resolve()) for path in pathlib.Path(
                self._abs_package_path).rglob(f'*{UncrustifyCheck.FORMATTED_FILE_EXTENSION}')]

            for formatted_file in formatted_files:
                os.remove(formatted_file)

    def _create_temp_working_directory(self) -> None:
        """
        Creates the temporary directory used for this execution instance.
        """
        self._working_dir = os.path.join(
            self._abs_workspace_path, "Build", ".pytool", "Plugin", f"{self._plugin_name}")

        try:
            pathlib.Path(self._working_dir).mkdir(parents=True, exist_ok=True)
        except OSError as e:
            raise UncrustifyInputFileCreationErrorException(
                f"Error creating plugin directory {self._working_dir}.\n\n{repr(e)}.")

    def _create_uncrustify_file_list_file(self) -> None:
        """
        Creates the file with the list of source files for Uncrustify to process.
        """
        self._app_input_file_path = os.path.join(
            self._working_dir, "uncrustify_file_list.txt")

        with open(self._app_input_file_path, 'w', encoding='utf8') as f:
            f.writelines("\n".join(self._abs_file_paths_to_format))

    def _execute_uncrustify(self) -> None:
        """
        Executes Uncrustify with the initialized configuration.
        """
        output = StringIO()
        self._app_exit_code = RunCmd(
            self._app_path,
            f"-c {self._app_config_file} -F {self._app_input_file_path} --if-changed --suffix {UncrustifyCheck.FORMATTED_FILE_EXTENSION}", outstream=output)
        self._app_output = output.getvalue().strip().splitlines()

    def _get_files_ignored_in_config(self):
        """
        Returns a function that returns true if a given file string path is
        ignored in the plugin configuration file and false otherwise.
        """
        ignored_files = []
        if "IgnoreFiles" in self._package_config:
            ignored_files = self._package_config["IgnoreFiles"]

        # Pass "Package configuration file" as the source file path since
        # the actual configuration file name is unknown to this plugin and
        # this provides a generic description of the file that provided
        # the ignore file content.
        #
        # This information is only used for reporting (not used here) and
        # the ignore lines are being passed directly as they are given to
        # this plugin.
        return parse_gitignore_lines(ignored_files, "Package configuration file", self._abs_package_path)

    def _get_git_ignored_paths(self) -> List[str]:
        """
        Returns a list of file absolute path strings to all files ignored in
        this git repository.

        If git is not found, an empty list will be returned.
        """
        if not shutil.which("git"):
            logging.warning(
                "Git is not found on this system. Git submodule paths will not be considered.")
            return []

        outstream_buffer = StringIO()
        exit_code = RunCmd("git", "ls-files --other",
                           workingdir=self._abs_workspace_path, outstream=outstream_buffer, logging_level=logging.NOTSET)
        if (exit_code != 0):
            raise UncrustifyGitIgnoreFileException(
                "An error occurred reading git ignore settings. This will prevent Uncrustify from running against the expected set of files.")

        # Note: This will potentially be a large list, but at least sorted
        rel_paths = outstream_buffer.getvalue().strip().splitlines()
        abs_paths = []
        for path in rel_paths:
            abs_paths.append(
                os.path.normpath(os.path.join(self._abs_workspace_path, path)))
        return abs_paths

    def _get_git_submodule_paths(self) -> List[str]:
        """
        Returns a list of directory absolute path strings to the root of each
        submodule in the workspace repository.

        If git is not found, an empty list will be returned.
        """
        if not shutil.which("git"):
            logging.warning(
                "Git is not found on this system. Git submodule paths will not be considered.")
            return []

        if os.path.isfile(os.path.join(self._abs_workspace_path, ".gitmodules")):
            logging.info(
                f".gitmodules file found. Excluding submodules in {self._package_name}.")

            outstream_buffer = StringIO()
            exit_code = RunCmd("git", "config --file .gitmodules --get-regexp path",
                               workingdir=self._abs_workspace_path, outstream=outstream_buffer, logging_level=logging.NOTSET)
            if (exit_code != 0):
                raise UncrustifyGitSubmoduleException(
                    ".gitmodules file detected but an error occurred reading the file. Cannot proceed with unknown submodule paths.")

            submodule_paths = []
            for line in outstream_buffer.getvalue().strip().splitlines():
                submodule_paths.append(
                    os.path.normpath(os.path.join(self._abs_workspace_path, line.split()[1])))

            return submodule_paths
        else:
            return []

    def _get_template_file_contents(self) -> None:
        """
        Gets the contents of Uncrustify template files if they are specified
        in the Uncrustify configuration file.
        """
        self._file_template_contents = None
        self._func_template_contents = None

        # Allow no value to allow "set" statements in the config file which do
        # not specify value assignment
        parser = configparser.ConfigParser(allow_no_value=True)
        with open(self._app_config_file, 'r') as cf:
            parser.read_string("[dummy_section]\n" + cf.read())

        try:
            file_template_name = parser["dummy_section"]["cmt_insert_file_header"]
            file_template_path = pathlib.Path(file_template_name)
            if not file_template_path.is_file():
                file_template_path = pathlib.Path(os.path.join(self._plugin_path, file_template_name))
            self._file_template_contents = file_template_path.read_text()
        except KeyError:
            logging.warning("A file header template is not specified in the config file.")
        except FileNotFoundError:
            logging.warning("The specified file header template file was not found.")

        try:
            func_template_name = parser["dummy_section"]["cmt_insert_func_header"]
            func_template_path = pathlib.Path(func_template_name)
            if not func_template_path.is_file():
                func_template_path = pathlib.Path(os.path.join(self._plugin_path, func_template_name))
            self._func_template_contents = func_template_path.read_text()
        except KeyError:
            logging.warning("A function header template is not specified in the config file.")
        except FileNotFoundError:
            logging.warning("The specified function header template file was not found.")

    def _initialize_app_info(self) -> None:
        """
        Initialize Uncrustify application information.

        This function will determine the application path and version.
        """
        # Verify Uncrustify is specified in the environment.
        if UncrustifyCheck.UNCRUSTIFY_PATH_ENV_KEY not in os.environ:
            raise UncrustifyAppEnvVarNotFoundException(
                f"Uncrustify environment variable {UncrustifyCheck.UNCRUSTIFY_PATH_ENV_KEY} is not present.")

        self._app_path = shutil.which('uncrustify', path=os.environ[UncrustifyCheck.UNCRUSTIFY_PATH_ENV_KEY])

        if self._app_path is None:
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT), self._app_path)

        self._app_path = os.path.normcase(os.path.normpath(self._app_path))

        if not os.path.isfile(self._app_path):
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT), self._app_path)

        # Verify Uncrustify is present at the expected path.
        return_buffer = StringIO()
        ret = RunCmd(self._app_path, "--version", outstream=return_buffer)
        if (ret != 0):
            raise UncrustifyAppVersionErrorException(
                f"Error occurred executing --version: {ret}.")

        # Log Uncrustify version information.
        self._app_version = return_buffer.getvalue().strip()
        self._tc.LogStdOut(f"Uncrustify version: {self._app_version}")
        version_aggregator.GetVersionAggregator().ReportVersion(
            "Uncrustify", self._app_version, version_aggregator.VersionTypes.INFO)

    def _initialize_config_file_info(self) -> None:
        """
        Initialize Uncrustify configuration file info.

        The config file path is relative to the package root.
        """
        self._app_config_file = UncrustifyCheck.DEFAULT_CONFIG_FILE_PATH
        if "ConfigFilePath" in self._package_config:
            self._app_config_file = self._package_config["ConfigFilePath"].strip()
            self._app_config_file = os.path.normpath(
                os.path.join(self._abs_package_path, self._app_config_file))

        if not os.path.isfile(self._app_config_file):
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT), self._app_config_file)

    def _initialize_environment_info(self, package_rel_path: str, edk2_path: Edk2Path, package_config: Dict[str, List[str]], tc: JunitReportTestCase) -> None:
        """
        Initializes plugin environment information.
        """
        self._abs_package_path = edk2_path.GetAbsolutePathOnThisSystemFromEdk2RelativePath(
            package_rel_path)
        self._abs_workspace_path = edk2_path.WorkspacePath
        self._package_config = package_config
        self._package_name = os.path.basename(
            os.path.normpath(package_rel_path))
        self._plugin_name = self.__class__.__name__
        self._plugin_path = os.path.dirname(os.path.realpath(__file__))
        self._rel_package_path = package_rel_path
        self._tc = tc

    def _initialize_file_to_format_info(self) -> None:
        """
        Forms the list of source files for Uncrustify to process.
        """
        # Create a list of all the package relative file paths in the package to run against Uncrustify.
        rel_file_paths_to_format = list(
            UncrustifyCheck.STANDARD_PLUGIN_DEFINED_PATHS)

        # Allow the ci.yaml to remove any of the pre-defined standard paths
        if "IgnoreStandardPaths" in self._package_config:
            for a in self._package_config["IgnoreStandardPaths"]:
                if a.strip() in rel_file_paths_to_format:
                    self._tc.LogStdOut(
                        f"Ignoring standard path due to ci.yaml ignore: {a}")
                    rel_file_paths_to_format.remove(a.strip())
                else:
                    raise UncrustifyInvalidIgnoreStandardPathsException(f"Invalid IgnoreStandardPaths value: {a}")

        # Allow the ci.yaml to specify additional include paths for this package
        if "AdditionalIncludePaths" in self._package_config:
            rel_file_paths_to_format.extend(
                self._package_config["AdditionalIncludePaths"])

        self._abs_file_paths_to_format = []
        for path in rel_file_paths_to_format:
            self._abs_file_paths_to_format.extend(
                [str(path.resolve()) for path in pathlib.Path(self._abs_package_path).rglob(path)])

        # Remove files ignored in the plugin configuration file
        plugin_ignored_files = list(filter(self._get_files_ignored_in_config(), self._abs_file_paths_to_format))

        if plugin_ignored_files:
            logging.info(
                f"{self._package_name} file count before plugin ignore file exclusion: {len(self._abs_file_paths_to_format)}")
            for path in plugin_ignored_files:
                if path in self._abs_file_paths_to_format:
                    logging.info(f"  File ignored in plugin config file: {path}")
                    self._abs_file_paths_to_format.remove(path)
            logging.info(
                f"{self._package_name} file count after plugin ignore file exclusion: {len(self._abs_file_paths_to_format)}")

        if "SkipGitExclusions" not in self._package_config or not self._package_config["SkipGitExclusions"]:
            # Remove files ignored by git
            logging.info(
                f"{self._package_name} file count before git ignore file exclusion: {len(self._abs_file_paths_to_format)}")

            ignored_paths = self._get_git_ignored_paths()
            self._abs_file_paths_to_format = list(
                set(self._abs_file_paths_to_format).difference(ignored_paths))

            logging.info(
                f"{self._package_name} file count after git ignore file exclusion: {len(self._abs_file_paths_to_format)}")

            # Remove files in submodules
            logging.info(
                f"{self._package_name} file count before submodule exclusion: {len(self._abs_file_paths_to_format)}")

            submodule_paths = tuple(self._get_git_submodule_paths())
            for path in submodule_paths:
                logging.info(f"  submodule path: {path}")

            self._abs_file_paths_to_format = [
                f for f in self._abs_file_paths_to_format if not f.startswith(submodule_paths)]

            logging.info(
                f"{self._package_name} file count after submodule exclusion: {len(self._abs_file_paths_to_format)}")

        # Sort the files for more consistent results
        self._abs_file_paths_to_format.sort()

    def _initialize_test_case_output_options(self) -> None:
        """
        Initializes options that influence test case output.
        """
        self._audit_only_mode = False
        self._output_file_diffs = True

        if "AuditOnly" in self._package_config and self._package_config["AuditOnly"]:
            self._audit_only_mode = True

        if "OutputFileDiffs" in self._package_config and not self._package_config["OutputFileDiffs"]:
            self._output_file_diffs = False

    def _log_uncrustify_app_info(self) -> None:
        """
        Logs Uncrustify application information.
        """
        self._tc.LogStdOut(f"Found Uncrustify at {self._app_path}")
        self._tc.LogStdOut(f"Uncrustify version: {self._app_version}")
        self._tc.LogStdOut('\n')
        logging.info(f"Found Uncrustify at {self._app_path}")
        logging.info(f"Uncrustify version: {self._app_version}")
        logging.info('\n')

    def _process_uncrustify_results(self) -> None:
        """
        Process the results from Uncrustify.

        Determines whether formatting errors are present and logs failures.
        """
        formatted_files = [str(path.resolve()) for path in pathlib.Path(
            self._abs_package_path).rglob(f'*{UncrustifyCheck.FORMATTED_FILE_EXTENSION}')]

        self._formatted_file_error_count = len(formatted_files)

        if self._formatted_file_error_count > 0:
            logging.error(
                "Visit the following instructions to learn "
                "how to find the detailed formatting errors in Azure "
                "DevOps CI: "
                "https://github.com/tianocore/tianocore.github.io/wiki/EDK-II-Code-Formatting#how-to-find-uncrustify-formatting-errors-in-continuous-integration-ci")
            self._tc.LogStdError("Files with formatting errors:\n")

            if self._output_file_diffs:
                logging.info("Calculating file diffs. This might take a while...")

        for formatted_file in formatted_files:
            pre_formatted_file = formatted_file[:-len(UncrustifyCheck.FORMATTED_FILE_EXTENSION)]
            logging.error(pre_formatted_file)

            if (self._output_file_diffs or
                    self._file_template_contents is not None or
                    self._func_template_contents is not None):
                self._tc.LogStdError(
                    f"Formatting errors in {os.path.relpath(pre_formatted_file, self._abs_package_path)}\n")

                with open(formatted_file) as ff:
                    formatted_file_text = ff.read()

                if (self._file_template_contents is not None and
                        self._file_template_contents in formatted_file_text):
                    self._tc.LogStdError(f"File header is missing in {os.path.relpath(pre_formatted_file, self._abs_package_path)}\n")

                if (self._func_template_contents is not None and
                        self._func_template_contents in formatted_file_text):
                    self._tc.LogStdError(f"A function header is missing in {os.path.relpath(pre_formatted_file, self._abs_package_path)}\n")

                if self._output_file_diffs:
                    with open(pre_formatted_file) as pf:
                        pre_formatted_file_text = pf.read()

                    for line in difflib.unified_diff(pre_formatted_file_text.split('\n'), formatted_file_text.split('\n'), fromfile=pre_formatted_file, tofile=formatted_file, n=3):
                        self._tc.LogStdError(line)

                self._tc.LogStdError('\n')
            else:
                self._tc.LogStdError(pre_formatted_file)

    def _remove_tree(self, dir_path: str, ignore_errors: bool = False) -> None:
        """
        Helper for removing a directory.  Over time there have been many
        private implementations of this due to reliability issues in the
        shutil implementations.  To consolidate on a single function this
        helper is added.

        On error try to change file attributes.  Also add retry logic.

        This function is temporarily borrowed from edk2toollib.utility_functions
        since the version used in edk2 is not recent enough to include the
        function.

        This function should be replaced by "RemoveTree" when it is available.

        Args:
          - dir_path: Path to directory to remove.
          - ignore_errors: Whether to ignore errors during removal
        """

        def _remove_readonly(func, path, _):
            """
            Private function to attempt to change permissions on file/folder being deleted.
            """
            os.chmod(path, stat.S_IWRITE)
            func(path)

        for _ in range(3):  # retry up to 3 times
            try:
                shutil.rmtree(dir_path, ignore_errors=ignore_errors, onerror=_remove_readonly)
            except OSError as err:
                logging.warning(f"Failed to fully remove {dir_path}: {err}")
            else:
                break
        else:
            raise RuntimeError(f"Failed to remove {dir_path}")

    def _run_uncrustify(self) -> None:
        """
        Runs Uncrustify for this instance of plugin execution.
        """
        logging.info("Executing Uncrustify. This might take a while...")
        start_time = timeit.default_timer()
        self._execute_uncrustify()
        end_time = timeit.default_timer() - start_time

        execution_summary = f"Uncrustify executed against {len(self._abs_file_paths_to_format)} files in {self._package_name} in {end_time:.2f} seconds.\n"

        self._tc.LogStdOut(execution_summary)
        logging.info(execution_summary)

        if self._app_exit_code != 0 and self._app_exit_code != 1:
            raise UncrustifyAppExecutionException(
                f"Error {str(self._app_exit_code)} returned from Uncrustify:\n\n{str(self._app_output)}")
edk2-master
.pytool/Plugin/UncrustifyCheck/UncrustifyCheck.py
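The configuration keys documented in the UncrustifyCheck class docstring map directly onto the package_config dictionary handed to RunBuildPlugin. A hedged sketch of a package configuration that narrows the checked paths; every value below is illustrative, not taken from any real package:

# Illustrative UncrustifyCheck package configuration (all values hypothetical).
# Keys correspond to the options documented in the class docstring above.
package_config = {
    "UncrustifyCheck": {
        "AdditionalIncludePaths": ["*.cpp"],  # also format C++ sources
        "AuditOnly": True,                    # log errors, do not fail CI
        "ConfigFilePath": "uncrustify.cfg",   # package-relative override
        "IgnoreStandardPaths": ["*.h"],       # drop the standard header glob
        "OutputFileDiffs": False,             # suppress diff chunks in the log
        "SkipGitExclusions": False,
    }
}

# The plugin derives the original file name from a formatted artifact by
# stripping the plugin suffix, as _process_uncrustify_results does above:
formatted = "MyPkg/Driver/Driver.c.uncrustify_plugin"
original = formatted[: -len(".uncrustify_plugin")]
print(original)  # MyPkg/Driver/Driver.c

Note that "IgnoreStandardPaths" entries must match one of the standard globs exactly ("*.c" or "*.h"); any other value raises UncrustifyInvalidIgnoreStandardPathsException.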
# @file EccCheck.py
#
# Copyright (c) 2021, Arm Limited. All rights reserved.<BR>
# Copyright (c) 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##

import os
import shutil
import re
import csv
import xml.dom.minidom
from typing import List, Dict, Tuple
import logging
from io import StringIO
from edk2toolext.environment import shell_environment
from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin
from edk2toolext.environment.var_dict import VarDict
from edk2toollib.utility_functions import RunCmd


class EccCheck(ICiBuildPlugin):
    """
    A CiBuildPlugin that finds the Ecc issues of newly added code in pull request.

    Configuration options:
    "EccCheck": {
        "ExceptionList": [],
        "IgnoreFiles": []
    },
    """

    FindModifyFile = re.compile(r'\+\+\+ b\/(.*)')
    LineScopePattern = (r'@@ -\d*\,*\d* \+\d*\,*\d* @@.*')
    LineNumRange = re.compile(r'@@ -\d*\,*\d* \+(\d*)\,*(\d*) @@.*')

    def GetTestName(self, packagename: str, environment: VarDict) -> tuple:
        """ Provide the testcase name and classname for use in reporting
            testclassname: a descriptive string for the testcase can include whitespace
            classname: should be patterned <packagename>.<plugin>.<optionally any unique condition>

            Args:
              packagename: string containing name of package to build
              environment: The VarDict for the test to run in
            Returns:
                a tuple containing the testcase name and the classname
                (testcasename, classname)
        """
        return ("Check for efi coding style for " + packagename, packagename + ".EccCheck")

    ##
    # External function of plugin.  This function is used to perform the task of the ci_build_plugin Plugin
    #
    #   - package is the edk2 path to package.  This means workspace/packagepath relative.
    #   - edk2path object configured with workspace and packages path
    #   - PkgConfig Object (dict) for the pkg
    #   - EnvConfig Object
    #   - Plugin Manager Instance
    #   - Plugin Helper Obj Instance
    #   - Junit Logger
    #   - output_stream the StringIO output stream from this plugin via logging
    def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None):
        workspace_path = Edk2pathObj.WorkspacePath
        basetools_path = environment.GetValue("EDK_TOOLS_PATH")
        python_path = os.path.join(basetools_path, "Source", "Python")
        env = shell_environment.GetEnvironment()
        env.set_shell_var('PYTHONPATH', python_path)
        env.set_shell_var('WORKSPACE', workspace_path)
        env.set_shell_var('PACKAGES_PATH', os.pathsep.join(Edk2pathObj.PackagePathList))
        self.ECC_PASS = True

        abs_pkg_path = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(packagename)

        if abs_pkg_path is None:
            tc.SetSkipped()
            tc.LogStdError("No Package folder {0}".format(abs_pkg_path))
            return 0

        # Create temp directory
        temp_path = os.path.join(workspace_path, 'Build', '.pytool', 'Plugin', 'EccCheck')
        try:
            # Delete temp directory
            if os.path.exists(temp_path):
                shutil.rmtree(temp_path)
            # Copy package being scanned to temp_path
            shutil.copytree(
                abs_pkg_path,
                os.path.join(temp_path, packagename),
                symlinks=True
                )
            # Copy exception.xml to temp_path
            shutil.copyfile(
                os.path.join(basetools_path, "Source", "Python", "Ecc", "exception.xml"),
                os.path.join(temp_path, "exception.xml")
                )
            # Output file to use for git diff operations
            temp_diff_output = os.path.join(temp_path, 'diff.txt')

            self.ApplyConfig(pkgconfig, temp_path, packagename)
            modify_dir_list = self.GetModifyDir(packagename, temp_diff_output)
            patch = self.GetDiff(packagename, temp_diff_output)
            ecc_diff_range = self.GetDiffRange(patch, packagename, temp_path)
            #
            # Use temp_path as working directory when running ECC tool
            #
            self.GenerateEccReport(modify_dir_list, ecc_diff_range, temp_path, basetools_path)
            ecc_log = os.path.join(temp_path, "Ecc.log")
            if self.ECC_PASS:
                # Delete temp directory
                if os.path.exists(temp_path):
                    shutil.rmtree(temp_path)
                tc.SetSuccess()
                return 0
            else:
                with open(ecc_log, encoding='utf8') as output:
                    ecc_output = output.readlines()
                    for line in ecc_output:
                        logging.error(line.strip())
                # Delete temp directory
                if os.path.exists(temp_path):
                    shutil.rmtree(temp_path)
                tc.SetFailed("EccCheck failed for {0}".format(packagename), "CHECK FAILED")
                return 1
        except KeyboardInterrupt:
            # If EccCheck is interrupted by a keyboard interrupt, then return failure
            # Delete temp directory
            if os.path.exists(temp_path):
                shutil.rmtree(temp_path)
            tc.SetFailed("EccCheck interrupted for {0}".format(packagename), "CHECK FAILED")
            return 1
        except Exception:
            # If EccCheck fails for any other exception type, raise the exception
            # Delete temp directory
            if os.path.exists(temp_path):
                shutil.rmtree(temp_path)
            tc.SetFailed("EccCheck exception for {0}".format(packagename), "CHECK FAILED")
            raise

    def GetDiff(self, pkg: str, temp_diff_output: str) -> List[str]:
        patch = []
        #
        # Generate unified diff between origin/master and HEAD.
        #
        params = "diff --output={} --unified=0 origin/master HEAD".format(temp_diff_output)
        RunCmd("git", params)
        with open(temp_diff_output) as file:
            patch = file.read().strip().split('\n')
        return patch

    def GetModifyDir(self, pkg: str, temp_diff_output: str) -> List[str]:
        #
        # Generate diff between origin/master and HEAD using --diff-filter to
        # exclude deleted and renamed files that do not need to be scanned by
        # ECC.  Also use --name-status to only generate the names of the files
        # with differences.  The output format of this git diff command is a
        # list of files with the change status and the filename.  The filename
        # is always at the end of the line.  Examples:
        #
        #   M       MdeModulePkg/Application/CapsuleApp/CapsuleApp.h
        #   M       MdeModulePkg/Application/UiApp/FrontPage.h
        #
        params = "diff --output={} --diff-filter=dr --name-status origin/master HEAD".format(temp_diff_output)
        RunCmd("git", params)
        dir_list = []
        with open(temp_diff_output) as file:
            dir_list = file.read().strip().split('\n')

        modify_dir_list = []
        for modify_dir in dir_list:
            #
            # Parse file name from the end of the line
            #
            file_path = modify_dir.strip().split()
            #
            # Skip lines that do not have at least 2 elements (status and file name)
            #
            if len(file_path) < 2:
                continue
            #
            # Parse the directory name from the file name
            #
            file_dir = os.path.dirname(file_path[-1])
            #
            # Skip directory names that do not start with the package being scanned.
            #
            if file_dir.split('/')[0] != pkg:
                continue
            #
            # Skip directory names that are identical to the package being scanned.
            # The assumption here is that there are no source files at the package
            # root.  Instead, the only expected files in the package root are
            # EDK II meta data files (DEC, DSC, FDF).
            #
            if file_dir == pkg:
                continue
            #
            # Skip directory names that are already in the modified dir list
            #
            if file_dir in modify_dir_list:
                continue
            #
            # Add the candidate directory to scan to the modified dir list
            #
            modify_dir_list.append(file_dir)

        #
        # Remove duplicates from modify_dir_list.
        # Given a folder path, ECC performs a recursive scan of that folder.
        # If a parent and child folder are both present in modify_dir_list,
        # then ECC will perform redundant scans of source files.  In order
        # to prevent redundant scans, if a parent and child folder are both
        # present, then remove all the child folders.
        #
        # For example, if modify_dir_list contains the following elements:
        #   MdeModulePkg/Core/Dxe
        #   MdeModulePkg/Core/Dxe/Hand
        #   MdeModulePkg/Core/Dxe/Mem
        #
        # Then MdeModulePkg/Core/Dxe/Hand and MdeModulePkg/Core/Dxe/Mem should
        # be removed because the files in those folders are covered by a scan
        # of MdeModulePkg/Core/Dxe.
        #
        filtered_list = []
        for dir1 in modify_dir_list:
            Append = True
            for dir2 in modify_dir_list:
                if dir1 == dir2:
                    continue
                common = os.path.commonpath([dir1, dir2])
                if os.path.normpath(common) == os.path.normpath(dir2):
                    Append = False
                    break
            if Append and dir1 not in filtered_list:
                filtered_list.append(dir1)
        return filtered_list

    def GetDiffRange(self, patch_diff: List[str], pkg: str, temp_path: str) -> Dict[str, List[Tuple[int, int]]]:
        IsDelete = True
        StartCheck = False
        range_directory: Dict[str, List[Tuple[int, int]]] = {}
        for line in patch_diff:
            modify_file = self.FindModifyFile.findall(line)
            if modify_file and pkg in modify_file[0] and not StartCheck and os.path.isfile(modify_file[0]):
                modify_file_comment_dic = self.GetCommentRange(modify_file[0], temp_path)
                IsDelete = False
                StartCheck = True
                modify_file_dic = modify_file[0]
                modify_file_dic = modify_file_dic.replace("/", os.sep)
                range_directory[modify_file_dic] = []
            elif line.startswith('--- '):
                StartCheck = False
            elif re.match(self.LineScopePattern, line, re.I) and not IsDelete and StartCheck:
                start_line = self.LineNumRange.search(line).group(1)
                line_range = self.LineNumRange.search(line).group(2)
                if not line_range:
                    line_range = '1'
                range_directory[modify_file_dic].append((int(start_line), int(start_line) + int(line_range) - 1))
                for i in modify_file_comment_dic:
                    if int(i[0]) <= int(start_line) <= int(i[1]):
                        range_directory[modify_file_dic].append(i)
        return range_directory

    def GetCommentRange(self, modify_file: str, temp_path: str) -> List[Tuple[int, int]]:
        comment_range: List[Tuple[int, int]] = []
        modify_file_path = os.path.join(temp_path, modify_file)
        if not os.path.exists(modify_file_path):
            return comment_range
        with open(modify_file_path) as f:
            line_no = 1
            Start = False
            for line in f:
                if line.startswith('/**'):
                    start_no = line_no
                    Start = True
                if line.startswith('**/') and Start:
                    end_no = line_no
                    Start = False
                    comment_range.append((int(start_no), int(end_no)))
                line_no += 1

        if comment_range and comment_range[0][0] == 1:
            del comment_range[0]
        return comment_range

    def GenerateEccReport(self, modify_dir_list: List[str], ecc_diff_range: Dict[str, List[Tuple[int, int]]],
                          temp_path: str, basetools_path: str) -> None:
        ecc_need = False
        ecc_run = True
        config = os.path.normpath(os.path.join(basetools_path, "Source", "Python", "Ecc", "config.ini"))
        exception = os.path.normpath(os.path.join(temp_path, "exception.xml"))
        report = os.path.normpath(os.path.join(temp_path, "Ecc.csv"))
        for modify_dir in modify_dir_list:
            target = os.path.normpath(os.path.join(temp_path, modify_dir))
            logging.info('Run ECC tool for the commit in %s' % modify_dir)
            ecc_need = True
            ecc_params = "-c {0} -e {1} -t {2} -r {3}".format(config, exception, target, report)
            return_code = RunCmd("Ecc", ecc_params, workingdir=temp_path)
            if return_code != 0:
                ecc_run = False
                break
        if not ecc_run:
            logging.error('Failed to run ECC tool')
        self.ParseEccReport(ecc_diff_range, temp_path)

        if not ecc_need:
            logging.info("No need to run the ECC check")
        return

    def ParseEccReport(self, ecc_diff_range: Dict[str, List[Tuple[int, int]]], temp_path: str) -> None:
        ecc_log = os.path.join(temp_path, "Ecc.log")
        ecc_csv = os.path.join(temp_path, "Ecc.csv")
        row_lines = []
        ignore_error_code = self.GetIgnoreErrorCode()
        if os.path.exists(ecc_csv):
            with open(ecc_csv) as csv_file:
                reader = csv.reader(csv_file)
                for row in reader:
                    for modify_file in ecc_diff_range:
                        if modify_file in row[3]:
                            for i in ecc_diff_range[modify_file]:
                                line_no = int(row[4])
                                if i[0] <= line_no <= i[1] and row[1] not in ignore_error_code:
                                    row[0] = '\nEFI coding style error'
                                    row[1] = 'Error code: ' + row[1]
                                    row[3] = 'file: ' + row[3]
                                    row[4] = 'Line number: ' + row[4]
                                    row_line = '\n *'.join(row)
                                    row_lines.append(row_line)
                                    break
                            break
        if row_lines:
            self.ECC_PASS = False

        with open(ecc_log, 'a') as log:
            all_line = '\n'.join(row_lines)
            all_line = all_line + '\n'
            log.writelines(all_line)
        return

    def ApplyConfig(self, pkgconfig: Dict[str, List[str]], temp_path: str, pkg: str) -> None:
        if "IgnoreFiles" in pkgconfig:
            for a in pkgconfig["IgnoreFiles"]:
                a = os.path.join(temp_path, pkg, a)
                a = a.replace(os.sep, "/")

                logging.info("Ignoring Files {0}".format(a))
                if os.path.exists(a):
                    if os.path.isfile(a):
                        os.remove(a)
                    elif os.path.isdir(a):
                        shutil.rmtree(a)
                else:
                    logging.error("EccCheck.IgnoreFiles -> {0} not found in filesystem.  Invalid ignore files".format(a))

        if "ExceptionList" in pkgconfig:
            exception_list = pkgconfig["ExceptionList"]
            exception_xml = os.path.join(temp_path, "exception.xml")
            try:
                logging.info("Appending exceptions")
                self.AppendException(exception_list, exception_xml)
            except Exception as e:
                logging.error("Failed to apply exceptions")
                raise e
        return

    def AppendException(self, exception_list: List[str], exception_xml: str) -> None:
        error_code_list = exception_list[::2]
        keyword_list = exception_list[1::2]
        dom_tree = xml.dom.minidom.parse(exception_xml)
        root_node = dom_tree.documentElement
        for error_code, keyword in zip(error_code_list, keyword_list):
            customer_node = dom_tree.createElement("Exception")
            keyword_node = dom_tree.createElement("KeyWord")
            keyword_node_text_value = dom_tree.createTextNode(keyword)
            keyword_node.appendChild(keyword_node_text_value)
            customer_node.appendChild(keyword_node)
            error_code_node = dom_tree.createElement("ErrorID")
            error_code_text_value = dom_tree.createTextNode(error_code)
            error_code_node.appendChild(error_code_text_value)
            customer_node.appendChild(error_code_node)
            root_node.appendChild(customer_node)
        with open(exception_xml, 'w') as f:
            dom_tree.writexml(f, indent='', addindent='', newl='\n', encoding='UTF-8')
        return

    def GetIgnoreErrorCode(self) -> set:
        """
        The following error codes are reliable when ECC scans a complete edk2
        tree, but the EccCheck plugin only scans the changed portion of a
        package, so these codes always produce false positives here.  The
        mapping of error codes to error messages is listed in
        BaseTools/Source/Python/Ecc/EccToolError.py.
        """
        ignore_error_code = {
            "10000",
            "10001",
            "10002",
            "10003",
            "10004",
            "10005",
            "10006",
            "10007",
            "10008",
            "10009",
            "10010",
            "10011",
            "10012",
            "10013",
            "10015",
            "10016",
            "10017",
            "10022",
        }
        return ignore_error_code
edk2-master
.pytool/Plugin/EccCheck/EccCheck.py
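AppendException above consumes "ExceptionList" as a flat list of alternating error codes and keywords (exception_list[::2] and exception_list[1::2]). A minimal sketch of a matching package configuration; the codes and keywords below are illustrative placeholders, not entries from any real package:

# Illustrative EccCheck package configuration (values hypothetical).
# ExceptionList alternates an ECC error code with the keyword it exempts,
# matching the [::2] / [1::2] slicing in AppendException above.
package_config = {
    "EccCheck": {
        "ExceptionList": [
            "8005", "gExampleTokenSpaceGuid",  # code, then keyword
            "8001", "ExampleFunctionName",
        ],
        "IgnoreFiles": ["Library/ExampleLib"],
    }
}

codes = package_config["EccCheck"]["ExceptionList"][::2]
keywords = package_config["EccCheck"]["ExceptionList"][1::2]
for code, keyword in zip(codes, keywords):
    print(code, "->", keyword)

Because the list is positional, an odd number of entries silently drops the final element (zip stops at the shorter slice), so exceptions should always be added in complete code/keyword pairs.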
# @file DscCompleteCheck.py
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import logging
import os
from edk2toolext.environment.plugintypes.ci_build_plugin import ICiBuildPlugin
from edk2toollib.uefi.edk2.parsers.dsc_parser import DscParser
from edk2toollib.uefi.edk2.parsers.inf_parser import InfParser
from edk2toolext.environment.var_dict import VarDict


class DscCompleteCheck(ICiBuildPlugin):
    """
    A CiBuildPlugin that scans the package dsc file and confirms all modules
    (inf files) are listed in the components sections.

    Configuration options:
    "DscCompleteCheck": {
        "DscPath": "<path to dsc from root of pkg>",
        "IgnoreInf": []  # Ignore an INF that is found in the filesystem but not in the DSC
    }
    """

    def GetTestName(self, packagename: str, environment: VarDict) -> tuple:
        """ Provide the testcase name and classname for use in reporting

            Args:
              packagename: string containing name of package to build
              environment: The VarDict for the test to run in
            Returns:
                a tuple containing the testcase name and the classname
                (testcasename, classname)
                testclassname: a descriptive string for the testcase can include whitespace
                classname: should be patterned <packagename>.<plugin>.<optionally any unique condition>
        """
        return ("Check the " + packagename + " DSC for being complete", packagename + ".DscCompleteCheck")

    ##
    # External function of plugin.  This function is used to perform the task of the MuBuild Plugin
    #
    #   - package is the edk2 path to package.  This means workspace/packagepath relative.
    #   - edk2path object configured with workspace and packages path
    #   - PkgConfig Object (dict) for the pkg
    #   - VarDict containing the shell environment Build Vars
    #   - Plugin Manager Instance
    #   - Plugin Helper Obj Instance
    #   - Junit Logger
    #   - output_stream the StringIO output stream from this plugin via logging
    def RunBuildPlugin(self, packagename, Edk2pathObj, pkgconfig, environment, PLM, PLMHelper, tc, output_stream=None):
        overall_status = 0

        # Parse the config for required DscPath element
        if "DscPath" not in pkgconfig:
            tc.SetSkipped()
            tc.LogStdError(
                "DscPath not found in config file.  Nothing to check.")
            return -1

        abs_pkg_path = Edk2pathObj.GetAbsolutePathOnThisSystemFromEdk2RelativePath(
            packagename)
        abs_dsc_path = os.path.join(abs_pkg_path, pkgconfig["DscPath"].strip())
        wsr_dsc_path = Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(
            abs_dsc_path)

        if abs_dsc_path is None or wsr_dsc_path == "" or not os.path.isfile(abs_dsc_path):
            tc.SetSkipped()
            tc.LogStdError("Package Dsc not found")
            return 0

        # Get INF Files
        INFFiles = self.WalkDirectoryForExtension([".inf"], abs_pkg_path)
        INFFiles = [Edk2pathObj.GetEdk2RelativePathFromAbsolutePath(
            x) for x in INFFiles]  # make edk2relative path so can compare with DSC

        # remove ignores
        if "IgnoreInf" in pkgconfig:
            for a in pkgconfig["IgnoreInf"]:
                a = a.replace(os.sep, "/")
                try:
                    tc.LogStdOut("Ignoring INF {0}".format(a))
                    INFFiles.remove(a)
                except ValueError:
                    tc.LogStdError(
                        "DscCompleteCheck.IgnoreInf -> {0} not found in filesystem.  Invalid ignore file".format(a))
                    logging.info(
                        "DscCompleteCheck.IgnoreInf -> {0} not found in filesystem.  Invalid ignore file".format(a))

        # DSC Parser
        dp = DscParser()
        dp.SetBaseAbsPath(Edk2pathObj.WorkspacePath)
        dp.SetPackagePaths(Edk2pathObj.PackagePathList)
        dp.SetInputVars(environment.GetAllBuildKeyValues())
        dp.ParseFile(wsr_dsc_path)

        # Check if INF in component section
        for INF in INFFiles:
            if not any(INF.strip() in x for x in dp.ThreeMods) and \
               not any(INF.strip() in x for x in dp.SixMods) and \
               not any(INF.strip() in x for x in dp.OtherMods):

                infp = InfParser().SetBaseAbsPath(Edk2pathObj.WorkspacePath)
                infp.SetPackagePaths(Edk2pathObj.PackagePathList)
                infp.ParseFile(INF)
                if ("MODULE_TYPE" not in infp.Dict):
                    tc.LogStdOut(
                        "Ignoring INF.  Missing key for MODULE_TYPE {0}".format(INF))
                    continue

                if (infp.Dict["MODULE_TYPE"] == "HOST_APPLICATION"):
                    tc.LogStdOut(
                        "Ignoring INF.  Module type is HOST_APPLICATION {0}".format(INF))
                    continue

                if len(infp.SupportedPhases) == 1 and \
                   "HOST_APPLICATION" in infp.SupportedPhases:
                    tc.LogStdOut(
                        "Ignoring Library INF due to only supporting type HOST_APPLICATION {0}".format(INF))
                    continue

                logging.critical(INF + " not in " + wsr_dsc_path)
                tc.LogStdError("{0} not in {1}".format(INF, wsr_dsc_path))
                overall_status = overall_status + 1

        # If XML object exists, add result
        if overall_status != 0:
            tc.SetFailed("DscCompleteCheck {0} Failed.  Errors {1}".format(
                wsr_dsc_path, overall_status), "CHECK_FAILED")
        else:
            tc.SetSuccess()
        return overall_status
edk2-master
.pytool/Plugin/DscCompleteCheck/DscCompleteCheck.py
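DscCompleteCheck requires a "DscPath" entry (relative to the package root) and skips the check entirely when it is absent. "IgnoreInf" entries are compared against edk2-relative INF paths after path separators are normalized to "/". A hedged sketch of a matching configuration; the package and file names are hypothetical:

# Illustrative DscCompleteCheck package configuration (paths hypothetical).
# DscPath is required and is relative to the package root.  IgnoreInf lists
# edk2-relative INF paths intentionally absent from the DSC components section.
package_config = {
    "DscCompleteCheck": {
        "DscPath": "MyPkg.dsc",
        "IgnoreInf": ["MyPkg/Test/HostTestApp.inf"],
    }
}

An IgnoreInf entry that does not exactly match a discovered INF path is reported as an invalid ignore entry rather than silently dropped, which helps catch stale entries after files are moved or deleted.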
## @ SplitFspBin.py # # Copyright (c) 2015 - 2022, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # ## import os import sys import uuid import copy import struct import argparse from ctypes import * from functools import reduce """ This utility supports some operations for Intel FSP 1.x/2.x image. It supports: - Display FSP 1.x/2.x information header - Split FSP 2.x image into individual FSP-T/M/S/O component - Rebase FSP 1.x/2.x components to a different base address - Generate FSP 1.x/2.x mapping C header file """ CopyRightHeaderFile = """/* * * Automatically generated file; DO NOT EDIT. * FSP mapping file * */ """ class c_uint24(Structure): """Little-Endian 24-bit Unsigned Integer""" _pack_ = 1 _fields_ = [('Data', (c_uint8 * 3))] def __init__(self, val=0): self.set_value(val) def __str__(self, indent=0): return '0x%.6x' % self.value def __int__(self): return self.get_value() def set_value(self, val): self.Data[0:3] = Val2Bytes(val, 3) def get_value(self): return Bytes2Val(self.Data[0:3]) value = property(get_value, set_value) class EFI_FIRMWARE_VOLUME_HEADER(Structure): _fields_ = [ ('ZeroVector', ARRAY(c_uint8, 16)), ('FileSystemGuid', ARRAY(c_uint8, 16)), ('FvLength', c_uint64), ('Signature', ARRAY(c_char, 4)), ('Attributes', c_uint32), ('HeaderLength', c_uint16), ('Checksum', c_uint16), ('ExtHeaderOffset', c_uint16), ('Reserved', c_uint8), ('Revision', c_uint8) ] class EFI_FIRMWARE_VOLUME_EXT_HEADER(Structure): _fields_ = [ ('FvName', ARRAY(c_uint8, 16)), ('ExtHeaderSize', c_uint32) ] class EFI_FFS_INTEGRITY_CHECK(Structure): _fields_ = [ ('Header', c_uint8), ('File', c_uint8) ] class EFI_FFS_FILE_HEADER(Structure): _fields_ = [ ('Name', ARRAY(c_uint8, 16)), ('IntegrityCheck', EFI_FFS_INTEGRITY_CHECK), ('Type', c_uint8), ('Attributes', c_uint8), ('Size', c_uint24), ('State', c_uint8) ] class EFI_COMMON_SECTION_HEADER(Structure): _fields_ = [ ('Size', c_uint24), ('Type', c_uint8) ] class FSP_COMMON_HEADER(Structure): _fields_ = [ ('Signature', ARRAY(c_char, 4)), ('HeaderLength', c_uint32) ] class FSP_INFORMATION_HEADER(Structure): _fields_ = [ ('Signature', ARRAY(c_char, 4)), ('HeaderLength', c_uint32), ('Reserved1', c_uint16), ('SpecVersion', c_uint8), ('HeaderRevision', c_uint8), ('ImageRevision', c_uint32), ('ImageId', ARRAY(c_char, 8)), ('ImageSize', c_uint32), ('ImageBase', c_uint32), ('ImageAttribute', c_uint16), ('ComponentAttribute', c_uint16), ('CfgRegionOffset', c_uint32), ('CfgRegionSize', c_uint32), ('Reserved2', c_uint32), ('TempRamInitEntryOffset', c_uint32), ('Reserved3', c_uint32), ('NotifyPhaseEntryOffset', c_uint32), ('FspMemoryInitEntryOffset', c_uint32), ('TempRamExitEntryOffset', c_uint32), ('FspSiliconInitEntryOffset', c_uint32), ('FspMultiPhaseSiInitEntryOffset', c_uint32), ('ExtendedImageRevision', c_uint16), ('Reserved4', c_uint16), ('FspMultiPhaseMemInitEntryOffset', c_uint32), ('FspSmmInitEntryOffset', c_uint32) ] class FSP_PATCH_TABLE(Structure): _fields_ = [ ('Signature', ARRAY(c_char, 4)), ('HeaderLength', c_uint16), ('HeaderRevision', c_uint8), ('Reserved', c_uint8), ('PatchEntryNum', c_uint32) ] class EFI_IMAGE_DATA_DIRECTORY(Structure): _fields_ = [ ('VirtualAddress', c_uint32), ('Size', c_uint32) ] class EFI_TE_IMAGE_HEADER(Structure): _fields_ = [ ('Signature', ARRAY(c_char, 2)), ('Machine', c_uint16), ('NumberOfSections', c_uint8), ('Subsystem', c_uint8), ('StrippedSize', c_uint16), ('AddressOfEntryPoint', c_uint32), ('BaseOfCode', c_uint32), ('ImageBase', c_uint64), ('DataDirectoryBaseReloc', 
EFI_IMAGE_DATA_DIRECTORY), ('DataDirectoryDebug', EFI_IMAGE_DATA_DIRECTORY) ] class EFI_IMAGE_DOS_HEADER(Structure): _fields_ = [ ('e_magic', c_uint16), ('e_cblp', c_uint16), ('e_cp', c_uint16), ('e_crlc', c_uint16), ('e_cparhdr', c_uint16), ('e_minalloc', c_uint16), ('e_maxalloc', c_uint16), ('e_ss', c_uint16), ('e_sp', c_uint16), ('e_csum', c_uint16), ('e_ip', c_uint16), ('e_cs', c_uint16), ('e_lfarlc', c_uint16), ('e_ovno', c_uint16), ('e_res', ARRAY(c_uint16, 4)), ('e_oemid', c_uint16), ('e_oeminfo', c_uint16), ('e_res2', ARRAY(c_uint16, 10)), ('e_lfanew', c_uint16) ] class EFI_IMAGE_FILE_HEADER(Structure): _fields_ = [ ('Machine', c_uint16), ('NumberOfSections', c_uint16), ('TimeDateStamp', c_uint32), ('PointerToSymbolTable', c_uint32), ('NumberOfSymbols', c_uint32), ('SizeOfOptionalHeader', c_uint16), ('Characteristics', c_uint16) ] class PE_RELOC_BLOCK_HEADER(Structure): _fields_ = [ ('PageRVA', c_uint32), ('BlockSize', c_uint32) ] class EFI_IMAGE_OPTIONAL_HEADER32(Structure): _fields_ = [ ('Magic', c_uint16), ('MajorLinkerVersion', c_uint8), ('MinorLinkerVersion', c_uint8), ('SizeOfCode', c_uint32), ('SizeOfInitializedData', c_uint32), ('SizeOfUninitializedData', c_uint32), ('AddressOfEntryPoint', c_uint32), ('BaseOfCode', c_uint32), ('BaseOfData', c_uint32), ('ImageBase', c_uint32), ('SectionAlignment', c_uint32), ('FileAlignment', c_uint32), ('MajorOperatingSystemVersion', c_uint16), ('MinorOperatingSystemVersion', c_uint16), ('MajorImageVersion', c_uint16), ('MinorImageVersion', c_uint16), ('MajorSubsystemVersion', c_uint16), ('MinorSubsystemVersion', c_uint16), ('Win32VersionValue', c_uint32), ('SizeOfImage', c_uint32), ('SizeOfHeaders', c_uint32), ('CheckSum' , c_uint32), ('Subsystem', c_uint16), ('DllCharacteristics', c_uint16), ('SizeOfStackReserve', c_uint32), ('SizeOfStackCommit' , c_uint32), ('SizeOfHeapReserve', c_uint32), ('SizeOfHeapCommit' , c_uint32), ('LoaderFlags' , c_uint32), ('NumberOfRvaAndSizes', c_uint32), ('DataDirectory', ARRAY(EFI_IMAGE_DATA_DIRECTORY, 16)) ] class EFI_IMAGE_OPTIONAL_HEADER32_PLUS(Structure): _fields_ = [ ('Magic', c_uint16), ('MajorLinkerVersion', c_uint8), ('MinorLinkerVersion', c_uint8), ('SizeOfCode', c_uint32), ('SizeOfInitializedData', c_uint32), ('SizeOfUninitializedData', c_uint32), ('AddressOfEntryPoint', c_uint32), ('BaseOfCode', c_uint32), ('ImageBase', c_uint64), ('SectionAlignment', c_uint32), ('FileAlignment', c_uint32), ('MajorOperatingSystemVersion', c_uint16), ('MinorOperatingSystemVersion', c_uint16), ('MajorImageVersion', c_uint16), ('MinorImageVersion', c_uint16), ('MajorSubsystemVersion', c_uint16), ('MinorSubsystemVersion', c_uint16), ('Win32VersionValue', c_uint32), ('SizeOfImage', c_uint32), ('SizeOfHeaders', c_uint32), ('CheckSum' , c_uint32), ('Subsystem', c_uint16), ('DllCharacteristics', c_uint16), ('SizeOfStackReserve', c_uint64), ('SizeOfStackCommit' , c_uint64), ('SizeOfHeapReserve', c_uint64), ('SizeOfHeapCommit' , c_uint64), ('LoaderFlags' , c_uint32), ('NumberOfRvaAndSizes', c_uint32), ('DataDirectory', ARRAY(EFI_IMAGE_DATA_DIRECTORY, 16)) ] class EFI_IMAGE_OPTIONAL_HEADER(Union): _fields_ = [ ('PeOptHdr', EFI_IMAGE_OPTIONAL_HEADER32), ('PePlusOptHdr', EFI_IMAGE_OPTIONAL_HEADER32_PLUS) ] class EFI_IMAGE_NT_HEADERS32(Structure): _fields_ = [ ('Signature', c_uint32), ('FileHeader', EFI_IMAGE_FILE_HEADER), ('OptionalHeader', EFI_IMAGE_OPTIONAL_HEADER) ] class EFI_IMAGE_DIRECTORY_ENTRY: EXPORT = 0 IMPORT = 1 RESOURCE = 2 EXCEPTION = 3 SECURITY = 4 BASERELOC = 5 DEBUG = 6 COPYRIGHT = 7 GLOBALPTR = 8 TLS = 9 
LOAD_CONFIG = 10 class EFI_FV_FILETYPE: ALL = 0x00 RAW = 0x01 FREEFORM = 0x02 SECURITY_CORE = 0x03 PEI_CORE = 0x04 DXE_CORE = 0x05 PEIM = 0x06 DRIVER = 0x07 COMBINED_PEIM_DRIVER = 0x08 APPLICATION = 0x09 SMM = 0x0a FIRMWARE_VOLUME_IMAGE = 0x0b COMBINED_SMM_DXE = 0x0c SMM_CORE = 0x0d OEM_MIN = 0xc0 OEM_MAX = 0xdf DEBUG_MIN = 0xe0 DEBUG_MAX = 0xef FFS_MIN = 0xf0 FFS_MAX = 0xff FFS_PAD = 0xf0 class EFI_SECTION_TYPE: """Enumeration of all valid firmware file section types.""" ALL = 0x00 COMPRESSION = 0x01 GUID_DEFINED = 0x02 DISPOSABLE = 0x03 PE32 = 0x10 PIC = 0x11 TE = 0x12 DXE_DEPEX = 0x13 VERSION = 0x14 USER_INTERFACE = 0x15 COMPATIBILITY16 = 0x16 FIRMWARE_VOLUME_IMAGE = 0x17 FREEFORM_SUBTYPE_GUID = 0x18 RAW = 0x19 PEI_DEPEX = 0x1b SMM_DEPEX = 0x1c def AlignPtr (offset, alignment = 8): return (offset + alignment - 1) & ~(alignment - 1) def Bytes2Val (bytes): return reduce(lambda x,y: (x<<8)|y, bytes[::-1] ) def Val2Bytes (value, blen): return [(value>>(i*8) & 0xff) for i in range(blen)] def IsIntegerType (val): if sys.version_info[0] < 3: if type(val) in (int, long): return True else: if type(val) is int: return True return False def IsStrType (val): if sys.version_info[0] < 3: if type(val) is str: return True else: if type(val) is bytes: return True return False def HandleNameStr (val): if sys.version_info[0] < 3: rep = "0x%X ('%s')" % (Bytes2Val (bytearray (val)), val) else: rep = "0x%X ('%s')" % (Bytes2Val (bytearray (val)), str (val, 'utf-8')) return rep def OutputStruct (obj, indent = 0, plen = 0): if indent: body = '' else: body = (' ' * indent + '<%s>:\n') % obj.__class__.__name__ if plen == 0: plen = sizeof(obj) max_key_len = 26 pstr = (' ' * (indent + 1) + '{0:<%d} = {1}\n') % max_key_len for field in obj._fields_: key = field[0] val = getattr(obj, key) rep = '' if not isinstance(val, c_uint24) and isinstance(val, Structure): body += pstr.format(key, val.__class__.__name__) body += OutputStruct (val, indent + 1) plen -= sizeof(val) else: if IsStrType (val): rep = HandleNameStr (val) elif IsIntegerType (val): if (key == 'ImageRevision'): FspImageRevisionMajor = ((val >> 24) & 0xFF) FspImageRevisionMinor = ((val >> 16) & 0xFF) FspImageRevisionRevision = ((val >> 8) & 0xFF) FspImageRevisionBuildNumber = (val & 0xFF) rep = '0x%08X' % val elif (key == 'ExtendedImageRevision'): FspImageRevisionRevision |= (val & 0xFF00) FspImageRevisionBuildNumber |= ((val << 8) & 0xFF00) rep = "0x%04X ('%02X.%02X.%04X.%04X')" % (val, FspImageRevisionMajor, FspImageRevisionMinor, FspImageRevisionRevision, FspImageRevisionBuildNumber) elif field[1] == c_uint64: rep = '0x%016X' % val elif field[1] == c_uint32: rep = '0x%08X' % val elif field[1] == c_uint16: rep = '0x%04X' % val elif field[1] == c_uint8: rep = '0x%02X' % val else: rep = '0x%X' % val elif isinstance(val, c_uint24): rep = '0x%X' % val.get_value() elif 'c_ubyte_Array' in str(type(val)): if sizeof(val) == 16: if sys.version_info[0] < 3: rep = str(bytearray(val)) else: rep = bytes(val) rep = str(uuid.UUID(bytes_le = rep)).upper() else: res = ['0x%02X'%i for i in bytearray(val)] rep = '[%s]' % (','.join(res)) else: rep = str(val) plen -= sizeof(field[1]) body += pstr.format(key, rep) if plen <= 0: break return body class Section: def __init__(self, offset, secdata): self.SecHdr = EFI_COMMON_SECTION_HEADER.from_buffer (secdata, 0) self.SecData = secdata[0:int(self.SecHdr.Size)] self.Offset = offset class FirmwareFile: def __init__(self, offset, filedata): self.FfsHdr = EFI_FFS_FILE_HEADER.from_buffer (filedata, 0) self.FfsData = 
filedata[0:int(self.FfsHdr.Size)] self.Offset = offset self.SecList = [] def ParseFfs(self): ffssize = len(self.FfsData) offset = sizeof(self.FfsHdr) if self.FfsHdr.Name != '\xff' * 16: while offset < (ffssize - sizeof (EFI_COMMON_SECTION_HEADER)): sechdr = EFI_COMMON_SECTION_HEADER.from_buffer (self.FfsData, offset) sec = Section (offset, self.FfsData[offset:offset + int(sechdr.Size)]) self.SecList.append(sec) offset += int(sechdr.Size) offset = AlignPtr(offset, 4) class FirmwareVolume: def __init__(self, offset, fvdata): self.FvHdr = EFI_FIRMWARE_VOLUME_HEADER.from_buffer (fvdata, 0) self.FvData = fvdata[0 : self.FvHdr.FvLength] self.Offset = offset if self.FvHdr.ExtHeaderOffset > 0: self.FvExtHdr = EFI_FIRMWARE_VOLUME_EXT_HEADER.from_buffer (self.FvData, self.FvHdr.ExtHeaderOffset) else: self.FvExtHdr = None self.FfsList = [] self.ChildFvList = [] def ParseFv(self): fvsize = len(self.FvData) if self.FvExtHdr: offset = self.FvHdr.ExtHeaderOffset + self.FvExtHdr.ExtHeaderSize else: offset = self.FvHdr.HeaderLength offset = AlignPtr(offset) while offset < (fvsize - sizeof (EFI_FFS_FILE_HEADER)): ffshdr = EFI_FFS_FILE_HEADER.from_buffer (self.FvData, offset) if (ffshdr.Name == '\xff' * 16) and (int(ffshdr.Size) == 0xFFFFFF): offset = fvsize else: ffs = FirmwareFile (offset, self.FvData[offset:offset + int(ffshdr.Size)]) # check if there is child fv childfvfound = 0 if (ffs.FfsHdr.Type == EFI_FV_FILETYPE.FIRMWARE_VOLUME_IMAGE): csoffset = offset + sizeof (EFI_FFS_FILE_HEADER) csoffset = AlignPtr(csoffset, 4) # find fv section while csoffset < (offset + int(ffs.FfsHdr.Size)): cshdr = EFI_COMMON_SECTION_HEADER.from_buffer (self.FvData, csoffset) if (cshdr.Type == EFI_SECTION_TYPE.FIRMWARE_VOLUME_IMAGE): childfvfound = 1 break else: # check next section csoffset += int(cshdr.Size) csoffset = AlignPtr(csoffset, 4) if (childfvfound): childfvoffset = csoffset + sizeof (EFI_COMMON_SECTION_HEADER) childfvhdr = EFI_FIRMWARE_VOLUME_HEADER.from_buffer (self.FvData, childfvoffset) childfv = FirmwareVolume (childfvoffset, self.FvData[childfvoffset:childfvoffset + int(childfvhdr.FvLength)]) childfv.ParseFv () self.ChildFvList.append(childfv) else: ffs.ParseFfs() self.FfsList.append(ffs) offset += int(ffshdr.Size) offset = AlignPtr(offset) class FspImage: def __init__(self, offset, fih, fihoff, patch): self.Fih = fih self.FihOffset = fihoff self.Offset = offset self.FvIdxList = [] self.Type = "XTMSIXXXOXXXXXXX"[(fih.ComponentAttribute >> 12) & 0x0F] self.PatchList = patch self.PatchList.append(fihoff + 0x1C) def AppendFv(self, FvIdx): self.FvIdxList.append(FvIdx) def Patch(self, delta, fdbin): count = 0 applied = 0 for idx, patch in enumerate(self.PatchList): ptype = (patch>>24) & 0x0F if ptype not in [0x00, 0x0F]: raise Exception('ERROR: Invalid patch type %d !' 
% ptype) if patch & 0x80000000: patch = self.Fih.ImageSize - (0x1000000 - (patch & 0xFFFFFF)) else: patch = patch & 0xFFFFFF if (patch < self.Fih.ImageSize) and (patch + sizeof(c_uint32) <= self.Fih.ImageSize): offset = patch + self.Offset value = Bytes2Val(fdbin[offset:offset+sizeof(c_uint32)]) value += delta fdbin[offset:offset+sizeof(c_uint32)] = Val2Bytes(value, sizeof(c_uint32)) applied += 1 count += 1 # Don't count the FSP base address patch entry appended at the end if count != 0: count -= 1 applied -= 1 return (count, applied) class FirmwareDevice: def __init__(self, offset, fdfile): self.FvList = [] self.FspList = [] self.FdFile = fdfile self.Offset = 0 hfsp = open (self.FdFile, 'rb') self.FdData = bytearray(hfsp.read()) hfsp.close() def ParseFd(self): offset = 0 fdsize = len(self.FdData) self.FvList = [] while offset < (fdsize - sizeof (EFI_FIRMWARE_VOLUME_HEADER)): fvh = EFI_FIRMWARE_VOLUME_HEADER.from_buffer (self.FdData, offset) if b'_FVH' != fvh.Signature: raise Exception("ERROR: Invalid FV header !") fv = FirmwareVolume (offset, self.FdData[offset:offset + fvh.FvLength]) fv.ParseFv () self.FvList.append(fv) offset += fv.FvHdr.FvLength def CheckFsp (self): if len(self.FspList) == 0: return fih = None for fsp in self.FspList: if not fih: fih = fsp.Fih else: newfih = fsp.Fih if (newfih.ImageId != fih.ImageId) or (newfih.ImageRevision != fih.ImageRevision): raise Exception("ERROR: Inconsistent FSP ImageId or ImageRevision detected !") def ParseFsp(self): flen = 0 for idx, fv in enumerate(self.FvList): # Check if this FV contains FSP header if flen == 0: if len(fv.FfsList) == 0: continue ffs = fv.FfsList[0] if len(ffs.SecList) == 0: continue sec = ffs.SecList[0] if sec.SecHdr.Type != EFI_SECTION_TYPE.RAW: continue fihoffset = ffs.Offset + sec.Offset + sizeof(sec.SecHdr) fspoffset = fv.Offset offset = fspoffset + fihoffset fih = FSP_INFORMATION_HEADER.from_buffer (self.FdData, offset) if b'FSPH' != fih.Signature: continue offset += fih.HeaderLength offset = AlignPtr(offset, 4) plist = [] while True: fch = FSP_COMMON_HEADER.from_buffer (self.FdData, offset) if b'FSPP' != fch.Signature: offset += fch.HeaderLength offset = AlignPtr(offset, 4) else: fspp = FSP_PATCH_TABLE.from_buffer (self.FdData, offset) offset += sizeof(fspp) pdata = (c_uint32 * fspp.PatchEntryNum).from_buffer(self.FdData, offset) plist = list(pdata) break fsp = FspImage (fspoffset, fih, fihoffset, plist) fsp.AppendFv (idx) self.FspList.append(fsp) flen = fsp.Fih.ImageSize - fv.FvHdr.FvLength else: fsp.AppendFv (idx) flen -= fv.FvHdr.FvLength if flen < 0: raise Exception("ERROR: Incorrect FV size in image !") self.CheckFsp () class PeTeImage: def __init__(self, offset, data): self.Offset = offset tehdr = EFI_TE_IMAGE_HEADER.from_buffer (data, 0) if tehdr.Signature == b'VZ': # TE image self.TeHdr = tehdr elif tehdr.Signature == b'MZ': # PE image self.TeHdr = None self.DosHdr = EFI_IMAGE_DOS_HEADER.from_buffer (data, 0) self.PeHdr = EFI_IMAGE_NT_HEADERS32.from_buffer (data, self.DosHdr.e_lfanew) if self.PeHdr.Signature != 0x4550: raise Exception("ERROR: Invalid PE32 header !") if self.PeHdr.OptionalHeader.PeOptHdr.Magic == 0x10b: # PE32 image if self.PeHdr.FileHeader.SizeOfOptionalHeader < EFI_IMAGE_OPTIONAL_HEADER32.DataDirectory.offset: raise Exception("ERROR: Unsupported PE32 image !") if self.PeHdr.OptionalHeader.PeOptHdr.NumberOfRvaAndSizes <= EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC: raise Exception("ERROR: No relocation information available !") elif self.PeHdr.OptionalHeader.PeOptHdr.Magic == 0x20b: # PE32+ image 
if self.PeHdr.FileHeader.SizeOfOptionalHeader < EFI_IMAGE_OPTIONAL_HEADER32_PLUS.DataDirectory.offset: raise Exception("ERROR: Unsupported PE32+ image !") if self.PeHdr.OptionalHeader.PePlusOptHdr.NumberOfRvaAndSizes <= EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC: raise Exception("ERROR: No relocation information available !") else: raise Exception("ERROR: Invalid PE32 optional header !") self.Offset = offset self.Data = data self.RelocList = [] def IsTeImage(self): return self.TeHdr is not None def ParseReloc(self): if self.IsTeImage(): rsize = self.TeHdr.DataDirectoryBaseReloc.Size roffset = sizeof(self.TeHdr) - self.TeHdr.StrippedSize + self.TeHdr.DataDirectoryBaseReloc.VirtualAddress else: # Assuming PE32 image type (self.PeHdr.OptionalHeader.PeOptHdr.Magic == 0x10b) rsize = self.PeHdr.OptionalHeader.PeOptHdr.DataDirectory[EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC].Size roffset = self.PeHdr.OptionalHeader.PeOptHdr.DataDirectory[EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC].VirtualAddress if self.PeHdr.OptionalHeader.PePlusOptHdr.Magic == 0x20b: # PE32+ image rsize = self.PeHdr.OptionalHeader.PePlusOptHdr.DataDirectory[EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC].Size roffset = self.PeHdr.OptionalHeader.PePlusOptHdr.DataDirectory[EFI_IMAGE_DIRECTORY_ENTRY.BASERELOC].VirtualAddress alignment = 4 offset = roffset while offset < roffset + rsize: offset = AlignPtr(offset, 4) blkhdr = PE_RELOC_BLOCK_HEADER.from_buffer(self.Data, offset) offset += sizeof(blkhdr) # Read relocation type,offset pairs rlen = blkhdr.BlockSize - sizeof(PE_RELOC_BLOCK_HEADER) rnum = int (rlen/sizeof(c_uint16)) rdata = (c_uint16 * rnum).from_buffer(self.Data, offset) for each in rdata: roff = each & 0xfff rtype = each >> 12 if rtype == 0: # IMAGE_REL_BASED_ABSOLUTE: continue if ((rtype != 3) and (rtype != 10)): # IMAGE_REL_BASED_HIGHLOW and IMAGE_REL_BASED_DIR64 raise Exception("ERROR: Unsupported relocation type %d!" % rtype) # Calculate the offset of the relocation aoff = blkhdr.PageRVA + roff if self.IsTeImage(): aoff += sizeof(self.TeHdr) - self.TeHdr.StrippedSize self.RelocList.append((rtype, aoff)) offset += sizeof(rdata) def Rebase(self, delta, fdbin): count = 0 if delta == 0: return count for (rtype, roff) in self.RelocList: if rtype == 3: # IMAGE_REL_BASED_HIGHLOW offset = roff + self.Offset value = Bytes2Val(fdbin[offset:offset+sizeof(c_uint32)]) value += delta fdbin[offset:offset+sizeof(c_uint32)] = Val2Bytes(value, sizeof(c_uint32)) count += 1 elif rtype == 10: # IMAGE_REL_BASED_DIR64 offset = roff + self.Offset value = Bytes2Val(fdbin[offset:offset+sizeof(c_uint64)]) value += delta fdbin[offset:offset+sizeof(c_uint64)] = Val2Bytes(value, sizeof(c_uint64)) count += 1 else: raise Exception('ERROR: Unknown relocation type %d !' 
% rtype) if self.IsTeImage(): offset = self.Offset + EFI_TE_IMAGE_HEADER.ImageBase.offset size = EFI_TE_IMAGE_HEADER.ImageBase.size else: offset = self.Offset + self.DosHdr.e_lfanew offset += EFI_IMAGE_NT_HEADERS32.OptionalHeader.offset if self.PeHdr.OptionalHeader.PePlusOptHdr.Magic == 0x20b: # PE32+ image offset += EFI_IMAGE_OPTIONAL_HEADER32_PLUS.ImageBase.offset size = EFI_IMAGE_OPTIONAL_HEADER32_PLUS.ImageBase.size else: offset += EFI_IMAGE_OPTIONAL_HEADER32.ImageBase.offset size = EFI_IMAGE_OPTIONAL_HEADER32.ImageBase.size value = Bytes2Val(fdbin[offset:offset+size]) + delta fdbin[offset:offset+size] = Val2Bytes(value, size) return count def ShowFspInfo (fspfile): fd = FirmwareDevice(0, fspfile) fd.ParseFd () fd.ParseFsp () print ("\nFound the following %d Firmware Volumes in FSP binary:" % (len(fd.FvList))) for idx, fv in enumerate(fd.FvList): name = fv.FvExtHdr.FvName if not name: name = '\xff' * 16 else: if sys.version_info[0] < 3: name = str(bytearray(name)) else: name = bytes(name) guid = uuid.UUID(bytes_le = name) print ("FV%d:" % idx) print (" GUID : %s" % str(guid).upper()) print (" Offset : 0x%08X" % fv.Offset) print (" Length : 0x%08X" % fv.FvHdr.FvLength) print ("\n") for fsp in fd.FspList: fvlist = map(lambda x : 'FV%d' % x, fsp.FvIdxList) print ("FSP_%s contains %s" % (fsp.Type, ','.join(fvlist))) print ("%s" % (OutputStruct(fsp.Fih, 0, fsp.Fih.HeaderLength))) def GenFspHdr (fspfile, outdir, hfile): fd = FirmwareDevice(0, fspfile) fd.ParseFd () fd.ParseFsp () if not hfile: hfile = os.path.splitext(os.path.basename(fspfile))[0] + '.h' fspname, ext = os.path.splitext(os.path.basename(hfile)) filename = os.path.join(outdir, fspname + ext) hfsp = open(filename, 'w') hfsp.write ('%s\n\n' % CopyRightHeaderFile) firstfv = True for fsp in fd.FspList: fih = fsp.Fih if firstfv: if sys.version_info[0] < 3: hfsp.write("#define FSP_IMAGE_ID 0x%016X /* '%s' */\n" % (Bytes2Val(bytearray(fih.ImageId)), fih.ImageId)) else: hfsp.write("#define FSP_IMAGE_ID 0x%016X /* '%s' */\n" % (Bytes2Val(bytearray(fih.ImageId)), str (fih.ImageId, 'utf-8'))) hfsp.write("#define FSP_IMAGE_REV 0x%08X \n\n" % fih.ImageRevision) firstfv = False fv = fd.FvList[fsp.FvIdxList[0]] hfsp.write ('#define FSP%s_BASE 0x%08X\n' % (fsp.Type, fih.ImageBase)) hfsp.write ('#define FSP%s_OFFSET 0x%08X\n' % (fsp.Type, fv.Offset)) hfsp.write ('#define FSP%s_LENGTH 0x%08X\n\n' % (fsp.Type, fih.ImageSize)) hfsp.close() def SplitFspBin (fspfile, outdir, nametemplate): fd = FirmwareDevice(0, fspfile) fd.ParseFd () fd.ParseFsp () for fsp in fd.FspList: if fsp.Fih.HeaderRevision < 3: raise Exception("ERROR: FSP 1.x is not supported by the split command !") ftype = fsp.Type if not nametemplate: nametemplate = fspfile fspname, ext = os.path.splitext(os.path.basename(nametemplate)) filename = os.path.join(outdir, fspname + '_' + fsp.Type + ext) hfsp = open(filename, 'wb') print ("Create FSP component file '%s'" % filename) for fvidx in fsp.FvIdxList: fv = fd.FvList[fvidx] hfsp.write(fv.FvData) hfsp.close() def GetImageFromFv (fd, parentfvoffset, fv, imglist): for ffs in fv.FfsList: for sec in ffs.SecList: if sec.SecHdr.Type in [EFI_SECTION_TYPE.TE, EFI_SECTION_TYPE.PE32]: # TE or PE32 offset = fd.Offset + parentfvoffset + fv.Offset + ffs.Offset + sec.Offset + sizeof(sec.SecHdr) imglist.append ((offset, len(sec.SecData) - sizeof(sec.SecHdr))) def RebaseFspBin (FspBinary, FspComponent, FspBase, OutputDir, OutputFile): fd = FirmwareDevice(0, FspBinary) fd.ParseFd () fd.ParseFsp () numcomp = len(FspComponent) baselist = FspBase if 
numcomp != len(baselist):
        print ("ERROR: The number of base addresses does not match the number of FSP components !")
        return
    newfspbin = fd.FdData[:]
    for idx, fspcomp in enumerate(FspComponent):
        found = False
        for fsp in fd.FspList:
            # Is this an FSP 1.x single binary?
            if fsp.Fih.HeaderRevision < 3:
                found = True
                ftype = 'X'
                break
            ftype = fsp.Type.lower()
            if ftype == fspcomp:
                found = True
                break
        if not found:
            print ("ERROR: Could not find FSP_%c component to rebase !" % fspcomp.upper())
            return
        fspbase = baselist[idx]
        if fspbase.startswith('0x'):
            newbase = int(fspbase, 16)
        else:
            newbase = int(fspbase)
        oldbase = fsp.Fih.ImageBase
        delta = newbase - oldbase
        print ("Rebase FSP-%c from 0x%08X to 0x%08X:" % (ftype.upper(), oldbase, newbase))
        imglist = []
        for fvidx in fsp.FvIdxList:
            fv = fd.FvList[fvidx]
            GetImageFromFv (fd, 0, fv, imglist)
            # Get images from child FVs
            for childfv in fv.ChildFvList:
                print ("Get image from child fv of fv%d, parent fv offset: 0x%x" % (fvidx, fv.Offset))
                GetImageFromFv (fd, fv.Offset, childfv, imglist)
        fcount = 0
        pcount = 0
        for (offset, length) in imglist:
            img = PeTeImage(offset, fd.FdData[offset:offset + length])
            img.ParseReloc()
            pcount += img.Rebase(delta, newfspbin)
            fcount += 1
        print (" Patched %d entries in %d TE/PE32 images." % (pcount, fcount))
        (count, applied) = fsp.Patch(delta, newfspbin)
        print (" Patched %d entries using FSP patch table." % applied)
        if count != applied:
            print (" %d invalid entries are ignored !" % (count - applied))
    if OutputFile == '':
        filename = os.path.basename(FspBinary)
        base, ext = os.path.splitext(filename)
        OutputFile = base + "_%08X" % newbase + ext
    fspname, ext = os.path.splitext(os.path.basename(OutputFile))
    filename = os.path.join(OutputDir, fspname + ext)
    fd = open(filename, "wb")
    fd.write(newfspbin)
    fd.close()

def main ():
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='commands', dest="which")

    parser_rebase = subparsers.add_parser('rebase', help='rebase an FSP to a new base address')
    parser_rebase.set_defaults(which='rebase')
    parser_rebase.add_argument('-f', '--fspbin' , dest='FspBinary', type=str, help='FSP binary file path', required = True)
    parser_rebase.add_argument('-c', '--fspcomp', choices=['t','m','s','o','i'], nargs='+', dest='FspComponent', type=str, help='FSP component to rebase', default = ['t'], required = True)
    parser_rebase.add_argument('-b', '--newbase', dest='FspBase', nargs='+', type=str, help='New FSP base address(es)', default = '', required = True)
    parser_rebase.add_argument('-o', '--outdir' , dest='OutputDir', type=str, help='Output directory path', default = '.')
    parser_rebase.add_argument('-n', '--outfile', dest='OutputFile', type=str, help='Rebased FSP binary file name', default = '')

    parser_split = subparsers.add_parser('split', help='split an FSP into multiple components')
    parser_split.set_defaults(which='split')
    parser_split.add_argument('-f', '--fspbin' , dest='FspBinary', type=str, help='FSP binary file path', required = True)
    parser_split.add_argument('-o', '--outdir' , dest='OutputDir', type=str, help='Output directory path', default = '.')
    parser_split.add_argument('-n', '--nametpl', dest='NameTemplate', type=str, help='Output name template', default = '')

    parser_genhdr = subparsers.add_parser('genhdr', help='generate a header file for FSP binary')
    parser_genhdr.set_defaults(which='genhdr')
    parser_genhdr.add_argument('-f', '--fspbin' , dest='FspBinary', type=str, help='FSP binary file path', required = True)
    parser_genhdr.add_argument('-o', '--outdir' , dest='OutputDir', type=str,
help='Output directory path', default = '.') parser_genhdr.add_argument('-n', '--hfile', dest='HFileName', type=str, help='Output header file name', default = '') parser_info = subparsers.add_parser('info', help='display FSP information') parser_info.set_defaults(which='info') parser_info.add_argument('-f', '--fspbin' , dest='FspBinary', type=str, help='FSP binary file path', required = True) args = parser.parse_args() if args.which in ['rebase', 'split', 'genhdr', 'info']: if not os.path.exists(args.FspBinary): raise Exception ("ERROR: Could not locate FSP binary file '%s' !" % args.FspBinary) if hasattr(args, 'OutputDir') and not os.path.exists(args.OutputDir): raise Exception ("ERROR: Invalid output directory '%s' !" % args.OutputDir) if args.which == 'rebase': RebaseFspBin (args.FspBinary, args.FspComponent, args.FspBase, args.OutputDir, args.OutputFile) elif args.which == 'split': SplitFspBin (args.FspBinary, args.OutputDir, args.NameTemplate) elif args.which == 'genhdr': GenFspHdr (args.FspBinary, args.OutputDir, args.HFileName) elif args.which == 'info': ShowFspInfo (args.FspBinary) else: parser.print_help() return 0 if __name__ == '__main__': sys.exit(main())
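# ---------------------------------------------------------------------------
# Editor's illustration (not part of SplitFspBin.py): the rebase flow above
# reduces to adding a base delta to every little-endian value named by a
# relocation record, as PeTeImage.Rebase does for HIGHLOW (32-bit) and DIR64
# (64-bit) fixups. Below is a minimal, self-contained sketch of that byte
# patching; the struct module stands in for the tool's Bytes2Val/Val2Bytes
# helpers, which are defined elsewhere in the package, and the addresses used
# in demo_rebase() are made up for illustration.
import struct

def patch_u32(image, offset, delta):
    # Read a little-endian UINT32, apply the rebase delta, write it back.
    value = struct.unpack_from('<I', image, offset)[0]
    struct.pack_into('<I', image, offset, (value + delta) & 0xFFFFFFFF)

def demo_rebase():
    # One IMAGE_REL_BASED_HIGHLOW style fixup: move 0xFFF80000 -> 0x20000000.
    image = bytearray(struct.pack('<I', 0xFFF80000))
    patch_u32(image, 0, 0x20000000 - 0xFFF80000)
    assert struct.unpack_from('<I', image, 0)[0] == 0x20000000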
edk2-master
IntelFsp2Pkg/Tools/SplitFspBin.py
#!/usr/bin/env python ## @ FspDscBsf2Yaml.py # This script convert DSC or BSF format file into YAML format # # Copyright(c) 2021, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # ## import os import re import sys from collections import OrderedDict from datetime import date from FspGenCfgData import CFspBsf2Dsc, CGenCfgData __copyright_tmp__ = """## @file # # Slim Bootloader CFGDATA %s File. # # Copyright (c) %4d, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # ## """ class CFspDsc2Yaml(): def __init__(self): self._Hdr_key_list = ['EMBED', 'STRUCT'] self._Bsf_key_list = ['NAME', 'HELP', 'TYPE', 'PAGE', 'PAGES', 'OPTION', 'CONDITION', 'ORDER', 'MARKER', 'SUBT', 'FIELD', 'FIND'] self.gen_cfg_data = None self.cfg_reg_exp = re.compile( "^([_a-zA-Z0-9$\\(\\)]+)\\s*\\|\\s*(0x[0-9A-F]+|\\*)" "\\s*\\|\\s*(\\d+|0x[0-9a-fA-F]+)\\s*\\|\\s*(.+)") self.bsf_reg_exp = re.compile("(%s):{(.+?)}(?:$|\\s+)" % '|'.join(self._Bsf_key_list)) self.hdr_reg_exp = re.compile("(%s):{(.+?)}" % '|'.join(self._Hdr_key_list)) self.prefix = '' self.unused_idx = 0 self.offset = 0 self.base_offset = 0 def load_config_data_from_dsc(self, file_name): """ Load and parse a DSC CFGDATA file. """ gen_cfg_data = CGenCfgData('FSP') if file_name.endswith('.dsc'): if gen_cfg_data.ParseDscFile(file_name) != 0: raise Exception('DSC file parsing error !') if gen_cfg_data.CreateVarDict() != 0: raise Exception('DSC variable creation error !') else: raise Exception('Unsupported file "%s" !' % file_name) gen_cfg_data.UpdateDefaultValue() self.gen_cfg_data = gen_cfg_data def print_dsc_line(self): """ Debug function to print all DSC lines. """ for line in self.gen_cfg_data._DscLines: print(line) def format_value(self, field, text, indent=''): """ Format a CFGDATA item into YAML format. """ if (not text.startswith('!expand')) and (': ' in text): tgt = ':' if field == 'option' else '- ' text = text.replace(': ', tgt) lines = text.splitlines() if len(lines) == 1 and field != 'help': return text else: return '>\n ' + '\n '.join( [indent + i.lstrip() for i in lines]) def reformat_pages(self, val): # Convert XXX:YYY into XXX::YYY format for page definition parts = val.split(',') if len(parts) <= 1: return val new_val = [] for each in parts: nodes = each.split(':') if len(nodes) == 2: each = '%s::%s' % (nodes[0], nodes[1]) new_val.append(each) ret = ','.join(new_val) return ret def reformat_struct_value(self, utype, val): # Convert DSC UINT16/32/64 array into new format by # adding prefix 0:0[WDQ] to provide hint to the array format if utype in ['UINT16', 'UINT32', 'UINT64']: if val and val[0] == '{' and val[-1] == '}': if utype == 'UINT16': unit = 'W' elif utype == 'UINT32': unit = 'D' else: unit = 'Q' val = '{ 0:0%s, %s }' % (unit, val[1:-1]) return val def process_config(self, cfg): if 'page' in cfg: cfg['page'] = self.reformat_pages(cfg['page']) if 'struct' in cfg: cfg['value'] = self.reformat_struct_value( cfg['struct'], cfg['value']) def parse_dsc_line(self, dsc_line, config_dict, init_dict, include): """ Parse a line in DSC and update the config dictionary accordingly. 
""" init_dict.clear() match = re.match('g(CfgData|\\w+FspPkgTokenSpaceGuid)\\.(.+)', dsc_line) if match: match = self.cfg_reg_exp.match(match.group(2)) if not match: return False config_dict['cname'] = self.prefix + match.group(1) value = match.group(4).strip() length = match.group(3).strip() config_dict['length'] = length config_dict['value'] = value if match.group(2) == '*': self.offset += int(length, 0) else: org_offset = int(match.group(2), 0) if org_offset == 0: self.base_offset = self.offset offset = org_offset + self.base_offset if self.offset != offset: if offset > self.offset: init_dict['padding'] = offset - self.offset self.offset = offset + int(length, 0) return True match = re.match("^\\s*#\\s+!([<>])\\s+include\\s+(.+)", dsc_line) if match and len(config_dict) == 0: # !include should not be inside a config field # if so, do not convert include into YAML init_dict = dict(config_dict) config_dict.clear() config_dict['cname'] = '$ACTION' if match.group(1) == '<': config_dict['include'] = match.group(2) else: config_dict['include'] = '' return True match = re.match("^\\s*#\\s+(!BSF|!HDR)\\s+(.+)", dsc_line) if not match: return False remaining = match.group(2) if match.group(1) == '!BSF': result = self.bsf_reg_exp.findall(remaining) if not result: return False for each in result: key = each[0].lower() val = each[1] if key == 'field': name = each[1] if ':' not in name: raise Exception('Incorrect bit field format !') parts = name.split(':') config_dict['length'] = parts[1] config_dict['cname'] = '@' + parts[0] return True elif key in ['pages', 'page', 'find']: init_dict = dict(config_dict) config_dict.clear() config_dict['cname'] = '$ACTION' if key == 'find': config_dict['find'] = val else: config_dict['page'] = val return True elif key == 'subt': config_dict.clear() parts = each[1].split(':') tmp_name = parts[0][:-5] if tmp_name == 'CFGHDR': cfg_tag = '_$FFF_' sval = '!expand { %s_TMPL : [ ' % \ tmp_name + '%s, %s, ' % (parts[1], cfg_tag) + \ ', '.join(parts[2:]) + ' ] }' else: sval = '!expand { %s_TMPL : [ ' % \ tmp_name + ', '.join(parts[1:]) + ' ] }' config_dict.clear() config_dict['cname'] = tmp_name config_dict['expand'] = sval return True else: if key in ['name', 'help', 'option'] and \ val.startswith('+'): val = config_dict[key] + '\n' + val[1:] if val.strip() == '': val = "''" config_dict[key] = val else: match = self.hdr_reg_exp.match(remaining) if not match: return False key = match.group(1) remaining = match.group(2) if key == 'EMBED': parts = remaining.split(':') names = parts[0].split(',') if parts[-1] == 'END': prefix = '>' else: prefix = '<' skip = False if parts[1].startswith('TAG_'): tag_txt = '%s:%s' % (names[0], parts[1]) else: tag_txt = names[0] if parts[2] in ['START', 'END']: if names[0] == 'PCIE_RP_PIN_CTRL[]': skip = True else: tag_txt = '%s:%s' % (names[0], parts[1]) if not skip: config_dict.clear() config_dict['cname'] = prefix + tag_txt return True if key == 'STRUCT': text = remaining.strip() config_dict[key.lower()] = text return False def process_template_lines(self, lines): """ Process a line in DSC template section. 
""" template_name = '' bsf_temp_dict = OrderedDict() temp_file_dict = OrderedDict() include_file = ['.'] for line in lines: match = re.match("^\\s*#\\s+!([<>])\\s+include\\s+(.+)", line) if match: if match.group(1) == '<': include_file.append(match.group(2)) else: include_file.pop() match = re.match( "^\\s*#\\s+(!BSF)\\s+DEFT:{(.+?):(START|END)}", line) if match: if match.group(3) == 'START' and not template_name: template_name = match.group(2).strip() temp_file_dict[template_name] = list(include_file) bsf_temp_dict[template_name] = [] if match.group(3) == 'END' and \ (template_name == match.group(2).strip()) and \ template_name: template_name = '' else: if template_name: bsf_temp_dict[template_name].append(line) return bsf_temp_dict, temp_file_dict def process_option_lines(self, lines): """ Process a line in DSC config section. """ cfgs = [] struct_end = False config_dict = dict() init_dict = dict() include = [''] for line in lines: ret = self.parse_dsc_line(line, config_dict, init_dict, include) if ret: if 'padding' in init_dict: num = init_dict['padding'] init_dict.clear() padding_dict = {} cfgs.append(padding_dict) padding_dict['cname'] = 'UnusedUpdSpace%d' % \ self.unused_idx padding_dict['length'] = '0x%x' % num padding_dict['value'] = '{ 0 }' self.unused_idx += 1 if cfgs and cfgs[-1]['cname'][0] != '@' and \ config_dict['cname'][0] == '@': # it is a bit field, mark the previous one as virtual cname = cfgs[-1]['cname'] new_cfg = dict(cfgs[-1]) new_cfg['cname'] = '@$STRUCT' cfgs[-1].clear() cfgs[-1]['cname'] = cname cfgs.append(new_cfg) if cfgs and cfgs[-1]['cname'] == 'CFGHDR' and \ config_dict['cname'][0] == '<': # swap CfgHeader and the CFG_DATA order if ':' in config_dict['cname']: # replace the real TAG for CFG_DATA cfgs[-1]['expand'] = cfgs[-1]['expand'].replace( '_$FFF_', '0x%s' % config_dict['cname'].split(':')[1][4:]) cfgs.insert(-1, config_dict) else: self.process_config(config_dict) if struct_end: struct_end = False cfgs.insert(-1, config_dict) else: cfgs.append(config_dict) if config_dict['cname'][0] == '>': struct_end = True config_dict = dict(init_dict) return cfgs def variable_fixup(self, each): """ Fix up some variable definitions for SBL. """ key = each val = self.gen_cfg_data._MacroDict[each] return key, val def template_fixup(self, tmp_name, tmp_list): """ Fix up some special config templates for SBL """ return def config_fixup(self, cfg_list): """ Fix up some special config items for SBL. """ # Insert FSPT_UPD/FSPM_UPD/FSPS_UPD tag so as to create C strcture idxs = [] for idx, cfg in enumerate(cfg_list): if cfg['cname'].startswith('<FSP_UPD_HEADER'): idxs.append(idx) if len(idxs) != 3: return # Handle insert backwards so that the index does not change in the loop fsp_comp = 'SMT' idx_comp = 0 for idx in idxs[::-1]: # Add current FSP?_UPD start tag cfgfig_dict = {} cfgfig_dict['cname'] = '<FSP%s_UPD' % fsp_comp[idx_comp] cfg_list.insert(idx, cfgfig_dict) if idx_comp < 2: # Add previous FSP?_UPD end tag cfgfig_dict = {} cfgfig_dict['cname'] = '>FSP%s_UPD' % fsp_comp[idx_comp + 1] cfg_list.insert(idx, cfgfig_dict) idx_comp += 1 # Add final FSPS_UPD end tag cfgfig_dict = {} cfgfig_dict['cname'] = '>FSP%s_UPD' % fsp_comp[0] cfg_list.append(cfgfig_dict) return def get_section_range(self, section_name): """ Extract line number range from config file for a given section name. 
""" start = -1 end = -1 for idx, line in enumerate(self.gen_cfg_data._DscLines): if start < 0 and line.startswith('[%s]' % section_name): start = idx elif start >= 0 and line.startswith('['): end = idx break if start == -1: start = 0 if end == -1: end = len(self.gen_cfg_data._DscLines) return start, end def normalize_file_name(self, file, is_temp=False): """ Normalize file name convention so that it is consistent. """ if file.endswith('.dsc'): file = file[:-4] + '.yaml' dir_name = os.path.dirname(file) base_name = os.path.basename(file) if is_temp: if 'Template_' not in file: base_name = base_name.replace('Template', 'Template_') else: if 'CfgData_' not in file: base_name = base_name.replace('CfgData', 'CfgData_') if dir_name: path = dir_name + '/' + base_name else: path = base_name return path def output_variable(self): """ Output variable block into a line list. """ lines = [] for each in self.gen_cfg_data._MacroDict: key, value = self.variable_fixup(each) lines.append('%-30s : %s' % (key, value)) return lines def output_template(self): """ Output template block into a line list. """ self.offset = 0 self.base_offset = 0 start, end = self.get_section_range('PcdsDynamicVpd.Tmp') bsf_temp_dict, temp_file_dict = self.process_template_lines( self.gen_cfg_data._DscLines[start:end]) template_dict = dict() lines = [] file_lines = {} last_file = '.' file_lines[last_file] = [] for tmp_name in temp_file_dict: temp_file_dict[tmp_name][-1] = self.normalize_file_name( temp_file_dict[tmp_name][-1], True) if len(temp_file_dict[tmp_name]) > 1: temp_file_dict[tmp_name][-2] = self.normalize_file_name( temp_file_dict[tmp_name][-2], True) for tmp_name in bsf_temp_dict: file = temp_file_dict[tmp_name][-1] if last_file != file and len(temp_file_dict[tmp_name]) > 1: inc_file = temp_file_dict[tmp_name][-2] file_lines[inc_file].extend( ['', '- !include %s' % temp_file_dict[tmp_name][-1], '']) last_file = file if file not in file_lines: file_lines[file] = [] lines = file_lines[file] text = bsf_temp_dict[tmp_name] tmp_list = self.process_option_lines(text) self.template_fixup(tmp_name, tmp_list) template_dict[tmp_name] = tmp_list lines.append('%s: >' % tmp_name) lines.extend(self.output_dict(tmp_list, False)['.']) lines.append('\n') return file_lines def output_config(self): """ Output config block into a line list. """ self.offset = 0 self.base_offset = 0 start, end = self.get_section_range('PcdsDynamicVpd.Upd') cfgs = self.process_option_lines( self.gen_cfg_data._DscLines[start:end]) self.config_fixup(cfgs) file_lines = self.output_dict(cfgs, True) return file_lines def output_dict(self, cfgs, is_configs): """ Output one config item into a line list. """ file_lines = {} level = 0 file = '.' for each in cfgs: if 'length' in each: if not each['length'].endswith('b') and int(each['length'], 0) == 0: continue if 'include' in each: if each['include']: each['include'] = self.normalize_file_name( each['include']) file_lines[file].extend( ['', '- !include %s' % each['include'], '']) file = each['include'] else: file = '.' 
continue if file not in file_lines: file_lines[file] = [] lines = file_lines[file] name = each['cname'] prefix = name[0] if prefix == '<': level += 1 padding = ' ' * level if prefix not in '<>@': padding += ' ' else: name = name[1:] if prefix == '@': padding += ' ' if ':' in name: parts = name.split(':') name = parts[0] padding = padding[2:] if is_configs else padding if prefix != '>': if 'expand' in each: lines.append('%s- %s' % (padding, each['expand'])) else: lines.append('%s- %-12s :' % (padding, name)) for field in each: if field in ['cname', 'expand', 'include']: continue value_str = self.format_value( field, each[field], padding + ' ' * 16) full_line = ' %s %-12s : %s' % (padding, field, value_str) lines.extend(full_line.splitlines()) if prefix == '>': level -= 1 if level == 0: lines.append('') return file_lines def bsf_to_dsc(bsf_file, dsc_file): fsp_dsc = CFspBsf2Dsc(bsf_file) dsc_lines = fsp_dsc.get_dsc_lines() fd = open(dsc_file, 'w') fd.write('\n'.join(dsc_lines)) fd.close() return def dsc_to_yaml(dsc_file, yaml_file): dsc2yaml = CFspDsc2Yaml() dsc2yaml.load_config_data_from_dsc(dsc_file) cfgs = {} for cfg in ['Template', 'Option']: if cfg == 'Template': file_lines = dsc2yaml.output_template() else: file_lines = dsc2yaml.output_config() for file in file_lines: lines = file_lines[file] if file == '.': cfgs[cfg] = lines else: if ('/' in file or '\\' in file): continue file = os.path.basename(file) out_dir = os.path.dirname(file) fo = open(os.path.join(out_dir, file), 'w') fo.write(__copyright_tmp__ % ( cfg, date.today().year) + '\n\n') for line in lines: fo.write(line + '\n') fo.close() variables = dsc2yaml.output_variable() fo = open(yaml_file, 'w') fo.write(__copyright_tmp__ % ('Default', date.today().year)) if len(variables) > 0: fo.write('\n\nvariable:\n') for line in variables: fo.write(' ' + line + '\n') fo.write('\n\ntemplate:\n') for line in cfgs['Template']: fo.write(' ' + line + '\n') fo.write('\n\nconfigs:\n') for line in cfgs['Option']: fo.write(' ' + line + '\n') fo.close() def get_fsp_name_from_path(bsf_file): name = '' parts = bsf_file.split(os.sep) for part in parts: if part.endswith('FspBinPkg'): name = part[:-9] break if not name: raise Exception('Could not get FSP name from file path!') return name def usage(): print('\n'.join([ "FspDscBsf2Yaml Version 0.10", "Usage:", " FspDscBsf2Yaml BsfFile|DscFile YamlFile" ])) def main(): # # Parse the options and args # argc = len(sys.argv) if argc < 3: usage() return 1 bsf_file = sys.argv[1] yaml_file = sys.argv[2] if os.path.isdir(yaml_file): yaml_file = os.path.join( yaml_file, get_fsp_name_from_path(bsf_file) + '.yaml') if bsf_file.endswith('.dsc'): dsc_file = bsf_file bsf_file = '' else: dsc_file = os.path.splitext(yaml_file)[0] + '.dsc' bsf_to_dsc(bsf_file, dsc_file) dsc_to_yaml(dsc_file, yaml_file) print("'%s' was created successfully!" % yaml_file) return 0 if __name__ == '__main__': sys.exit(main())
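# ---------------------------------------------------------------------------
# Editor's illustration (not part of FspDscBsf2Yaml.py): parse_dsc_line above
# relies on cfg_reg_exp to split a DSC CFGDATA item of the form
# 'name | offset | length | value' into its four fields. The regular
# expression below is the same one compiled in CFspDsc2Yaml.__init__; the
# sample DSC line is a hypothetical example in that documented format.
import re

cfg_exp = re.compile(
    "^([_a-zA-Z0-9$\\(\\)]+)\\s*\\|\\s*(0x[0-9A-F]+|\\*)"
    "\\s*\\|\\s*(\\d+|0x[0-9a-fA-F]+)\\s*\\|\\s*(.+)")

def demo_parse():
    line = 'PcdExampleKnob | 0x0010 | 0x04 | 0x12345678'
    match = cfg_exp.match(line)
    # Groups map to cname, offset, length and value respectively:
    # ('PcdExampleKnob', '0x0010', '0x04', '0x12345678')
    return match.group(1), match.group(2), match.group(3), match.group(4)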
edk2-master
IntelFsp2Pkg/Tools/FspDscBsf2Yaml.py
## @ PatchFv.py # # Copyright (c) 2014 - 2021, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # ## import os import re import sys # # Read data from file # # param [in] binfile Binary file # param [in] offset Offset # param [in] len Length # # retval value Value # def readDataFromFile (binfile, offset, len=1): fd = open(binfile, "r+b") fsize = os.path.getsize(binfile) offval = offset & 0xFFFFFFFF if (offval & 0x80000000): offval = fsize - (0xFFFFFFFF - offval + 1) fd.seek(offval) if sys.version_info[0] < 3: bytearray = [ord(b) for b in fd.read(len)] else: bytearray = [b for b in fd.read(len)] value = 0 idx = len - 1 while idx >= 0: value = value << 8 | bytearray[idx] idx = idx - 1 fd.close() return value # # Check FSP header is valid or not # # param [in] binfile Binary file # # retval boolean True: valid; False: invalid # def IsFspHeaderValid (binfile): fd = open (binfile, "rb") bindat = fd.read(0x200) # only read first 0x200 bytes fd.close() HeaderList = [b'FSPH' , b'FSPP' , b'FSPE'] # Check 'FSPH', 'FSPP', and 'FSPE' in the FSP header OffsetList = [] for each in HeaderList: if each in bindat: idx = bindat.index(each) else: idx = 0 OffsetList.append(idx) if not OffsetList[0] or not OffsetList[1]: # If 'FSPH' or 'FSPP' is missing, it will return false return False if sys.version_info[0] < 3: Revision = ord(bindat[OffsetList[0] + 0x0B]) else: Revision = bindat[OffsetList[0] + 0x0B] # # if revision is bigger than 1, it means it is FSP v1.1 or greater revision, which must contain 'FSPE'. # if Revision > 1 and not OffsetList[2]: return False # If FSP v1.1 or greater without 'FSPE', then return false return True # # Patch data in file # # param [in] binfile Binary file # param [in] offset Offset # param [in] value Patch value # param [in] len Length # # retval len Length # def patchDataInFile (binfile, offset, value, len=1): fd = open(binfile, "r+b") fsize = os.path.getsize(binfile) offval = offset & 0xFFFFFFFF if (offval & 0x80000000): offval = fsize - (0xFFFFFFFF - offval + 1) bytearray = [] idx = 0 while idx < len: bytearray.append(value & 0xFF) value = value >> 8 idx = idx + 1 fd.seek(offval) if sys.version_info[0] < 3: fd.write("".join(chr(b) for b in bytearray)) else: fd.write(bytes(bytearray)) fd.close() return len class Symbols: def __init__(self): self.dictSymbolAddress = {} self.dictGuidNameXref = {} self.dictFfsOffset = {} self.dictVariable = {} self.dictModBase = {} self.fdFile = None self.string = "" self.fdBase = 0xFFFFFFFF self.fdSize = 0 self.index = 0 self.fvList = [] self.parenthesisOpenSet = '([{<' self.parenthesisCloseSet = ')]}>' # # Get FD file # # retval self.fdFile Retrieve FD file # def getFdFile (self): return self.fdFile # # Get FD size # # retval self.fdSize Retrieve the size of FD file # def getFdSize (self): return self.fdSize def parseFvInfFile (self, infFile): fvInfo = {} fvFile = infFile[0:-4] + ".Fv" fvInfo['Name'] = os.path.splitext(os.path.basename(infFile))[0] fvInfo['Offset'] = self.getFvOffsetInFd(fvFile) fvInfo['Size'] = readDataFromFile (fvFile, 0x20, 4) fdIn = open(infFile, "r") rptLines = fdIn.readlines() fdIn.close() fvInfo['Base'] = 0 for rptLine in rptLines: match = re.match("^EFI_BASE_ADDRESS\s*=\s*(0x[a-fA-F0-9]+)", rptLine) if match: fvInfo['Base'] = int(match.group(1), 16) break self.fvList.append(dict(fvInfo)) return 0 # # Create dictionaries # # param [in] fvDir FV's directory # param [in] fvNames All FV's names # # retval 0 Created dictionaries successfully # def createDicts (self, fvDir, fvNames): 
# # If the fvDir is not a directory, then raise an exception # if not os.path.isdir(fvDir): raise Exception ("'%s' is not a valid directory!" % fvDir) # # if user provided fd name as a input, skip rest of the flow to # patch fd directly # fdFile = os.path.join(fvDir,fvNames + ".fd") if os.path.exists(fdFile): print("Tool identified Fd file as a input to patch '%s'" %fdFile) self.fdFile = fdFile self.fdSize = os.path.getsize(fdFile) return 0 # # If the Guid.xref is not existing in fvDir, then raise an exception # xrefFile = os.path.join(fvDir, "Guid.xref") if not os.path.exists(xrefFile): raise Exception("Cannot open GUID Xref file '%s'!" % xrefFile) # # Add GUID reference to dictionary # self.dictGuidNameXref = {} self.parseGuidXrefFile(xrefFile) # # Split up each FV from fvNames and get the fdBase # fvList = fvNames.split(":") fdBase = fvList.pop() if len(fvList) == 0: fvList.append(fdBase) # # If the FD file is not existing, then raise an exception # fdFile = os.path.join(fvDir, fdBase.strip() + ".fd") if not os.path.exists(fdFile): raise Exception("Cannot open FD file '%s'!" % fdFile) # # Get the size of the FD file # self.fdFile = fdFile self.fdSize = os.path.getsize(fdFile) # # If the INF file, which is the first element of fvList, is not existing, then raise an exception # infFile = os.path.join(fvDir, fvList[0].strip()) + ".inf" if not os.path.exists(infFile): raise Exception("Cannot open INF file '%s'!" % infFile) # # Parse INF file in order to get fdBase and then assign those values to dictVariable # self.parseInfFile(infFile) self.dictVariable = {} self.dictVariable["FDSIZE"] = self.fdSize self.dictVariable["FDBASE"] = self.fdBase # # Collect information from FV MAP file and FV TXT file then # put them into dictionaries # self.fvList = [] self.dictSymbolAddress = {} self.dictFfsOffset = {} for file in fvList: # # If the .Fv.map file is not existing, then raise an exception. # Otherwise, parse FV MAP file # fvFile = os.path.join(fvDir, file.strip()) + ".Fv" mapFile = fvFile + ".map" if not os.path.exists(mapFile): raise Exception("Cannot open MAP file '%s'!" % mapFile) infFile = fvFile[0:-3] + ".inf" self.parseFvInfFile(infFile) self.parseFvMapFile(mapFile) # # If the .Fv.txt file is not existing, then raise an exception. # Otherwise, parse FV TXT file # fvTxtFile = fvFile + ".txt" if not os.path.exists(fvTxtFile): raise Exception("Cannot open FV TXT file '%s'!" % fvTxtFile) self.parseFvTxtFile(fvTxtFile) for fv in self.fvList: self.dictVariable['_BASE_%s_' % fv['Name']] = fv['Base'] # # Search all MAP files in FFS directory if it exists then parse MOD MAP file # ffsDir = os.path.join(fvDir, "Ffs") if (os.path.isdir(ffsDir)): for item in os.listdir(ffsDir): if len(item) <= 0x24: continue mapFile =os.path.join(ffsDir, item, "%s.map" % item[0:0x24]) if not os.path.exists(mapFile): continue self.parseModMapFile(item[0x24:], mapFile) return 0 # # Get FV offset in FD file # # param [in] fvFile FV file # # retval offset Got FV offset successfully # def getFvOffsetInFd(self, fvFile): # # Check if the first 0x70 bytes of fvFile can be found in fdFile # fvHandle = open(fvFile, "r+b") fdHandle = open(self.fdFile, "r+b") offset = fdHandle.read().find(fvHandle.read(0x70)) fvHandle.close() fdHandle.close() if offset == -1: raise Exception("Could not locate FV file %s in FD!" 
% fvFile)
        return offset

    #
    #  Parse INF file
    #
    #  param [in]  infFile       INF file
    #
    #  retval      0             Parsed INF file successfully
    #
    def parseInfFile(self, infFile):
        #
        # Get FV offset and search EFI_BASE_ADDRESS in the FD file
        # then assign the value of EFI_BASE_ADDRESS to fdBase
        #
        fvOffset    = self.getFvOffsetInFd(infFile[0:-4] + ".Fv")
        fdIn        = open(infFile, "r")
        rptLine     = fdIn.readline()
        self.fdBase = 0xFFFFFFFF
        while (rptLine != "" ):
            #EFI_BASE_ADDRESS = 0xFFFDF400
            match = re.match("^EFI_BASE_ADDRESS\s*=\s*(0x[a-fA-F0-9]+)", rptLine)
            if match is not None:
                self.fdBase = int(match.group(1), 16) - fvOffset
                break
            rptLine = fdIn.readline()
        fdIn.close()
        if self.fdBase == 0xFFFFFFFF:
            raise Exception("Could not find EFI_BASE_ADDRESS in INF file '%s'!" % infFile)
        return 0

    #
    #  Parse FV TXT file
    #
    #  param [in]  fvTxtFile     .Fv.txt file
    #
    #  retval      0             Parsed FV TXT file successfully
    #
    def parseFvTxtFile(self, fvTxtFile):
        fvName = os.path.basename(fvTxtFile)[0:-7].upper()
        #
        # Get information from .Fv.txt in order to create a dictionary
        # For example,
        #   self.dictFfsOffset[912740BE-2284-4734-B971-84B027353F0C] = 0x000D4078
        #
        fvOffset = self.getFvOffsetInFd(fvTxtFile[0:-4])
        fdIn     = open(fvTxtFile, "r")
        rptLine  = fdIn.readline()
        while (rptLine != "" ):
            match = re.match("(0x[a-fA-F0-9]+)\s([0-9a-fA-F\-]+)", rptLine)
            if match is not None:
                if match.group(2) in self.dictFfsOffset:
                    self.dictFfsOffset[fvName + ':' + match.group(2)] = "0x%08X" % (int(match.group(1), 16) + fvOffset)
                else:
                    self.dictFfsOffset[match.group(2)] = "0x%08X" % (int(match.group(1), 16) + fvOffset)
            rptLine = fdIn.readline()
        fdIn.close()
        return 0

    #
    #  Parse FV MAP file
    #
    #  param [in]  mapFile       .Fv.map file
    #
    #  retval      0             Parsed FV MAP file successfully
    #
    def parseFvMapFile(self, mapFile):
        #
        # Get information from .Fv.map in order to create dictionaries
        # For example,
        #   self.dictModBase[FspSecCore:BASE]  = 4294592776 (0xfffa4908)
        #   self.dictModBase[FspSecCore:ENTRY] = 4294606552 (0xfffa7ed8)
        #   self.dictModBase[FspSecCore:TEXT]  = 4294593080 (0xfffa4a38)
        #   self.dictModBase[FspSecCore:DATA]  = 4294612280 (0xfffa9538)
        #   self.dictSymbolAddress[FspSecCore:_SecStartup] = 0x00fffa4a38
        #
        fdIn        = open(mapFile, "r")
        rptLine     = fdIn.readline()
        modName     = ""
        foundModHdr = False
        while (rptLine != "" ):
            if rptLine[0] != ' ':
                #DxeIpl (Fixed Flash Address, BaseAddress=0x00fffb4310, EntryPoint=0x00fffb4958,Type=PE)
                match = re.match("([_a-zA-Z0-9\-]+)\s\(.+BaseAddress=(0x[0-9a-fA-F]+),\s+EntryPoint=(0x[0-9a-fA-F]+),\s*Type=\w+\)", rptLine)
                if match is None:
                    #DxeIpl (Fixed Flash Address, BaseAddress=0x00fffb4310, EntryPoint=0x00fffb4958)
                    match = re.match("([_a-zA-Z0-9\-]+)\s\(.+BaseAddress=(0x[0-9a-fA-F]+),\s+EntryPoint=(0x[0-9a-fA-F]+)\)", rptLine)
                if match is not None:
                    foundModHdr = True
                    modName = match.group(1)
                    if len(modName) == 36:
                        modName = self.dictGuidNameXref[modName.upper()]
                    self.dictModBase['%s:BASE'  % modName] = int (match.group(2), 16)
                    self.dictModBase['%s:ENTRY' % modName] = int (match.group(3), 16)
                #(GUID=86D70125-BAA3-4296-A62F-602BEBBB9081 .textbaseaddress=0x00fffb4398 .databaseaddress=0x00fffb4178)
                match = re.match("\(GUID=([A-Z0-9\-]+)\s+\.textbaseaddress=(0x[0-9a-fA-F]+)\s+\.databaseaddress=(0x[0-9a-fA-F]+)\)", rptLine)
                if match is not None:
                    if foundModHdr:
                        foundModHdr = False
                    else:
                        modName = match.group(1)
                        if len(modName) == 36:
                            modName = self.dictGuidNameXref[modName.upper()]
                    self.dictModBase['%s:TEXT' % modName] = int (match.group(2), 16)
                    self.dictModBase['%s:DATA' % modName] = int (match.group(3), 16)
            else:
                # 0x00fff8016c __ModuleEntryPoint
                foundModHdr = False
                match =
re.match("^\s+(0x[a-z0-9]+)\s+([_a-zA-Z0-9]+)", rptLine) if match is not None: self.dictSymbolAddress["%s:%s"%(modName, match.group(2))] = match.group(1) rptLine = fdIn.readline() fdIn.close() return 0 # # Parse MOD MAP file # # param [in] moduleName Module name # param [in] mapFile .Fv.map file # # retval 0 Parsed MOD MAP file successfully # retval 1 There is no moduleEntryPoint in modSymbols # retval 2 There is no offset for moduleEntryPoint in modSymbols # def parseModMapFile(self, moduleName, mapFile): # # Get information from mapFile by moduleName in order to create a dictionary # For example, # self.dictSymbolAddress[FspSecCore:___guard_fids_count] = 0x00fffa4778 # modSymbols = {} fdIn = open(mapFile, "r") reportLines = fdIn.readlines() fdIn.close() moduleEntryPoint = "__ModuleEntryPoint" reportLine = reportLines[0] if reportLine.strip().find("Archive member included") != -1: #GCC # 0x0000000000001d55 IoRead8 patchMapFileMatchString = "\s+(0x[0-9a-fA-F]{16})\s+([^\s][^0x][_a-zA-Z0-9\-]+)\s" matchKeyGroupIndex = 2 matchSymbolGroupIndex = 1 prefix = '_' else: #MSFT #0003:00000190 _gComBase 00007a50 SerialPo patchMapFileMatchString = "^\s[0-9a-fA-F]{4}:[0-9a-fA-F]{8}\s+(\w+)\s+([0-9a-fA-F]{8,16}\s+)" matchKeyGroupIndex = 1 matchSymbolGroupIndex = 2 prefix = '' for reportLine in reportLines: match = re.match(patchMapFileMatchString, reportLine) if match is not None: modSymbols[prefix + match.group(matchKeyGroupIndex)] = match.group(matchSymbolGroupIndex) # Handle extra module patchable PCD variable in Linux map since it might have different format # .data._gPcd_BinaryPatch_PcdVpdBaseAddress # 0x0000000000003714 0x4 /tmp/ccmytayk.ltrans1.ltrans.o handleNext = False if matchSymbolGroupIndex == 1: for reportLine in reportLines: if handleNext: handleNext = False pcdName = match.group(1) match = re.match("\s+(0x[0-9a-fA-F]{16})\s+", reportLine) if match is not None: modSymbols[prefix + pcdName] = match.group(1) else: match = re.match("^\s\.data\.(_gPcd_BinaryPatch[_a-zA-Z0-9\-]+)", reportLine) if match is not None: handleNext = True continue if not moduleEntryPoint in modSymbols: if matchSymbolGroupIndex == 2: if not '_ModuleEntryPoint' in modSymbols: return 1 else: moduleEntryPoint = "_ModuleEntryPoint" else: return 1 modEntry = '%s:%s' % (moduleName,moduleEntryPoint) if not modEntry in self.dictSymbolAddress: modKey = '%s:ENTRY' % moduleName if modKey in self.dictModBase: baseOffset = self.dictModBase['%s:ENTRY' % moduleName] - int(modSymbols[moduleEntryPoint], 16) else: return 2 else: baseOffset = int(self.dictSymbolAddress[modEntry], 16) - int(modSymbols[moduleEntryPoint], 16) for symbol in modSymbols: fullSym = "%s:%s" % (moduleName, symbol) if not fullSym in self.dictSymbolAddress: self.dictSymbolAddress[fullSym] = "0x00%08x" % (baseOffset+ int(modSymbols[symbol], 16)) return 0 # # Parse Guid.xref file # # param [in] xrefFile the full directory of Guid.xref file # # retval 0 Parsed Guid.xref file successfully # def parseGuidXrefFile(self, xrefFile): # # Get information from Guid.xref in order to create a GuidNameXref dictionary # The dictGuidNameXref, for example, will be like # dictGuidNameXref [1BA0062E-C779-4582-8566-336AE8F78F09] = FspSecCore # fdIn = open(xrefFile, "r") rptLine = fdIn.readline() while (rptLine != "" ): match = re.match("([0-9a-fA-F\-]+)\s([_a-zA-Z0-9]+)", rptLine) if match is not None: self.dictGuidNameXref[match.group(1).upper()] = match.group(2) rptLine = fdIn.readline() fdIn.close() return 0 # # Get current character # # retval self.string[self.index] # retval 
'' Exception
    #
    def getCurr(self):
        try:
            return self.string[self.index]
        except Exception:
            return ''

    #
    #  Check to see if it is last index
    #
    #  retval      self.index
    #
    def isLast(self):
        return self.index == len(self.string)

    #
    #  Move to next index
    #
    def moveNext(self):
        self.index += 1

    #
    #  Skip space
    #
    def skipSpace(self):
        while not self.isLast():
            if self.getCurr() in ' \t':
                self.moveNext()
            else:
                return

    #
    #  Parse value
    #
    #  retval      value
    #
    def parseValue(self):
        self.skipSpace()
        var = ''
        while not self.isLast():
            char = self.getCurr()
            if char.lower() in '_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789:-':
                var += char
                self.moveNext()
            else:
                break
        if ':' in var:
            partList = var.split(':')
            lenList  = len(partList)
            if lenList != 2 and lenList != 3:
                raise Exception("Unrecognized expression %s" % var)
            modName = partList[lenList-2]
            modOff  = partList[lenList-1]
            if ('-' not in modName) and (modOff[0] in '0123456789'):
                # MOD: OFFSET
                var = self.getModGuid(modName) + ":" + modOff
            if '-' in var:
                # GUID:OFFSET
                value = self.getGuidOff(var)
            else:
                value = self.getSymbols(var)
                self.synUsed = True
        else:
            if var[0] in '0123456789':
                value = self.getNumber(var)
            else:
                value = self.getVariable(var)
        return int(value)

    #
    #  Parse single operation
    #
    #  retval      ~self.parseBrace() or self.parseValue()
    #
    def parseSingleOp(self):
        self.skipSpace()
        char = self.getCurr()
        if char == '~':
            self.moveNext()
            return ~self.parseBrace()
        else:
            return self.parseValue()

    #
    #  Parse symbol of Brace([, {, <)
    #
    #  retval      value or self.parseSingleOp()
    #
    def parseBrace(self):
        self.skipSpace()
        char = self.getCurr()
        parenthesisType = self.parenthesisOpenSet.find(char)
        if parenthesisType >= 0:
            self.moveNext()
            value = self.parseExpr()
            self.skipSpace()
            if self.getCurr() != self.parenthesisCloseSet[parenthesisType]:
                raise Exception("No closing brace")
            self.moveNext()
            if parenthesisType == 1:    # [ : Get content
                value = self.getContent(value)
            elif parenthesisType == 2:  # { : To address
                value = self.toAddress(value)
            elif parenthesisType == 3:  # < : To offset
                value = self.toOffset(value)
            return value
        else:
            return self.parseSingleOp()

    #
    #  Parse symbol of Multiplier(*)
    #
    #  retval      value or self.parseSingleOp()
    #
    def parseMul(self):
        values = [self.parseBrace()]
        while True:
            self.skipSpace()
            char = self.getCurr()
            if char == '*':
                self.moveNext()
                values.append(self.parseBrace())
            else:
                break
        value = 1
        for each in values:
            value *= each
        return value

    #
    #  Parse symbol of And(&) and Or(|)
    #
    #  retval      value
    #
    def parseAndOr(self):
        value = self.parseMul()
        while True:
            self.skipSpace()
            char = self.getCurr()
            if char == '&':
                self.moveNext()
                value &= self.parseMul()
            elif char == '|':
                self.moveNext()
                value |= self.parseMul()
            else:
                break
        return value

    #
    #  Parse symbol of Add(+) and Minus(-)
    #
    #  retval      sum(values)
    #
    def parseAddMinus(self):
        values = [self.parseAndOr()]
        while True:
            self.skipSpace()
            char = self.getCurr()
            if char == '+':
                self.moveNext()
                values.append(self.parseAndOr())
            elif char == '-':
                self.moveNext()
                values.append(-1 * self.parseAndOr())
            else:
                break
        return sum(values)

    #
    #  Parse expression
    #
    #  retval      self.parseAddMinus()
    #
    def parseExpr(self):
        return self.parseAddMinus()

    #
    #  Get result
    #
    #  retval      value
    #
    def getResult(self):
        value = self.parseExpr()
        self.skipSpace()
        if not self.isLast():
            raise Exception("Unexpected character found '%s'" % self.getCurr())
        return value

    #
    #  Get module GUID
    #
    #  retval      value
    #
    def getModGuid(self, var):
        guid = (guid for guid, name in self.dictGuidNameXref.items() if name == var)
        try:
            value = next(guid)
        except
Exception: raise Exception("Unknown module name %s !" % var) return value # # Get variable # # retval value # def getVariable(self, var): value = self.dictVariable.get(var, None) if value == None: raise Exception("Unrecognized variable '%s'" % var) return value # # Get number # # retval value # def getNumber(self, var): var = var.strip() if var.startswith('0x'): # HEX value = int(var, 16) else: value = int(var, 10) return value # # Get content # # param [in] value # # retval value # def getContent(self, value): return readDataFromFile (self.fdFile, self.toOffset(value), 4) # # Change value to address # # param [in] value # # retval value # def toAddress(self, value): if value < self.fdSize: value = value + self.fdBase return value # # Change value to offset # # param [in] value # # retval value # def toOffset(self, value): offset = None for fvInfo in self.fvList: if (value >= fvInfo['Base']) and (value < fvInfo['Base'] + fvInfo['Size']): offset = value - fvInfo['Base'] + fvInfo['Offset'] if not offset: if (value >= self.fdBase) and (value < self.fdBase + self.fdSize): offset = value - self.fdBase else: offset = value if offset >= self.fdSize: raise Exception("Invalid file offset 0x%08x !" % value) return offset # # Get GUID offset # # param [in] value # # retval value # def getGuidOff(self, value): # GUID:Offset symbolName = value.split(':') if len(symbolName) == 3: fvName = symbolName[0].upper() keyName = '%s:%s' % (fvName, symbolName[1]) offStr = symbolName[2] elif len(symbolName) == 2: keyName = symbolName[0] offStr = symbolName[1] if keyName in self.dictFfsOffset: value = (int(self.dictFfsOffset[keyName], 16) + int(offStr, 16)) & 0xFFFFFFFF else: raise Exception("Unknown GUID %s !" % value) return value # # Get symbols # # param [in] value # # retval ret # def getSymbols(self, value): if value in self.dictSymbolAddress: # Module:Function ret = int (self.dictSymbolAddress[value], 16) else: raise Exception("Unknown symbol %s !" % value) return ret # # Evaluate symbols # # param [in] expression # param [in] isOffset # # retval value & 0xFFFFFFFF # def evaluate(self, expression, isOffset): self.index = 0 self.synUsed = False self.string = expression value = self.getResult() if isOffset: if self.synUsed: # Consider it as an address first value = self.toOffset(value) if value & 0x80000000: # Consider it as a negative offset next offset = (~value & 0xFFFFFFFF) + 1 if offset < self.fdSize: value = self.fdSize - offset if value >= self.fdSize: raise Exception("Invalid offset expression !") return value & 0xFFFFFFFF # # Print out the usage # def Usage(): print ("PatchFv Version 0.60") print ("Usage: \n\tPatchFv FvBuildDir [FvFileBaseNames:]FdFileBaseNameToPatch \"Offset, Value\"") print ("\tPatchFv FdFileDir FdFileName \"Offset, Value\"") def main(): # # Parse the options and args # symTables = Symbols() # # If the arguments are less than 4, then return an error. # if len(sys.argv) < 4: Usage() return 1 # # If it fails to create dictionaries, then return an error. # if symTables.createDicts(sys.argv[1], sys.argv[2]) != 0: print ("ERROR: Failed to create symbol dictionary!!") return 2 # # Get FD file and size # fdFile = symTables.getFdFile() fdSize = symTables.getFdSize() try: # # Check to see if FSP header is valid # ret = IsFspHeaderValid(fdFile) if ret == False: raise Exception ("The FSP header is not valid. 
Stop patching FD.") comment = "" for fvFile in sys.argv[3:]: # # Check to see if it has enough arguments # items = fvFile.split(",") if len (items) < 2: raise Exception("Expect more arguments for '%s'!" % fvFile) comment = "" command = "" params = [] for item in items: item = item.strip() if item.startswith("@"): comment = item[1:] elif item.startswith("$"): command = item[1:] else: if len(params) == 0: isOffset = True else : isOffset = False # # Parse symbols then append it to params # params.append (symTables.evaluate(item, isOffset)) # # Patch a new value into FD file if it is not a command # if command == "": # Patch a DWORD if len (params) == 2: offset = params[0] value = params[1] oldvalue = readDataFromFile(fdFile, offset, 4) ret = patchDataInFile (fdFile, offset, value, 4) - 4 else: raise Exception ("Patch command needs 2 parameters !") if ret: raise Exception ("Patch failed for offset 0x%08X" % offset) else: print ("Patched offset 0x%08X:[%08X] with value 0x%08X # %s" % (offset, oldvalue, value, comment)) elif command == "COPY": # # Copy binary block from source to destination # if len (params) == 3: src = symTables.toOffset(params[0]) dest = symTables.toOffset(params[1]) clen = symTables.toOffset(params[2]) if (dest + clen <= fdSize) and (src + clen <= fdSize): oldvalue = readDataFromFile(fdFile, src, clen) ret = patchDataInFile (fdFile, dest, oldvalue, clen) - clen else: raise Exception ("Copy command OFFSET or LENGTH parameter is invalid !") else: raise Exception ("Copy command needs 3 parameters !") if ret: raise Exception ("Copy failed from offset 0x%08X to offset 0x%08X!" % (src, dest)) else : print ("Copied %d bytes from offset 0x%08X ~ offset 0x%08X # %s" % (clen, src, dest, comment)) else: raise Exception ("Unknown command %s!" % command) return 0 except Exception as ex: print ("ERROR: %s" % ex) return 1 if __name__ == '__main__': sys.exit(main())
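# ---------------------------------------------------------------------------
# Editor's illustration (not part of PatchFv.py): readDataFromFile and
# patchDataInFile above both interpret an offset with bit 31 set as an
# end-relative position in the FD file. The helper below reproduces that
# conversion in isolation; the 16 MB file size in the example is an
# assumption for demonstration only.
def to_file_offset(offset, fsize):
    # Offsets at or above 0x80000000 wrap around from the end of the file.
    offval = offset & 0xFFFFFFFF
    if offval & 0x80000000:
        offval = fsize - (0xFFFFFFFF - offval + 1)
    return offval

# Example: in a 16 MB (0x1000000) FD, 0xFFFFFFFC names the last DWORD:
# to_file_offset(0xFFFFFFFC, 0x1000000) == 0xFFFFFC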
edk2-master
IntelFsp2Pkg/Tools/PatchFv.py
## @ GenCfgOpt.py # # Copyright (c) 2014 - 2022, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # ## import os import re import sys import struct from datetime import date from functools import reduce # Generated file copyright header __copyright_txt__ = """## @file # # THIS IS AUTO-GENERATED FILE BY BUILD TOOLS AND PLEASE DO NOT MAKE MODIFICATION. # # This file lists all VPD informations for a platform collected by build.exe. # # Copyright (c) %4d, Intel Corporation. All rights reserved.<BR> # This program and the accompanying materials # are licensed and made available under the terms and conditions of the BSD License # which accompanies this distribution. The full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # """ __copyright_bsf__ = """/** @file Boot Setting File for Platform Configuration. Copyright (c) %4d, Intel Corporation. All rights reserved.<BR> This program and the accompanying materials are licensed and made available under the terms and conditions of the BSD License which accompanies this distribution. The full text of the license may be found at http://opensource.org/licenses/bsd-license.php THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. This file is automatically generated. Please do NOT modify !!! **/ """ __copyright_h__ = """/** @file Copyright (c) %4d, Intel Corporation. All rights reserved.<BR> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. This file is automatically generated. Please do NOT modify !!! 
**/ """ BuildOptionPcd = [] class CLogicalExpression: def __init__(self): self.index = 0 self.string = '' def errExit(self, err = ''): print ("ERROR: Express parsing for:") print (" %s" % self.string) print (" %s^" % (' ' * self.index)) if err: print ("INFO : %s" % err) raise SystemExit def getNonNumber (self, n1, n2): if not n1.isdigit(): return n1 if not n2.isdigit(): return n2 return None def getCurr(self, lens = 1): try: if lens == -1: return self.string[self.index :] else: if self.index + lens > len(self.string): lens = len(self.string) - self.index return self.string[self.index : self.index + lens] except Exception: return '' def isLast(self): return self.index == len(self.string) def moveNext(self, len = 1): self.index += len def skipSpace(self): while not self.isLast(): if self.getCurr() in ' \t': self.moveNext() else: return def normNumber (self, val): return True if val else False def getNumber(self, var): var = var.strip() if re.match('^0x[a-fA-F0-9]+$', var): value = int(var, 16) elif re.match('^[+-]?\d+$', var): value = int(var, 10) else: value = None return value def parseValue(self): self.skipSpace() var = '' while not self.isLast(): char = self.getCurr() if re.match('^[\w.]', char): var += char self.moveNext() else: break val = self.getNumber(var) if val is None: value = var else: value = "%d" % val return value def parseSingleOp(self): self.skipSpace() if re.match('^NOT\W', self.getCurr(-1)): self.moveNext(3) op = self.parseBrace() val = self.getNumber (op) if val is None: self.errExit ("'%s' is not a number" % op) return "%d" % (not self.normNumber(int(op))) else: return self.parseValue() def parseBrace(self): self.skipSpace() char = self.getCurr() if char == '(': self.moveNext() value = self.parseExpr() self.skipSpace() if self.getCurr() != ')': self.errExit ("Expecting closing brace or operator") self.moveNext() return value else: value = self.parseSingleOp() return value def parseCompare(self): value = self.parseBrace() while True: self.skipSpace() char = self.getCurr() if char in ['<', '>']: self.moveNext() next = self.getCurr() if next == '=': op = char + next self.moveNext() else: op = char result = self.parseBrace() test = self.getNonNumber(result, value) if test is None: value = "%d" % self.normNumber(eval (value + op + result)) else: self.errExit ("'%s' is not a valid number for comparision" % test) elif char in ['=', '!']: op = self.getCurr(2) if op in ['==', '!=']: self.moveNext(2) result = self.parseBrace() test = self.getNonNumber(result, value) if test is None: value = "%d" % self.normNumber((eval (value + op + result))) else: value = "%d" % self.normNumber(eval ("'" + value + "'" + op + "'" + result + "'")) else: break else: break return value def parseAnd(self): value = self.parseCompare() while True: self.skipSpace() if re.match('^AND\W', self.getCurr(-1)): self.moveNext(3) result = self.parseCompare() test = self.getNonNumber(result, value) if test is None: value = "%d" % self.normNumber(int(value) & int(result)) else: self.errExit ("'%s' is not a valid op number for AND" % test) else: break return value def parseOrXor(self): value = self.parseAnd() op = None while True: self.skipSpace() op = None if re.match('^XOR\W', self.getCurr(-1)): self.moveNext(3) op = '^' elif re.match('^OR\W', self.getCurr(-1)): self.moveNext(2) op = '|' else: break if op: result = self.parseAnd() test = self.getNonNumber(result, value) if test is None: value = "%d" % self.normNumber(eval (value + op + result)) else: self.errExit ("'%s' is not a valid op number for XOR/OR" % 
test) return value def parseExpr(self): return self.parseOrXor() def getResult(self): value = self.parseExpr() self.skipSpace() if not self.isLast(): self.errExit ("Unexpected character found '%s'" % self.getCurr()) test = self.getNumber(value) if test is None: self.errExit ("Result '%s' is not a number" % value) return int(value) def evaluateExpress (self, Expr): self.index = 0 self.string = Expr if self.getResult(): Result = True else: Result = False return Result class CGenCfgOpt: def __init__(self, Mode = ''): self.Debug = False self.Error = '' self.Mode = Mode self._GlobalDataDef = """ GlobalDataDef SKUID = 0, "DEFAULT" EndGlobalData """ self._BuidinOptionTxt = """ List &EN_DIS Selection 0x1 , "Enabled" Selection 0x0 , "Disabled" EndList """ self._BsfKeyList = ['FIND','NAME','HELP','TYPE','PAGE', 'PAGES', 'BLOCK', 'OPTION','CONDITION','ORDER', 'MARKER', 'SUBT'] self._HdrKeyList = ['HEADER','STRUCT', 'EMBED', 'COMMENT'] self._BuidinOption = {'$EN_DIS' : 'EN_DIS'} self._MacroDict = {} self._VarDict = {} self._PcdsDict = {} self._CfgBlkDict = {} self._CfgPageDict = {} self._BsfTempDict = {} self._CfgItemList = [] self._DscLines = [] self._DscFile = '' self._MapVer = 0 self._DscTime = 0 def ParseMacros (self, MacroDefStr): # ['-DABC=1', '-D', 'CFG_DEBUG=1', '-D', 'CFG_OUTDIR=Build'] self._MacroDict = {} IsExpression = False for Macro in MacroDefStr: if Macro.startswith('-D'): IsExpression = True if len(Macro) > 2: Macro = Macro[2:] else : continue if IsExpression: IsExpression = False Match = re.match("(\w+)=(.+)", Macro) if Match: self._MacroDict[Match.group(1)] = Match.group(2) else: Match = re.match("(\w+)", Macro) if Match: self._MacroDict[Match.group(1)] = '' if len(self._MacroDict) == 0: Error = 1 else: Error = 0 if self.Debug: print ("INFO : Macro dictionary:") for Each in self._MacroDict: print (" $(%s) = [ %s ]" % (Each , self._MacroDict[Each])) return Error def EvaulateIfdef (self, Macro): Result = Macro in self._MacroDict if self.Debug: print ("INFO : Eval Ifdef [%s] : %s" % (Macro, Result)) return Result def ExpandMacros (self, Input, Preserve = False): Line = Input Match = re.findall("\$\(\w+\)", Input) if Match: for Each in Match: Variable = Each[2:-1] if Variable in self._MacroDict: Line = Line.replace(Each, self._MacroDict[Variable]) else: if self.Debug: print ("WARN : %s is not defined" % Each) if not Preserve: Line = Line.replace(Each, Each[2:-1]) return Line def ExpandPcds (self, Input): Line = Input Match = re.findall("(\w+\.\w+)", Input) if Match: for PcdName in Match: if PcdName in self._PcdsDict: Line = Line.replace(PcdName, self._PcdsDict[PcdName]) else: if self.Debug: print ("WARN : %s is not defined" % PcdName) return Line def EvaluateExpress (self, Expr): ExpExpr = self.ExpandPcds(Expr) ExpExpr = self.ExpandMacros(ExpExpr) LogExpr = CLogicalExpression() Result = LogExpr.evaluateExpress (ExpExpr) if self.Debug: print ("INFO : Eval Express [%s] : %s" % (Expr, Result)) return Result def ValueToByteArray (self, ValueStr, Length): Match = re.match("\{\s*FILE:(.+)\}", ValueStr) if Match: FileList = Match.group(1).split(',') Result = bytearray() for File in FileList: File = File.strip() BinPath = os.path.join(os.path.dirname(self._DscFile), File) Result.extend(bytearray(open(BinPath, 'rb').read())) else: try: Result = bytearray(self.ValueToList(ValueStr, Length)) except ValueError as e: raise Exception ("Bytes in '%s' must be in range 0~255 !" 
% ValueStr) if len(Result) < Length: Result.extend(b'\x00' * (Length - len(Result))) elif len(Result) > Length: raise Exception ("Value '%s' is too big to fit into %d bytes !" % (ValueStr, Length)) return Result[:Length] def ValueToList (self, ValueStr, Length): if ValueStr[0] == '{': Result = [] BinList = ValueStr[1:-1].split(',') InBitField = False LastInBitField = False Value = 0 BitLen = 0 for Element in BinList: InBitField = False Each = Element.strip() if len(Each) == 0: pass else: if Each[0] in ['"', "'"]: Result.extend(list(bytearray(Each[1:-1], 'utf-8'))) elif ':' in Each: Match = re.match("(.+):(\d+)b", Each) if Match is None: raise Exception("Invald value list format '%s' !" % Each) InBitField = True CurrentBitLen = int(Match.group(2)) CurrentValue = ((self.EvaluateExpress(Match.group(1)) & (1<<CurrentBitLen) - 1)) << BitLen else: Result.append(self.EvaluateExpress(Each.strip())) if InBitField: Value += CurrentValue BitLen += CurrentBitLen if LastInBitField and ((not InBitField) or (Element == BinList[-1])): if BitLen % 8 != 0: raise Exception("Invald bit field length!") Result.extend(Val2Bytes(Value, BitLen // 8)) Value = 0 BitLen = 0 LastInBitField = InBitField elif ValueStr.startswith("'") and ValueStr.endswith("'"): Result = Str2Bytes (ValueStr, Length) elif ValueStr.startswith('"') and ValueStr.endswith('"'): Result = Str2Bytes (ValueStr, Length) else: Result = Val2Bytes (self.EvaluateExpress(ValueStr), Length) return Result def FormatListValue(self, ConfigDict): Struct = ConfigDict['struct'] if Struct not in ['UINT8','UINT16','UINT32','UINT64']: return dataarray = [] binlist = ConfigDict['value'][1:-1].split(',') for each in binlist: each = each.strip() if each.startswith('0x'): value = int(each, 16) else: value = int(each) dataarray.append(value) unit = int(Struct[4:]) / 8 if int(ConfigDict['length']) != unit * len(dataarray): raise Exception("Array size is not proper for '%s' !" % ConfigDict['cname']) bytearray = [] for each in dataarray: value = each for loop in range(int(unit)): bytearray.append("0x%02X" % (value & 0xFF)) value = value >> 8 newvalue = '{' + ','.join(bytearray) + '}' ConfigDict['value'] = newvalue return "" def ParseDscFile (self, DscFile, FvDir): Hardcode = False AutoAlign = False self._CfgItemList = [] self._CfgPageDict = {} self._CfgBlkDict = {} self._DscFile = DscFile self._FvDir = FvDir self._DscLines = [] self._BsfTempDict = {} # Initial DSC time is parent DSC time. self._DscTime = os.path.getmtime(DscFile) CfgDict = {} IsDefSect = False IsPcdSect = False IsUpdSect = False IsVpdSect = False IsTmpSect = False TemplateName = '' IfStack = [] ElifStack = [] Error = 0 ConfigDict = {} if type(DscFile) is list: # it is DSC lines already DscLines = DscFile self._DscFile = '.' else: DscFd = open(DscFile, "r") DscLines = DscFd.readlines() DscFd.close() self._DscFile = DscFile SkipLines = 0 MaxAlign = 32 #Default align to 32, but if there are 64 bit unit, align to 64 SizeAlign = 0 #record the struct max align Base = 0 #Starting offset of sub-structure. 
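        # (Added note, not in the original tool) The loop below consumes one
        # DSC line at a time. UPD items in [PcdsDynamicVpd.Upd] follow this
        # general shape (an illustrative sketch, not a line from a real DSC):
        #
        #     gTokenSpaceGuid.PcdExample | 0x0020 | 0x04 | 0x12345678
        #     <space>.<cname>            | offset | size | value
        #
        # A '*' in the offset column selects auto-align mode; an explicit hex
        # offset selects hardcode mode. Mixing the two modes in one DSC is
        # rejected further below.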
        while len(DscLines):
            DscLine = DscLines.pop(0).strip()
            if SkipLines == 0:
                self._DscLines.append (DscLine)
            else:
                SkipLines = SkipLines - 1
            if len(DscLine) == 0:
                continue

            Handle = False
            Match  = re.match("^\[(.+)\]", DscLine)
            if Match is not None:
                IsDefSect = False
                IsPcdSect = False
                IsVpdSect = False
                IsUpdSect = False
                IsTmpSect = False
                SectionName = Match.group(1).lower()
                if SectionName == "Defines".lower():
                    IsDefSect = True
                if (SectionName == "PcdsFeatureFlag".lower() or SectionName == "PcdsFixedAtBuild".lower()):
                    IsPcdSect = True
                elif SectionName == "PcdsDynamicVpd.Tmp".lower():
                    IsTmpSect = True
                elif SectionName == "PcdsDynamicVpd.Upd".lower():
                    ConfigDict = {}
                    ConfigDict['header']    = 'ON'
                    ConfigDict['region']    = 'UPD'
                    ConfigDict['order']     = -1
                    ConfigDict['page']      = ''
                    ConfigDict['name']      = ''
                    ConfigDict['find']      = ''
                    ConfigDict['marker']    = ''
                    ConfigDict['struct']    = ''
                    ConfigDict['embed']     = ''
                    ConfigDict['comment']   = ''
                    ConfigDict['subreg']    = []
                    ConfigDict['condition'] = ''
                    ConfigDict['option']    = ''
                    IsUpdSect = True
                    Offset    = 0
            else:
                if IsDefSect or IsPcdSect or IsUpdSect or IsVpdSect or IsTmpSect:
                    Match = False if DscLine[0] != '!' else True
                    if Match:
                        Match = re.match("^!(else|endif|ifdef|ifndef|if|elseif|include)\s*(.+)?$", DscLine.split("#")[0])
                    Keyword   = Match.group(1) if Match else ''
                    Remaining = Match.group(2) if Match else ''
                    Remaining = '' if Remaining is None else Remaining.strip()
                    if Keyword in ['if', 'elseif', 'ifdef', 'ifndef', 'include'] and not Remaining:
                        raise Exception ("ERROR: Expression is expected after '!if' or '!elseif' for line '%s'" % DscLine)
                    if Keyword == 'else':
                        if IfStack:
                            IfStack[-1] = not IfStack[-1]
                        else:
                            raise Exception ("ERROR: No paired '!if' found for '!else' for line '%s'" % DscLine)
                    elif Keyword == 'endif':
                        if IfStack:
                            IfStack.pop()
                            Level = ElifStack.pop()
                            if Level > 0:
                                del IfStack[-Level:]
                        else:
                            raise Exception ("ERROR: No paired '!if' found for '!endif' for line '%s'" % DscLine)
                    elif Keyword == 'ifdef' or Keyword == 'ifndef':
                        Result = self.EvaulateIfdef (Remaining)
                        if Keyword == 'ifndef':
                            Result = not Result
                        IfStack.append(Result)
                        ElifStack.append(0)
                    elif Keyword == 'if' or Keyword == 'elseif':
                        Result = self.EvaluateExpress(Remaining)
                        if Keyword == "if":
                            ElifStack.append(0)
                            IfStack.append(Result)
                        else:   # elseif
                            if IfStack:
                                IfStack[-1] = not IfStack[-1]
                                IfStack.append(Result)
                                ElifStack[-1] = ElifStack[-1] + 1
                            else:
                                raise Exception ("ERROR: No paired '!if' found for '!elseif' for line '%s'" % DscLine)
                    else:
                        if IfStack:
                            Handle = reduce(lambda x,y: x and y, IfStack)
                        else:
                            Handle = True
                        if Handle:
                            Match = re.match("!include\s+(.+)", DscLine)
                            if Match:
                                IncludeFilePath = Match.group(1)
                                IncludeFilePath = self.ExpandMacros(IncludeFilePath)
                                # Initialize so a failed search below is reported as an
                                # error instead of raising NameError on an unbound name.
                                IncludeDsc = None
                                PackagesPath = os.getenv("PACKAGES_PATH")
                                if PackagesPath:
                                    for PackagePath in PackagesPath.split(os.pathsep):
                                        IncludeFilePathAbs = os.path.join(os.path.normpath(PackagePath), os.path.normpath(IncludeFilePath))
                                        if os.path.exists(IncludeFilePathAbs):
                                            IncludeDsc = open(IncludeFilePathAbs, "r")
                                            break
                                else:
                                    IncludeDsc = open(IncludeFilePath, "r")
                                if IncludeDsc is None:
                                    print("ERROR: Cannot open file '%s'" % IncludeFilePath)
                                    raise SystemExit
                                # Update DscTime when newer DSC time found.
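                                # (Added note) After all !include processing, self._DscTime
                                # holds the newest mtime across the parent DSC and every
                                # included DSC; NoDscFileChange() later compares it against
                                # the output file's mtime, so unchanged DSCs skip
                                # regeneration with return code 256.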
CurrentDscTime = os.path.getmtime(os.path.realpath(IncludeDsc.name)) if CurrentDscTime > self._DscTime: self._DscTime = CurrentDscTime NewDscLines = IncludeDsc.readlines() IncludeDsc.close() DscLines = NewDscLines + DscLines del self._DscLines[-1] Offset = 0 else: if DscLine.startswith('!'): print("ERROR: Unrecognized directive for line '%s'" % DscLine) raise SystemExit if not Handle: del self._DscLines[-1] continue if IsDefSect: #DEFINE UPD_TOOL_GUID = 8C3D856A-9BE6-468E-850A-24F7A8D38E09 #DEFINE FSP_T_UPD_TOOL_GUID = 34686CA3-34F9-4901-B82A-BA630F0714C6 #DEFINE FSP_M_UPD_TOOL_GUID = 39A250DB-E465-4DD1-A2AC-E2BD3C0E2385 #DEFINE FSP_S_UPD_TOOL_GUID = CAE3605B-5B34-4C85-B3D7-27D54273C40F Match = re.match("^\s*(?:DEFINE\s+)*(\w+)\s*=\s*(.+)", DscLine) if Match: self._MacroDict[Match.group(1)] = self.ExpandMacros(Match.group(2)) if self.Debug: print ("INFO : DEFINE %s = [ %s ]" % (Match.group(1), self.ExpandMacros(Match.group(2)))) elif IsPcdSect: #gSiPkgTokenSpaceGuid.PcdTxtEnable|FALSE #gSiPkgTokenSpaceGuid.PcdOverclockEnable|TRUE Match = re.match("^\s*([\w\.]+)\s*\|\s*(\w+)", DscLine) if Match: self._PcdsDict[Match.group(1)] = Match.group(2) if self.Debug: print ("INFO : PCD %s = [ %s ]" % (Match.group(1), Match.group(2))) i = 0 while i < len(BuildOptionPcd): Match = re.match("\s*([\w\.]+)\s*\=\s*(\w+)", BuildOptionPcd[i]) if Match: self._PcdsDict[Match.group(1)] = Match.group(2) i += 1 elif IsTmpSect: # !BSF DEFT:{GPIO_TMPL:START} Match = re.match("^\s*#\s+(!BSF)\s+DEFT:{(.+?):(START|END)}", DscLine) if Match: if Match.group(3) == 'START' and not TemplateName: TemplateName = Match.group(2).strip() self._BsfTempDict[TemplateName] = [] if Match.group(3) == 'END' and (TemplateName == Match.group(2).strip()) and TemplateName: TemplateName = '' else: if TemplateName: Match = re.match("^!include\s*(.+)?$", DscLine) if Match: continue self._BsfTempDict[TemplateName].append(DscLine) else: Match = re.match("^\s*#\s+(!BSF|@Bsf|!HDR)\s+(.+)", DscLine) if Match: Remaining = Match.group(2) if Match.group(1) == '!BSF' or Match.group(1) == '@Bsf': Match = re.match("(?:^|.+\s+)PAGES:{(.+?)}", Remaining) if Match: # !BSF PAGES:{HSW:"Haswell System Agent", LPT:"Lynx Point PCH"} PageList = Match.group(1).split(',') for Page in PageList: Page = Page.strip() Match = re.match("(\w+):\"(.+)\"", Page) if Match != None: self._CfgPageDict[Match.group(1)] = Match.group(2) Match = re.match("(?:^|.+\s+)BLOCK:{NAME:\"(.+)\"\s*,\s*VER:\"(.+)\"\s*}", Remaining) if Match: self._CfgBlkDict['name'] = Match.group(1) self._CfgBlkDict['ver'] = Match.group(2) for Key in self._BsfKeyList: Match = re.match("(?:^|.+\s+)%s:{(.+?)}" % Key, Remaining) if Match: if Key in ['NAME', 'HELP', 'OPTION'] and Match.group(1).startswith('+'): ConfigDict[Key.lower()] += Match.group(1)[1:] else: ConfigDict[Key.lower()] = Match.group(1) else: for Key in self._HdrKeyList: Match = re.match("(?:^|.+\s+)%s:{(.+?)}" % Key, Remaining) if Match: ConfigDict[Key.lower()] = Match.group(1) Match = re.match("^\s*#\s+@Prompt\s+(.+)", DscLine) if Match: ConfigDict['name'] = Match.group(1) Match = re.match("^\s*#\s*@ValidList\s*(.+)\s*\|\s*(.+)\s*\|\s*(.+)\s*", DscLine) if Match: if Match.group(2).strip() in self._BuidinOption: ConfigDict['option'] = Match.group(2).strip() else: OptionValueList = Match.group(2).split(',') OptionStringList = Match.group(3).split(',') Index = 0 for Option in OptionValueList: Option = Option.strip() ConfigDict['option'] = ConfigDict['option'] + str(Option) + ':' + OptionStringList[Index].strip() Index += 1 if Index in 
range(len(OptionValueList)): ConfigDict['option'] += ', ' ConfigDict['type'] = "Combo" Match = re.match("^\s*#\s*@ValidRange\s*(.+)\s*\|\s*(.+)\s*-\s*(.+)\s*", DscLine) if Match: if "0x" in Match.group(2) or "0x" in Match.group(3): ConfigDict['type'] = "EditNum, HEX, (%s,%s)" % (Match.group(2), Match.group(3)) else: ConfigDict['type'] = "EditNum, DEC, (%s,%s)" % (Match.group(2), Match.group(3)) Match = re.match("^\s*##\s+(.+)", DscLine) if Match: ConfigDict['help'] = Match.group(1) # Check VPD/UPD if IsUpdSect: Match = re.match("^([_a-zA-Z0-9]+).([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+|\*)\s*\|\s*(\d+|0x[0-9a-fA-F]+)\s*\|\s*(.+)",DscLine) else: Match = re.match("^([_a-zA-Z0-9]+).([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+)(?:\s*\|\s*(.+))?", DscLine) if Match: ConfigDict['space'] = Match.group(1) ConfigDict['cname'] = Match.group(2) if Match.group(3) != '*': Hardcode = True Offset = int (Match.group(3), 16) else: AutoAlign = True if Hardcode and AutoAlign: print("Hardcode and auto-align mixed mode is not supported by GenCfgOpt") raise SystemExit ConfigDict['offset'] = Offset if ConfigDict['order'] == -1: ConfigDict['order'] = ConfigDict['offset'] << 8 else: (Major, Minor) = ConfigDict['order'].split('.') ConfigDict['order'] = (int (Major, 16) << 8 ) + int (Minor, 16) if IsUpdSect: Value = Match.group(5).strip() if Match.group(4).startswith("0x"): Length = int (Match.group(4), 16) else : Length = int (Match.group(4)) Offset += Length else: Value = Match.group(4) if Value is None: Value = '' Value = Value.strip() if '|' in Value: Match = re.match("^.+\s*\|\s*(.+)", Value) if Match: Value = Match.group(1) Length = -1 ConfigDict['length'] = Length Match = re.match("\$\((\w+)\)", Value) if Match: if Match.group(1) in self._MacroDict: Value = self._MacroDict[Match.group(1)] ConfigDict['value'] = Value if (len(Value) > 0) and (Value[0] == '{'): Value = self.FormatListValue(ConfigDict) if ConfigDict['name'] == '': # Clear BSF specific items ConfigDict['bsfname'] = '' ConfigDict['help'] = '' ConfigDict['type'] = '' ConfigDict['option'] = '' if IsUpdSect and AutoAlign: ItemLength = int(ConfigDict['length']) ItemOffset = int(ConfigDict['offset']) ItemStruct = ConfigDict['struct'] Unit = 1 if ItemLength in [1, 2, 4, 8] and not ConfigDict['value'].startswith('{'): Unit = ItemLength # If there are 64 bit unit, align to 64 if Unit == 8: MaxAlign = 64 SizeAlign = 8 if ItemStruct != '': UnitDict = {'UINT8':1, 'UINT16':2, 'UINT32':4, 'UINT64':8} if ItemStruct in ['UINT8', 'UINT16', 'UINT32', 'UINT64']: Unit = UnitDict[ItemStruct] # If there are 64 bit unit, align to 64 if Unit == 8: MaxAlign = 64 SizeAlign = max(SizeAlign, Unit) if (ConfigDict['embed'].find(':START') != -1): Base = ItemOffset SubOffset = ItemOffset - Base SubRemainder = SubOffset % Unit if SubRemainder: Diff = Unit - SubRemainder Offset = Offset + Diff ItemOffset = ItemOffset + Diff if (ConfigDict['embed'].find(':END') != -1): Remainder = Offset % (MaxAlign/8) # MaxAlign is either 32 or 64 if Remainder: Diff = int((MaxAlign/8) - Remainder) Offset = Offset + Diff ItemOffset = ItemOffset + Diff MaxAlign = 32 # Reset to default 32 align when struct end if (ConfigDict['cname'] == 'UpdTerminator'): # ItemLength is the size of UpdTerminator # Itemlength might be 16, 32, or 64 # Struct align to 64 if UpdTerminator # or struct size is 64 bit, else align to 32 Remainder = Offset % max(ItemLength/8, 4, SizeAlign) Offset = Offset + ItemLength if Remainder: Diff = int(max(ItemLength/8, 4, SizeAlign) - Remainder) ItemOffset = ItemOffset + Diff ConfigDict['offset'] = 
ItemOffset self._CfgItemList.append(ConfigDict.copy()) ConfigDict['name'] = '' ConfigDict['find'] = '' ConfigDict['struct'] = '' ConfigDict['embed'] = '' ConfigDict['comment'] = '' ConfigDict['marker'] = '' ConfigDict['order'] = -1 ConfigDict['subreg'] = [] ConfigDict['option'] = '' else: # It could be a virtual item as below # !BSF FIELD:{SerialDebugPortAddress0:1} # or # @Bsf FIELD:{SerialDebugPortAddress0:1b} Match = re.match("^\s*#\s+(!BSF|@Bsf)\s+FIELD:{(.+):(\d+)([Bb])?}", DscLine) if Match: SubCfgDict = ConfigDict.copy() if (Match.group(4) == None) or (Match.group(4) == 'B'): UnitBitLen = 8 elif Match.group(4) == 'b': UnitBitLen = 1 else: print("ERROR: Invalide BSF FIELD length for line '%s'" % DscLine) raise SystemExit SubCfgDict['cname'] = Match.group(2) SubCfgDict['bitlength'] = int (Match.group(3)) * UnitBitLen if SubCfgDict['bitlength'] > 0: LastItem = self._CfgItemList[-1] if len(LastItem['subreg']) == 0: SubOffset = 0 else: SubOffset = LastItem['subreg'][-1]['bitoffset'] + LastItem['subreg'][-1]['bitlength'] SubCfgDict['bitoffset'] = SubOffset LastItem['subreg'].append (SubCfgDict.copy()) ConfigDict['name'] = '' return Error def GetBsfBitFields (self, subitem, bytes): start = subitem['bitoffset'] end = start + subitem['bitlength'] bitsvalue = ''.join('{0:08b}'.format(i) for i in bytes[::-1]) bitsvalue = bitsvalue[::-1] bitslen = len(bitsvalue) if start > bitslen or end > bitslen: raise Exception ("Invalid bits offset [%d,%d] %d for %s" % (start, end, bitslen, subitem['name'])) return '0x%X' % (int(bitsvalue[start:end][::-1], 2)) def UpdateSubRegionDefaultValue (self): Error = 0 for Item in self._CfgItemList: if len(Item['subreg']) == 0: continue bytearray = [] if Item['value'][0] == '{': binlist = Item['value'][1:-1].split(',') for each in binlist: each = each.strip() if each.startswith('0x'): value = int(each, 16) else: value = int(each) bytearray.append(value) else: if Item['value'].startswith('0x'): value = int(Item['value'], 16) else: value = int(Item['value']) idx = 0 while idx < Item['length']: bytearray.append(value & 0xFF) value = value >> 8 idx = idx + 1 for SubItem in Item['subreg']: valuestr = self.GetBsfBitFields(SubItem, bytearray) SubItem['value'] = valuestr return Error def NoDscFileChange (self, OutPutFile): NoFileChange = True if not os.path.exists(OutPutFile): NoFileChange = False else: OutputTime = os.path.getmtime(OutPutFile) if self._DscTime > OutputTime: NoFileChange = False return NoFileChange def CreateSplitUpdTxt (self, UpdTxtFile): GuidList = ['FSP_T_UPD_TOOL_GUID','FSP_M_UPD_TOOL_GUID','FSP_S_UPD_TOOL_GUID','FSP_I_UPD_TOOL_GUID'] SignatureList = ['0x545F', '0x4D5F','0x535F','0x495F'] # _T, _M, _S and _I signature for FSPT, FSPM, FSPS, FSPI for Index in range(len(GuidList)): UpdTxtFile = '' FvDir = self._FvDir if GuidList[Index] not in self._MacroDict: NoFSPI = False if GuidList[Index] == 'FSP_I_UPD_TOOL_GUID': NoFSPI = True continue else: self.Error = "%s definition is missing in DSC file" % (GuidList[Index]) return 1 if UpdTxtFile == '': UpdTxtFile = os.path.join(FvDir, self._MacroDict[GuidList[Index]] + '.txt') if (self.NoDscFileChange (UpdTxtFile)): # DSC has not been modified yet # So don't have to re-generate other files self.Error = 'No DSC file change, skip to create UPD TXT file' return 256 TxtFd = open(UpdTxtFile, "w") TxtFd.write("%s\n" % (__copyright_txt__ % date.today().year)) NextOffset = 0 SpaceIdx = 0 StartAddr = 0 EndAddr = 0 Default = 'DEFAULT|' InRange = False for Item in self._CfgItemList: if Item['cname'] == 'Signature' and 
str(Item['value'])[0:6] == SignatureList[Index]: StartAddr = Item['offset'] NextOffset = StartAddr InRange = True if Item['cname'] == 'UpdTerminator' and InRange == True: EndAddr = Item['offset'] InRange = False InRange = False for Item in self._CfgItemList: if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == SignatureList[Index]: InRange = True if InRange != True: continue if Item['cname'] == 'UpdTerminator': InRange = False if Item['region'] != 'UPD': continue Offset = Item['offset'] if StartAddr > Offset or EndAddr < Offset: continue if NextOffset < Offset: # insert one line TxtFd.write("%s.UnusedUpdSpace%d|%s0x%04X|0x%04X|{0}\n" % (Item['space'], SpaceIdx, Default, NextOffset - StartAddr, Offset - NextOffset)) SpaceIdx = SpaceIdx + 1 NextOffset = Offset + Item['length'] TxtFd.write("%s.%s|%s0x%04X|%s|%s\n" % (Item['space'],Item['cname'],Default,Item['offset'] - StartAddr,Item['length'],Item['value'])) TxtFd.close() return 0 def CreateVarDict (self): Error = 0 self._VarDict = {} if len(self._CfgItemList) > 0: Item = self._CfgItemList[-1] self._VarDict['_LENGTH_'] = '%d' % (Item['offset'] + Item['length']) for Item in self._CfgItemList: Embed = Item['embed'] Match = re.match("^(\w+):(\w+):(START|END)", Embed) if Match: StructName = Match.group(1) VarName = '_%s_%s_' % (Match.group(3), StructName) if Match.group(3) == 'END': self._VarDict[VarName] = Item['offset'] + Item['length'] self._VarDict['_LENGTH_%s_' % StructName] = \ self._VarDict['_END_%s_' % StructName] - self._VarDict['_START_%s_' % StructName] if Match.group(2).startswith('TAG_'): if (self.Mode != 'FSP') and (self._VarDict['_LENGTH_%s_' % StructName] % 4): raise Exception("Size of structure '%s' is %d, not DWORD aligned !" % (StructName, self._VarDict['_LENGTH_%s_' % StructName])) self._VarDict['_TAG_%s_' % StructName] = int (Match.group(2)[4:], 16) & 0xFFF else: self._VarDict[VarName] = Item['offset'] if Item['marker']: self._VarDict['_OFFSET_%s_' % Item['marker'].strip()] = Item['offset'] return Error def UpdateBsfBitUnit (self, Item): BitTotal = 0 BitOffset = 0 StartIdx = 0 Unit = None UnitDec = {1:'BYTE', 2:'WORD', 4:'DWORD', 8:'QWORD'} for Idx, SubItem in enumerate(Item['subreg']): if Unit is None: Unit = SubItem['bitunit'] BitLength = SubItem['bitlength'] BitTotal += BitLength BitOffset += BitLength if BitOffset > 64 or BitOffset > Unit * 8: break if BitOffset == Unit * 8: for SubIdx in range (StartIdx, Idx + 1): Item['subreg'][SubIdx]['bitunit'] = Unit BitOffset = 0 StartIdx = Idx + 1 Unit = None if BitOffset > 0: raise Exception ("Bit fields cannot fit into %s for '%s.%s' !" % (UnitDec[Unit], Item['cname'], SubItem['cname'])) ExpectedTotal = Item['length'] * 8 if Item['length'] * 8 != BitTotal: raise Exception ("Bit fields total length (%d) does not match length (%d) of '%s' !" 
% (BitTotal, ExpectedTotal, Item['cname'])) def UpdateDefaultValue (self): Error = 0 for Idx, Item in enumerate(self._CfgItemList): if len(Item['subreg']) == 0: Value = Item['value'] if (len(Value) > 0) and (Value[0] == '{' or Value[0] == "'" or Value[0] == '"'): # {XXX} or 'XXX' strings self.FormatListValue(self._CfgItemList[Idx]) else: Match = re.match("(0x[0-9a-fA-F]+|[0-9]+)", Value) if not Match: NumValue = self.EvaluateExpress (Value) Item['value'] = '0x%X' % NumValue else: ValArray = self.ValueToByteArray (Item['value'], Item['length']) for SubItem in Item['subreg']: SubItem['value'] = self.GetBsfBitFields(SubItem, ValArray) self.UpdateBsfBitUnit (Item) return Error def ProcessMultilines (self, String, MaxCharLength): Multilines = '' StringLength = len(String) CurrentStringStart = 0 StringOffset = 0 BreakLineDict = [] if len(String) <= MaxCharLength: while (StringOffset < StringLength): if StringOffset >= 1: if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n': BreakLineDict.append (StringOffset + 1) StringOffset += 1 if BreakLineDict != []: for Each in BreakLineDict: Multilines += " %s\n" % String[CurrentStringStart:Each].lstrip() CurrentStringStart = Each if StringLength - CurrentStringStart > 0: Multilines += " %s\n" % String[CurrentStringStart:].lstrip() else: Multilines = " %s\n" % String else: NewLineStart = 0 NewLineCount = 0 FoundSpaceChar = False while (StringOffset < StringLength): if StringOffset >= 1: if NewLineCount >= MaxCharLength - 1: if String[StringOffset] == ' ' and StringLength - StringOffset > 10: BreakLineDict.append (NewLineStart + NewLineCount) NewLineStart = NewLineStart + NewLineCount NewLineCount = 0 FoundSpaceChar = True elif StringOffset == StringLength - 1 and FoundSpaceChar == False: BreakLineDict.append (0) if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n': BreakLineDict.append (StringOffset + 1) NewLineStart = StringOffset + 1 NewLineCount = 0 StringOffset += 1 NewLineCount += 1 if BreakLineDict != []: BreakLineDict.sort () for Each in BreakLineDict: if Each > 0: Multilines += " %s\n" % String[CurrentStringStart:Each].lstrip() CurrentStringStart = Each if StringLength - CurrentStringStart > 0: Multilines += " %s\n" % String[CurrentStringStart:].lstrip() return Multilines def CreateField (self, Item, Name, Length, Offset, Struct, BsfName, Help, Option, BitsLength = None): PosName = 28 PosComment = 30 NameLine='' HelpLine='' OptionLine='' if Length == 0 and Name == 'Dummy': return '\n' IsArray = False if Length in [1,2,4,8]: Type = "UINT%d" % (Length * 8) if Name.startswith("UnusedUpdSpace") and Length != 1: IsArray = True Type = "UINT8" else: IsArray = True Type = "UINT8" if Item and Item['value'].startswith('{'): Type = "UINT8" IsArray = True if Struct != '': Type = Struct if Struct in ['UINT8','UINT16','UINT32','UINT64']: IsArray = True Unit = int(Type[4:]) / 8 Length = Length / Unit else: IsArray = False if IsArray: Name = Name + '[%d]' % Length if len(Type) < PosName: Space1 = PosName - len(Type) else: Space1 = 1 if BsfName != '': NameLine=" - %s\n" % BsfName else: NameLine="\n" if Help != '': HelpLine = self.ProcessMultilines (Help, 80) if Option != '': OptionLine = self.ProcessMultilines (Option, 80) if Offset is None: OffsetStr = '????' 
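        # (Added comment, not in the original tool) A None offset only reaches
        # here from PostProcessBody(), when an EMBED marker line carries no
        # "/** Offset 0x.... */" comment; '????' is emitted as a deliberately
        # conspicuous placeholder in the generated header.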
else: OffsetStr = '0x%04X' % Offset if BitsLength is None: BitsLength = '' else: BitsLength = ' : %d' % BitsLength return "\n/** Offset %s%s%s%s**/\n %s%s%s%s;\n" % (OffsetStr, NameLine, HelpLine, OptionLine, Type, ' ' * Space1, Name, BitsLength) def PostProcessBody (self, TextBody): NewTextBody = [] OldTextBody = [] IncludeLine = False StructName = '' VariableName = '' IsUpdHdrDefined = False IsUpdHeader = False for Line in TextBody: SplitToLines = Line.splitlines() MatchComment = re.match("^/\*\sCOMMENT:(\w+):([\w|\W|\s]+)\s\*/\s([\s\S]*)", SplitToLines[0]) if MatchComment: if MatchComment.group(1) == 'FSP_UPD_HEADER': IsUpdHeader = True else: IsUpdHeader = False if IsUpdHdrDefined != True or IsUpdHeader != True: CommentLine = " " + MatchComment.group(2) + "\n" NewTextBody.append("/**" + CommentLine + "**/\n") Line = Line[(len(SplitToLines[0]) + 1):] Match = re.match("^/\*\sEMBED_STRUCT:(\w+):(\w+):(START|END)\s\*/\s([\s\S]*)", Line) if Match: Line = Match.group(4) if Match.group(1) == 'FSP_UPD_HEADER': IsUpdHeader = True else: IsUpdHeader = False if Match and Match.group(3) == 'START': if IsUpdHdrDefined != True or IsUpdHeader != True: NewTextBody.append ('typedef struct {\n') StructName = Match.group(1) VariableName = Match.group(2) MatchOffset = re.search('/\*\*\sOffset\s0x([a-fA-F0-9]+)', Line) if MatchOffset: Offset = int(MatchOffset.group(1), 16) else: Offset = None Line IncludeLine = True OldTextBody.append (self.CreateField (None, VariableName, 0, Offset, StructName, '', '', '')) if IncludeLine: if IsUpdHdrDefined != True or IsUpdHeader != True: NewTextBody.append (Line) else: OldTextBody.append (Line) if Match and Match.group(3) == 'END': if (StructName != Match.group(1)) or (VariableName != Match.group(2)): print ("Unmatched struct name '%s' and '%s' !" 
% (StructName, Match.group(1))) else: if IsUpdHdrDefined != True or IsUpdHeader != True: NewTextBody.append ('} %s;\n\n' % StructName) IsUpdHdrDefined = True IncludeLine = False NewTextBody.extend(OldTextBody) return NewTextBody def WriteLinesWithoutTailingSpace (self, HeaderFd, Line): TxtBody2 = Line.splitlines(True) for Line2 in TxtBody2: Line2 = Line2.rstrip() Line2 += '\n' HeaderFd.write (Line2) return 0 def CreateHeaderFile (self, InputHeaderFile): FvDir = self._FvDir HeaderFileName = 'FspUpd.h' HeaderFile = os.path.join(FvDir, HeaderFileName) # Check if header needs to be recreated if (self.NoDscFileChange (HeaderFile)): # DSC has not been modified yet # So don't have to re-generate other files self.Error = 'No DSC file change, skip to create UPD header file' return 256 TxtBody = [] for Item in self._CfgItemList: if str(Item['cname']) == 'Signature' and Item['length'] == 8: Value = int(Item['value'], 16) Chars = [] while Value != 0x0: Chars.append(chr(Value & 0xFF)) Value = Value >> 8 SignatureStr = ''.join(Chars) # Signature will be _T / _M / _S / _I for FSPT / FSPM / FSPS /FSPI accordingly if '_T' in SignatureStr[6:6+2]: TxtBody.append("#define FSPT_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr)) elif '_M' in SignatureStr[6:6+2]: TxtBody.append("#define FSPM_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr)) elif '_S' in SignatureStr[6:6+2]: TxtBody.append("#define FSPS_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr)) elif '_I' in SignatureStr[6:6+2]: if NoFSPI == False: TxtBody.append("#define FSPI_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr)) TxtBody.append("\n") for Region in ['UPD']: UpdOffsetTable = [] UpdSignature = ['0x545F', '0x4D5F', '0x535F', '0x495F'] #['_T', '_M', '_S', '_I'] signature for FSPT, FSPM, FSPS, FSPI UpdStructure = ['FSPT_UPD', 'FSPM_UPD', 'FSPS_UPD', 'FSPI_UPD'] for Item in self._CfgItemList: if Item["cname"] == 'Signature' and Item["value"][0:6] in UpdSignature: Item["offset"] = 0 # re-initialize offset to 0 when new UPD structure starting UpdOffsetTable.append (Item["offset"]) for UpdIdx in range(len(UpdOffsetTable)): CommentLine = "" for Item in self._CfgItemList: if Item["comment"] != '' and Item["offset"] >= UpdOffsetTable[UpdIdx]: MatchComment = re.match("^(U|V)PD_DATA_REGION:([\w|\W|\s]+)", Item["comment"]) if MatchComment and MatchComment.group(1) == Region[0]: CommentLine = " " + MatchComment.group(2) + "\n" TxtBody.append("/**" + CommentLine + "**/\n") elif Item["offset"] >= UpdOffsetTable[UpdIdx] and Item["comment"] == '': Match = re.match("^FSP([\w|\W|\s])_UPD", UpdStructure[UpdIdx]) if Match: TxtBody.append("/** Fsp " + Match.group(1) + " UPD Configuration\n**/\n") TxtBody.append("typedef struct {\n") NextOffset = 0 SpaceIdx = 0 Offset = 0 LastVisible = True ResvOffset = 0 ResvIdx = 0 LineBuffer = [] InRange = False for Item in self._CfgItemList: if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == UpdSignature[UpdIdx] or Region[0] == 'V': InRange = True if InRange != True: continue if Item['cname'] == 'UpdTerminator': InRange = False if Item['region'] != Region: continue if Item["offset"] < UpdOffsetTable[UpdIdx]: continue NextVisible = LastVisible if LastVisible and (Item['header'] == 'OFF'): NextVisible = False ResvOffset = Item['offset'] elif (not LastVisible) and Item['header'] == 'ON': NextVisible = True Name = "Reserved" + Region[0] + "pdSpace%d" % ResvIdx ResvIdx = ResvIdx + 1 TxtBody.append(self.CreateField (Item, Name, Item["offset"] - ResvOffset, 
ResvOffset, '', '', '', '')) if Offset < Item["offset"]: if LastVisible: Name = "Unused" + Region[0] + "pdSpace%d" % SpaceIdx LineBuffer.append(self.CreateField (Item, Name, Item["offset"] - Offset, Offset, '', '', '', '')) SpaceIdx = SpaceIdx + 1 Offset = Item["offset"] LastVisible = NextVisible Offset = Offset + Item["length"] if LastVisible: for Each in LineBuffer: TxtBody.append (Each) LineBuffer = [] Comment = Item["comment"] Embed = Item["embed"].upper() if Embed.endswith(':START') or Embed.endswith(':END'): if not Comment == '' and Embed.endswith(':START'): Marker = '/* COMMENT:%s */ \n' % Item["comment"] Marker = Marker + '/* EMBED_STRUCT:%s */ ' % Item["embed"] else: Marker = '/* EMBED_STRUCT:%s */ ' % Item["embed"] else: if Embed == '': Marker = '' else: self.Error = "Invalid embedded structure format '%s'!\n" % Item["embed"] return 4 Line = Marker + self.CreateField (Item, Item["cname"], Item["length"], Item["offset"], Item['struct'], Item['name'], Item['help'], Item['option']) TxtBody.append(Line) if Item['cname'] == 'UpdTerminator': break TxtBody.append("} " + UpdStructure[UpdIdx] + ";\n\n") # Handle the embedded data structure TxtBody = self.PostProcessBody (TxtBody) HeaderTFileName = 'FsptUpd.h' HeaderMFileName = 'FspmUpd.h' HeaderSFileName = 'FspsUpd.h' HeaderIFileName = 'FspiUpd.h' UpdRegionCheck = ['FSPT', 'FSPM', 'FSPS', 'FSPI'] # FSPX_UPD_REGION UpdConfigCheck = ['FSP_T', 'FSP_M', 'FSP_S', 'FSP_I'] # FSP_X_CONFIG, FSP_X_TEST_CONFIG, FSP_X_RESTRICTED_CONFIG UpdSignatureCheck = ['FSPT_UPD_SIGNATURE', 'FSPM_UPD_SIGNATURE', 'FSPS_UPD_SIGNATURE', 'FSPI_UPD_SIGNATURE'] ExcludedSpecificUpd = ['FSPT_ARCH_UPD', 'FSPM_ARCH_UPD', 'FSPS_ARCH_UPD', 'FSPI_ARCH_UPD'] ExcludedSpecificUpd1 = ['FSPT_ARCH2_UPD', 'FSPM_ARCH2_UPD', 'FSPS_ARCH2_UPD'] IncLines = [] if InputHeaderFile != '': if not os.path.exists(InputHeaderFile): self.Error = "Input header file '%s' does not exist" % InputHeaderFile return 6 InFd = open(InputHeaderFile, "r") IncLines = InFd.readlines() InFd.close() for item in range(len(UpdRegionCheck)): if UpdRegionCheck[item] == 'FSPT': HeaderFd = open(os.path.join(FvDir, HeaderTFileName), "w") FileBase = os.path.basename(os.path.join(FvDir, HeaderTFileName)) elif UpdRegionCheck[item] == 'FSPM': HeaderFd = open(os.path.join(FvDir, HeaderMFileName), "w") FileBase = os.path.basename(os.path.join(FvDir, HeaderMFileName)) elif UpdRegionCheck[item] == 'FSPS': HeaderFd = open(os.path.join(FvDir, HeaderSFileName), "w") FileBase = os.path.basename(os.path.join(FvDir, HeaderSFileName)) elif UpdRegionCheck[item] == 'FSPI': HeaderFd = open(os.path.join(FvDir, HeaderIFileName), "w") FileBase = os.path.basename(os.path.join(FvDir, HeaderIFileName)) FileName = FileBase.replace(".", "_").upper() HeaderFd.write("%s\n" % (__copyright_h__ % date.today().year)) HeaderFd.write("#ifndef __%s__\n" % FileName) HeaderFd.write("#define __%s__\n\n" % FileName) HeaderFd.write("#include <%s>\n\n" % HeaderFileName) HeaderFd.write("#pragma pack(1)\n\n") Export = False for Line in IncLines: Match = re.search ("!EXPORT\s+([A-Z]+)\s+EXTERNAL_BOOTLOADER_STRUCT_(BEGIN|END)\s+", Line) if Match: if Match.group(2) == "BEGIN" and Match.group(1) == UpdRegionCheck[item]: Export = True continue else: Export = False continue if Export: HeaderFd.write(Line) HeaderFd.write("\n") Index = 0 StartIndex = 0 EndIndex = 0 StructStart = [] StructStartWithComment = [] StructEnd = [] for Line in TxtBody: Index += 1 Match = re.match("(typedef struct {)", Line) if Match: StartIndex = Index - 1 Match = 
re.match("}\s([_A-Z0-9]+);", Line) if Match and (UpdRegionCheck[item] in Match.group(1) or UpdConfigCheck[item] in Match.group(1)) and (ExcludedSpecificUpd[item] not in Match.group(1)) and (ExcludedSpecificUpd1[item] not in Match.group(1)): EndIndex = Index StructStart.append(StartIndex) StructEnd.append(EndIndex) Index = 0 for Line in TxtBody: Index += 1 for Item in range(len(StructStart)): if Index == StructStart[Item]: Match = re.match("^(/\*\*\s*)", Line) if Match: StructStartWithComment.append(StructStart[Item]) else: StructStartWithComment.append(StructStart[Item] + 1) Index = 0 for Line in TxtBody: Index += 1 for Item in range(len(StructStart)): if Index >= StructStartWithComment[Item] and Index <= StructEnd[Item]: self.WriteLinesWithoutTailingSpace(HeaderFd, Line) HeaderFd.write("#pragma pack()\n\n") HeaderFd.write("#endif\n") HeaderFd.close() HeaderFd = open(HeaderFile, "w") FileBase = os.path.basename(HeaderFile) FileName = FileBase.replace(".", "_").upper() HeaderFd.write("%s\n" % (__copyright_h__ % date.today().year)) HeaderFd.write("#ifndef __%s__\n" % FileName) HeaderFd.write("#define __%s__\n\n" % FileName) HeaderFd.write("#include <FspEas.h>\n\n") HeaderFd.write("#pragma pack(1)\n\n") for item in range(len(UpdRegionCheck)): Index = 0 StartIndex = 0 EndIndex = 0 StructStart = [] StructStartWithComment = [] StructEnd = [] for Line in TxtBody: Index += 1 Match = re.match("(typedef struct {)", Line) if Match: StartIndex = Index - 1 Match = re.match("#define\s([_A-Z0-9]+)\s*", Line) if Match and (UpdSignatureCheck[item] in Match.group(1) or UpdSignatureCheck[item] in Match.group(1)): StructStart.append(Index - 1) StructEnd.append(Index) Index = 0 for Line in TxtBody: Index += 1 for Item in range(len(StructStart)): if Index == StructStart[Item]: Match = re.match("^(/\*\*\s*)", Line) if Match: StructStartWithComment.append(StructStart[Item]) else: StructStartWithComment.append(StructStart[Item] + 1) Index = 0 for Line in TxtBody: Index += 1 for Item in range(len(StructStart)): if Index >= StructStartWithComment[Item] and Index <= StructEnd[Item]: self.WriteLinesWithoutTailingSpace(HeaderFd, Line) HeaderFd.write("#pragma pack()\n\n") HeaderFd.write("#endif\n") HeaderFd.close() return 0 def WriteBsfStruct (self, BsfFd, Item): LogExpr = CLogicalExpression() if Item['type'] == "None": Space = "gPlatformFspPkgTokenSpaceGuid" else: Space = Item['space'] Line = " $%s_%s" % (Space, Item['cname']) Match = re.match("\s*\{([x0-9a-fA-F,\s]+)\}\s*", Item['value']) if Match: DefaultValue = Match.group(1).strip() else: DefaultValue = Item['value'].strip() if 'bitlength' in Item: BsfFd.write(" %s%s%4d bits $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['bitlength'], DefaultValue)) else: BsfFd.write(" %s%s%4d bytes $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['length'], DefaultValue)) TmpList = [] if Item['type'] == "Combo": if not Item['option'] in self._BuidinOption: OptList = Item['option'].split(',') for Option in OptList: Option = Option.strip() (OpVal, OpStr) = Option.split(':') test = LogExpr.getNumber (OpVal) if test is None: raise Exception("Selection Index '%s' is not a number" % OpVal) TmpList.append((OpVal, OpStr)) return TmpList def WriteBsfOption (self, BsfFd, Item): PcdName = Item['space'] + '_' + Item['cname'] WriteHelp = 0 if Item['type'] == "Combo": if Item['option'] in self._BuidinOption: Options = self._BuidinOption[Item['option']] else: Options = PcdName BsfFd.write(' %s $%s, "%s", &%s,\n' % (Item['type'], PcdName, Item['name'], Options)) WriteHelp = 1 
elif Item['type'].startswith("EditNum"): Match = re.match("EditNum\s*,\s*(HEX|DEC)\s*,\s*\((\d+|0x[0-9A-Fa-f]+)\s*,\s*(\d+|0x[0-9A-Fa-f]+)\)", Item['type']) if Match: BsfFd.write(' EditNum $%s, "%s", %s,\n' % (PcdName, Item['name'], Match.group(1))) WriteHelp = 2 elif Item['type'].startswith("EditText"): BsfFd.write(' %s $%s, "%s",\n' % (Item['type'], PcdName, Item['name'])) WriteHelp = 1 elif Item['type'] == "Table": Columns = Item['option'].split(',') if len(Columns) != 0: BsfFd.write(' %s $%s "%s",' % (Item['type'], PcdName, Item['name'])) for Col in Columns: Fmt = Col.split(':') if len(Fmt) != 3: raise Exception("Column format '%s' is invalid !" % Fmt) try: Dtype = int(Fmt[1].strip()) except: raise Exception("Column size '%s' is invalid !" % Fmt[1]) BsfFd.write('\n Column "%s", %d bytes, %s' % (Fmt[0].strip(), Dtype, Fmt[2].strip())) BsfFd.write(',\n') WriteHelp = 1 if WriteHelp > 0: HelpLines = Item['help'].split('\\n\\r') FirstLine = True for HelpLine in HelpLines: if FirstLine: FirstLine = False BsfFd.write(' Help "%s"\n' % (HelpLine)) else: BsfFd.write(' "%s"\n' % (HelpLine)) if WriteHelp == 2: BsfFd.write(' "Valid range: %s ~ %s"\n' % (Match.group(2), Match.group(3))) def GenerateBsfFile (self, BsfFile): if BsfFile == '': self.Error = "BSF output file '%s' is invalid" % BsfFile return 1 if (self.NoDscFileChange (BsfFile)): # DSC has not been modified yet # So don't have to re-generate other files self.Error = 'No DSC file change, skip to create UPD BSF file' return 256 Error = 0 OptionDict = {} BsfFd = open(BsfFile, "w") BsfFd.write("%s\n" % (__copyright_bsf__ % date.today().year)) BsfFd.write("%s\n" % self._GlobalDataDef) BsfFd.write("StructDef\n") NextOffset = -1 for Item in self._CfgItemList: if Item['find'] != '': BsfFd.write('\n Find "%s"\n' % Item['find']) NextOffset = Item['offset'] + Item['length'] if Item['name'] != '': if NextOffset != Item['offset']: BsfFd.write(" Skip %d bytes\n" % (Item['offset'] - NextOffset)) if len(Item['subreg']) > 0: NextOffset = Item['offset'] BitsOffset = NextOffset * 8 for SubItem in Item['subreg']: BitsOffset += SubItem['bitlength'] if SubItem['name'] == '': if 'bitlength' in SubItem: BsfFd.write(" Skip %d bits\n" % (SubItem['bitlength'])) else: BsfFd.write(" Skip %d bytes\n" % (SubItem['length'])) else: Options = self.WriteBsfStruct(BsfFd, SubItem) if len(Options) > 0: OptionDict[SubItem['space']+'_'+SubItem['cname']] = Options NextBitsOffset = (Item['offset'] + Item['length']) * 8 if NextBitsOffset > BitsOffset: BitsGap = NextBitsOffset - BitsOffset BitsRemain = BitsGap % 8 if BitsRemain: BsfFd.write(" Skip %d bits\n" % BitsRemain) BitsGap -= BitsRemain BytesRemain = int(BitsGap / 8) if BytesRemain: BsfFd.write(" Skip %d bytes\n" % BytesRemain) NextOffset = Item['offset'] + Item['length'] else: NextOffset = Item['offset'] + Item['length'] Options = self.WriteBsfStruct(BsfFd, Item) if len(Options) > 0: OptionDict[Item['space']+'_'+Item['cname']] = Options BsfFd.write("\nEndStruct\n\n") BsfFd.write("%s" % self._BuidinOptionTxt) for Each in OptionDict: BsfFd.write("List &%s\n" % Each) for Item in OptionDict[Each]: BsfFd.write(' Selection %s , "%s"\n' % (Item[0], Item[1])) BsfFd.write("EndList\n\n") BsfFd.write("BeginInfoBlock\n") BsfFd.write(' PPVer "%s"\n' % (self._CfgBlkDict['ver'])) BsfFd.write(' Description "%s"\n' % (self._CfgBlkDict['name'])) BsfFd.write("EndInfoBlock\n\n") for Each in self._CfgPageDict: BsfFd.write('Page "%s"\n' % self._CfgPageDict[Each]) BsfItems = [] for Item in self._CfgItemList: if Item['name'] != '': if 
Item['page'] != Each: continue if len(Item['subreg']) > 0: for SubItem in Item['subreg']: if SubItem['name'] != '': BsfItems.append(SubItem) else: BsfItems.append(Item) BsfItems.sort(key=lambda x: x['order']) for Item in BsfItems: self.WriteBsfOption (BsfFd, Item) BsfFd.write("EndPage\n\n") BsfFd.close() return Error def Usage(): print ("GenCfgOpt Version 0.59") print ("Usage:") print (" GenCfgOpt UPDTXT PlatformDscFile BuildFvDir [-D Macros]") print (" GenCfgOpt HEADER PlatformDscFile BuildFvDir InputHFile [-D Macros]") print (" GenCfgOpt GENBSF PlatformDscFile BuildFvDir BsfOutFile [-D Macros]") def Main(): # # Parse the options and args # i = 1 GenCfgOpt = CGenCfgOpt() while i < len(sys.argv): if sys.argv[i].strip().lower() == "--pcd": BuildOptionPcd.append(sys.argv[i+1]) i += 1 i += 1 argc = len(sys.argv) if argc < 4: Usage() return 1 else: DscFile = sys.argv[2] if not os.path.exists(DscFile): print ("ERROR: Cannot open DSC file '%s' !" % DscFile) return 2 OutFile = '' if argc > 4: if sys.argv[4][0] == '-': Start = 4 else: OutFile = sys.argv[4] Start = 5 if argc > Start: if GenCfgOpt.ParseMacros(sys.argv[Start:]) != 0: print ("ERROR: Macro parsing failed !") return 3 FvDir = sys.argv[3] if not os.path.exists(FvDir): os.makedirs(FvDir) if GenCfgOpt.ParseDscFile(DscFile, FvDir) != 0: print ("ERROR: %s !" % GenCfgOpt.Error) return 5 if GenCfgOpt.UpdateSubRegionDefaultValue() != 0: print ("ERROR: %s !" % GenCfgOpt.Error) return 7 if sys.argv[1] == "UPDTXT": Ret = GenCfgOpt.CreateSplitUpdTxt(OutFile) if Ret != 0: # No change is detected if Ret == 256: print ("INFO: %s !" % (GenCfgOpt.Error)) else : print ("ERROR: %s !" % (GenCfgOpt.Error)) return Ret elif sys.argv[1] == "HEADER": Ret = GenCfgOpt.CreateHeaderFile(OutFile) if Ret != 0: # No change is detected if Ret == 256: print ("INFO: %s !" % (GenCfgOpt.Error)) else : print ("ERROR: %s !" % (GenCfgOpt.Error)) return 8 return Ret elif sys.argv[1] == "GENBSF": Ret = GenCfgOpt.GenerateBsfFile(OutFile) if Ret != 0: # No change is detected if Ret == 256: print ("INFO: %s !" % (GenCfgOpt.Error)) else : print ("ERROR: %s !" % (GenCfgOpt.Error)) return 9 return Ret else: if argc < 5: Usage() return 1 print ("ERROR: Unknown command '%s' !" % sys.argv[1]) Usage() return 1 return 0 return 0 if __name__ == '__main__': sys.exit(Main())
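# --- Added usage sketch (not part of GenCfgOpt.py) ---------------------------
# Assuming the file above is importable as GenCfgOpt, its expression engine
# can be exercised on the same C-like syntax the !if/!elseif directives use:
# word operators (NOT/AND/OR/XOR) combine numeric terms, while == and != fall
# back to string comparison when either side is not a number.
#
#   from GenCfgOpt import CLogicalExpression
#   expr = CLogicalExpression()
#   print(expr.evaluateExpress("(2 > 1) AND NOT 0"))   # True
#   print(expr.evaluateExpress("ABC != DEF"))          # True (string compare)
#   print(expr.evaluateExpress("0x10 XOR 0x10"))       # False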
edk2-master
IntelFsp2Pkg/Tools/GenCfgOpt.py
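# --- Added invocation sketch (paths and macro values are illustrative) -------
# The three sub-commands mirror the Usage() text of the tool above; a build
# script might drive it roughly like this:
#
#   python GenCfgOpt.py UPDTXT Platform.dsc Build/FV -D CFG_DEBUG=1
#   python GenCfgOpt.py HEADER Platform.dsc Build/FV BootLoader.h -D CFG_DEBUG=1
#   python GenCfgOpt.py GENBSF Platform.dsc Build/FV Output.bsf -D CFG_DEBUG=1
#
# A return code of 256 from any sub-command means the DSC was unchanged and
# regeneration was skipped; it is informational, not a failure.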
# @ GenCfgData.py # # Copyright (c) 2014 - 2021, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # ## import os import re import sys import marshal from functools import reduce from datetime import date # Generated file copyright header __copyright_tmp__ = """/** @file Configuration %s File. Copyright (c) %4d, Intel Corporation. All rights reserved.<BR> SPDX-License-Identifier: BSD-2-Clause-Patent This file is automatically generated. Please do NOT modify !!! **/ """ __copyright_dsc__ = """## @file # # Copyright (c) %04d, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # ## [PcdsDynamicVpd.Upd] # # Global definitions in BSF # !BSF BLOCK:{NAME:"FSP UPD Configuration", VER:"0.1"} # """ def Bytes2Val(Bytes): return reduce(lambda x, y: (x << 8) | y, Bytes[::-1]) def Bytes2Str(Bytes): return '{ %s }' % (', '.join('0x%02X' % i for i in Bytes)) def Str2Bytes(Value, Blen): Result = bytearray(Value[1:-1], 'utf-8') # Excluding quotes if len(Result) < Blen: Result.extend(b'\x00' * (Blen - len(Result))) return Result def Val2Bytes(Value, Blen): return [(Value >> (i * 8) & 0xff) for i in range(Blen)] def Array2Val(ValStr): ValStr = ValStr.strip() if ValStr.startswith('{'): ValStr = ValStr[1:] if ValStr.endswith('}'): ValStr = ValStr[:-1] if ValStr.startswith("'"): ValStr = ValStr[1:] if ValStr.endswith("'"): ValStr = ValStr[:-1] Value = 0 for Each in ValStr.split(',')[::-1]: Each = Each.strip() if Each.startswith('0x'): Base = 16 else: Base = 10 Value = (Value << 8) | int(Each, Base) return Value def GetCopyrightHeader(FileType, AllowModify=False): FileDescription = { 'bsf': 'Boot Setting', 'dsc': 'Definition', 'dlt': 'Delta', 'inc': 'C Binary Blob', 'h': 'C Struct Header' } if FileType in ['bsf', 'dsc', 'dlt']: CommentChar = '#' else: CommentChar = '' Lines = __copyright_tmp__.split('\n') if AllowModify: Lines = [Line for Line in Lines if 'Please do NOT modify' not in Line] CopyrightHdr = '\n'.join('%s%s' % ( CommentChar, Line) for Line in Lines)[:-1] + '\n' return CopyrightHdr % (FileDescription[FileType], date.today().year) class CLogicalExpression: def __init__(self): self.index = 0 self.string = '' def errExit(self, err=''): print("ERROR: Express parsing for:") print(" %s" % self.string) print(" %s^" % (' ' * self.index)) if err: print("INFO : %s" % err) raise SystemExit def getNonNumber(self, n1, n2): if not n1.isdigit(): return n1 if not n2.isdigit(): return n2 return None def getCurr(self, lens=1): try: if lens == -1: return self.string[self.index:] else: if self.index + lens > len(self.string): lens = len(self.string) - self.index return self.string[self.index: self.index + lens] except Exception: return '' def isLast(self): return self.index == len(self.string) def moveNext(self, len=1): self.index += len def skipSpace(self): while not self.isLast(): if self.getCurr() in ' \t': self.moveNext() else: return def normNumber(self, val): return True if val else False def getNumber(self, var): var = var.strip() if re.match('^0x[a-fA-F0-9]+$', var): value = int(var, 16) elif re.match('^[+-]?\\d+$', var): value = int(var, 10) else: value = None return value def parseValue(self): self.skipSpace() var = '' while not self.isLast(): char = self.getCurr() if re.match('^[\\w.]', char): var += char self.moveNext() else: break val = self.getNumber(var) if val is None: value = var else: value = "%d" % val return value def parseSingleOp(self): self.skipSpace() if re.match('^NOT\\W', self.getCurr(-1)): self.moveNext(3) op = 
self.parseBrace() val = self.getNumber(op) if val is None: self.errExit("'%s' is not a number" % op) return "%d" % (not self.normNumber(int(op))) else: return self.parseValue() def parseBrace(self): self.skipSpace() char = self.getCurr() if char == '(': self.moveNext() value = self.parseExpr() self.skipSpace() if self.getCurr() != ')': self.errExit("Expecting closing brace or operator") self.moveNext() return value else: value = self.parseSingleOp() return value def parseCompare(self): value = self.parseBrace() while True: self.skipSpace() char = self.getCurr() if char in ['<', '>']: self.moveNext() next = self.getCurr() if next == '=': op = char + next self.moveNext() else: op = char result = self.parseBrace() test = self.getNonNumber(result, value) if test is None: value = "%d" % self.normNumber(eval(value + op + result)) else: self.errExit("'%s' is not a valid number for comparision" % test) elif char in ['=', '!']: op = self.getCurr(2) if op in ['==', '!=']: self.moveNext(2) result = self.parseBrace() test = self.getNonNumber(result, value) if test is None: value = "%d" % self.normNumber((eval(value + op + result))) else: value = "%d" % self.normNumber(eval("'" + value + "'" + op + "'" + result + "'")) else: break else: break return value def parseAnd(self): value = self.parseCompare() while True: self.skipSpace() if re.match('^AND\\W', self.getCurr(-1)): self.moveNext(3) result = self.parseCompare() test = self.getNonNumber(result, value) if test is None: value = "%d" % self.normNumber(int(value) & int(result)) else: self.errExit("'%s' is not a valid op number for AND" % test) else: break return value def parseOrXor(self): value = self.parseAnd() op = None while True: self.skipSpace() op = None if re.match('^XOR\\W', self.getCurr(-1)): self.moveNext(3) op = '^' elif re.match('^OR\\W', self.getCurr(-1)): self.moveNext(2) op = '|' else: break if op: result = self.parseAnd() test = self.getNonNumber(result, value) if test is None: value = "%d" % self.normNumber(eval(value + op + result)) else: self.errExit("'%s' is not a valid op number for XOR/OR" % test) return value def parseExpr(self): return self.parseOrXor() def getResult(self): value = self.parseExpr() self.skipSpace() if not self.isLast(): self.errExit("Unexpected character found '%s'" % self.getCurr()) test = self.getNumber(value) if test is None: self.errExit("Result '%s' is not a number" % value) return int(value) def evaluateExpress(self, Expr): self.index = 0 self.string = Expr if self.getResult(): Result = True else: Result = False return Result class CFspBsf2Dsc: def __init__(self, bsf_file): self.cfg_list = CFspBsf2Dsc.parse_bsf(bsf_file) def get_dsc_lines(self): return CFspBsf2Dsc.generate_dsc(self.cfg_list) def save_dsc(self, dsc_file): return CFspBsf2Dsc.generate_dsc(self.cfg_list, dsc_file) @staticmethod def parse_bsf(bsf_file): fd = open(bsf_file, 'r') bsf_txt = fd.read() fd.close() find_list = [] regex = re.compile(r'\s+Find\s+"(.*?)"(.*?)^\s+(\$(.*?)|Skip)\s+', re.S | re.MULTILINE) for match in regex.finditer(bsf_txt): find = match.group(1) name = match.group(3) line = bsf_txt[:match.end()].count("\n") find_list.append((name, find, line)) idx = 0 count = 0 prefix = '' chk_dict = {} cfg_list = [] cfg_temp = {'find': '', 'cname': '', 'length': 0, 'value': '0', 'type': 'Reserved', 'isbit': False, 'embed': '', 'page': '', 'option': '', 'instance': 0} regex = re.compile( r'^\s+(\$(.*?)|Skip)\s+(\d+)\s+(bits|bytes)(\s+\$_DEFAULT_\s' r'+=\s+(.+?))?$', re.S | re.MULTILINE) for match in regex.finditer(bsf_txt): dlen = 
int(match.group(3)) if match.group(1) == 'Skip': key = 'gPlatformFspPkgTokenSpaceGuid_BsfSkip%d' % idx val = ', '.join(['%02X' % ord(i) for i in '\x00' * dlen]) idx += 1 option = '$SKIP' else: key = match.group(2) val = match.group(6) option = '' is_bit = True if match.group(4) == 'bits' else False cfg_item = dict(cfg_temp) line = bsf_txt[:match.end()].count("\n") finds = [i for i in find_list if line >= i[2]] if len(finds) > 0: prefix = finds[0][1] cfg_item['embed'] = '%s:TAG_%03X:START' % \ (prefix, ord(prefix[-1])) cfg_item['find'] = prefix cfg_item['cname'] = 'Signature' cfg_item['length'] = len(finds[0][1]) str2byte = Str2Bytes("'" + finds[0][1] + "'", len(finds[0][1])) cfg_item['value'] = '0x%X' % Bytes2Val(str2byte) cfg_list.append(dict(cfg_item)) cfg_item = dict(cfg_temp) find_list.pop(0) count = 0 cfg_item['cname'] = key cfg_item['length'] = dlen cfg_item['value'] = val cfg_item['option'] = option cfg_item['isbit'] = is_bit if key not in chk_dict.keys(): chk_dict[key] = 0 else: chk_dict[key] += 1 cfg_item['instance'] = chk_dict[key] cfg_list.append(cfg_item) count += 1 if prefix: cfg_item = dict(cfg_temp) cfg_item['cname'] = 'Dummy' cfg_item['embed'] = '%s:%03X:END' % (prefix, ord(prefix[-1])) cfg_list.append(cfg_item) option_dict = {} selreg = re.compile( r'\s+Selection\s*(.+?)\s*,\s*"(.*?)"$', re.S | re.MULTILINE) regex = re.compile( r'^List\s&(.+?)$(.+?)^EndList$', re.S | re.MULTILINE) for match in regex.finditer(bsf_txt): key = match.group(1) option_dict[key] = [] for select in selreg.finditer(match.group(2)): option_dict[key].append( (int(select.group(1), 0), select.group(2))) chk_dict = {} pagereg = re.compile( r'^Page\s"(.*?)"$(.+?)^EndPage$', re.S | re.MULTILINE) for match in pagereg.finditer(bsf_txt): page = match.group(1) for line in match.group(2).splitlines(): match = re.match( r'\s+(Combo|EditNum)\s\$(.+?),\s"(.*?)",\s(.+?),$', line) if match: cname = match.group(2) if cname not in chk_dict.keys(): chk_dict[cname] = 0 else: chk_dict[cname] += 1 instance = chk_dict[cname] cfg_idxs = [i for i, j in enumerate(cfg_list) if j['cname'] == cname and j['instance'] == instance] if len(cfg_idxs) != 1: raise Exception( "Multiple CFG item '%s' found !" 
% cname) cfg_item = cfg_list[cfg_idxs[0]] cfg_item['page'] = page cfg_item['type'] = match.group(1) cfg_item['prompt'] = match.group(3) cfg_item['range'] = None if cfg_item['type'] == 'Combo': cfg_item['option'] = option_dict[match.group(4)[1:]] elif cfg_item['type'] == 'EditNum': cfg_item['option'] = match.group(4) match = re.match(r'\s+ Help\s"(.*?)"$', line) if match: cfg_item['help'] = match.group(1) match = re.match(r'\s+"Valid\srange:\s(.*)"$', line) if match: parts = match.group(1).split() cfg_item['option'] = ( (int(parts[0], 0), int(parts[2], 0), cfg_item['option'])) return cfg_list @staticmethod def generate_dsc(option_list, dsc_file=None): dsc_lines = [] header = '%s' % (__copyright_dsc__ % date.today().year) dsc_lines.extend(header.splitlines()) pages = [] for cfg_item in option_list: if cfg_item['page'] and (cfg_item['page'] not in pages): pages.append(cfg_item['page']) page_id = 0 for page in pages: dsc_lines.append(' # !BSF PAGES:{PG%02X::"%s"}' % (page_id, page)) page_id += 1 dsc_lines.append('') last_page = '' is_bit = False dlen = 0 dval = 0 bit_fields = [] for idx, option in enumerate(option_list): if not is_bit and option['isbit']: is_bit = True dlen = 0 dval = 0 idxs = idx if is_bit and not option['isbit']: is_bit = False if dlen % 8 != 0: raise Exception("Bit fields are not aligned at " "byte boundary !") bit_fields.append((idxs, idx, dlen, dval)) if is_bit: blen = option['length'] bval = int(option['value'], 0) dval = dval + ((bval & ((1 << blen) - 1)) << dlen) print(dlen, blen, bval, hex(dval)) dlen += blen struct_idx = 0 for idx, option in enumerate(option_list): dsc_lines.append('') default = option['value'] pos = option['cname'].find('_') name = option['cname'][pos + 1:] for start_idx, end_idx, bits_len, bits_val in bit_fields: if idx == start_idx: val_str = Bytes2Str(Val2Bytes(bits_val, bits_len // 8)) dsc_lines.append(' # !HDR STRUCT:{BIT_FIELD_DATA_%d}' % struct_idx) dsc_lines.append(' # !BSF NAME:{BIT_FIELD_STRUCT}') dsc_lines.append(' gCfgData.BitFiledStruct%d ' ' | * | 0x%04X | %s' % (struct_idx, bits_len // 8, val_str)) dsc_lines.append('') struct_idx += 1 if option['find']: dsc_lines.append(' # !BSF FIND:{%s}' % option['find']) dsc_lines.append('') if option['instance'] > 0: name = name + '_%s' % option['instance'] if option['embed']: dsc_lines.append(' # !HDR EMBED:{%s}' % option['embed']) if option['type'] == 'Reserved': dsc_lines.append(' # !BSF NAME:{Reserved} TYPE:{Reserved}') if option['option'] == '$SKIP': dsc_lines.append(' # !BSF OPTION:{$SKIP}') else: prompt = option['prompt'] if last_page != option['page']: last_page = option['page'] dsc_lines.append(' # !BSF PAGE:{PG%02X}' % (pages.index(option['page']))) if option['type'] == 'Combo': dsc_lines.append(' # !BSF NAME:{%s} TYPE:{%s}' % (prompt, option['type'])) ops = [] for val, text in option['option']: ops.append('0x%x:%s' % (val, text)) dsc_lines.append(' # !BSF OPTION:{%s}' % (', '.join(ops))) elif option['type'] == 'EditNum': cfg_len = option['length'] if ',' in default and cfg_len > 8: dsc_lines.append(' # !BSF NAME:{%s} TYPE:{Table}' % (prompt)) if cfg_len > 16: cfg_len = 16 ops = [] for i in range(cfg_len): ops.append('%X:1:HEX' % i) dsc_lines.append(' # !BSF OPTION:{%s}' % (', '.join(ops))) else: dsc_lines.append( ' # !BSF NAME:{%s} TYPE:{%s, %s, (0x%X, 0x%X)}' % (prompt, option['type'], option['option'][2], option['option'][0], option['option'][1])) dsc_lines.append(' # !BSF HELP:{%s}' % option['help']) if ',' in default: default = '{%s}' % default if option['isbit']: dsc_lines.append(' 
# !BSF FIELD:{%s:%db}' % (name, option['length'])) else: dsc_lines.append(' gCfgData.%-30s | * | 0x%04X | %s' % (name, option['length'], default)) if dsc_file: fd = open(dsc_file, 'w') fd.write('\n'.join(dsc_lines)) fd.close() return dsc_lines class CGenCfgData: def __init__(self, Mode=''): self.Debug = False self.Error = '' self.ReleaseMode = True self.Mode = Mode self._GlobalDataDef = """ GlobalDataDef SKUID = 0, "DEFAULT" EndGlobalData """ self._BuidinOptionTxt = """ List &EN_DIS Selection 0x1 , "Enabled" Selection 0x0 , "Disabled" EndList """ self._StructType = ['UINT8', 'UINT16', 'UINT32', 'UINT64'] self._BsfKeyList = ['FIND', 'NAME', 'HELP', 'TYPE', 'PAGE', 'PAGES', 'BLOCK', 'OPTION', 'CONDITION', 'ORDER', 'MARKER', 'SUBT'] self._HdrKeyList = ['HEADER', 'STRUCT', 'EMBED', 'COMMENT'] self._BuidinOption = {'$EN_DIS': 'EN_DIS'} self._MacroDict = {} self._VarDict = {} self._PcdsDict = {} self._CfgBlkDict = {} self._CfgPageDict = {} self._CfgOptsDict = {} self._BsfTempDict = {} self._CfgItemList = [] self._DscLines = [] self._DscFile = '' self._CfgPageTree = {} self._MapVer = 0 self._MinCfgTagId = 0x100 def ParseMacros(self, MacroDefStr): # ['-DABC=1', '-D', 'CFG_DEBUG=1', '-D', 'CFG_OUTDIR=Build'] self._MacroDict = {} IsExpression = False for Macro in MacroDefStr: if Macro.startswith('-D'): IsExpression = True if len(Macro) > 2: Macro = Macro[2:] else: continue if IsExpression: IsExpression = False Match = re.match("(\\w+)=(.+)", Macro) if Match: self._MacroDict[Match.group(1)] = Match.group(2) else: Match = re.match("(\\w+)", Macro) if Match: self._MacroDict[Match.group(1)] = '' if len(self._MacroDict) == 0: Error = 1 else: Error = 0 if self.Debug: print("INFO : Macro dictionary:") for Each in self._MacroDict: print(" $(%s) = [ %s ]" % (Each, self._MacroDict[Each])) return Error def EvaulateIfdef(self, Macro): Result = Macro in self._MacroDict if self.Debug: print("INFO : Eval Ifdef [%s] : %s" % (Macro, Result)) return Result def ExpandMacros(self, Input, Preserve=False): Line = Input Match = re.findall("\\$\\(\\w+\\)", Input) if Match: for Each in Match: Variable = Each[2:-1] if Variable in self._MacroDict: Line = Line.replace(Each, self._MacroDict[Variable]) else: if self.Debug: print("WARN : %s is not defined" % Each) if not Preserve: Line = Line.replace(Each, Each[2:-1]) return Line def ExpandPcds(self, Input): Line = Input Match = re.findall("(\\w+\\.\\w+)", Input) if Match: for PcdName in Match: if PcdName in self._PcdsDict: Line = Line.replace(PcdName, self._PcdsDict[PcdName]) else: if self.Debug: print("WARN : %s is not defined" % PcdName) return Line def EvaluateExpress(self, Expr): ExpExpr = self.ExpandPcds(Expr) ExpExpr = self.ExpandMacros(ExpExpr) LogExpr = CLogicalExpression() Result = LogExpr.evaluateExpress(ExpExpr) if self.Debug: print("INFO : Eval Express [%s] : %s" % (Expr, Result)) return Result def ValueToByteArray(self, ValueStr, Length): Match = re.match("\\{\\s*FILE:(.+)\\}", ValueStr) if Match: FileList = Match.group(1).split(',') Result = bytearray() for File in FileList: File = File.strip() BinPath = os.path.join(os.path.dirname(self._DscFile), File) Result.extend(bytearray(open(BinPath, 'rb').read())) else: try: Result = bytearray(self.ValueToList(ValueStr, Length)) except ValueError: raise Exception("Bytes in '%s' must be in range 0~255 !" % ValueStr) if len(Result) < Length: Result.extend(b'\x00' * (Length - len(Result))) elif len(Result) > Length: raise Exception("Value '%s' is too big to fit into %d bytes !" 
% (ValueStr, Length)) return Result[:Length] def ValueToList(self, ValueStr, Length): if ValueStr[0] == '{': Result = [] BinList = ValueStr[1:-1].split(',') InBitField = False LastInBitField = False Value = 0 BitLen = 0 for Element in BinList: InBitField = False Each = Element.strip() if len(Each) == 0: pass else: if Each[0] in ['"', "'"]: Result.extend(list(bytearray(Each[1:-1], 'utf-8'))) elif ':' in Each: Match = re.match("(.+):(\\d+)b", Each) if Match is None: raise Exception("Invald value list format '%s' !" % Each) InBitField = True CurrentBitLen = int(Match.group(2)) CurrentValue = ((self.EvaluateExpress(Match.group(1)) & (1 << CurrentBitLen) - 1)) << BitLen else: Result.append(self.EvaluateExpress(Each.strip())) if InBitField: Value += CurrentValue BitLen += CurrentBitLen if LastInBitField and ((not InBitField) or (Element == BinList[-1])): if BitLen % 8 != 0: raise Exception("Invald bit field length!") Result.extend(Val2Bytes(Value, BitLen // 8)) Value = 0 BitLen = 0 LastInBitField = InBitField elif ValueStr.startswith("'") and ValueStr.endswith("'"): Result = Str2Bytes(ValueStr, Length) elif ValueStr.startswith('"') and ValueStr.endswith('"'): Result = Str2Bytes(ValueStr, Length) else: Result = Val2Bytes(self.EvaluateExpress(ValueStr), Length) return Result def FormatDeltaValue(self, ConfigDict): ValStr = ConfigDict['value'] if ValStr[0] == "'": # Remove padding \x00 in the value string ValStr = "'%s'" % ValStr[1:-1].rstrip('\x00') Struct = ConfigDict['struct'] if Struct in self._StructType: # Format the array using its struct type Unit = int(Struct[4:]) // 8 Value = Array2Val(ConfigDict['value']) Loop = ConfigDict['length'] // Unit Values = [] for Each in range(Loop): Values.append(Value & ((1 << (Unit * 8)) - 1)) Value = Value >> (Unit * 8) ValStr = '{ ' + ', '.join([('0x%%0%dX' % (Unit * 2)) % x for x in Values]) + ' }' return ValStr def FormatListValue(self, ConfigDict): Struct = ConfigDict['struct'] if Struct not in self._StructType: return DataList = self.ValueToList(ConfigDict['value'], ConfigDict['length']) Unit = int(Struct[4:]) // 8 if int(ConfigDict['length']) != Unit * len(DataList): # Fallback to byte array Unit = 1 if int(ConfigDict['length']) != len(DataList): raise Exception("Array size is not proper for '%s' !" % ConfigDict['cname']) ByteArray = [] for Value in DataList: for Loop in range(Unit): ByteArray.append("0x%02X" % (Value & 0xFF)) Value = Value >> 8 NewValue = '{' + ','.join(ByteArray) + '}' ConfigDict['value'] = NewValue return "" def GetOrderNumber(self, Offset, Order, BitOff=0): if isinstance(Order, int): if Order == -1: Order = Offset << 16 else: (Major, Minor) = Order.split('.') Order = (int(Major, 16) << 16) + ((int(Minor, 16) & 0xFF) << 8) return Order + (BitOff & 0xFF) def SubtituteLine(self, Line, Args): Args = Args.strip() Vars = Args.split(':') Line = self.ExpandMacros(Line, True) for Idx in range(len(Vars)-1, 0, -1): Line = Line.replace('$(%d)' % Idx, Vars[Idx].strip()) return Line def CfgDuplicationCheck(self, CfgDict, Name): if not self.Debug: return if Name == 'Dummy': return if Name not in CfgDict: CfgDict[Name] = 1 else: print("WARNING: Duplicated item found '%s' !" 
% CfgDict['cname']) def AddBsfChildPage(self, Child, Parent='root'): def AddBsfChildPageRecursive(PageTree, Parent, Child): Key = next(iter(PageTree)) if Parent == Key: PageTree[Key].append({Child: []}) return True else: Result = False for Each in PageTree[Key]: if AddBsfChildPageRecursive(Each, Parent, Child): Result = True break return Result return AddBsfChildPageRecursive(self._CfgPageTree, Parent, Child) def ParseDscFile(self, DscFile): self._DscLines = [] self._CfgItemList = [] self._CfgPageDict = {} self._CfgBlkDict = {} self._BsfTempDict = {} self._CfgPageTree = {'root': []} CfgDict = {} SectionNameList = ["Defines".lower(), "PcdsFeatureFlag".lower(), "PcdsDynamicVpd.Tmp".lower(), "PcdsDynamicVpd.Upd".lower()] IsDefSect = False IsPcdSect = False IsUpdSect = False IsTmpSect = False TemplateName = '' IfStack = [] ElifStack = [] Error = 0 ConfigDict = {} if type(DscFile) is list: # it is DSC lines already DscLines = DscFile self._DscFile = '.' else: DscFd = open(DscFile, "r") DscLines = DscFd.readlines() DscFd.close() self._DscFile = DscFile BsfRegExp = re.compile("(%s):{(.+?)}(?:$|\\s+)" % '|'. join(self._BsfKeyList)) HdrRegExp = re.compile("(%s):{(.+?)}" % '|'.join(self._HdrKeyList)) CfgRegExp = re.compile("^([_a-zA-Z0-9]+)\\s*\\|\\s*\ (0x[0-9A-F]+|\\*)\\s*\\|\\s*(\\d+|0x[0-9a-fA-F]+)\\s*\\|\\s*(.+)") TksRegExp = re.compile("^(g[_a-zA-Z0-9]+\\.)(.+)") SkipLines = 0 while len(DscLines): DscLine = DscLines.pop(0).strip() if SkipLines == 0: self._DscLines.append(DscLine) else: SkipLines = SkipLines - 1 if len(DscLine) == 0: continue Handle = False Match = re.match("^\\[(.+)\\]", DscLine) if Match is not None: IsDefSect = False IsPcdSect = False IsUpdSect = False IsTmpSect = False SectionName = Match.group(1).lower() if SectionName == SectionNameList[0]: IsDefSect = True if SectionName == SectionNameList[1]: IsPcdSect = True elif SectionName == SectionNameList[2]: IsTmpSect = True elif SectionName == SectionNameList[3]: ConfigDict = { 'header': 'ON', 'page': '', 'name': '', 'find': '', 'struct': '', 'embed': '', 'marker': '', 'option': '', 'comment': '', 'condition': '', 'order': -1, 'subreg': [] } IsUpdSect = True Offset = 0 else: if IsDefSect or IsPcdSect or IsUpdSect or IsTmpSect: Match = False if DscLine[0] != '!' 
else True if Match: Match = re.match("^!(else|endif|ifdef|ifndef|if|elseif\ |include)\\s*(.+)?$", DscLine.split("#")[0]) Keyword = Match.group(1) if Match else '' Remaining = Match.group(2) if Match else '' Remaining = '' if Remaining is None else Remaining.strip() if Keyword in ['if', 'elseif', 'ifdef', 'ifndef', 'include' ] and not Remaining: raise Exception("ERROR: Expression is expected after \ '!if' or !elseif' for line '%s'" % DscLine) if Keyword == 'else': if IfStack: IfStack[-1] = not IfStack[-1] else: raise Exception("ERROR: No paired '!if' found for \ '!else' for line '%s'" % DscLine) elif Keyword == 'endif': if IfStack: IfStack.pop() Level = ElifStack.pop() if Level > 0: del IfStack[-Level:] else: raise Exception("ERROR: No paired '!if' found for \ '!endif' for line '%s'" % DscLine) elif Keyword == 'ifdef' or Keyword == 'ifndef': Result = self.EvaulateIfdef(Remaining) if Keyword == 'ifndef': Result = not Result IfStack.append(Result) ElifStack.append(0) elif Keyword == 'if' or Keyword == 'elseif': Result = self.EvaluateExpress(Remaining) if Keyword == "if": ElifStack.append(0) IfStack.append(Result) else: # elseif if IfStack: IfStack[-1] = not IfStack[-1] IfStack.append(Result) ElifStack[-1] = ElifStack[-1] + 1 else: raise Exception("ERROR: No paired '!if' found for \ '!elif' for line '%s'" % DscLine) else: if IfStack: Handle = reduce(lambda x, y: x and y, IfStack) else: Handle = True if Handle: if Keyword == 'include': Remaining = self.ExpandMacros(Remaining) # Relative to DSC filepath IncludeFilePath = os.path.join( os.path.dirname(self._DscFile), Remaining) if not os.path.exists(IncludeFilePath): # Relative to repository to find \ # dsc in common platform IncludeFilePath = os.path.join( os.path.dirname(self._DscFile), "..", Remaining) try: IncludeDsc = open(IncludeFilePath, "r") except Exception: raise Exception("ERROR: Cannot open \ file '%s'." 
% IncludeFilePath) NewDscLines = IncludeDsc.readlines() IncludeDsc.close() DscLines = NewDscLines + DscLines del self._DscLines[-1] else: if DscLine.startswith('!'): raise Exception("ERROR: Unrecoginized \ directive for line '%s'" % DscLine) if not Handle: del self._DscLines[-1] continue if IsDefSect: Match = re.match("^\\s*(?:DEFINE\\s+)*(\\w+)\\s*=\\s*(.+)", DscLine) if Match: self._MacroDict[Match.group(1)] = Match.group(2) if self.Debug: print("INFO : DEFINE %s = [ %s ]" % (Match.group(1), Match.group(2))) elif IsPcdSect: Match = re.match("^\\s*([\\w\\.]+)\\s*\\|\\s*(\\w+)", DscLine) if Match: self._PcdsDict[Match.group(1)] = Match.group(2) if self.Debug: print("INFO : PCD %s = [ %s ]" % (Match.group(1), Match.group(2))) elif IsTmpSect: # !BSF DEFT:{GPIO_TMPL:START} Match = re.match("^\\s*#\\s+(!BSF)\\s+DEFT:{(.+?):\ (START|END)}", DscLine) if Match: if Match.group(3) == 'START' and not TemplateName: TemplateName = Match.group(2).strip() self._BsfTempDict[TemplateName] = [] if Match.group(3) == 'END' and ( TemplateName == Match.group(2).strip() ) and TemplateName: TemplateName = '' else: if TemplateName: Match = re.match("^!include\\s*(.+)?$", DscLine) if Match: continue self._BsfTempDict[TemplateName].append(DscLine) else: Match = re.match("^\\s*#\\s+(!BSF|!HDR)\\s+(.+)", DscLine) if Match: Remaining = Match.group(2) if Match.group(1) == '!BSF': Result = BsfRegExp.findall(Remaining) if Result: for Each in Result: Key = Each[0] Remaining = Each[1] if Key == 'BLOCK': Match = re.match( "NAME:\"(.+)\"\\s*,\\s*\ VER:\"(.+)\"\\s*", Remaining) if Match: self._CfgBlkDict['name'] = \ Match.group(1) self._CfgBlkDict['ver'] = Match.group(2 ) elif Key == 'SUBT': # GPIO_TMPL:1:2:3 Remaining = Remaining.strip() Match = re.match("(\\w+)\\s*:", Remaining) if Match: TemplateName = Match.group(1) for Line in self._BsfTempDict[ TemplateName][::-1]: NewLine = self.SubtituteLine( Line, Remaining) DscLines.insert(0, NewLine) SkipLines += 1 elif Key == 'PAGES': # !BSF PAGES:{HSW:"Haswell System Agent", \ # LPT:"Lynx Point PCH"} PageList = Remaining.split(',') for Page in PageList: Page = Page.strip() Match = re.match('(\\w+):\ (\\w*:)?\\"(.+)\\"', Page) if Match: PageName = Match.group(1) ParentName = Match.group(2) if not ParentName or \ ParentName == ':': ParentName = 'root' else: ParentName = ParentName[:-1] if not self.AddBsfChildPage( PageName, ParentName): raise Exception("Cannot find \ parent page '%s'!" % ParentName) self._CfgPageDict[ PageName] = Match.group(3) else: raise Exception("Invalid page \ definitions '%s'!" 
% Page) elif Key in ['NAME', 'HELP', 'OPTION' ] and Remaining.startswith('+'): # Allow certain options to be extended \ # to multiple lines ConfigDict[Key.lower()] += Remaining[1:] else: if Key == 'NAME': Remaining = Remaining.strip() elif Key == 'CONDITION': Remaining = self.ExpandMacros( Remaining.strip()) ConfigDict[Key.lower()] = Remaining else: Match = HdrRegExp.match(Remaining) if Match: Key = Match.group(1) Remaining = Match.group(2) if Key == 'EMBED': Parts = Remaining.split(':') Names = Parts[0].split(',') DummyDict = ConfigDict.copy() if len(Names) > 1: Remaining = Names[0] + ':' + ':'.join( Parts[1:]) DummyDict['struct'] = Names[1] else: DummyDict['struct'] = Names[0] DummyDict['cname'] = 'Dummy' DummyDict['name'] = '' DummyDict['embed'] = Remaining DummyDict['offset'] = Offset DummyDict['length'] = 0 DummyDict['value'] = '0' DummyDict['type'] = 'Reserved' DummyDict['help'] = '' DummyDict['subreg'] = [] self._CfgItemList.append(DummyDict) else: ConfigDict[Key.lower()] = Remaining # Check CFG line # gCfgData.VariableName | * | 0x01 | 0x1 Clear = False Match = TksRegExp.match(DscLine) if Match: DscLine = 'gCfgData.%s' % Match.group(2) if DscLine.startswith('gCfgData.'): Match = CfgRegExp.match(DscLine[9:]) else: Match = None if Match: ConfigDict['space'] = 'gCfgData' ConfigDict['cname'] = Match.group(1) if Match.group(2) != '*': Offset = int(Match.group(2), 16) ConfigDict['offset'] = Offset ConfigDict['order'] = self.GetOrderNumber( ConfigDict['offset'], ConfigDict['order']) Value = Match.group(4).strip() if Match.group(3).startswith("0x"): Length = int(Match.group(3), 16) else: Length = int(Match.group(3)) Offset += Length ConfigDict['length'] = Length Match = re.match("\\$\\((\\w+)\\)", Value) if Match: if Match.group(1) in self._MacroDict: Value = self._MacroDict[Match.group(1)] ConfigDict['value'] = Value if re.match("\\{\\s*FILE:(.+)\\}", Value): # Expand embedded binary file ValArray = self.ValueToByteArray(ConfigDict['value'], ConfigDict['length']) NewValue = Bytes2Str(ValArray) self._DscLines[-1] = re.sub(r'(.*)(\{\s*FILE:.+\})', r'\1 %s' % NewValue, self._DscLines[-1]) ConfigDict['value'] = NewValue if ConfigDict['name'] == '': # Clear BSF specific items ConfigDict['bsfname'] = '' ConfigDict['help'] = '' ConfigDict['type'] = '' ConfigDict['option'] = '' self.CfgDuplicationCheck(CfgDict, ConfigDict['cname']) self._CfgItemList.append(ConfigDict.copy()) Clear = True else: # It could be a virtual item as below # !BSF FIELD:{SerialDebugPortAddress0:1} # or # @Bsf FIELD:{SerialDebugPortAddress0:1b} Match = re.match(r"^\s*#\s+(!BSF)\s+FIELD:{(.+)}", DscLine) if Match: BitFieldTxt = Match.group(2) Match = re.match("(.+):(\\d+)b([BWDQ])?", BitFieldTxt) if not Match: raise Exception("Incorrect bit field \ format '%s' !" 
% BitFieldTxt) UnitBitLen = 1 SubCfgDict = ConfigDict.copy() SubCfgDict['cname'] = Match.group(1) SubCfgDict['bitlength'] = int( Match.group(2)) * UnitBitLen if SubCfgDict['bitlength'] > 0: LastItem = self._CfgItemList[-1] if len(LastItem['subreg']) == 0: SubOffset = 0 else: SubOffset = \ LastItem['subreg'][-1]['bitoffset'] \ + LastItem['subreg'][-1]['bitlength'] if Match.group(3) == 'B': SubCfgDict['bitunit'] = 1 elif Match.group(3) == 'W': SubCfgDict['bitunit'] = 2 elif Match.group(3) == 'Q': SubCfgDict['bitunit'] = 8 else: SubCfgDict['bitunit'] = 4 SubCfgDict['bitoffset'] = SubOffset SubCfgDict['order'] = self.GetOrderNumber( SubCfgDict['offset'], SubCfgDict['order'], SubOffset) SubCfgDict['value'] = '' SubCfgDict['cname'] = '%s_%s' % (LastItem['cname'], Match.group(1)) self.CfgDuplicationCheck(CfgDict, SubCfgDict['cname']) LastItem['subreg'].append(SubCfgDict.copy()) Clear = True if Clear: ConfigDict['name'] = '' ConfigDict['find'] = '' ConfigDict['struct'] = '' ConfigDict['embed'] = '' ConfigDict['marker'] = '' ConfigDict['comment'] = '' ConfigDict['order'] = -1 ConfigDict['subreg'] = [] ConfigDict['option'] = '' ConfigDict['condition'] = '' return Error def GetBsfBitFields(self, subitem, bytes): start = subitem['bitoffset'] end = start + subitem['bitlength'] bitsvalue = ''.join('{0:08b}'.format(i) for i in bytes[::-1]) bitsvalue = bitsvalue[::-1] bitslen = len(bitsvalue) if start > bitslen or end > bitslen: raise Exception("Invalid bits offset [%d,%d] %d for %s" % (start, end, bitslen, subitem['name'])) return '0x%X' % (int(bitsvalue[start:end][::-1], 2)) def UpdateBsfBitFields(self, SubItem, NewValue, ValueArray): Start = SubItem['bitoffset'] End = Start + SubItem['bitlength'] Blen = len(ValueArray) BitsValue = ''.join('{0:08b}'.format(i) for i in ValueArray[::-1]) BitsValue = BitsValue[::-1] BitsLen = len(BitsValue) if Start > BitsLen or End > BitsLen: raise Exception("Invalid bits offset [%d,%d] %d for %s" % (Start, End, BitsLen, SubItem['name'])) BitsValue = BitsValue[:Start] + '{0:0{1}b}'.format( NewValue, SubItem['bitlength'])[::-1] + BitsValue[End:] ValueArray[:] = bytearray.fromhex( '{0:0{1}x}'.format(int(BitsValue[::-1], 2), Blen * 2))[::-1] def CreateVarDict(self): Error = 0 self._VarDict = {} if len(self._CfgItemList) > 0: Item = self._CfgItemList[-1] self._VarDict['_LENGTH_'] = '%d' % (Item['offset'] + Item['length']) for Item in self._CfgItemList: Embed = Item['embed'] Match = re.match("^(\\w+):(\\w+):(START|END)", Embed) if Match: StructName = Match.group(1) VarName = '_%s_%s_' % (Match.group(3), StructName) if Match.group(3) == 'END': self._VarDict[VarName] = Item['offset'] + Item['length'] self._VarDict['_LENGTH_%s_' % StructName] = \ self._VarDict['_END_%s_' % StructName] - \ self._VarDict['_START_%s_' % StructName] if Match.group(2).startswith('TAG_'): if (self.Mode != 'FSP') and (self._VarDict ['_LENGTH_%s_' % StructName] % 4): raise Exception("Size of structure '%s' is %d, \ not DWORD aligned !" 
% (StructName, self._VarDict['_LENGTH_%s_' % StructName])) self._VarDict['_TAG_%s_' % StructName] = int( Match.group(2)[4:], 16) & 0xFFF else: self._VarDict[VarName] = Item['offset'] if Item['marker']: self._VarDict['_OFFSET_%s_' % Item['marker'].strip()] = \ Item['offset'] return Error def UpdateBsfBitUnit(self, Item): BitTotal = 0 BitOffset = 0 StartIdx = 0 Unit = None UnitDec = {1: 'BYTE', 2: 'WORD', 4: 'DWORD', 8: 'QWORD'} for Idx, SubItem in enumerate(Item['subreg']): if Unit is None: Unit = SubItem['bitunit'] BitLength = SubItem['bitlength'] BitTotal += BitLength BitOffset += BitLength if BitOffset > 64 or BitOffset > Unit * 8: break if BitOffset == Unit * 8: for SubIdx in range(StartIdx, Idx + 1): Item['subreg'][SubIdx]['bitunit'] = Unit BitOffset = 0 StartIdx = Idx + 1 Unit = None if BitOffset > 0: raise Exception("Bit fields cannot fit into %s for \ '%s.%s' !" % (UnitDec[Unit], Item['cname'], SubItem['cname'])) ExpectedTotal = Item['length'] * 8 if Item['length'] * 8 != BitTotal: raise Exception("Bit fields total length (%d) does not match \ length (%d) of '%s' !" % (BitTotal, ExpectedTotal, Item['cname'])) def UpdateDefaultValue(self): Error = 0 for Idx, Item in enumerate(self._CfgItemList): if len(Item['subreg']) == 0: Value = Item['value'] if (len(Value) > 0) and (Value[0] == '{' or Value[0] == "'" or Value[0] == '"'): # {XXX} or 'XXX' strings self.FormatListValue(self._CfgItemList[Idx]) else: Match = re.match("(0x[0-9a-fA-F]+|[0-9]+)", Value) if not Match: NumValue = self.EvaluateExpress(Value) Item['value'] = '0x%X' % NumValue else: ValArray = self.ValueToByteArray(Item['value'], Item['length']) for SubItem in Item['subreg']: SubItem['value'] = self.GetBsfBitFields(SubItem, ValArray) self.UpdateBsfBitUnit(Item) return Error @staticmethod def ExpandIncludeFiles(FilePath, CurDir=''): if CurDir == '': CurDir = os.path.dirname(FilePath) FilePath = os.path.basename(FilePath) InputFilePath = os.path.join(CurDir, FilePath) File = open(InputFilePath, "r") Lines = File.readlines() File.close() NewLines = [] for LineNum, Line in enumerate(Lines): Match = re.match("^!include\\s*(.+)?$", Line) if Match: IncPath = Match.group(1) TmpPath = os.path.join(CurDir, IncPath) OrgPath = TmpPath if not os.path.exists(TmpPath): CurDir = os.path.join(os.path.dirname( os.path.realpath(__file__)), "..", "..") TmpPath = os.path.join(CurDir, IncPath) if not os.path.exists(TmpPath): raise Exception("ERROR: Cannot open include file '%s'." % OrgPath) else: NewLines.append(('# Included from file: %s\n' % IncPath, TmpPath, 0)) NewLines.append(('# %s\n' % ('=' * 80), TmpPath, 0)) NewLines.extend(CGenCfgData.ExpandIncludeFiles (IncPath, CurDir)) else: NewLines.append((Line, InputFilePath, LineNum)) return NewLines def OverrideDefaultValue(self, DltFile): Error = 0 DltLines = CGenCfgData.ExpandIncludeFiles(DltFile) PlatformId = None for Line, FilePath, LineNum in DltLines: Line = Line.strip() if not Line or Line.startswith('#'): continue Match = re.match("\\s*(\\w+)\\.(\\w+)(\\.\\w+)?\\s*\\|\\s*(.+)", Line) if not Match: raise Exception("Unrecognized line '%s' (File:'%s' Line:%d) !" 
% (Line, FilePath, LineNum + 1)) Found = False InScope = False for Idx, Item in enumerate(self._CfgItemList): if not InScope: if not (Item['embed'].endswith(':START') and Item['embed'].startswith(Match.group(1))): continue InScope = True if Item['cname'] == Match.group(2): Found = True break if Item['embed'].endswith(':END') and \ Item['embed'].startswith(Match.group(1)): break Name = '%s.%s' % (Match.group(1), Match.group(2)) if not Found: ErrItem = Match.group(2) if InScope else Match.group(1) raise Exception("Invalid configuration '%s' in '%s' \ (File:'%s' Line:%d) !" % (ErrItem, Name, FilePath, LineNum + 1)) ValueStr = Match.group(4).strip() if Match.group(3) is not None: # This is a subregion item BitField = Match.group(3)[1:] Found = False if len(Item['subreg']) > 0: for SubItem in Item['subreg']: if SubItem['cname'] == '%s_%s' % \ (Item['cname'], BitField): Found = True break if not Found: raise Exception("Invalid configuration bit field \ '%s' in '%s.%s' (File:'%s' Line:%d) !" % (BitField, Name, BitField, FilePath, LineNum + 1)) try: Value = int(ValueStr, 16) if ValueStr.startswith('0x') \ else int(ValueStr, 10) except Exception: raise Exception("Invalid value '%s' for bit field '%s.%s' \ (File:'%s' Line:%d) !" % (ValueStr, Name, BitField, FilePath, LineNum + 1)) if Value >= 2 ** SubItem['bitlength']: raise Exception("Invalid configuration bit field value \ '%s' for '%s.%s' (File:'%s' Line:%d) !" % (Value, Name, BitField, FilePath, LineNum + 1)) ValArray = self.ValueToByteArray(Item['value'], Item['length']) self.UpdateBsfBitFields(SubItem, Value, ValArray) if Item['value'].startswith('{'): Item['value'] = '{' + ', '.join('0x%02X' % i for i in ValArray) + '}' else: BitsValue = ''.join('{0:08b}'.format(i) for i in ValArray[::-1]) Item['value'] = '0x%X' % (int(BitsValue, 2)) else: if Item['value'].startswith('{') and \ not ValueStr.startswith('{'): raise Exception("Data array required for '%s' \ (File:'%s' Line:%d) !" % (Name, FilePath, LineNum + 1)) Item['value'] = ValueStr if Name == 'PLATFORMID_CFG_DATA.PlatformId': PlatformId = ValueStr if (PlatformId is None) and (self.Mode != 'FSP'): raise Exception("PLATFORMID_CFG_DATA.PlatformId is missing \ in file '%s' !" 
% (DltFile)) return Error def ProcessMultilines(self, String, MaxCharLength): Multilines = '' StringLength = len(String) CurrentStringStart = 0 StringOffset = 0 BreakLineDict = [] if len(String) <= MaxCharLength: while (StringOffset < StringLength): if StringOffset >= 1: if String[StringOffset - 1] == '\\' and \ String[StringOffset] == 'n': BreakLineDict.append(StringOffset + 1) StringOffset += 1 if BreakLineDict != []: for Each in BreakLineDict: Multilines += " %s\n" % String[CurrentStringStart:Each].\ lstrip() CurrentStringStart = Each if StringLength - CurrentStringStart > 0: Multilines += " %s\n" % String[CurrentStringStart:].\ lstrip() else: Multilines = " %s\n" % String else: NewLineStart = 0 NewLineCount = 0 FoundSpaceChar = False while(StringOffset < StringLength): if StringOffset >= 1: if NewLineCount >= MaxCharLength - 1: if String[StringOffset] == ' ' and \ StringLength - StringOffset > 10: BreakLineDict.append(NewLineStart + NewLineCount) NewLineStart = NewLineStart + NewLineCount NewLineCount = 0 FoundSpaceChar = True elif StringOffset == StringLength - 1 \ and FoundSpaceChar is False: BreakLineDict.append(0) if String[StringOffset - 1] == '\\' and \ String[StringOffset] == 'n': BreakLineDict.append(StringOffset + 1) NewLineStart = StringOffset + 1 NewLineCount = 0 StringOffset += 1 NewLineCount += 1 if BreakLineDict != []: BreakLineDict.sort() for Each in BreakLineDict: if Each > 0: Multilines += " %s\n" % String[ CurrentStringStart:Each].lstrip() CurrentStringStart = Each if StringLength - CurrentStringStart > 0: Multilines += " %s\n" % String[CurrentStringStart:].\ lstrip() return Multilines def CreateField(self, Item, Name, Length, Offset, Struct, BsfName, Help, Option, BitsLength=None): PosName = 28 NameLine = '' HelpLine = '' OptionLine = '' if Length == 0 and Name == 'Dummy': return '\n' IsArray = False if Length in [1, 2, 4, 8]: Type = "UINT%d" % (Length * 8) else: IsArray = True Type = "UINT8" if Item and Item['value'].startswith('{'): Type = "UINT8" IsArray = True if Struct != '': Type = Struct if Struct in ['UINT8', 'UINT16', 'UINT32', 'UINT64']: IsArray = True Unit = int(Type[4:]) // 8 Length = Length / Unit else: IsArray = False if IsArray: Name = Name + '[%d]' % Length if len(Type) < PosName: Space1 = PosName - len(Type) else: Space1 = 1 if BsfName != '': NameLine = " %s\n" % BsfName else: NameLine = "\n" if Help != '': HelpLine = self.ProcessMultilines(Help, 80) if Option != '': OptionLine = self.ProcessMultilines(Option, 80) if BitsLength is None: BitsLength = '' else: BitsLength = ' : %d' % BitsLength return "\n/** %s%s%s**/\n %s%s%s%s;\n" % \ (NameLine, HelpLine, OptionLine, Type, ' ' * Space1, Name, BitsLength) def SplitTextBody(self, TextBody): Marker1 = '{ /* _COMMON_STRUCT_START_ */' Marker2 = '; /* _COMMON_STRUCT_END_ */' ComBody = [] TxtBody = [] IsCommon = False for Line in TextBody: if Line.strip().endswith(Marker1): Line = Line.replace(Marker1[1:], '') IsCommon = True if Line.strip().endswith(Marker2): Line = Line.replace(Marker2[1:], '') if IsCommon: ComBody.append(Line) IsCommon = False continue if IsCommon: ComBody.append(Line) else: TxtBody.append(Line) return ComBody, TxtBody def GetStructArrayInfo(self, Input): ArrayStr = Input.split('[') Name = ArrayStr[0] if len(ArrayStr) > 1: NumStr = ''.join(c for c in ArrayStr[-1] if c.isdigit()) NumStr = '1000' if len(NumStr) == 0 else NumStr ArrayNum = int(NumStr) else: ArrayNum = 0 return Name, ArrayNum def PostProcessBody(self, TextBody, IncludeEmbedOnly=True): NewTextBody = [] OldTextBody = [] 
IncTextBody = [] StructBody = [] IncludeLine = False EmbedFound = False StructName = '' ArrayVarName = '' VariableName = '' Count = 0 Level = 0 IsCommonStruct = False for Line in TextBody: if Line.startswith('#define '): IncTextBody.append(Line) continue if not Line.startswith('/* EMBED_STRUCT:'): Match = False else: Match = re.match("^/\\*\\sEMBED_STRUCT:([\\w\\[\\]\\*]+):\ ([\\w\\[\\]\\*]+):(\\w+):(START|END)([\\s\\d]+)\\*/([\\s\\S]*)", Line) if Match: ArrayMarker = Match.group(5) if Match.group(4) == 'END': Level -= 1 if Level == 0: Line = Match.group(6) else: # 'START' Level += 1 if Level == 1: Line = Match.group(6) else: EmbedFound = True TagStr = Match.group(3) if TagStr.startswith('TAG_'): try: TagVal = int(TagStr[4:], 16) except Exception: TagVal = -1 if (TagVal >= 0) and (TagVal < self._MinCfgTagId): IsCommonStruct = True if Level == 1: if IsCommonStruct: Suffix = ' /* _COMMON_STRUCT_START_ */' else: Suffix = '' StructBody = ['typedef struct {%s' % Suffix] StructName = Match.group(1) StructType = Match.group(2) VariableName = Match.group(3) MatchOffset = re.search('/\\*\\*\\sOffset\\s0x\ ([a-fA-F0-9]+)', Line) if MatchOffset: Offset = int(MatchOffset.group(1), 16) else: Offset = None IncludeLine = True ModifiedStructType = StructType.rstrip() if ModifiedStructType.endswith(']'): Idx = ModifiedStructType.index('[') if ArrayMarker != ' ': # Auto array size OldTextBody.append('') ArrayVarName = VariableName if int(ArrayMarker) == 1000: Count = 1 else: Count = int(ArrayMarker) + 1000 else: if Count < 1000: Count += 1 VariableTemp = ArrayVarName + '[%d]' % ( Count if Count < 1000 else Count - 1000) OldTextBody[-1] = self.CreateField( None, VariableTemp, 0, Offset, ModifiedStructType[:Idx], '', 'Structure Array', '') else: ArrayVarName = '' OldTextBody.append(self.CreateField( None, VariableName, 0, Offset, ModifiedStructType, '', '', '')) if IncludeLine: StructBody.append(Line) else: OldTextBody.append(Line) if Match and Match.group(4) == 'END': if Level == 0: if (StructType != Match.group(2)) or \ (VariableName != Match.group(3)): print("Unmatched struct name '%s' and '%s' !" 
% (StructName, Match.group(2))) else: if IsCommonStruct: Suffix = ' /* _COMMON_STRUCT_END_ */' else: Suffix = '' Line = '} %s;%s\n\n\n' % (StructName, Suffix) StructBody.append(Line) if (Line not in NewTextBody) and \ (Line not in OldTextBody): NewTextBody.extend(StructBody) IncludeLine = False IsCommonStruct = False if not IncludeEmbedOnly: NewTextBody.extend(OldTextBody) if EmbedFound: NewTextBody = self.PostProcessBody(NewTextBody, False) NewTextBody = IncTextBody + NewTextBody return NewTextBody def WriteHeaderFile(self, TxtBody, FileName, Type='h'): FileNameDef = os.path.basename(FileName).replace('.', '_') FileNameDef = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', FileNameDef) FileNameDef = re.sub('([a-z0-9])([A-Z])', r'\1_\2', FileNameDef).upper() Lines = [] Lines.append("%s\n" % GetCopyrightHeader(Type)) Lines.append("#ifndef __%s__\n" % FileNameDef) Lines.append("#define __%s__\n\n" % FileNameDef) if Type == 'h': Lines.append("#pragma pack(1)\n\n") Lines.extend(TxtBody) if Type == 'h': Lines.append("#pragma pack()\n\n") Lines.append("#endif\n") # Don't rewrite if the contents are the same Create = True if os.path.exists(FileName): HdrFile = open(FileName, "r") OrgTxt = HdrFile.read() HdrFile.close() NewTxt = ''.join(Lines) if OrgTxt == NewTxt: Create = False if Create: HdrFile = open(FileName, "w") HdrFile.write(''.join(Lines)) HdrFile.close() def CreateHeaderFile(self, HdrFileName, ComHdrFileName=''): LastStruct = '' SpaceIdx = 0 Offset = 0 FieldIdx = 0 LastFieldIdx = 0 ResvOffset = 0 ResvIdx = 0 TxtBody = [] LineBuffer = [] CfgTags = [] LastVisible = True TxtBody.append("typedef struct {\n") for Item in self._CfgItemList: # Search for CFGDATA tags Embed = Item["embed"].upper() if Embed.endswith(':START'): Match = re.match(r'(\w+)_CFG_DATA:TAG_([0-9A-F]+):START', Embed) if Match: TagName = Match.group(1) TagId = int(Match.group(2), 16) CfgTags.append((TagId, TagName)) # Only process visible items NextVisible = LastVisible if LastVisible and (Item['header'] == 'OFF'): NextVisible = False ResvOffset = Item['offset'] elif (not LastVisible) and Item['header'] == 'ON': NextVisible = True Name = "ReservedUpdSpace%d" % ResvIdx ResvIdx = ResvIdx + 1 TxtBody.append(self.CreateField( Item, Name, Item["offset"] - ResvOffset, ResvOffset, '', '', '', '')) FieldIdx += 1 if Offset < Item["offset"]: if LastVisible: Name = "UnusedUpdSpace%d" % SpaceIdx LineBuffer.append(self.CreateField (Item, Name, Item["offset"] - Offset, Offset, '', '', '', '')) FieldIdx += 1 SpaceIdx = SpaceIdx + 1 Offset = Item["offset"] LastVisible = NextVisible Offset = Offset + Item["length"] if LastVisible: for Each in LineBuffer: TxtBody.append(Each) LineBuffer = [] Embed = Item["embed"].upper() if Embed.endswith(':START') or Embed.endswith(':END'): # EMBED_STRUCT: StructName : \ # ItemName : VariableName : START|END Name, ArrayNum = self.GetStructArrayInfo(Item["struct"]) Remaining = Item["embed"] if (LastFieldIdx + 1 == FieldIdx) and (LastStruct == Name): ArrayMarker = ' ' else: ArrayMarker = '%d' % ArrayNum LastFieldIdx = FieldIdx LastStruct = Name Marker = '/* EMBED_STRUCT:%s:%s%s*/ ' % (Name, Remaining, ArrayMarker) # if Embed.endswith(':START') and Comment != '': # Marker = '/* COMMENT:%s */ \n' % Item["comment"] + Marker else: if Embed == '': Marker = '' else: self.Error = "Invalid embedded structure \ format '%s'!\n" % Item["embed"] return 4 # Generate bit fields for structure if len(Item['subreg']) > 0 and Item["struct"]: StructType = Item["struct"] StructName, ArrayNum = self.GetStructArrayInfo(StructType) if 
(LastFieldIdx + 1 == FieldIdx) and \ (LastStruct == Item["struct"]): ArrayMarker = ' ' else: ArrayMarker = '%d' % ArrayNum TxtBody.append('/* EMBED_STRUCT:%s:%s:%s:START%s*/\n' % (StructName, StructType, Item["cname"], ArrayMarker)) for SubItem in Item['subreg']: Name = SubItem["cname"] if Name.startswith(Item["cname"]): Name = Name[len(Item["cname"]) + 1:] Line = self.CreateField( SubItem, Name, SubItem["bitunit"], SubItem["offset"], SubItem['struct'], SubItem['name'], SubItem['help'], SubItem['option'], SubItem['bitlength']) TxtBody.append(Line) TxtBody.append('/* EMBED_STRUCT:%s:%s:%s:END%s*/\n' % (StructName, StructType, Item["cname"], ArrayMarker)) LastFieldIdx = FieldIdx LastStruct = Item["struct"] FieldIdx += 1 else: FieldIdx += 1 Line = Marker + self.CreateField( Item, Item["cname"], Item["length"], Item["offset"], Item['struct'], Item['name'], Item['help'], Item['option']) TxtBody.append(Line) TxtBody.append("}\n\n") # Handle the embedded data structure TxtBody = self.PostProcessBody(TxtBody) ComBody, TxtBody = self.SplitTextBody(TxtBody) # Prepare TAG defines PltTagDefTxt = ['\n'] ComTagDefTxt = ['\n'] for TagId, TagName in sorted(CfgTags): TagLine = '#define %-30s 0x%03X\n' % ('CDATA_%s_TAG' % TagName, TagId) if TagId < self._MinCfgTagId: # TAG ID < 0x100, it is a generic TAG ComTagDefTxt.append(TagLine) else: PltTagDefTxt.append(TagLine) PltTagDefTxt.append('\n\n') ComTagDefTxt.append('\n\n') # Write file back self.WriteHeaderFile(PltTagDefTxt + TxtBody, HdrFileName) if ComHdrFileName: self.WriteHeaderFile(ComTagDefTxt + ComBody, ComHdrFileName) return 0 def UpdateConfigItemValue(self, Item, ValueStr): IsArray = True if Item['value'].startswith('{') else False IsString = True if Item['value'].startswith("'") else False Bytes = self.ValueToByteArray(ValueStr, Item['length']) if IsString: NewValue = "'%s'" % Bytes.decode("utf-8") elif IsArray: NewValue = Bytes2Str(Bytes) else: Fmt = '0x%X' if Item['value'].startswith('0x') else '%d' NewValue = Fmt % Bytes2Val(Bytes) Item['value'] = NewValue def LoadDefaultFromBinaryArray(self, BinDat, IgnoreFind=False): FindOff = 0 StartOff = 0 for Item in self._CfgItemList: if Item['length'] == 0: continue if not IgnoreFind and Item['find']: FindBin = Item['find'].encode() Offset = BinDat.find(FindBin) if Offset >= 0: TestOff = BinDat[Offset+len(FindBin):].find(FindBin) if TestOff >= 0: raise Exception('Multiple match found for "%s" !' % Item['find']) FindOff = Offset + len(FindBin) StartOff = Item['offset'] else: raise Exception('Could not find "%s" !' % Item['find']) if Item['offset'] + Item['length'] > len(BinDat): raise Exception('Mismatching format between DSC \ and BIN files !') Offset = FindOff + (Item['offset'] - StartOff) ValStr = Bytes2Str(BinDat[Offset: Offset + Item['length']]) self.UpdateConfigItemValue(Item, ValStr) self.UpdateDefaultValue() def PatchBinaryArray(self, BinDat): FileOff = 0 Offset = 0 FindOff = 0 PatchList = [] CfgBin = bytearray() for Item in self._CfgItemList: if Item['length'] == 0: continue if Item['find']: if len(CfgBin) > 0: PatchList.append((FileOff, CfgBin)) FindBin = Item['find'].encode() FileOff = BinDat.find(FindBin) if FileOff < 0: raise Exception('Could not find "%s" !' % Item['find']) else: TestOff = BinDat[FileOff+len(FindBin):].find(FindBin) if TestOff >= 0: raise Exception('Multiple match found for "%s" !' 
% Item['find']) FileOff += len(FindBin) Offset = Item['offset'] FindOff = Offset CfgBin = bytearray() if Item['offset'] > Offset: Gap = Item['offset'] - Offset CfgBin.extend(b'\x00' * Gap) if Item['type'] == 'Reserved' and Item['option'] == '$SKIP': # keep old data NewOff = FileOff + (Offset - FindOff) FileData = bytearray(BinDat[NewOff: NewOff + Item['length']]) CfgBin.extend(FileData) else: CfgBin.extend(self.ValueToByteArray(Item['value'], Item['length'])) Offset = Item['offset'] + Item['length'] if len(CfgBin) > 0: PatchList.append((FileOff, CfgBin)) for FileOff, CfgBin in PatchList: Length = len(CfgBin) if FileOff + Length < len(BinDat): BinDat[FileOff:FileOff+Length] = CfgBin[:] return BinDat def GenerateBinaryArray(self): Offset = 0 BinDat = bytearray() for Item in self._CfgItemList: if Item['offset'] > Offset: Gap = Item['offset'] - Offset BinDat.extend(b'\x00' * Gap) BinDat.extend(self.ValueToByteArray(Item['value'], Item['length'])) Offset = Item['offset'] + Item['length'] return BinDat def GenerateBinary(self, BinFileName): BinFile = open(BinFileName, "wb") BinFile.write(self.GenerateBinaryArray()) BinFile.close() return 0 def GenerateDataIncFile(self, DatIncFileName, BinFile=None): # Put a prefix GUID before CFGDATA so that it can be located later on Prefix = b'\xa7\xbd\x7f\x73\x20\x1e\x46\xd6\xbe\x8f\ x64\x12\x05\x8d\x0a\xa8' if BinFile: Fin = open(BinFile, 'rb') BinDat = Prefix + bytearray(Fin.read()) Fin.close() else: BinDat = Prefix + self.GenerateBinaryArray() FileName = os.path.basename(DatIncFileName).upper() FileName = FileName.replace('.', '_') TxtLines = [] TxtLines.append("UINT8 mConfigDataBlob[%d] = {\n" % len(BinDat)) Count = 0 Line = [' '] for Each in BinDat: Line.append('0x%02X, ' % Each) Count = Count + 1 if (Count & 0x0F) == 0: Line.append('\n') TxtLines.append(''.join(Line)) Line = [' '] if len(Line) > 1: TxtLines.append(''.join(Line) + '\n') TxtLines.append("};\n\n") self.WriteHeaderFile(TxtLines, DatIncFileName, 'inc') return 0 def CheckCfgData(self): # Check if CfgData contains any duplicated name def AddItem(Item, ChkList): Name = Item['cname'] if Name in ChkList: return Item if Name not in ['Dummy', 'Reserved', 'CfgHeader', 'CondValue']: ChkList.append(Name) return None Duplicate = None ChkList = [] for Item in self._CfgItemList: Duplicate = AddItem(Item, ChkList) if not Duplicate: for SubItem in Item['subreg']: Duplicate = AddItem(SubItem, ChkList) if Duplicate: break if Duplicate: break if Duplicate: self.Error = "Duplicated CFGDATA '%s' found !\n" % \ Duplicate['cname'] return -1 return 0 def PrintData(self): for Item in self._CfgItemList: if not Item['length']: continue print("%-10s @Offset:0x%04X Len:%3d Val:%s" % (Item['cname'], Item['offset'], Item['length'], Item['value'])) for SubItem in Item['subreg']: print(" %-20s BitOff:0x%04X BitLen:%-3d Val:%s" % (SubItem['cname'], SubItem['bitoffset'], SubItem['bitlength'], SubItem['value'])) def FormatArrayValue(self, Input, Length): Dat = self.ValueToByteArray(Input, Length) return ','.join('0x%02X' % Each for Each in Dat) def GetItemOptionList(self, Item): TmpList = [] if Item['type'] == "Combo": if not Item['option'] in self._BuidinOption: OptList = Item['option'].split(',') for Option in OptList: Option = Option.strip() try: (OpVal, OpStr) = Option.split(':') except Exception: raise Exception("Invalide option format '%s' !" 
% Option) TmpList.append((OpVal, OpStr)) return TmpList def WriteBsfStruct(self, BsfFd, Item): if Item['type'] == "None": Space = "gPlatformFspPkgTokenSpaceGuid" else: Space = Item['space'] Line = " $%s_%s" % (Space, Item['cname']) Match = re.match("\\s*(\\{.+\\})\\s*", Item['value']) if Match: DefaultValue = self.FormatArrayValue(Match.group(1).strip(), Item['length']) else: DefaultValue = Item['value'].strip() if 'bitlength' in Item: if Item['bitlength']: BsfFd.write(" %s%s%4d bits $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['bitlength'], DefaultValue)) else: if Item['length']: BsfFd.write(" %s%s%4d bytes $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['length'], DefaultValue)) return self.GetItemOptionList(Item) def GetBsfOption(self, OptionName): if OptionName in self._CfgOptsDict: return self._CfgOptsDict[OptionName] else: return OptionName def WriteBsfOption(self, BsfFd, Item): PcdName = Item['space'] + '_' + Item['cname'] WriteHelp = 0 BsfLines = [] if Item['type'] == "Combo": if Item['option'] in self._BuidinOption: Options = self._BuidinOption[Item['option']] else: Options = self.GetBsfOption(PcdName) BsfLines.append(' %s $%s, "%s", &%s,\n' % ( Item['type'], PcdName, Item['name'], Options)) WriteHelp = 1 elif Item['type'].startswith("EditNum"): Match = re.match("EditNum\\s*,\\s*(HEX|DEC)\\s*,\\s*\\(\ (\\d+|0x[0-9A-Fa-f]+)\\s*,\\s*(\\d+|0x[0-9A-Fa-f]+)\\)", Item['type']) if Match: BsfLines.append(' EditNum $%s, "%s", %s,\n' % ( PcdName, Item['name'], Match.group(1))) WriteHelp = 2 elif Item['type'].startswith("EditText"): BsfLines.append(' %s $%s, "%s",\n' % (Item['type'], PcdName, Item['name'])) WriteHelp = 1 elif Item['type'] == "Table": Columns = Item['option'].split(',') if len(Columns) != 0: BsfLines.append(' %s $%s "%s",' % (Item['type'], PcdName, Item['name'])) for Col in Columns: Fmt = Col.split(':') if len(Fmt) != 3: raise Exception("Column format '%s' is invalid !" % Fmt) try: Dtype = int(Fmt[1].strip()) except Exception: raise Exception("Column size '%s' is invalid !" 
% Fmt[1]) BsfLines.append('\n Column "%s", %d bytes, %s' % (Fmt[0].strip(), Dtype, Fmt[2].strip())) BsfLines.append(',\n') WriteHelp = 1 if WriteHelp > 0: HelpLines = Item['help'].split('\\n\\r') FirstLine = True for HelpLine in HelpLines: if FirstLine: FirstLine = False BsfLines.append(' Help "%s"\n' % (HelpLine)) else: BsfLines.append(' "%s"\n' % (HelpLine)) if WriteHelp == 2: BsfLines.append(' "Valid range: %s ~ %s"\n' % (Match.group(2), Match.group(3))) if len(Item['condition']) > 4: CondList = Item['condition'].split(',') Idx = 0 for Cond in CondList: Cond = Cond.strip() if Cond.startswith('#'): BsfLines.insert(Idx, Cond + '\n') Idx += 1 elif Cond.startswith('@#'): BsfLines.append(Cond[1:] + '\n') for Line in BsfLines: BsfFd.write(Line) def WriteBsfPages(self, PageTree, BsfFd): BsfFd.write('\n') Key = next(iter(PageTree)) for Page in PageTree[Key]: PageName = next(iter(Page)) BsfFd.write('Page "%s"\n' % self._CfgPageDict[PageName]) if len(PageTree[Key]): self.WriteBsfPages(Page, BsfFd) BsfItems = [] for Item in self._CfgItemList: if Item['name'] != '': if Item['page'] != PageName: continue if len(Item['subreg']) > 0: for SubItem in Item['subreg']: if SubItem['name'] != '': BsfItems.append(SubItem) else: BsfItems.append(Item) BsfItems.sort(key=lambda x: x['order']) for Item in BsfItems: self.WriteBsfOption(BsfFd, Item) BsfFd.write("EndPage\n\n") def GenerateBsfFile(self, BsfFile): if BsfFile == '': self.Error = "BSF output file '%s' is invalid" % BsfFile return 1 Error = 0 OptionDict = {} BsfFd = open(BsfFile, "w") BsfFd.write("%s\n" % GetCopyrightHeader('bsf')) BsfFd.write("%s\n" % self._GlobalDataDef) BsfFd.write("StructDef\n") NextOffset = -1 for Item in self._CfgItemList: if Item['find'] != '': BsfFd.write('\n Find "%s"\n' % Item['find']) NextOffset = Item['offset'] + Item['length'] if Item['name'] != '': if NextOffset != Item['offset']: BsfFd.write(" Skip %d bytes\n" % (Item['offset'] - NextOffset)) if len(Item['subreg']) > 0: NextOffset = Item['offset'] BitsOffset = NextOffset * 8 for SubItem in Item['subreg']: BitsOffset += SubItem['bitlength'] if SubItem['name'] == '': if 'bitlength' in SubItem: BsfFd.write(" Skip %d bits\n" % (SubItem['bitlength'])) else: BsfFd.write(" Skip %d bytes\n" % (SubItem['length'])) else: Options = self.WriteBsfStruct(BsfFd, SubItem) if len(Options) > 0: OptionDict[SubItem ['space']+'_'+SubItem ['cname']] = Options NextBitsOffset = (Item['offset'] + Item['length']) * 8 if NextBitsOffset > BitsOffset: BitsGap = NextBitsOffset - BitsOffset BitsRemain = BitsGap % 8 if BitsRemain: BsfFd.write(" Skip %d bits\n" % BitsRemain) BitsGap -= BitsRemain BytesRemain = BitsGap // 8 if BytesRemain: BsfFd.write(" Skip %d bytes\n" % BytesRemain) NextOffset = Item['offset'] + Item['length'] else: NextOffset = Item['offset'] + Item['length'] Options = self.WriteBsfStruct(BsfFd, Item) if len(Options) > 0: OptionDict[Item['space']+'_'+Item['cname']] = Options BsfFd.write("\nEndStruct\n\n") BsfFd.write("%s" % self._BuidinOptionTxt) NameList = [] OptionList = [] for Each in sorted(OptionDict): if OptionDict[Each] not in OptionList: NameList.append(Each) OptionList.append(OptionDict[Each]) BsfFd.write("List &%s\n" % Each) for Item in OptionDict[Each]: BsfFd.write(' Selection %s , "%s"\n' % (self.EvaluateExpress(Item[0]), Item[1])) BsfFd.write("EndList\n\n") else: # Item has idential options as other item # Try to reuse the previous options instead Idx = OptionList.index(OptionDict[Each]) self._CfgOptsDict[Each] = NameList[Idx] BsfFd.write("BeginInfoBlock\n") BsfFd.write(' 
PPVer "%s"\n' % (self._CfgBlkDict['ver'])) BsfFd.write(' Description "%s"\n' % (self._CfgBlkDict['name'])) BsfFd.write("EndInfoBlock\n\n") self.WriteBsfPages(self._CfgPageTree, BsfFd) BsfFd.close() return Error def WriteDeltaLine(self, OutLines, Name, ValStr, IsArray): if IsArray: Output = '%s | { %s }' % (Name, ValStr) else: Output = '%s | 0x%X' % (Name, Array2Val(ValStr)) OutLines.append(Output) def WriteDeltaFile(self, OutFile, PlatformId, OutLines): DltFd = open(OutFile, "w") DltFd.write("%s\n" % GetCopyrightHeader('dlt', True)) if PlatformId is not None: DltFd.write('#\n') DltFd.write('# Delta configuration values \ for platform ID 0x%04X\n' % PlatformId) DltFd.write('#\n\n') for Line in OutLines: DltFd.write('%s\n' % Line) DltFd.close() def GenerateDeltaFile(self, OutFile, AbsfFile): # Parse ABSF Build in dict if not os.path.exists(AbsfFile): Lines = [] else: with open(AbsfFile) as Fin: Lines = Fin.readlines() AbsfBuiltValDict = {} Process = False for Line in Lines: Line = Line.strip() if Line.startswith('StructDef'): Process = True if Line.startswith('EndStruct'): break if not Process: continue Match = re.match('\\s*\\$gCfgData_(\\w+)\\s+\ (\\d+)\\s+(bits|bytes)\\s+\\$_AS_BUILT_\\s+=\\s+(.+)\\$', Line) if Match: if Match.group(1) not in AbsfBuiltValDict: AbsfBuiltValDict[Match.group(1)] = Match.group(4).strip() else: raise Exception("Duplicated configuration \ name '%s' found !", Match.group(1)) # Match config item in DSC PlatformId = None OutLines = [] TagName = '' Level = 0 for Item in self._CfgItemList: Name = None if Level == 0 and Item['embed'].endswith(':START'): TagName = Item['embed'].split(':')[0] Level += 1 if Item['cname'] in AbsfBuiltValDict: ValStr = AbsfBuiltValDict[Item['cname']] Name = '%s.%s' % (TagName, Item['cname']) if not Item['subreg'] and Item['value'].startswith('{'): Value = Array2Val(Item['value']) IsArray = True else: Value = int(Item['value'], 16) IsArray = False AbsfVal = Array2Val(ValStr) if AbsfVal != Value: if 'PLATFORMID_CFG_DATA.PlatformId' == Name: PlatformId = AbsfVal self.WriteDeltaLine(OutLines, Name, ValStr, IsArray) else: if 'PLATFORMID_CFG_DATA.PlatformId' == Name: raise Exception("'PlatformId' has the \ same value as DSC default !") if Item['subreg']: for SubItem in Item['subreg']: if SubItem['cname'] in AbsfBuiltValDict: ValStr = AbsfBuiltValDict[SubItem['cname']] if Array2Val(ValStr) == int(SubItem['value'], 16): continue Name = '%s.%s.%s' % (TagName, Item['cname'], SubItem['cname']) self.WriteDeltaLine(OutLines, Name, ValStr, False) if Item['embed'].endswith(':END'): Level -= 1 if PlatformId is None and Lines: raise Exception("'PlatformId' configuration \ is missing in ABSF file!") else: PlatformId = 0 self.WriteDeltaFile(OutFile, PlatformId, Lines) return 0 def GenerateDscFile(self, OutFile): DscFd = open(OutFile, "w") for Line in self._DscLines: DscFd.write(Line + '\n') DscFd.close() return 0 def Usage(): print('\n'.join([ "GenCfgData Version 0.01", "Usage:", " GenCfgData GENINC BinFile \ IncOutFile [-D Macros]", " GenCfgData GENPKL DscFile \ PklOutFile [-D Macros]", " GenCfgData GENINC DscFile[;DltFile] \ IncOutFile [-D Macros]", " GenCfgData GENBIN DscFile[;DltFile] \ BinOutFile [-D Macros]", " GenCfgData GENBSF DscFile[;DltFile] \ BsfOutFile [-D Macros]", " GenCfgData GENDLT DscFile[;AbsfFile] \ DltOutFile [-D Macros]", " GenCfgData GENDSC DscFile \ DscOutFile [-D Macros]", " GenCfgData GENHDR DscFile[;DltFile] \ HdrOutFile[;ComHdrOutFile] [-D Macros]" ])) def Main(): # # Parse the options and args # argc = len(sys.argv) if argc < 4: 
Usage() return 1 GenCfgData = CGenCfgData() Command = sys.argv[1].upper() OutFile = sys.argv[3] if argc > 5 and GenCfgData.ParseMacros(sys.argv[4:]) != 0: raise Exception("ERROR: Macro parsing failed !") FileList = sys.argv[2].split(';') if len(FileList) == 2: DscFile = FileList[0] DltFile = FileList[1] elif len(FileList) == 1: DscFile = FileList[0] DltFile = '' else: raise Exception("ERROR: Invalid parameter '%s' !" % sys.argv[2]) if Command == "GENDLT" and DscFile.endswith('.dlt'): # It needs to expand an existing DLT file DltFile = DscFile Lines = CGenCfgData.ExpandIncludeFiles(DltFile) OutTxt = ''.join([x[0] for x in Lines]) OutFile = open(OutFile, "w") OutFile.write(OutTxt) OutFile.close() return 0 if not os.path.exists(DscFile): raise Exception("ERROR: Cannot open file '%s' !" % DscFile) CfgBinFile = '' if DltFile: if not os.path.exists(DltFile): raise Exception("ERROR: Cannot open file '%s' !" % DltFile) if Command == "GENDLT": CfgBinFile = DltFile DltFile = '' BinFile = '' if (DscFile.lower().endswith('.bin')) and (Command == "GENINC"): # It is binary file BinFile = DscFile DscFile = '' if BinFile: if GenCfgData.GenerateDataIncFile(OutFile, BinFile) != 0: raise Exception(GenCfgData.Error) return 0 if DscFile.lower().endswith('.pkl'): with open(DscFile, "rb") as PklFile: GenCfgData.__dict__ = marshal.load(PklFile) else: if GenCfgData.ParseDscFile(DscFile) != 0: raise Exception(GenCfgData.Error) # if GenCfgData.CheckCfgData() != 0: # raise Exception(GenCfgData.Error) if GenCfgData.CreateVarDict() != 0: raise Exception(GenCfgData.Error) if Command == 'GENPKL': with open(OutFile, "wb") as PklFile: marshal.dump(GenCfgData.__dict__, PklFile) return 0 if DltFile and Command in ['GENHDR', 'GENBIN', 'GENINC', 'GENBSF']: if GenCfgData.OverrideDefaultValue(DltFile) != 0: raise Exception(GenCfgData.Error) if GenCfgData.UpdateDefaultValue() != 0: raise Exception(GenCfgData.Error) # GenCfgData.PrintData () if sys.argv[1] == "GENBIN": if GenCfgData.GenerateBinary(OutFile) != 0: raise Exception(GenCfgData.Error) elif sys.argv[1] == "GENHDR": OutFiles = OutFile.split(';') BrdOutFile = OutFiles[0].strip() if len(OutFiles) > 1: ComOutFile = OutFiles[1].strip() else: ComOutFile = '' if GenCfgData.CreateHeaderFile(BrdOutFile, ComOutFile) != 0: raise Exception(GenCfgData.Error) elif sys.argv[1] == "GENBSF": if GenCfgData.GenerateBsfFile(OutFile) != 0: raise Exception(GenCfgData.Error) elif sys.argv[1] == "GENINC": if GenCfgData.GenerateDataIncFile(OutFile) != 0: raise Exception(GenCfgData.Error) elif sys.argv[1] == "GENDLT": if GenCfgData.GenerateDeltaFile(OutFile, CfgBinFile) != 0: raise Exception(GenCfgData.Error) elif sys.argv[1] == "GENDSC": if GenCfgData.GenerateDscFile(OutFile) != 0: raise Exception(GenCfgData.Error) else: raise Exception("Unsuported command '%s' !" % Command) return 0 if __name__ == '__main__': sys.exit(Main())
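A note from the editor: the heart of ParseDscFile() above is the per-item regular expression CfgRegExp, which splits each "gCfgData.Name | offset | length | value" declaration into its four fields. Below is a minimal standalone sketch of that parse; the example item line is invented, and '*' in the offset column means "continue at the running offset".

import re

# Same shape as CfgRegExp in FspGenCfgData.py above (the original is
# split across source-line continuations).
CFG_RE = re.compile(
    r'^([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+|\*)\s*\|\s*'
    r'(\d+|0x[0-9a-fA-F]+)\s*\|\s*(.+)')

line = 'gCfgData.MemoryTestMode | * | 0x01 | 0x1'   # made-up item
name, offset, length, value = CFG_RE.match(line[len('gCfgData.'):]).groups()
print(name, offset, int(length, 0), value)           # MemoryTestMode * 1 0x1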
edk2-master
IntelFsp2Pkg/Tools/FspGenCfgData.py
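A recurring detail in FspGenCfgData.py above is the packing of "value:bits" pairs, seen both in generate_dsc() and in ValueToList(): each (value, bit-width) pair is shifted into an accumulator LSB-first, and the total width must land on a byte boundary before serialization. A self-contained sketch of that rule, with made-up field values:

# Minimal sketch of the bit-field packing used by ValueToList() above.
def pack_bit_fields(fields):
    acc, bitlen = 0, 0
    for value, blen in fields:
        # Mask each value to its declared width, then shift it in.
        acc |= (value & ((1 << blen) - 1)) << bitlen
        bitlen += blen
    if bitlen % 8:
        raise ValueError('bit fields must end on a byte boundary')
    # Serialize little-endian, matching Val2Bytes in the tool.
    return bytes((acc >> (8 * i)) & 0xFF for i in range(bitlen // 8))

# 4b + 4b + 8b = 16 bits -> two bytes, fields packed LSB-first
print(pack_bit_fields([(0x5, 4), (0xA, 4), (0x34, 8)]).hex())   # 'a534'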
# @file # Unit tests for the FSP DSC-based header, BSF and YAML generation tools. # # Copyright (c) 2021, Intel Corporation. All rights reserved.<BR> # # SPDX-License-Identifier: BSD-2-Clause-Patent # ## # Import Modules import unittest import os import sys import filecmp currentdir = os.path.dirname(os.path.realpath(__file__)) parentdir = os.path.dirname(currentdir) sys.path.append(parentdir) import FspDscBsf2Yaml YamlHeaderLineLength = 10 HdrFileHeaderLineLength = 32 BsfFileHeaderLineLength = 19 def GenFileWithoutHdr(inputfile, numLineToStrip): yaml_file = open(inputfile, "r") lines = yaml_file.readlines() yaml_file.close() del lines[:numLineToStrip] noHdrOutputFileName = "no-header-" + inputfile stripped_file = open(noHdrOutputFileName, "w") for line in lines: stripped_file.write(line) stripped_file.close() return noHdrOutputFileName class TestFspScripts(unittest.TestCase): def test_generateFspHeader_fromDsc(self): # Generate HEADER cmd = '{} {} HEADER {} {} {}'.format( 'python', r'..\GenCfgOpt.py', 'QemuFspPkg.dsc', '.', "") os.system(cmd) noHdrOutputFileName = GenFileWithoutHdr("FspUpd.h", HdrFileHeaderLineLength) self.assertTrue(filecmp.cmp(noHdrOutputFileName, 'ExpectedFspUpd.h')) def test_generateFspsHeader_fromDsc(self): noHdrOutputFileName = GenFileWithoutHdr("FspsUpd.h", HdrFileHeaderLineLength) self.assertTrue(filecmp.cmp(noHdrOutputFileName, 'ExpectedFspsUpd.h')) def test_generateFsptHeader_fromDsc(self): noHdrOutputFileName = GenFileWithoutHdr("FsptUpd.h", HdrFileHeaderLineLength) self.assertTrue(filecmp.cmp(noHdrOutputFileName, 'ExpectedFsptUpd.h')) def test_generateFspmHeader_fromDsc(self): noHdrOutputFileName = GenFileWithoutHdr("FspmUpd.h", HdrFileHeaderLineLength) self.assertTrue(filecmp.cmp(noHdrOutputFileName, 'ExpectedFspmUpd.h')) def test_generateBsf_fromDsc(self): # Generate BSF cmd = '{} {} GENBSF {} {} {}'.format( 'python', r'..\GenCfgOpt.py', 'QemuFspPkg.dsc', '.', "Output.bsf") os.system(cmd) noHdrOutputFileName = GenFileWithoutHdr("Output.bsf", BsfFileHeaderLineLength) self.assertTrue(filecmp.cmp(noHdrOutputFileName, 'ExpectedOutput.bsf')) def test_generateYaml_fromDsc(self): # Generate YAML cmd = '{} {} {} {}'.format( 'python', r'..\FspDscBsf2Yaml.py', 'QemuFspPkg.dsc', "Output.yaml") os.system(cmd) noHdrOutputFileName = GenFileWithoutHdr("Output.yaml", YamlHeaderLineLength) self.assertTrue(filecmp.cmp(noHdrOutputFileName, 'ExpectedOutput.yaml')) if __name__ == '__main__': unittest.main()
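The tests above launch the generators through os.system() with a formatted command string, which is shell-sensitive and relies on Windows-style '..\\' paths. As a hedged suggestion only (not the repository's current behavior), the same invocation can be expressed as an argument list, which avoids quoting issues and reuses the running interpreter:

import os
import subprocess
import sys

# Equivalent to: python ..\GenCfgOpt.py HEADER QemuFspPkg.dsc . ""
# but portable across shells and platforms.
cmd = [sys.executable, os.path.join('..', 'GenCfgOpt.py'),
       'HEADER', 'QemuFspPkg.dsc', '.', '']
subprocess.call(cmd)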
edk2-master
IntelFsp2Pkg/Tools/Tests/test_yaml.py
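The header files these tests compare are produced by WriteHeaderFile() in FspGenCfgData.py above, which derives its include guard by splitting the CamelCase file name on case boundaries and upper-casing the result. The same two-step regex in isolation, on one of the test's real file names:

import os
import re

def guard_name(file_name):
    # 'FspmUpd.h' -> 'FspmUpd_h' -> 'Fspm_Upd_h' -> 'FSPM_UPD_H'
    name = os.path.basename(file_name).replace('.', '_')
    name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).upper()

print(guard_name('FspmUpd.h'))   # FSPM_UPD_H, emitted as __FSPM_UPD_H__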
#!/usr/bin/env python
# @ SingleSign.py
# Single signing script
#
# Copyright (c) 2020 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##

import os
import sys
import re
import shutil
import subprocess

SIGNING_KEY = {
    # Key Id                    | Key File Name start |
    # =================================================================
    # KEY_ID_MASTER is used for signing Slimboot Key Hash Manifest
    # container (KEYH Component)
    "KEY_ID_MASTER_RSA2048": "MasterTestKey_Priv_RSA2048.pem",
    "KEY_ID_MASTER_RSA3072": "MasterTestKey_Priv_RSA3072.pem",

    # KEY_ID_CFGDATA is used for signing external Config data blob
    "KEY_ID_CFGDATA_RSA2048": "ConfigTestKey_Priv_RSA2048.pem",
    "KEY_ID_CFGDATA_RSA3072": "ConfigTestKey_Priv_RSA3072.pem",

    # KEY_ID_FIRMWAREUPDATE is used for signing capsule firmware update image
    "KEY_ID_FIRMWAREUPDATE_RSA2048": "FirmwareUpdateTestKey_Priv_RSA2048.pem",
    "KEY_ID_FIRMWAREUPDATE_RSA3072": "FirmwareUpdateTestKey_Priv_RSA3072.pem",

    # KEY_ID_CONTAINER is used for signing container header with mono signature
    "KEY_ID_CONTAINER_RSA2048": "ContainerTestKey_Priv_RSA2048.pem",
    "KEY_ID_CONTAINER_RSA3072": "ContainerTestKey_Priv_RSA3072.pem",

    # KEY_ID_CONTAINER_COMP is used for signing container components
    "KEY_ID_CONTAINER_COMP_RSA2048": "ContainerCompTestKey_Priv_RSA2048.pem",
    "KEY_ID_CONTAINER_COMP_RSA3072": "ContainerCompTestKey_Priv_RSA3072.pem",

    # KEY_ID_OS1_PUBLIC and KEY_ID_OS2_PUBLIC are used for referencing
    # Boot OS public keys
    "KEY_ID_OS1_PUBLIC_RSA2048": "OS1_TestKey_Pub_RSA2048.pem",
    "KEY_ID_OS1_PUBLIC_RSA3072": "OS1_TestKey_Pub_RSA3072.pem",
    "KEY_ID_OS2_PUBLIC_RSA2048": "OS2_TestKey_Pub_RSA2048.pem",
    "KEY_ID_OS2_PUBLIC_RSA3072": "OS2_TestKey_Pub_RSA3072.pem",
    }

MESSAGE_SBL_KEY_DIR = (
    "!!! PRE-REQUISITE: Path to SBL_KEY_DIR has to be set with "
    "SBL KEYS DIRECTORY !!!\n"
    "!!! Generate keys using GenerateKeys.py available in "
    "BootloaderCorePkg/Tools directory !!!\n"
    "!!! Run $python BootloaderCorePkg/Tools/GenerateKeys.py "
    "-k $PATH_TO_SBL_KEY_DIR !!!\n"
    "!!! Set SBL_KEY_DIR environ with path to SBL KEYS DIR !!!\n"
    "!!! Windows: $set SBL_KEY_DIR=$PATH_TO_SBL_KEY_DIR !!!\n"
    "!!! Linux: $export SBL_KEY_DIR=$PATH_TO_SBL_KEY_DIR !!!\n")


def get_openssl_path():
    if os.name == 'nt':
        if 'OPENSSL_PATH' not in os.environ:
            openssl_dir = "C:\\Openssl\\bin\\"
            if os.path.exists(openssl_dir):
                os.environ['OPENSSL_PATH'] = openssl_dir
            else:
                os.environ['OPENSSL_PATH'] = "C:\\Openssl\\"
                if 'OPENSSL_CONF' not in os.environ:
                    openssl_cfg = "C:\\Openssl\\openssl.cfg"
                    if os.path.exists(openssl_cfg):
                        os.environ['OPENSSL_CONF'] = openssl_cfg
        openssl = os.path.join(
                    os.environ.get('OPENSSL_PATH', ''),
                    'openssl.exe')
    else:
        # Get openssl path for Linux cases
        openssl = shutil.which('openssl')

    return openssl


def run_process(arg_list, print_cmd=False, capture_out=False):
    sys.stdout.flush()
    if print_cmd:
        print(' '.join(arg_list))

    exc = None
    result = 0
    output = ''
    try:
        if capture_out:
            output = subprocess.check_output(arg_list).decode()
        else:
            result = subprocess.call(arg_list)
    except Exception as ex:
        result = 1
        exc = ex

    if result:
        if not print_cmd:
            print('Error in running process:\n  %s' % ' '.join(arg_list))
        if exc is None:
            sys.exit(1)
        else:
            raise exc

    return output


def check_file_pem_format(priv_key):
    # Check whether the key file name is in .pem format
    key_name = os.path.basename(priv_key)
    return os.path.splitext(key_name)[1] == ".pem"


def get_key_id(priv_key):
    # Extract the base name if a path is provided
    key_name = os.path.basename(priv_key)
    # Check for a KEY_ID prefix in the key name
    if key_name.startswith('KEY_ID'):
        return key_name
    else:
        return None


def get_sbl_key_dir():
    # Check that the key store path is configured via SBL_KEY_DIR
    if 'SBL_KEY_DIR' not in os.environ:
        exception_string = "ERROR: SBL_KEY_DIR is not defined." \
            " Set SBL_KEY_DIR with SBL Keys directory!!\n"
        raise Exception(exception_string + MESSAGE_SBL_KEY_DIR)

    sbl_key_dir = os.environ.get('SBL_KEY_DIR')
    if not os.path.exists(sbl_key_dir):
        exception_string = "ERROR: SBL_KEY_DIR set " + sbl_key_dir \
            + " is not valid." \
            " Set the correct SBL_KEY_DIR path !!\n" \
            + MESSAGE_SBL_KEY_DIR
        raise Exception(exception_string)

    return sbl_key_dir


def get_key_from_store(in_key):
    # If in_key is a path to an existing key file, use it directly
    if os.path.exists(in_key):
        return in_key

    # Get the Slimboot key directory path
    sbl_key_dir = get_sbl_key_dir()

    # Check whether in_key is a key id
    priv_key = get_key_id(in_key)
    if priv_key is not None:
        if priv_key in SIGNING_KEY:
            # Map the key id to its key file name
            priv_key_file = SIGNING_KEY[priv_key]
        else:
            exception_string = "KEY_ID " + priv_key \
                + " is not found in supported KEY IDs!!"
            raise Exception(exception_string)
    elif check_file_pem_format(in_key):
        # A file name in .pem format was provided
        priv_key_file = in_key
    else:
        raise Exception('key provided %s is not valid!' % in_key)

    # Construct the full key file path from the key directory
    priv_key = os.path.join(sbl_key_dir, priv_key_file)

    # Check that the key file constructed from the KEY ID exists
    if not os.path.isfile(priv_key):
        exception_string = "!!! ERROR: Key file corresponding to " \
            + in_key + " does not exist in SBL key " \
            "directory at " + sbl_key_dir + " !!!\n" \
            + MESSAGE_SBL_KEY_DIR
        raise Exception(exception_string)

    return priv_key


#
# Sign a file using openssl
#
#    priv_key     [Input]        Key Id or Path to Private key
#    hash_type    [Input]        Signing hash
#    sign_scheme  [Input]        Signing/padding scheme
#    in_file      [Input]        Input file to be signed
#    out_file     [Input/Output] Signed data file
#
def single_sign_file(priv_key, hash_type, sign_scheme, in_file, out_file):

    _hash_type_string = {
        "SHA2_256": 'sha256',
        "SHA2_384": 'sha384',
        "SHA2_512": 'sha512',
    }

    _hash_digest_Size = {
        # Hash_string : Hash_Size
        "SHA2_256": 32,
        "SHA2_384": 48,
        "SHA2_512": 64,
        "SM3_256": 32,
    }

    _sign_scheme_string = {
        "RSA_PKCS1": 'pkcs1',
        "RSA_PSS": 'pss',
    }

    priv_key = get_key_from_store(priv_key)

    # Temporary files to store the generated hash
    hash_file_tmp = out_file + '.hash.tmp'
    hash_file = out_file + '.hash'

    # Generate the hash using openssl dgst in hex format
    cmdargs = [get_openssl_path(),
               'dgst',
               '-' + '%s' % _hash_type_string[hash_type],
               '-out', '%s' % hash_file_tmp, '%s' % in_file]
    run_process(cmdargs)

    # Extract the hash from the dgst command output and convert to binary
    with open(hash_file_tmp, 'r') as fin:
        hashdata = fin.read()

    try:
        hashdata = hashdata.rsplit('=', 1)[1].strip()
    except Exception:
        raise Exception('Hash data not found for signing!')

    if len(hashdata) != (_hash_digest_Size[hash_type] * 2):
        raise Exception('Hash data size does not match the hash type!')

    hashdata_bytes = bytearray.fromhex(hashdata)
    with open(hash_file, 'wb') as fout:
        fout.write(hashdata_bytes)

    print("Key used for signing %s !!" % priv_key)

    # Sign the hash using openssl pkeyutl
    cmdargs = [get_openssl_path(),
               'pkeyutl', '-sign', '-in', '%s' % hash_file,
               '-inkey', '%s' % priv_key, '-out',
               '%s' % out_file, '-pkeyopt',
               'digest:%s' % _hash_type_string[hash_type], '-pkeyopt',
               'rsa_padding_mode:%s' % _sign_scheme_string[sign_scheme]]
    run_process(cmdargs)

    return


#
# Extract a public key using openssl
#
#    in_key        [Input]        Private key or public key in pem format
#    pub_key_file  [Input/Output] Public key written to a file
#
#    return keydata (mod, exp) in bin format
#
def single_sign_gen_pub_key(in_key, pub_key_file=None):

    in_key = get_key_from_store(in_key)

    # The key is expected to be in PEM format
    is_prv_key = False
    cmdline = [get_openssl_path(),
               'rsa', '-pubout', '-text', '-noout', '-in', '%s' % in_key]

    # Check whether a public key or a private key was provided
    with open(in_key, 'r') as fin:
        text = fin.read()
    if '-BEGIN RSA PRIVATE KEY-' in text:
        is_prv_key = True
    elif '-BEGIN PUBLIC KEY-' in text:
        cmdline.extend(['-pubin'])
    else:
        raise Exception('Unknown key format "%s" !' % in_key)

    if pub_key_file:
        cmdline.extend(['-out', '%s' % pub_key_file])
        capture = False
    else:
        capture = True

    output = run_process(cmdline, capture_out=capture)
    if not capture:
        output = open(pub_key_file, 'r').read()
    data = output.replace('\r', '')
    data = data.replace('\n', '')
    data = data.replace('  ', '')

    # Extract the modulus
    if is_prv_key:
        match = re.search('modulus(.*)publicExponent:\\s+(\\d+)\\s+', data)
    else:
        match = re.search('Modulus(?:.*?):(.*)Exponent:\\s+(\\d+)\\s+', data)
    if not match:
        raise Exception('Public key not found!')
    modulus = match.group(1).replace(':', '')
    exponent = int(match.group(2))

    mod = bytearray.fromhex(modulus)
    # Remove the leading '00' byte if the next byte's MSB is 1
    if mod[0] == 0 and (mod[1] & 0x80):
        mod = mod[1:]
    exp = bytearray.fromhex('{:08x}'.format(exponent))

    keydata = mod + exp

    return keydata
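A minimal usage sketch of the two entry points above (illustrative, not part of the original tool): the key id is resolved through the SIGNING_KEY table, while the SBL_KEY_DIR value and the payload file names are hypothetical placeholders.

if __name__ == '__main__':
    # Hypothetical key store produced beforehand by GenerateKeys.py
    os.environ.setdefault('SBL_KEY_DIR', '/path/to/SblKeys')

    # Hash 'payload.bin' with SHA2_384 and sign the digest with RSA-PSS,
    # writing the signature next to the input file
    single_sign_file('KEY_ID_CONTAINER_RSA3072', 'SHA2_384', 'RSA_PSS',
                     'payload.bin', 'payload.bin.sig')

    # Extract the raw public key bytes (modulus followed by exponent),
    # e.g. for embedding into a key hash manifest
    keydata = single_sign_gen_pub_key('KEY_ID_CONTAINER_RSA3072')
    print('Public key data: %d bytes' % len(keydata))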
edk2-master
IntelFsp2Pkg/Tools/ConfigEditor/SingleSign.py
# @ GenYamlCfg.py # # Copyright (c) 2020 - 2021, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # # import os import sys import re import marshal import string import operator as op import ast import tkinter.messagebox as messagebox import tkinter from datetime import date from collections import OrderedDict from CommonUtility import value_to_bytearray, value_to_bytes, \ bytes_to_value, get_bits_from_bytes, set_bits_to_bytes # Generated file copyright header __copyright_tmp__ = """/** @file Platform Configuration %s File. Copyright (c) %4d, Intel Corporation. All rights reserved.<BR> SPDX-License-Identifier: BSD-2-Clause-Patent This file is automatically generated. Please do NOT modify !!! **/ """ def get_copyright_header(file_type, allow_modify=False): file_description = { 'yaml': 'Boot Setting', 'dlt': 'Delta', 'inc': 'C Binary Blob', 'h': 'C Struct Header' } if file_type in ['yaml', 'dlt']: comment_char = '#' else: comment_char = '' lines = __copyright_tmp__.split('\n') if allow_modify: lines = [line for line in lines if 'Please do NOT modify' not in line] copyright_hdr = '\n'.join('%s%s' % (comment_char, line) for line in lines)[:-1] + '\n' return copyright_hdr % (file_description[file_type], date.today().year) def check_quote(text): if (text[0] == "'" and text[-1] == "'") or (text[0] == '"' and text[-1] == '"'): return True return False def strip_quote(text): new_text = text.strip() if check_quote(new_text): return new_text[1:-1] return text def strip_delimiter(text, delim): new_text = text.strip() if new_text: if new_text[0] == delim[0] and new_text[-1] == delim[-1]: return new_text[1:-1] return text def bytes_to_bracket_str(bytes): return '{ %s }' % (', '.join('0x%02x' % i for i in bytes)) def array_str_to_value(val_str): val_str = val_str.strip() val_str = strip_delimiter(val_str, '{}') val_str = strip_quote(val_str) value = 0 for each in val_str.split(',')[::-1]: each = each.strip() value = (value << 8) | int(each, 0) return value def write_lines(lines, file): fo = open(file, "w") fo.write(''.join([x[0] for x in lines])) fo.close() def read_lines(file): if not os.path.exists(file): test_file = os.path.basename(file) if os.path.exists(test_file): file = test_file fi = open(file, 'r') lines = fi.readlines() fi.close() return lines def expand_file_value(path, value_str): result = bytearray() match = re.match("\\{\\s*FILE:(.+)\\}", value_str) if match: file_list = match.group(1).split(',') for file in file_list: file = file.strip() bin_path = os.path.join(path, file) result.extend(bytearray(open(bin_path, 'rb').read())) print('\n\n result ', result) return result class ExpressionEval(ast.NodeVisitor): operators = { ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul, ast.Div: op.floordiv, ast.Mod: op.mod, ast.Eq: op.eq, ast.NotEq: op.ne, ast.Gt: op.gt, ast.Lt: op.lt, ast.GtE: op.ge, ast.LtE: op.le, ast.BitXor: op.xor, ast.BitAnd: op.and_, ast.BitOr: op.or_, ast.Invert: op.invert, ast.USub: op.neg } def __init__(self): self._debug = False self._expression = '' self._namespace = {} self._get_variable = None def eval(self, expr, vars={}): self._expression = expr if type(vars) is dict: self._namespace = vars self._get_variable = None else: self._namespace = {} self._get_variable = vars node = ast.parse(self._expression, mode='eval') result = self.visit(node.body) if self._debug: print('EVAL [ %s ] = %s' % (expr, str(result))) return result def visit_Name(self, node): if self._get_variable is not None: return self._get_variable(node.id) else: 
return self._namespace[node.id] def visit_Num(self, node): return node.n def visit_NameConstant(self, node): return node.value def visit_BoolOp(self, node): result = False if isinstance(node.op, ast.And): for value in node.values: result = self.visit(value) if not result: break elif isinstance(node.op, ast.Or): for value in node.values: result = self.visit(value) if result: break return True if result else False def visit_UnaryOp(self, node): val = self.visit(node.operand) return ExpressionEval.operators[type(node.op)](val) def visit_BinOp(self, node): lhs = self.visit(node.left) rhs = self.visit(node.right) return ExpressionEval.operators[type(node.op)](lhs, rhs) def visit_Compare(self, node): right = self.visit(node.left) result = True for operation, comp in zip(node.ops, node.comparators): if not result: break left = right right = self.visit(comp) result = ExpressionEval.operators[type(operation)](left, right) return result def visit_Call(self, node): if node.func.id in ['ternary']: condition = self.visit(node.args[0]) val_true = self.visit(node.args[1]) val_false = self.visit(node.args[2]) return val_true if condition else val_false elif node.func.id in ['offset', 'length']: if self._get_variable is not None: return self._get_variable(node.args[0].s, node.func.id) else: raise ValueError("Unsupported function: " + repr(node)) def generic_visit(self, node): raise ValueError("malformed node or string: " + repr(node)) class CFG_YAML(): TEMPLATE = 'template' CONFIGS = 'configs' VARIABLE = 'variable' def __init__(self): self.log_line = False self.allow_template = False self.cfg_tree = None self.tmp_tree = None self.var_dict = None self.def_dict = {} self.yaml_path = '' self.lines = [] self.full_lines = [] self.index = 0 self.re_expand = re.compile( r'(.+:\s+|\s*\-\s*)!expand\s+\{\s*(\w+_TMPL)\s*:\s*\[(.+)]\s*\}') self.re_include = re.compile(r'(.+:\s+|\s*\-\s*)!include\s+(.+)') @staticmethod def count_indent(line): return next((i for i, c in enumerate(line) if not c.isspace()), len(line)) @staticmethod def substitue_args(text, arg_dict): for arg in arg_dict: text = text.replace('$' + arg, arg_dict[arg]) return text @staticmethod def dprint(*args): pass def process_include(self, line, insert=True): match = self.re_include.match(line) if not match: raise Exception("Invalid !include format '%s' !" % line.strip()) prefix = match.group(1) include = match.group(2) if prefix.strip() == '-': prefix = '' adjust = 0 else: adjust = 2 include = strip_quote(include) request = CFG_YAML.count_indent(line) + adjust if self.log_line: # remove the include line itself del self.full_lines[-1] inc_path = os.path.join(self.yaml_path, include) if not os.path.exists(inc_path): # try relative path to project root try_path = os.path.join(os.path.dirname(os.path.realpath(__file__) ), "../..", include) if os.path.exists(try_path): inc_path = try_path else: raise Exception("ERROR: Cannot open file '%s'." 
% inc_path) lines = read_lines(inc_path) current = 0 same_line = False for idx, each in enumerate(lines): start = each.lstrip() if start == '' or start[0] == '#': continue if start[0] == '>': # append the content directly at the same line same_line = True start = idx current = CFG_YAML.count_indent(each) break lines = lines[start+1:] if same_line else lines[start:] leading = '' if same_line: request = len(prefix) leading = '>' lines = [prefix + '%s\n' % leading] + [' ' * request + i[current:] for i in lines] if insert: self.lines = lines + self.lines return lines def process_expand(self, line): match = self.re_expand.match(line) if not match: raise Exception("Invalid !expand format '%s' !" % line.strip()) lines = [] prefix = match.group(1) temp_name = match.group(2) args = match.group(3) if prefix.strip() == '-': indent = 0 else: indent = 2 lines = self.process_expand_template(temp_name, prefix, args, indent) self.lines = lines + self.lines def process_expand_template(self, temp_name, prefix, args, indent=2): # expand text with arg substitution if temp_name not in self.tmp_tree: raise Exception("Could not find template '%s' !" % temp_name) parts = args.split(',') parts = [i.strip() for i in parts] num = len(parts) arg_dict = dict(zip(['(%d)' % (i + 1) for i in range(num)], parts)) str_data = self.tmp_tree[temp_name] text = DefTemplate(str_data).safe_substitute(self.def_dict) text = CFG_YAML.substitue_args(text, arg_dict) target = CFG_YAML.count_indent(prefix) + indent current = CFG_YAML.count_indent(text) padding = target * ' ' if indent == 0: leading = [] else: leading = [prefix + '\n'] text = leading + [(padding + i + '\n')[current:] for i in text.splitlines()] return text def load_file(self, yaml_file): self.index = 0 self.lines = read_lines(yaml_file) def peek_line(self): if len(self.lines) == 0: return None else: return self.lines[0] def put_line(self, line): self.lines.insert(0, line) if self.log_line: del self.full_lines[-1] def get_line(self): if len(self.lines) == 0: return None else: line = self.lines.pop(0) if self.log_line: self.full_lines.append(line.rstrip()) return line def get_multiple_line(self, indent): text = '' newind = indent + 1 while True: line = self.peek_line() if line is None: break sline = line.strip() if sline != '': newind = CFG_YAML.count_indent(line) if newind <= indent: break self.get_line() if sline != '': text = text + line return text def traverse_cfg_tree(self, handler): def _traverse_cfg_tree(root, level=0): # config structure for key in root: if type(root[key]) is OrderedDict: level += 1 handler(key, root[key], level) _traverse_cfg_tree(root[key], level) level -= 1 _traverse_cfg_tree(self.cfg_tree) def count(self): def _count(name, cfgs, level): num[0] += 1 num = [0] self.traverse_cfg_tree(_count) return num[0] def parse(self, parent_name='', curr=None, level=0): child = None last_indent = None key = '' temp_chk = {} while True: line = self.get_line() if line is None: break curr_line = line.strip() if curr_line == '' or curr_line[0] == '#': continue indent = CFG_YAML.count_indent(line) if last_indent is None: last_indent = indent if indent != last_indent: # outside of current block, put the line back to queue self.put_line(' ' * indent + curr_line) if curr_line.endswith(': >'): # multiline marker old_count = len(self.full_lines) line = self.get_multiple_line(indent) if self.log_line and not self.allow_template \ and '!include ' in line: # expand include in template new_lines = [] lines = line.splitlines() for idx, each in enumerate(lines): if '!include 
' in each: new_line = ''.join(self.process_include(each, False)) new_lines.append(new_line) else: new_lines.append(each) self.full_lines = self.full_lines[:old_count] + new_lines curr_line = curr_line + line if indent > last_indent: # child nodes if child is None: raise Exception('Unexpected format at line: %s' % (curr_line)) level += 1 self.parse(key, child, level) level -= 1 line = self.peek_line() if line is not None: curr_line = line.strip() indent = CFG_YAML.count_indent(line) if indent >= last_indent: # consume the line self.get_line() else: # end of file indent = -1 if curr is None: curr = OrderedDict() if indent < last_indent: return curr marker1 = curr_line[0] marker2 = curr_line[-1] start = 1 if marker1 == '-' else 0 pos = curr_line.find(': ') if pos > 0: child = None key = curr_line[start:pos].strip() if curr_line[pos + 2] == '>': curr[key] = curr_line[pos + 3:] else: # XXXX: !include / !expand if '!include ' in curr_line: self.process_include(line) elif '!expand ' in curr_line: if self.allow_template and not self.log_line: self.process_expand(line) else: value_str = curr_line[pos + 2:].strip() curr[key] = value_str if self.log_line and value_str[0] == '{': # expand {FILE: xxxx} format in the log line if value_str[1:].rstrip().startswith('FILE:'): value_bytes = expand_file_value( self.yaml_path, value_str) value_str = bytes_to_bracket_str(value_bytes) self.full_lines[-1] = line[ :indent] + curr_line[:pos + 2] + value_str elif marker2 == ':': child = OrderedDict() key = curr_line[start:-1].strip() if key == '$ACTION': # special virtual nodes, rename to ensure unique key key = '$ACTION_%04X' % self.index self.index += 1 if key in curr: if key not in temp_chk: # check for duplicated keys at same level temp_chk[key] = 1 else: raise Exception("Duplicated item '%s:%s' found !" 
% (parent_name, key)) curr[key] = child if self.var_dict is None and key == CFG_YAML.VARIABLE: self.var_dict = child if self.tmp_tree is None and key == CFG_YAML.TEMPLATE: self.tmp_tree = child if self.var_dict: for each in self.var_dict: txt = self.var_dict[each] if type(txt) is str: self.def_dict['(%s)' % each] = txt if self.tmp_tree and key == CFG_YAML.CONFIGS: # apply template for the main configs self.allow_template = True else: child = None # - !include cfg_opt.yaml if '!include ' in curr_line: self.process_include(line) return curr def load_yaml(self, opt_file): self.var_dict = None self.yaml_path = os.path.dirname(opt_file) self.load_file(opt_file) yaml_tree = self.parse() self.tmp_tree = yaml_tree[CFG_YAML.TEMPLATE] self.cfg_tree = yaml_tree[CFG_YAML.CONFIGS] return self.cfg_tree def expand_yaml(self, opt_file): self.log_line = True self.load_yaml(opt_file) self.log_line = False text = '\n'.join(self.full_lines) self.full_lines = [] return text class DefTemplate(string.Template): idpattern = '\\([_A-Z][_A-Z0-9]*\\)|[_A-Z][_A-Z0-9]*' class CGenYamlCfg: STRUCT = '$STRUCT' bits_width = {'b': 1, 'B': 8, 'W': 16, 'D': 32, 'Q': 64} builtin_option = {'$EN_DIS': [('0', 'Disable'), ('1', 'Enable')]} exclude_struct = ['FSP_UPD_HEADER', 'FSPT_ARCH_UPD', 'FSPM_ARCH_UPD', 'FSPS_ARCH_UPD', 'GPIO_GPP_*', 'GPIO_CFG_DATA', 'GpioConfPad*', 'GpioPinConfig', 'BOOT_OPTION*', 'PLATFORMID_CFG_DATA', '\\w+_Half[01]'] include_tag = ['GPIO_CFG_DATA'] keyword_set = set(['name', 'type', 'option', 'help', 'length', 'value', 'order', 'struct', 'condition']) def __init__(self): self._mode = '' self._debug = False self._macro_dict = {} self.binseg_dict = {} self.initialize() def initialize(self): self._old_bin = None self._cfg_tree = {} self._tmp_tree = {} self._cfg_list = [] self._cfg_page = {'root': {'title': '', 'child': []}} self._cur_page = '' self._var_dict = {} self._def_dict = {} self._yaml_path = '' @staticmethod def deep_convert_dict(layer): # convert OrderedDict to list + dict new_list = layer if isinstance(layer, OrderedDict): new_list = list(layer.items()) for idx, pair in enumerate(new_list): new_node = CGenYamlCfg.deep_convert_dict(pair[1]) new_list[idx] = dict({pair[0]: new_node}) return new_list @staticmethod def deep_convert_list(layer): if isinstance(layer, list): od = OrderedDict({}) for each in layer: if isinstance(each, dict): key = next(iter(each)) od[key] = CGenYamlCfg.deep_convert_list(each[key]) return od else: return layer @staticmethod def expand_include_files(file_path, cur_dir=''): if cur_dir == '': cur_dir = os.path.dirname(file_path) file_path = os.path.basename(file_path) input_file_path = os.path.join(cur_dir, file_path) file = open(input_file_path, "r") lines = file.readlines() file.close() new_lines = [] for line_num, line in enumerate(lines): match = re.match("^!include\\s*(.+)?$", line.strip()) if match: inc_path = match.group(1) tmp_path = os.path.join(cur_dir, inc_path) org_path = tmp_path if not os.path.exists(tmp_path): cur_dir = os.path.join(os.path.dirname (os.path.realpath(__file__) ), "..", "..") tmp_path = os.path.join(cur_dir, inc_path) if not os.path.exists(tmp_path): raise Exception("ERROR: Cannot open include\ file '%s'." 
% org_path) else: new_lines.append(('# Included from file: %s\n' % inc_path, tmp_path, 0)) new_lines.append(('# %s\n' % ('=' * 80), tmp_path, 0)) new_lines.extend(CGenYamlCfg.expand_include_files (inc_path, cur_dir)) else: new_lines.append((line, input_file_path, line_num)) return new_lines @staticmethod def format_struct_field_name(input, count=0): name = '' cap = True if '_' in input: input = input.lower() for each in input: if each == '_': cap = True continue elif cap: each = each.upper() cap = False name = name + each if count > 1: name = '%s[%d]' % (name, count) return name def get_mode(self): return self._mode def set_mode(self, mode): self._mode = mode def get_last_error(self): return '' def get_variable(self, var, attr='value'): if var in self._var_dict: var = self._var_dict[var] return var item = self.locate_cfg_item(var, False) if item is None: raise ValueError("Cannot find variable '%s' !" % var) if item: if 'indx' in item: item = self.get_item_by_index(item['indx']) if attr == 'offset': var = item['offset'] elif attr == 'length': var = item['length'] elif attr == 'value': var = self.get_cfg_item_value(item) else: raise ValueError("Unsupported variable attribute '%s' !" % attr) return var def eval(self, expr): def _handler(pattern): if pattern.group(1): target = 1 else: target = 2 result = self.get_variable(pattern.group(target)) if result is None: raise ValueError('Unknown variable $(%s) !' % pattern.group(target)) return hex(result) expr_eval = ExpressionEval() if '$' in expr: # replace known variable first expr = re.sub(r'\$\(([_a-zA-Z][\w\.]*)\)|\$([_a-zA-Z][\w\.]*)', _handler, expr) return expr_eval.eval(expr, self.get_variable) def parse_macros(self, macro_def_str): # ['-DABC=1', '-D', 'CFG_DEBUG=1', '-D', 'CFG_OUTDIR=Build'] self._macro_dict = {} is_expression = False for macro in macro_def_str: if macro.startswith('-D'): is_expression = True if len(macro) > 2: macro = macro[2:] else: continue if is_expression: is_expression = False match = re.match("(\\w+)=(.+)", macro) if match: self._macro_dict[match.group(1)] = match.group(2) else: match = re.match("(\\w+)", macro) if match: self._macro_dict[match.group(1)] = '' if len(self._macro_dict) == 0: error = 1 else: error = 0 if self._debug: print("INFO : Macro dictionary:") for each in self._macro_dict: print(" $(%s) = [ %s ]" % (each, self._macro_dict[each])) return error def get_cfg_list(self, page_id=None): if page_id is None: # return full list return self._cfg_list else: # build a new list for items under a page ID cfgs = [i for i in self._cfg_list if i['cname'] and (i['page'] == page_id)] return cfgs def get_cfg_page(self): return self._cfg_page def get_cfg_item_length(self, item): return item['length'] def get_cfg_item_value(self, item, array=False): value_str = item['value'] length = item['length'] return self.get_value(value_str, length, array) def format_value_to_str(self, value, bit_length, old_value=''): # value is always int length = (bit_length + 7) // 8 fmt = '' if old_value.startswith('0x'): fmt = '0x' elif old_value and (old_value[0] in ['"', "'", '{']): fmt = old_value[0] else: fmt = '' bvalue = value_to_bytearray(value, length) if fmt in ['"', "'"]: svalue = bvalue.rstrip(b'\x00').decode() value_str = fmt + svalue + fmt elif fmt == "{": value_str = '{ ' + ', '.join(['0x%02x' % i for i in bvalue]) + ' }' elif fmt == '0x': hex_len = length * 2 if len(old_value) == hex_len + 2: fstr = '0x%%0%dx' % hex_len else: fstr = '0x%x' value_str = fstr % value else: if length <= 2: value_str = '%d' % value elif length 
<= 8: value_str = '0x%x' % value else: value_str = '{ ' + ', '.join(['0x%02x' % i for i in bvalue]) + ' }' return value_str def reformat_value_str(self, value_str, bit_length, old_value=None): value = self.parse_value(value_str, bit_length, False) if old_value is None: old_value = value_str new_value = self.format_value_to_str(value, bit_length, old_value) return new_value def get_value(self, value_str, bit_length, array=True): value_str = value_str.strip() if value_str[0] == "'" and value_str[-1] == "'" or \ value_str[0] == '"' and value_str[-1] == '"': value_str = value_str[1:-1] bvalue = bytearray(value_str.encode()) if len(bvalue) == 0: bvalue = bytearray(b'\x00') if array: return bvalue else: return bytes_to_value(bvalue) else: if value_str[0] in '{': value_str = value_str[1:-1].strip() value = 0 for each in value_str.split(',')[::-1]: each = each.strip() value = (value << 8) | int(each, 0) if array: length = (bit_length + 7) // 8 return value_to_bytearray(value, length) else: return value def parse_value(self, value_str, bit_length, array=True): length = (bit_length + 7) // 8 if check_quote(value_str): value_str = bytes_to_bracket_str(value_str[1:-1].encode()) elif (',' in value_str) and (value_str[0] != '{'): value_str = '{ %s }' % value_str if value_str[0] == '{': result = expand_file_value(self._yaml_path, value_str) if len(result) == 0: bin_list = value_str[1:-1].split(',') value = 0 bit_len = 0 unit_len = 1 for idx, element in enumerate(bin_list): each = element.strip() if len(each) == 0: continue in_bit_field = False if each[0] in "'" + '"': each_value = bytearray(each[1:-1], 'utf-8') elif ':' in each: match = re.match("^(.+):(\\d+)([b|B|W|D|Q])$", each) if match is None: raise SystemExit("Exception: Invald value\ list format '%s' !" % each) if match.group(1) == '0' and match.group(2) == '0': unit_len = CGenYamlCfg.bits_width[match.group(3) ] // 8 cur_bit_len = int(match.group(2) ) * CGenYamlCfg.bits_width[ match.group(3)] value += ((self.eval(match.group(1)) & ( 1 << cur_bit_len) - 1)) << bit_len bit_len += cur_bit_len each_value = bytearray() if idx + 1 < len(bin_list): in_bit_field = True else: try: each_value = value_to_bytearray( self.eval(each.strip()), unit_len) except Exception: raise SystemExit("Exception: Value %d cannot \ fit into %s bytes !" % (each, unit_len)) if not in_bit_field: if bit_len > 0: if bit_len % 8 != 0: raise SystemExit("Exception: Invalid bit \ field alignment '%s' !" % value_str) result.extend(value_to_bytes(value, bit_len // 8)) value = 0 bit_len = 0 result.extend(each_value) elif check_quote(value_str): result = bytearray(value_str[1:-1], 'utf-8') # Excluding quotes else: result = value_to_bytearray(self.eval(value_str), length) if len(result) < length: result.extend(b'\x00' * (length - len(result))) elif len(result) > length: raise SystemExit("Exception: Value '%s' is too big to fit \ into %d bytes !" 
% (value_str, length)) if array: return result else: return bytes_to_value(result) return result def get_cfg_item_options(self, item): tmp_list = [] if item['type'] == "Combo": if item['option'] in CGenYamlCfg.builtin_option: for op_val, op_str in CGenYamlCfg.builtin_option[item['option' ]]: tmp_list.append((op_val, op_str)) else: if item['option'].find(';') != -1: opt_list = item['option'].split(';') else: opt_list = re.split(', ', item['option']) for option in opt_list: option = option.strip() try: if option.find(':') != -1: (op_val, op_str) = option.split(':') else: op_val = option op_str = option except Exception: raise SystemExit("Exception: Invalid \ option format '%s' !" % option) tmp_list.append((op_val, op_str)) return tmp_list def get_page_title(self, page_id, top=None): if top is None: top = self.get_cfg_page()['root'] for node in top['child']: page_key = next(iter(node)) if page_id == page_key: return node[page_key]['title'] else: result = self.get_page_title(page_id, node[page_key]) if result is not None: return result return None def print_pages(self, top=None, level=0): if top is None: top = self.get_cfg_page()['root'] for node in top['child']: page_id = next(iter(node)) print('%s%s: %s' % (' ' * level, page_id, node[page_id]['title'])) level += 1 self.print_pages(node[page_id], level) level -= 1 def get_item_by_index(self, index): return self._cfg_list[index] def get_item_by_path(self, path): node = self.locate_cfg_item(path) if node: return self.get_item_by_index(node['indx']) else: return None def locate_cfg_path(self, item): def _locate_cfg_path(root, level=0): # config structure if item is root: return path for key in root: if type(root[key]) is OrderedDict: level += 1 path.append(key) ret = _locate_cfg_path(root[key], level) if ret: return ret path.pop() return None path = [] return _locate_cfg_path(self._cfg_tree) def locate_cfg_item(self, path, allow_exp=True): def _locate_cfg_item(root, path, level=0): if len(path) == level: return root next_root = root.get(path[level], None) if next_root is None: if allow_exp: raise Exception('Not a valid CFG config option path: %s' % '.'.join(path[:level+1])) else: return None return _locate_cfg_item(next_root, path, level + 1) path_nodes = path.split('.') return _locate_cfg_item(self._cfg_tree, path_nodes) def traverse_cfg_tree(self, handler, top=None): def _traverse_cfg_tree(root, level=0): # config structure for key in root: if type(root[key]) is OrderedDict: level += 1 handler(key, root[key], level) _traverse_cfg_tree(root[key], level) level -= 1 if top is None: top = self._cfg_tree _traverse_cfg_tree(top) def print_cfgs(self, root=None, short=True, print_level=256): def _print_cfgs(name, cfgs, level): if 'indx' in cfgs: act_cfg = self.get_item_by_index(cfgs['indx']) else: offset = 0 length = 0 value = '' if CGenYamlCfg.STRUCT in cfgs: cfg = cfgs[CGenYamlCfg.STRUCT] offset = int(cfg['offset']) length = int(cfg['length']) if 'value' in cfg: value = cfg['value'] if length == 0: return act_cfg = dict({'value': value, 'offset': offset, 'length': length}) value = act_cfg['value'] bit_len = act_cfg['length'] offset = (act_cfg['offset'] + 7) // 8 if value != '': try: value = self.reformat_value_str(act_cfg['value'], act_cfg['length']) except Exception: value = act_cfg['value'] length = bit_len // 8 bit_len = '(%db)' % bit_len if bit_len % 8 else '' * 4 if level <= print_level: if short and len(value) > 40: value = '%s ... 
%s' % (value[:20], value[-20:]) print('%04X:%04X%-6s %s%s : %s' % (offset, length, bit_len, ' ' * level, name, value)) self.traverse_cfg_tree(_print_cfgs) def build_var_dict(self): def _build_var_dict(name, cfgs, level): if level <= 2: if CGenYamlCfg.STRUCT in cfgs: struct_info = cfgs[CGenYamlCfg.STRUCT] self._var_dict['_LENGTH_%s_' % name] = struct_info[ 'length'] // 8 self._var_dict['_OFFSET_%s_' % name] = struct_info[ 'offset'] // 8 self._var_dict = {} self.traverse_cfg_tree(_build_var_dict) self._var_dict['_LENGTH_'] = self._cfg_tree[CGenYamlCfg.STRUCT][ 'length'] // 8 return 0 def add_cfg_page(self, child, parent, title=''): def _add_cfg_page(cfg_page, child, parent): key = next(iter(cfg_page)) if parent == key: cfg_page[key]['child'].append({child: {'title': title, 'child': []}}) return True else: result = False for each in cfg_page[key]['child']: if _add_cfg_page(each, child, parent): result = True break return result return _add_cfg_page(self._cfg_page, child, parent) def set_cur_page(self, page_str): if not page_str: return if ',' in page_str: page_list = page_str.split(',') else: page_list = [page_str] for page_str in page_list: parts = page_str.split(':') if len(parts) in [1, 3]: page = parts[0].strip() if len(parts) == 3: # it is a new page definition, add it into tree parent = parts[1] if parts[1] else 'root' parent = parent.strip() if parts[2][0] == '"' and parts[2][-1] == '"': parts[2] = parts[2][1:-1] if not self.add_cfg_page(page, parent, parts[2]): raise SystemExit("Error: Cannot find parent page \ '%s'!" % parent) else: raise SystemExit("Error: Invalid page format '%s' !" % page_str) self._cur_page = page def extend_variable(self, line): # replace all variables if line == '': return line loop = 2 while loop > 0: line_after = DefTemplate(line).safe_substitute(self._def_dict) if line == line_after: break loop -= 1 line = line_after return line_after def reformat_number_per_type(self, itype, value): if check_quote(value) or value.startswith('{'): return value parts = itype.split(',') if len(parts) > 3 and parts[0] == 'EditNum': num_fmt = parts[1].strip() else: num_fmt = '' if num_fmt == 'HEX' and not value.startswith('0x'): value = '0x%X' % int(value, 10) elif num_fmt == 'DEC' and value.startswith('0x'): value = '%d' % int(value, 16) return value def add_cfg_item(self, name, item, offset, path): self.set_cur_page(item.get('page', '')) if name[0] == '$': # skip all virtual node return 0 if not set(item).issubset(CGenYamlCfg.keyword_set): for each in list(item): if each not in CGenYamlCfg.keyword_set: raise Exception("Invalid attribute '%s' for '%s'!" % (each, '.'.join(path))) length = item.get('length', 0) if type(length) is str: match = re.match("^(\\d+)([b|B|W|D|Q])([B|W|D|Q]?)\\s*$", length) if match: unit_len = CGenYamlCfg.bits_width[match.group(2)] length = int(match.group(1), 10) * unit_len else: try: length = int(length, 0) * 8 except Exception: raise Exception("Invalid length field '%s' for '%s' !" % (length, '.'.join(path))) if offset % 8 > 0: raise Exception("Invalid alignment for field '%s' for \ '%s' !" % (name, '.'.join(path))) else: # define is length in bytes length = length * 8 if not name.isidentifier(): raise Exception("Invalid config name '%s' for '%s' !" 
% (name, '.'.join(path))) itype = str(item.get('type', 'Reserved')) value = str(item.get('value', '')) if value: if not (check_quote(value) or value.startswith('{')): if ',' in value: value = '{ %s }' % value else: value = self.reformat_number_per_type(itype, value) help = str(item.get('help', '')) if '\n' in help: help = ' '.join([i.strip() for i in help.splitlines()]) option = str(item.get('option', '')) if '\n' in option: option = ' '.join([i.strip() for i in option.splitlines()]) # extend variables for value and condition condition = str(item.get('condition', '')) if condition: condition = self.extend_variable(condition) value = self.extend_variable(value) order = str(item.get('order', '')) if order: if '.' in order: (major, minor) = order.split('.') order = int(major, 16) else: order = int(order, 16) else: order = offset cfg_item = dict() cfg_item['length'] = length cfg_item['offset'] = offset cfg_item['value'] = value cfg_item['type'] = itype cfg_item['cname'] = str(name) cfg_item['name'] = str(item.get('name', '')) cfg_item['help'] = help cfg_item['option'] = option cfg_item['page'] = self._cur_page cfg_item['order'] = order cfg_item['path'] = '.'.join(path) cfg_item['condition'] = condition if 'struct' in item: cfg_item['struct'] = item['struct'] self._cfg_list.append(cfg_item) item['indx'] = len(self._cfg_list) - 1 # remove used info for reducing pkl size item.pop('option', None) item.pop('condition', None) item.pop('help', None) item.pop('name', None) item.pop('page', None) return length def build_cfg_list(self, cfg_name='', top=None, path=[], info={'offset': 0}): if top is None: top = self._cfg_tree info.clear() info = {'offset': 0} start = info['offset'] is_leaf = True for key in top: path.append(key) if type(top[key]) is OrderedDict: is_leaf = False self.build_cfg_list(key, top[key], path, info) path.pop() if is_leaf: length = self.add_cfg_item(cfg_name, top, info['offset'], path) info['offset'] += length elif cfg_name == '' or (cfg_name and cfg_name[0] != '$'): # check first element for struct first = next(iter(top)) struct_str = CGenYamlCfg.STRUCT if first != struct_str: struct_node = OrderedDict({}) top[struct_str] = struct_node top.move_to_end(struct_str, False) else: struct_node = top[struct_str] struct_node['offset'] = start struct_node['length'] = info['offset'] - start if struct_node['length'] % 8 != 0: raise SystemExit("Error: Bits length not aligned for %s !" 
% str(path)) def get_field_value(self, top=None): def _get_field_value(name, cfgs, level): if 'indx' in cfgs: act_cfg = self.get_item_by_index(cfgs['indx']) if act_cfg['length'] == 0: return value = self.get_value(act_cfg['value'], act_cfg['length'], False) set_bits_to_bytes(result, act_cfg['offset'] - struct_info['offset'], act_cfg['length'], value) if top is None: top = self._cfg_tree struct_info = top[CGenYamlCfg.STRUCT] result = bytearray((struct_info['length'] + 7) // 8) self.traverse_cfg_tree(_get_field_value, top) return result data_diff = '' def find_data_difference(self, act_val, act_cfg): # checks for any difference between BSF and Binary file config_val = '' if act_val != act_cfg['value']: if 'DEC' in act_cfg['type']: bsf_val = '0x%x' % int(act_val) if bsf_val != act_cfg['value']: config_val = bsf_val else: config_val = '' else: config_val = act_val available_fv1 = 'none' available_fv2 = 'none' if self.detect_fsp(): if len(self.available_fv) >= 1: if len(self.available_fv) > 1: available_fv1 = self.available_fv[1] if self.available_fv[2]: available_fv2 = self.available_fv[2] else: available_fv1 = self.available_fv[1] if act_cfg['length'] == 16: config_val = int(config_val, 16) config_val = '0x%x' % config_val act_cfg['value'] = int( act_cfg['value'], 16) act_cfg['value'] = '0x%x' % \ act_cfg['value'] if config_val: string = ('.' + act_cfg['cname']) if (act_cfg['path'].endswith(self.available_fv[0] + string) or act_cfg['path'].endswith(available_fv1 + string) or act_cfg['path'].endswith(available_fv2 + string)) \ and 'BsfSkip' not in act_cfg['cname'] \ and 'Reserved' not in act_cfg['name']: if act_cfg['option'] != '': if act_cfg['length'] == 8: config_val = int(config_val, 16) config_val = '0x%x' % config_val act_cfg['value'] = int( act_cfg['value'], 16) act_cfg['value'] = '0x%x' % \ act_cfg['value'] option = act_cfg['option'] cfg_val = '' bin_val = '' for i in option.split(','): if act_cfg['value'] in i: bin_val = i elif config_val in i: cfg_val = i if cfg_val != '' and bin_val != '': self.data_diff += '\n\nBinary: ' \ + act_cfg['name'] \ + ': ' + bin_val.replace(' ', '') \ + '\nConfig file: ' \ + act_cfg['name'] + ': ' \ + cfg_val.replace(' ', '') + '\n' else: self.data_diff += '\n\nBinary: ' \ + act_cfg['name'] + ': ' + act_cfg['value'] \ + '\nConfig file: ' + act_cfg['name'] \ + ': ' + config_val + '\n' def set_field_value(self, top, value_bytes, force=False): def _set_field_value(name, cfgs, level): if 'indx' not in cfgs: return act_cfg = self.get_item_by_index(cfgs['indx']) actual_offset = act_cfg['offset'] - struct_info['offset'] if force or act_cfg['value'] == '': value = get_bits_from_bytes(full_bytes, actual_offset, act_cfg['length']) act_val = act_cfg['value'] if act_val == '': act_val = '%d' % value act_val = self.reformat_number_per_type(act_cfg ['type'], act_val) act_cfg['value'] = self.format_value_to_str( value, act_cfg['length'], act_val) self.find_data_difference(act_val, act_cfg) if 'indx' in top: # it is config option value = bytes_to_value(value_bytes) act_cfg = self.get_item_by_index(top['indx']) act_cfg['value'] = self.format_value_to_str( value, act_cfg['length'], act_cfg['value']) else: # it is structure struct_info = top[CGenYamlCfg.STRUCT] length = struct_info['length'] // 8 full_bytes = bytearray(value_bytes[:length]) if len(full_bytes) < length: full_bytes.extend(bytearray(length - len(value_bytes))) self.traverse_cfg_tree(_set_field_value, top) def update_def_value(self): def _update_def_value(name, cfgs, level): if 'indx' in cfgs: act_cfg = 
self.get_item_by_index(cfgs['indx']) if act_cfg['value'] != '' and act_cfg['length'] > 0: try: act_cfg['value'] = self.reformat_value_str( act_cfg['value'], act_cfg['length']) except Exception: raise Exception("Invalid value expression '%s' \ for '%s' !" % (act_cfg['value'], act_cfg['path'])) else: if CGenYamlCfg.STRUCT in cfgs and 'value' in \ cfgs[CGenYamlCfg.STRUCT]: curr = cfgs[CGenYamlCfg.STRUCT] value_bytes = self.get_value(curr['value'], curr['length'], True) self.set_field_value(cfgs, value_bytes) self.traverse_cfg_tree(_update_def_value, self._cfg_tree) def evaluate_condition(self, item): expr = item['condition'] result = self.parse_value(expr, 1, False) return result def detect_fsp(self): cfg_segs = self.get_cfg_segment() if len(cfg_segs) == 3: fsp = True for idx, seg in enumerate(cfg_segs): if not seg[0].endswith('UPD_%s' % 'TMS'[idx]): fsp = False break else: fsp = False if fsp: self.set_mode('FSP') return fsp def get_cfg_segment(self): def _get_cfg_segment(name, cfgs, level): if 'indx' not in cfgs: if name.startswith('$ACTION_'): if 'find' in cfgs: find[0] = cfgs['find'] else: if find[0]: act_cfg = self.get_item_by_index(cfgs['indx']) segments.append([find[0], act_cfg['offset'] // 8, 0]) find[0] = '' return find = [''] segments = [] self.traverse_cfg_tree(_get_cfg_segment, self._cfg_tree) cfg_len = self._cfg_tree[CGenYamlCfg.STRUCT]['length'] // 8 if len(segments) == 0: segments.append(['', 0, cfg_len]) segments.append(['', cfg_len, 0]) cfg_segs = [] for idx, each in enumerate(segments[:-1]): cfg_segs.append((each[0], each[1], segments[idx+1][1] - each[1])) return cfg_segs def get_bin_segment(self, bin_data): cfg_segs = self.get_cfg_segment() bin_segs = [] for seg in cfg_segs: key = seg[0].encode() if key == 0: bin_segs.append([seg[0], 0, len(bin_data)]) break pos = bin_data.find(key) if pos >= 0: # ensure no other match for the key next_pos = bin_data.find(key, pos + len(seg[0])) if next_pos >= 0: if key == b'$SKLFSP$' or key == b'$BSWFSP$': string = ('Warning: Multiple matches for %s in ' 'binary!\n\nA workaround applied to such ' 'FSP 1.x binary to use second' ' match instead of first match!' % key) messagebox.showwarning('Warning!', string) pos = next_pos else: print("Warning: Multiple matches for '%s' " "in binary, the 1st instance will be used !" % seg[0]) bin_segs.append([seg[0], pos, seg[2]]) self.binseg_dict[seg[0]] = pos else: bin_segs.append([seg[0], -1, seg[2]]) self.binseg_dict[seg[0]] = -1 continue return bin_segs available_fv = [] missing_fv = [] def extract_cfg_from_bin(self, bin_data): # get cfg bin length cfg_bins = bytearray() bin_segs = self.get_bin_segment(bin_data) Dummy_offset = 0 for each in bin_segs: if each[1] != -1: cfg_bins.extend(bin_data[each[1]:each[1] + each[2]]) self.available_fv.append(each[0]) else: self.missing_fv.append(each[0]) string = each[0] + ' is not availabe.' 
messagebox.showinfo('', string) cfg_bins.extend(bytearray(each[2])) Dummy_offset += each[2] return cfg_bins def save_current_to_bin(self): cfg_bins = self.generate_binary_array() if self._old_bin is None: return cfg_bins bin_data = bytearray(self._old_bin) bin_segs = self.get_bin_segment(self._old_bin) cfg_off = 0 for each in bin_segs: length = each[2] if each[1] != -1: bin_data[each[1]:each[1] + length] = cfg_bins[cfg_off: cfg_off + length] cfg_off += length else: cfg_off += length print('Patched the loaded binary successfully !') return bin_data def show_data_difference(self, data_diff): # Displays if any data difference detected in BSF and Binary file pop_up_text = 'There are differences in Config file and binary '\ 'data detected!\n' pop_up_text += data_diff window = tkinter.Tk() window.title("Data Difference") window.resizable(1, 1) # Window Size window.geometry("800x400") frame = tkinter.Frame(window, height=800, width=700) frame.pack(side=tkinter.BOTTOM) # Vertical (y) Scroll Bar scroll = tkinter.Scrollbar(window) scroll.pack(side=tkinter.RIGHT, fill=tkinter.Y) text = tkinter.Text(window, wrap=tkinter.NONE, yscrollcommand=scroll.set, width=700, height=400) text.insert(tkinter.INSERT, pop_up_text) text.pack() # Configure the scrollbars scroll.config(command=text.yview) exit_button = tkinter.Button( window, text="Close", command=window.destroy) exit_button.pack(in_=frame, side=tkinter.RIGHT, padx=20, pady=10) def load_default_from_bin(self, bin_data): self._old_bin = bin_data cfg_bins = self.extract_cfg_from_bin(bin_data) self.set_field_value(self._cfg_tree, cfg_bins, True) if self.data_diff: self.show_data_difference(self.data_diff) return cfg_bins def generate_binary_array(self, path=''): if path == '': top = None else: top = self.locate_cfg_item(path) if not top: raise Exception("Invalid configuration path '%s' !" % path) return self.get_field_value(top) def generate_binary(self, bin_file_name, path=''): bin_file = open(bin_file_name, "wb") bin_file.write(self.generate_binary_array(path)) bin_file.close() return 0 def write_delta_file(self, out_file, platform_id, out_lines): dlt_fd = open(out_file, "w") dlt_fd.write("%s\n" % get_copyright_header('dlt', True)) if platform_id is not None: dlt_fd.write('#\n') dlt_fd.write('# Delta configuration values for ' 'platform ID 0x%04X\n' % platform_id) dlt_fd.write('#\n\n') for line in out_lines: dlt_fd.write('%s\n' % line) dlt_fd.close() def override_default_value(self, dlt_file): error = 0 dlt_lines = CGenYamlCfg.expand_include_files(dlt_file) platform_id = None for line, file_path, line_num in dlt_lines: line = line.strip() if not line or line.startswith('#'): continue match = re.match("\\s*([\\w\\.]+)\\s*\\|\\s*(.+)", line) if not match: raise Exception("Unrecognized line '%s' " "(File:'%s' Line:%d) !" % (line, file_path, line_num + 1)) path = match.group(1) value_str = match.group(2) top = self.locate_cfg_item(path) if not top: raise Exception( "Invalid configuration '%s' (File:'%s' Line:%d) !" % (path, file_path, line_num + 1)) if 'indx' in top: act_cfg = self.get_item_by_index(top['indx']) bit_len = act_cfg['length'] else: struct_info = top[CGenYamlCfg.STRUCT] bit_len = struct_info['length'] value_bytes = self.parse_value(value_str, bit_len) self.set_field_value(top, value_bytes, True) if path == 'PLATFORMID_CFG_DATA.PlatformId': platform_id = value_str if platform_id is None: raise Exception( "PLATFORMID_CFG_DATA.PlatformId is missing " "in file '%s' !" 
% (dlt_file)) return error def generate_delta_file_from_bin(self, delta_file, old_data, new_data, full=False): new_data = self.load_default_from_bin(new_data) lines = [] platform_id = None def_platform_id = 0 for item in self._cfg_list: if not full and (item['type'] in ['Reserved']): continue old_val = get_bits_from_bytes(old_data, item['offset'], item['length']) new_val = get_bits_from_bytes(new_data, item['offset'], item['length']) full_name = item['path'] if 'PLATFORMID_CFG_DATA.PlatformId' == full_name: def_platform_id = old_val if new_val != old_val or full: val_str = self.reformat_value_str(item['value'], item['length']) text = '%-40s | %s' % (full_name, val_str) lines.append(text) if self.get_mode() != 'FSP': if platform_id is None or def_platform_id == platform_id: platform_id = def_platform_id print("WARNING: 'PlatformId' configuration is " "same as default %d!" % platform_id) lines.insert(0, '%-40s | %s\n\n' % ('PLATFORMID_CFG_DATA.PlatformId', '0x%04X' % platform_id)) else: platform_id = None self.write_delta_file(delta_file, platform_id, lines) return 0 def generate_delta_file(self, delta_file, bin_file, bin_file2, full=False): fd = open(bin_file, 'rb') new_data = self.extract_cfg_from_bin(bytearray(fd.read())) fd.close() if bin_file2 == '': old_data = self.generate_binary_array() else: old_data = new_data fd = open(bin_file2, 'rb') new_data = self.extract_cfg_from_bin(bytearray(fd.read())) fd.close() return self.generate_delta_file_from_bin(delta_file, old_data, new_data, full) def prepare_marshal(self, is_save): if is_save: # Ordered dict is not marshallable, convert to list self._cfg_tree = CGenYamlCfg.deep_convert_dict(self._cfg_tree) else: # Revert it back self._cfg_tree = CGenYamlCfg.deep_convert_list(self._cfg_tree) def generate_yml_file(self, in_file, out_file): cfg_yaml = CFG_YAML() text = cfg_yaml.expand_yaml(in_file) yml_fd = open(out_file, "w") yml_fd.write(text) yml_fd.close() return 0 def write_cfg_header_file(self, hdr_file_name, tag_mode, tag_dict, struct_list): lines = [] lines.append('\n\n') if self.get_mode() == 'FSP': lines.append('#include <FspUpd.h>\n') tag_mode = tag_mode & 0x7F tag_list = sorted(list(tag_dict.items()), key=lambda x: x[1]) for tagname, tagval in tag_list: if (tag_mode == 0 and tagval >= 0x100) or \ (tag_mode == 1 and tagval < 0x100): continue lines.append('#define %-30s 0x%03X\n' % ( 'CDATA_%s_TAG' % tagname[:-9], tagval)) lines.append('\n\n') name_dict = {} new_dict = {} for each in struct_list: if (tag_mode == 0 and each['tag'] >= 0x100) or \ (tag_mode == 1 and each['tag'] < 0x100): continue new_dict[each['name']] = (each['alias'], each['count']) if each['alias'] not in name_dict: name_dict[each['alias']] = 1 lines.extend(self.create_struct(each['alias'], each['node'], new_dict)) lines.append('#pragma pack()\n\n') self.write_header_file(lines, hdr_file_name) def write_header_file(self, txt_body, file_name, type='h'): file_name_def = os.path.basename(file_name).replace('.', '_') file_name_def = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', file_name_def) file_name_def = re.sub('([a-z0-9])([A-Z])', r'\1_\2', file_name_def).upper() lines = [] lines.append("%s\n" % get_copyright_header(type)) lines.append("#ifndef __%s__\n" % file_name_def) lines.append("#define __%s__\n\n" % file_name_def) if type == 'h': lines.append("#pragma pack(1)\n\n") lines.extend(txt_body) if type == 'h': lines.append("#pragma pack()\n\n") lines.append("#endif\n") # Don't rewrite if the contents are the same create = True if os.path.exists(file_name): hdr_file = 
open(file_name, "r") org_txt = hdr_file.read() hdr_file.close() new_txt = ''.join(lines) if org_txt == new_txt: create = False if create: hdr_file = open(file_name, "w") hdr_file.write(''.join(lines)) hdr_file.close() def generate_data_inc_file(self, dat_inc_file_name, bin_file=None): # Put a prefix GUID before CFGDATA so that it can be located later on prefix = b'\xa7\xbd\x7f\x73\x20\x1e\x46\xd6\ xbe\x8f\x64\x12\x05\x8d\x0a\xa8' if bin_file: fin = open(bin_file, 'rb') bin_dat = prefix + bytearray(fin.read()) fin.close() else: bin_dat = prefix + self.generate_binary_array() file_name = os.path.basename(dat_inc_file_name).upper() file_name = file_name.replace('.', '_') txt_lines = [] txt_lines.append("UINT8 mConfigDataBlob[%d] = {\n" % len(bin_dat)) count = 0 line = [' '] for each in bin_dat: line.append('0x%02X, ' % each) count = count + 1 if (count & 0x0F) == 0: line.append('\n') txt_lines.append(''.join(line)) line = [' '] if len(line) > 1: txt_lines.append(''.join(line) + '\n') txt_lines.append("};\n\n") self.write_header_file(txt_lines, dat_inc_file_name, 'inc') return 0 def get_struct_array_info(self, input): parts = input.split(':') if len(parts) > 1: var = parts[1] input = parts[0] else: var = '' array_str = input.split('[') name = array_str[0] if len(array_str) > 1: num_str = ''.join(c for c in array_str[-1] if c.isdigit()) num_str = '1000' if len(num_str) == 0 else num_str array_num = int(num_str) else: array_num = 0 return name, array_num, var def process_multilines(self, string, max_char_length): multilines = '' string_length = len(string) current_string_start = 0 string_offset = 0 break_line_dict = [] if len(string) <= max_char_length: while (string_offset < string_length): if string_offset >= 1: if string[string_offset - 1] == '\\' and string[ string_offset] == 'n': break_line_dict.append(string_offset + 1) string_offset += 1 if break_line_dict != []: for each in break_line_dict: multilines += " %s\n" % string[ current_string_start:each].lstrip() current_string_start = each if string_length - current_string_start > 0: multilines += " %s\n" % string[ current_string_start:].lstrip() else: multilines = " %s\n" % string else: new_line_start = 0 new_line_count = 0 found_space_char = False while (string_offset < string_length): if string_offset >= 1: if new_line_count >= max_char_length - 1: if string[string_offset] == ' ' and \ string_length - string_offset > 10: break_line_dict.append(new_line_start + new_line_count) new_line_start = new_line_start + new_line_count new_line_count = 0 found_space_char = True elif string_offset == string_length - 1 and \ found_space_char is False: break_line_dict.append(0) if string[string_offset - 1] == '\\' and string[ string_offset] == 'n': break_line_dict.append(string_offset + 1) new_line_start = string_offset + 1 new_line_count = 0 string_offset += 1 new_line_count += 1 if break_line_dict != []: break_line_dict.sort() for each in break_line_dict: if each > 0: multilines += " %s\n" % string[ current_string_start:each].lstrip() current_string_start = each if string_length - current_string_start > 0: multilines += " %s\n" % \ string[current_string_start:].lstrip() return multilines def create_field(self, item, name, length, offset, struct, bsf_name, help, option, bits_length=None): pos_name = 28 name_line = '' # help_line = '' # option_line = '' if length == 0 and name == 'dummy': return '\n' if bits_length == 0: return '\n' is_array = False if length in [1, 2, 4, 8]: type = "UINT%d" % (length * 8) else: is_array = True type = "UINT8" if item and 
item['value'].startswith('{'): type = "UINT8" is_array = True if struct != '': struct_base = struct.rstrip('*') name = '*' * (len(struct) - len(struct_base)) + name struct = struct_base type = struct if struct in ['UINT8', 'UINT16', 'UINT32', 'UINT64']: is_array = True unit = int(type[4:]) // 8 length = length / unit else: is_array = False if is_array: name = name + '[%d]' % length if len(type) < pos_name: space1 = pos_name - len(type) else: space1 = 1 if bsf_name != '': name_line = " %s\n" % bsf_name else: name_line = "N/A\n" # if help != '': # help_line = self.process_multilines(help, 80) # if option != '': # option_line = self.process_multilines(option, 80) if offset is None: offset_str = '????' else: offset_str = '0x%04X' % offset if bits_length is None: bits_length = '' else: bits_length = ' : %d' % bits_length # return "\n/** %s%s%s**/\n %s%s%s%s;\n" % (name_line, help_line, # option_line, type, ' ' * space1, name, bits_length) return "\n /* Offset %s: %s */\n %s%s%s%s;\n" % ( offset_str, name_line.strip(), type, ' ' * space1, name, bits_length) def create_struct(self, cname, top, struct_dict): index = 0 last = '' lines = [] off_base = -1 if cname in struct_dict: if struct_dict[cname][2]: return [] lines.append('\ntypedef struct {\n') for field in top: if field[0] == '$': continue index += 1 t_item = top[field] if 'indx' not in t_item: if CGenYamlCfg.STRUCT not in top[field]: continue if struct_dict[field][1] == 0: continue append = True struct_info = top[field][CGenYamlCfg.STRUCT] if 'struct' in struct_info: struct, array_num, var = self.get_struct_array_info( struct_info['struct']) if array_num > 0: if last == struct: append = False last = struct if var == '': var = field field = CGenYamlCfg.format_struct_field_name( var, struct_dict[field][1]) else: struct = struct_dict[field][0] field = CGenYamlCfg.format_struct_field_name( field, struct_dict[field][1]) if append: offset = t_item['$STRUCT']['offset'] // 8 if off_base == -1: off_base = offset line = self.create_field(None, field, 0, 0, struct, '', '', '') lines.append(' %s' % line) last = struct continue item = self.get_item_by_index(t_item['indx']) if item['cname'] == 'CfgHeader' and index == 1 or \ (item['cname'] == 'CondValue' and index == 2): continue bit_length = None length = (item['length'] + 7) // 8 match = re.match("^(\\d+)([b|B|W|D|Q])([B|W|D|Q]?)", t_item['length']) if match and match.group(2) == 'b': bit_length = int(match.group(1)) if match.group(3) != '': length = CGenYamlCfg.bits_width[match.group(3)] // 8 else: length = 4 offset = item['offset'] // 8 if off_base == -1: off_base = offset struct = item.get('struct', '') name = field prompt = item['name'] help = item['help'] option = item['option'] line = self.create_field(item, name, length, offset, struct, prompt, help, option, bit_length) lines.append(' %s' % line) last = struct lines.append('\n} %s;\n\n' % cname) return lines def write_fsp_sig_header_file(self, hdr_file_name): hdr_fd = open(hdr_file_name, 'w') hdr_fd.write("%s\n" % get_copyright_header('h')) hdr_fd.write("#ifndef __FSPUPD_H__\n" "#define __FSPUPD_H__\n\n" "#include <FspEas.h>\n\n" "#pragma pack(1)\n\n") lines = [] for fsp_comp in 'TMS': top = self.locate_cfg_item('FSP%s_UPD' % fsp_comp) if not top: raise Exception('Could not find FSP UPD definition !') bins = self.get_field_value(top) lines.append("#define FSP%s_UPD_SIGNATURE" " 0x%016X /* '%s' */\n\n" % (fsp_comp, bytes_to_value(bins[:8]), bins[:8].decode())) hdr_fd.write(''.join(lines)) hdr_fd.write("#pragma pack()\n\n" "#endif\n") 
hdr_fd.close() def create_header_file(self, hdr_file_name, com_hdr_file_name='', path=''): def _build_header_struct(name, cfgs, level): if CGenYamlCfg.STRUCT in cfgs: if 'CfgHeader' in cfgs: # collect CFGDATA TAG IDs cfghdr = self.get_item_by_index(cfgs['CfgHeader']['indx']) tag_val = array_str_to_value(cfghdr['value']) >> 20 tag_dict[name] = tag_val if level == 1: tag_curr[0] = tag_val struct_dict[name] = (level, tag_curr[0], cfgs) if path == 'FSP_SIG': self.write_fsp_sig_header_file(hdr_file_name) return tag_curr = [0] tag_dict = {} struct_dict = {} if path == '': top = None else: top = self.locate_cfg_item(path) if not top: raise Exception("Invalid configuration path '%s' !" % path) _build_header_struct(path, top, 0) self.traverse_cfg_tree(_build_header_struct, top) if tag_curr[0] == 0: hdr_mode = 2 else: hdr_mode = 1 if re.match('FSP[TMS]_UPD', path): hdr_mode |= 0x80 # filter out the items to be built for tags and structures struct_list = [] for each in struct_dict: match = False for check in CGenYamlCfg.exclude_struct: if re.match(check, each): match = True if each in tag_dict: if each not in CGenYamlCfg.include_tag: del tag_dict[each] break if not match: struct_list.append({'name': each, 'alias': '', 'count': 0, 'level': struct_dict[each][0], 'tag': struct_dict[each][1], 'node': struct_dict[each][2]}) # sort by level so that the bottom level struct # will be build first to satisfy dependencies struct_list = sorted(struct_list, key=lambda x: x['level'], reverse=True) # Convert XXX_[0-9]+ to XXX as an array hint for each in struct_list: cfgs = each['node'] if 'struct' in cfgs['$STRUCT']: each['alias'], array_num, var = self.get_struct_array_info( cfgs['$STRUCT']['struct']) else: match = re.match('(\\w+)(_\\d+)', each['name']) if match: each['alias'] = match.group(1) else: each['alias'] = each['name'] # count items for array build for idx, each in enumerate(struct_list): if idx > 0: last_struct = struct_list[idx-1]['node']['$STRUCT'] curr_struct = each['node']['$STRUCT'] if struct_list[idx-1]['alias'] == each['alias'] and \ curr_struct['length'] == last_struct['length'] and \ curr_struct['offset'] == last_struct['offset'] + \ last_struct['length']: for idx2 in range(idx-1, -1, -1): if struct_list[idx2]['count'] > 0: struct_list[idx2]['count'] += 1 break continue each['count'] = 1 # generate common header if com_hdr_file_name: self.write_cfg_header_file(com_hdr_file_name, 0, tag_dict, struct_list) # generate platform header self.write_cfg_header_file(hdr_file_name, hdr_mode, tag_dict, struct_list) return 0 def load_yaml(self, cfg_file): cfg_yaml = CFG_YAML() self.initialize() self._cfg_tree = cfg_yaml.load_yaml(cfg_file) self._def_dict = cfg_yaml.def_dict self._yaml_path = os.path.dirname(cfg_file) self.build_cfg_list() self.build_var_dict() self.update_def_value() return 0 def usage(): print('\n'.join([ "GenYamlCfg Version 0.50", "Usage:", " GenYamlCfg GENINC BinFile IncOutFile " " [-D Macros]", " GenYamlCfg GENPKL YamlFile PklOutFile " " [-D Macros]", " GenYamlCfg GENBIN YamlFile[;DltFile] BinOutFile " " [-D Macros]", " GenYamlCfg GENDLT YamlFile[;BinFile] DltOutFile " " [-D Macros]", " GenYamlCfg GENYML YamlFile YamlOutFile" " [-D Macros]", " GenYamlCfg GENHDR YamlFile HdrOutFile " " [-D Macros]" ])) def main(): # Parse the options and args argc = len(sys.argv) if argc < 4: usage() return 1 gen_cfg_data = CGenYamlCfg() command = sys.argv[1].upper() out_file = sys.argv[3] if argc >= 5 and gen_cfg_data.parse_macros(sys.argv[4:]) != 0: raise Exception("ERROR: Macro parsing failed !") 
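    # Note: sys.argv[2] may carry several ';'-separated inputs (a YAML file
    # plus an optional DLT or BIN file, see usage() above), while sys.argv[3]
    # is always the output file for the selected command.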
file_list = sys.argv[2].split(';') if len(file_list) >= 2: yml_file = file_list[0] dlt_file = file_list[1] elif len(file_list) == 1: yml_file = file_list[0] dlt_file = '' else: raise Exception("ERROR: Invalid parameter '%s' !" % sys.argv[2]) yml_scope = '' if '@' in yml_file: parts = yml_file.split('@') yml_file = parts[0] yml_scope = parts[1] if command == "GENDLT" and yml_file.endswith('.dlt'): # It needs to expand an existing DLT file dlt_file = yml_file lines = gen_cfg_data.expand_include_files(dlt_file) write_lines(lines, out_file) return 0 if command == "GENYML": if not yml_file.lower().endswith('.yaml'): raise Exception('Only YAML file is supported !') gen_cfg_data.generate_yml_file(yml_file, out_file) return 0 bin_file = '' if (yml_file.lower().endswith('.bin')) and (command == "GENINC"): # It is binary file bin_file = yml_file yml_file = '' if bin_file: gen_cfg_data.generate_data_inc_file(out_file, bin_file) return 0 cfg_bin_file = '' cfg_bin_file2 = '' if dlt_file: if command == "GENDLT": cfg_bin_file = dlt_file dlt_file = '' if len(file_list) >= 3: cfg_bin_file2 = file_list[2] if yml_file.lower().endswith('.pkl'): with open(yml_file, "rb") as pkl_file: gen_cfg_data.__dict__ = marshal.load(pkl_file) gen_cfg_data.prepare_marshal(False) # Override macro definition again for Pickle file if argc >= 5: gen_cfg_data.parse_macros(sys.argv[4:]) else: gen_cfg_data.load_yaml(yml_file) if command == 'GENPKL': gen_cfg_data.prepare_marshal(True) with open(out_file, "wb") as pkl_file: marshal.dump(gen_cfg_data.__dict__, pkl_file) json_file = os.path.splitext(out_file)[0] + '.json' fo = open(json_file, 'w') path_list = [] cfgs = {'_cfg_page': gen_cfg_data._cfg_page, '_cfg_list': gen_cfg_data._cfg_list, '_path_list': path_list} # optimize to reduce size path = None for each in cfgs['_cfg_list']: new_path = each['path'][:-len(each['cname'])-1] if path != new_path: path = new_path each['path'] = path path_list.append(path) else: del each['path'] if each['order'] == each['offset']: del each['order'] del each['offset'] # value is just used to indicate display type value = each['value'] if value.startswith('0x'): hex_len = ((each['length'] + 7) // 8) * 2 if len(value) == hex_len: value = 'x%d' % hex_len else: value = 'x' each['value'] = value elif value and value[0] in ['"', "'", '{']: each['value'] = value[0] else: del each['value'] fo.write(repr(cfgs)) fo.close() return 0 if dlt_file: gen_cfg_data.override_default_value(dlt_file) gen_cfg_data.detect_fsp() if command == "GENBIN": if len(file_list) == 3: old_data = gen_cfg_data.generate_binary_array() fi = open(file_list[2], 'rb') new_data = bytearray(fi.read()) fi.close() if len(new_data) != len(old_data): raise Exception("Binary file '%s' length does not match, \ ignored !" % file_list[2]) else: gen_cfg_data.load_default_from_bin(new_data) gen_cfg_data.override_default_value(dlt_file) gen_cfg_data.generate_binary(out_file, yml_scope) elif command == "GENDLT": full = True if 'FULL' in gen_cfg_data._macro_dict else False gen_cfg_data.generate_delta_file(out_file, cfg_bin_file, cfg_bin_file2, full) elif command == "GENHDR": out_files = out_file.split(';') brd_out_file = out_files[0].strip() if len(out_files) > 1: com_out_file = out_files[1].strip() else: com_out_file = '' gen_cfg_data.create_header_file(brd_out_file, com_out_file, yml_scope) elif command == "GENINC": gen_cfg_data.generate_data_inc_file(out_file) elif command == "DEBUG": gen_cfg_data.print_cfgs() else: raise Exception("Unsuported command '%s' !" 
                        % command)
    return 0


if __name__ == '__main__':
    sys.exit(main())
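# The GENPKL branch of main() above shrinks the companion JSON by
# delta-encoding the 'path' field: consecutive items that share a parent path
# store it only once in '_path_list'. A minimal, self-contained sketch of that
# idea; compact_paths() and the sample items are hypothetical, not from a real
# YAML.

def compact_paths(cfg_list):
    """Drop repeated parent paths, keeping one copy per run of items."""
    path_list = []
    last_path = None
    for item in cfg_list:
        # Parent path is the full path minus the trailing '.cname'
        parent = item['path'][:-len(item['cname']) - 1]
        if parent != last_path:
            last_path = parent
            item['path'] = parent
            path_list.append(parent)
        else:
            del item['path']      # recoverable from the previous item
    return path_list

items = [{'path': 'FSPM.TOP.A', 'cname': 'A'},
         {'path': 'FSPM.TOP.B', 'cname': 'B'},
         {'path': 'FSPM.SUB.C', 'cname': 'C'}]
print(compact_paths(items))       # ['FSPM.TOP', 'FSPM.SUB']
print(items)                      # second item no longer carries 'path'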
edk2-master
IntelFsp2Pkg/Tools/ConfigEditor/GenYamlCfg.py
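# Typical GenYamlCfg invocations, wrapped in Python for scripting. Only the
# command verbs (GENBIN, GENDLT, GENHDR, ...) come from the usage() text
# above; run_gen_yaml_cfg() and the file names are illustrative placeholders.

import subprocess
import sys

def run_gen_yaml_cfg(command, in_file, out_file, macros=None):
    """Invoke GenYamlCfg.py as a subprocess; returns its exit code."""
    cmd = [sys.executable, 'GenYamlCfg.py', command, in_file, out_file]
    if macros:
        cmd += ['-D'] + macros                    # e.g. ['FULL=1']
    return subprocess.call(cmd)

# Generate a config binary from a YAML plus an optional delta file:
#   run_gen_yaml_cfg('GENBIN', 'FspmUpd.yaml;Board.dlt', 'CfgData.bin')
# Generate C headers (board header required, common header optional):
#   run_gen_yaml_cfg('GENHDR', 'FspmUpd.yaml', 'FspmUpd.h;FspUpdCommon.h')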
#!/usr/bin/env python # @ CommonUtility.py # Common utility script # # Copyright (c) 2016 - 2021, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # ## import os import sys import shutil import subprocess import string from ctypes import ARRAY, c_char, c_uint16, c_uint32, \ c_uint8, Structure, sizeof from importlib.machinery import SourceFileLoader from SingleSign import single_sign_gen_pub_key # Key types defined should match with cryptolib.h PUB_KEY_TYPE = { "RSA": 1, "ECC": 2, "DSA": 3, } # Signing type schemes defined should match with cryptolib.h SIGN_TYPE_SCHEME = { "RSA_PKCS1": 1, "RSA_PSS": 2, "ECC": 3, "DSA": 4, } # Hash values defined should match with cryptolib.h HASH_TYPE_VALUE = { "SHA2_256": 1, "SHA2_384": 2, "SHA2_512": 3, "SM3_256": 4, } # Hash values defined should match with cryptolib.h HASH_VAL_STRING = dict(map(reversed, HASH_TYPE_VALUE.items())) AUTH_TYPE_HASH_VALUE = { "SHA2_256": 1, "SHA2_384": 2, "SHA2_512": 3, "SM3_256": 4, "RSA2048SHA256": 1, "RSA3072SHA384": 2, } HASH_DIGEST_SIZE = { "SHA2_256": 32, "SHA2_384": 48, "SHA2_512": 64, "SM3_256": 32, } class PUB_KEY_HDR (Structure): _pack_ = 1 _fields_ = [ ('Identifier', ARRAY(c_char, 4)), # signature ('P', 'U', 'B', 'K') ('KeySize', c_uint16), # Length of Public Key ('KeyType', c_uint8), # RSA or ECC ('Reserved', ARRAY(c_uint8, 1)), ('KeyData', ARRAY(c_uint8, 0)), ] def __init__(self): self.Identifier = b'PUBK' class SIGNATURE_HDR (Structure): _pack_ = 1 _fields_ = [ ('Identifier', ARRAY(c_char, 4)), ('SigSize', c_uint16), ('SigType', c_uint8), ('HashAlg', c_uint8), ('Signature', ARRAY(c_uint8, 0)), ] def __init__(self): self.Identifier = b'SIGN' class LZ_HEADER(Structure): _pack_ = 1 _fields_ = [ ('signature', ARRAY(c_char, 4)), ('compressed_len', c_uint32), ('length', c_uint32), ('version', c_uint16), ('svn', c_uint8), ('attribute', c_uint8) ] _compress_alg = { b'LZDM': 'Dummy', b'LZ4 ': 'Lz4', b'LZMA': 'Lzma', } def print_bytes(data, indent=0, offset=0, show_ascii=False): bytes_per_line = 16 printable = ' ' + string.ascii_letters + string.digits + string.punctuation str_fmt = '{:s}{:04x}: {:%ds} {:s}' % (bytes_per_line * 3) bytes_per_line data_array = bytearray(data) for idx in range(0, len(data_array), bytes_per_line): hex_str = ' '.join( '%02X' % val for val in data_array[idx:idx + bytes_per_line]) asc_str = ''.join('%c' % (val if (chr(val) in printable) else '.') for val in data_array[idx:idx + bytes_per_line]) print(str_fmt.format( indent * ' ', offset + idx, hex_str, ' ' + asc_str if show_ascii else '')) def get_bits_from_bytes(bytes, start, length): if length == 0: return 0 byte_start = (start) // 8 byte_end = (start + length - 1) // 8 bit_start = start & 7 mask = (1 << length) - 1 val = bytes_to_value(bytes[byte_start:byte_end + 1]) val = (val >> bit_start) & mask return val def set_bits_to_bytes(bytes, start, length, bvalue): if length == 0: return byte_start = (start) // 8 byte_end = (start + length - 1) // 8 bit_start = start & 7 mask = (1 << length) - 1 val = bytes_to_value(bytes[byte_start:byte_end + 1]) val &= ~(mask << bit_start) val |= ((bvalue & mask) << bit_start) bytes[byte_start:byte_end+1] = value_to_bytearray( val, byte_end + 1 - byte_start) def value_to_bytes(value, length): return value.to_bytes(length, 'little') def bytes_to_value(bytes): return int.from_bytes(bytes, 'little') def value_to_bytearray(value, length): return bytearray(value_to_bytes(value, length)) # def value_to_bytearray (value, length): return bytearray(value_to_bytes(value, 
length)) def get_aligned_value(value, alignment=4): if alignment != (1 << (alignment.bit_length() - 1)): raise Exception( 'Alignment (0x%x) should to be power of 2 !' % alignment) value = (value + (alignment - 1)) & ~(alignment - 1) return value def get_padding_length(data_len, alignment=4): new_data_len = get_aligned_value(data_len, alignment) return new_data_len - data_len def get_file_data(file, mode='rb'): return open(file, mode).read() def gen_file_from_object(file, object): open(file, 'wb').write(object) def gen_file_with_size(file, size): open(file, 'wb').write(b'\xFF' * size) def check_files_exist(base_name_list, dir='', ext=''): for each in base_name_list: if not os.path.exists(os.path.join(dir, each + ext)): return False return True def load_source(name, filepath): mod = SourceFileLoader(name, filepath).load_module() return mod def get_openssl_path(): if os.name == 'nt': if 'OPENSSL_PATH' not in os.environ: openssl_dir = "C:\\Openssl\\bin\\" if os.path.exists(openssl_dir): os.environ['OPENSSL_PATH'] = openssl_dir else: os.environ['OPENSSL_PATH'] = "C:\\Openssl\\" if 'OPENSSL_CONF' not in os.environ: openssl_cfg = "C:\\Openssl\\openssl.cfg" if os.path.exists(openssl_cfg): os.environ['OPENSSL_CONF'] = openssl_cfg openssl = os.path.join( os.environ.get('OPENSSL_PATH', ''), 'openssl.exe') else: # Get openssl path for Linux cases openssl = shutil.which('openssl') return openssl def run_process(arg_list, print_cmd=False, capture_out=False): sys.stdout.flush() if os.name == 'nt' and os.path.splitext(arg_list[0])[1] == '' and \ os.path.exists(arg_list[0] + '.exe'): arg_list[0] += '.exe' if print_cmd: print(' '.join(arg_list)) exc = None result = 0 output = '' try: if capture_out: output = subprocess.check_output(arg_list).decode() else: result = subprocess.call(arg_list) except Exception as ex: result = 1 exc = ex if result: if not print_cmd: print('Error in running process:\n %s' % ' '.join(arg_list)) if exc is None: sys.exit(1) else: raise exc return output # Adjust hash type algorithm based on Public key file def adjust_hash_type(pub_key_file): key_type = get_key_type(pub_key_file) if key_type == 'RSA2048': hash_type = 'SHA2_256' elif key_type == 'RSA3072': hash_type = 'SHA2_384' else: hash_type = None return hash_type def rsa_sign_file( priv_key, pub_key, hash_type, sign_scheme, in_file, out_file, inc_dat=False, inc_key=False): bins = bytearray() if inc_dat: bins.extend(get_file_data(in_file)) # def single_sign_file(priv_key, hash_type, sign_scheme, in_file, out_file): out_data = get_file_data(out_file) sign = SIGNATURE_HDR() sign.SigSize = len(out_data) sign.SigType = SIGN_TYPE_SCHEME[sign_scheme] sign.HashAlg = HASH_TYPE_VALUE[hash_type] bins.extend(bytearray(sign) + out_data) if inc_key: key = gen_pub_key(priv_key, pub_key) bins.extend(key) if len(bins) != len(out_data): gen_file_from_object(out_file, bins) def get_key_type(in_key): # Check in_key is file or key Id if not os.path.exists(in_key): key = bytearray(gen_pub_key(in_key)) else: # Check for public key in binary format. 
key = bytearray(get_file_data(in_key)) pub_key_hdr = PUB_KEY_HDR.from_buffer(key) if pub_key_hdr.Identifier != b'PUBK': pub_key = gen_pub_key(in_key) pub_key_hdr = PUB_KEY_HDR.from_buffer(pub_key) key_type = next( (key for key, value in PUB_KEY_TYPE.items() if value == pub_key_hdr.KeyType)) return '%s%d' % (key_type, (pub_key_hdr.KeySize - 4) * 8) def get_auth_hash_type(key_type, sign_scheme): if key_type == "RSA2048" and sign_scheme == "RSA_PKCS1": hash_type = 'SHA2_256' auth_type = 'RSA2048_PKCS1_SHA2_256' elif key_type == "RSA3072" and sign_scheme == "RSA_PKCS1": hash_type = 'SHA2_384' auth_type = 'RSA3072_PKCS1_SHA2_384' elif key_type == "RSA2048" and sign_scheme == "RSA_PSS": hash_type = 'SHA2_256' auth_type = 'RSA2048_PSS_SHA2_256' elif key_type == "RSA3072" and sign_scheme == "RSA_PSS": hash_type = 'SHA2_384' auth_type = 'RSA3072_PSS_SHA2_384' else: hash_type = '' auth_type = '' return auth_type, hash_type # def single_sign_gen_pub_key(in_key, pub_key_file=None): def gen_pub_key(in_key, pub_key=None): keydata = single_sign_gen_pub_key(in_key, pub_key) publickey = PUB_KEY_HDR() publickey.KeySize = len(keydata) publickey.KeyType = PUB_KEY_TYPE['RSA'] key = bytearray(publickey) + keydata if pub_key: gen_file_from_object(pub_key, key) return key def decompress(in_file, out_file, tool_dir=''): if not os.path.isfile(in_file): raise Exception("Invalid input file '%s' !" % in_file) # Remove the Lz Header fi = open(in_file, 'rb') di = bytearray(fi.read()) fi.close() lz_hdr = LZ_HEADER.from_buffer(di) offset = sizeof(lz_hdr) if lz_hdr.signature == b"LZDM" or lz_hdr.compressed_len == 0: fo = open(out_file, 'wb') fo.write(di[offset:offset + lz_hdr.compressed_len]) fo.close() return temp = os.path.splitext(out_file)[0] + '.tmp' if lz_hdr.signature == b"LZMA": alg = "Lzma" elif lz_hdr.signature == b"LZ4 ": alg = "Lz4" else: raise Exception("Unsupported compression '%s' !" % lz_hdr.signature) fo = open(temp, 'wb') fo.write(di[offset:offset + lz_hdr.compressed_len]) fo.close() compress_tool = "%sCompress" % alg if alg == "Lz4": try: cmdline = [ os.path.join(tool_dir, compress_tool), "-d", "-o", out_file, temp] run_process(cmdline, False, True) except Exception: msg_string = "Could not find/use CompressLz4 tool, " \ "trying with python lz4..." print(msg_string) try: import lz4.block if lz4.VERSION != '3.1.1': msg_string = "Recommended lz4 module version " \ "is '3.1.1'," + lz4.VERSION \ + " is currently installed." print(msg_string) except ImportError: msg_string = "Could not import lz4, use " \ "'python -m pip install lz4==3.1.1' " \ "to install it." print(msg_string) exit(1) decompress_data = lz4.block.decompress(get_file_data(temp)) with open(out_file, "wb") as lz4bin: lz4bin.write(decompress_data) else: cmdline = [ os.path.join(tool_dir, compress_tool), "-d", "-o", out_file, temp] run_process(cmdline, False, True) os.remove(temp) def compress(in_file, alg, svn=0, out_path='', tool_dir=''): if not os.path.isfile(in_file): raise Exception("Invalid input file '%s' !" % in_file) basename, ext = os.path.splitext(os.path.basename(in_file)) if out_path: if os.path.isdir(out_path): out_file = os.path.join(out_path, basename + '.lz') else: out_file = os.path.join(out_path) else: out_file = os.path.splitext(in_file)[0] + '.lz' if alg == "Lzma": sig = "LZMA" elif alg == "Tiano": sig = "LZUF" elif alg == "Lz4": sig = "LZ4 " elif alg == "Dummy": sig = "LZDM" else: raise Exception("Unsupported compression '%s' !" 
% alg) in_len = os.path.getsize(in_file) if in_len > 0: compress_tool = "%sCompress" % alg if sig == "LZDM": shutil.copy(in_file, out_file) compress_data = get_file_data(out_file) elif sig == "LZ4 ": try: cmdline = [ os.path.join(tool_dir, compress_tool), "-e", "-o", out_file, in_file] run_process(cmdline, False, True) compress_data = get_file_data(out_file) except Exception: msg_string = "Could not find/use CompressLz4 tool, " \ "trying with python lz4..." print(msg_string) try: import lz4.block if lz4.VERSION != '3.1.1': msg_string = "Recommended lz4 module version " \ "is '3.1.1', " + lz4.VERSION \ + " is currently installed." print(msg_string) except ImportError: msg_string = "Could not import lz4, use " \ "'python -m pip install lz4==3.1.1' " \ "to install it." print(msg_string) exit(1) compress_data = lz4.block.compress( get_file_data(in_file), mode='high_compression') elif sig == "LZMA": cmdline = [ os.path.join(tool_dir, compress_tool), "-e", "-o", out_file, in_file] run_process(cmdline, False, True) compress_data = get_file_data(out_file) else: compress_data = bytearray() lz_hdr = LZ_HEADER() lz_hdr.signature = sig.encode() lz_hdr.svn = svn lz_hdr.compressed_len = len(compress_data) lz_hdr.length = os.path.getsize(in_file) data = bytearray() data.extend(lz_hdr) data.extend(compress_data) gen_file_from_object(out_file, data) return out_file
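# The compress()/decompress() pair above frames payloads with the 16-byte
# LZ_HEADER defined earlier ('LZDM'/'LZ4 '/'LZMA' signatures). A small
# self-contained sketch of building and re-parsing such a header with ctypes;
# LZ_HEADER_DEMO is an illustrative mirror of LZ_HEADER, not the real class.

from ctypes import ARRAY, Structure, c_char, c_uint8, c_uint16, c_uint32, sizeof

class LZ_HEADER_DEMO(Structure):
    _pack_ = 1
    _fields_ = [('signature', ARRAY(c_char, 4)),
                ('compressed_len', c_uint32),
                ('length', c_uint32),
                ('version', c_uint16),
                ('svn', c_uint8),
                ('attribute', c_uint8)]

payload = b'hello fsp'
hdr = LZ_HEADER_DEMO()
hdr.signature = b'LZDM'               # 'Dummy' = stored, not compressed
hdr.compressed_len = len(payload)
hdr.length = len(payload)
blob = bytearray(hdr) + payload

parsed = LZ_HEADER_DEMO.from_buffer_copy(blob)
assert sizeof(parsed) == 16
assert blob[sizeof(parsed):sizeof(parsed) + parsed.compressed_len] == payload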
edk2-master
IntelFsp2Pkg/Tools/ConfigEditor/CommonUtility.py
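# get_bits_from_bytes()/set_bits_to_bytes() in CommonUtility.py treat a byte
# array as a little-endian bit stream. A tiny round-trip check of the same
# arithmetic (start bit 4, width 8, spanning two bytes), using standalone
# copies of the conversion helpers so the snippet runs on its own:

def bytes_to_value(data):
    return int.from_bytes(data, 'little')

def value_to_bytearray(value, length):
    return bytearray(value.to_bytes(length, 'little'))

buf = bytearray(b'\x00\x00')
start, length, val = 4, 8, 0xAB
mask = (1 << length) - 1
word = bytes_to_value(buf)
word = (word & ~(mask << start)) | ((val & mask) << start)
buf[:] = value_to_bytearray(word, 2)
assert buf == bytearray(b'\xB0\x0A')              # 0x0AB0, little-endian
assert (bytes_to_value(buf) >> start) & mask == 0xAB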
# @ ConfigEditor.py # # Copyright(c) 2018 - 2021, Intel Corporation. All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # ## import os import sys import marshal import tkinter import tkinter.ttk as ttk import tkinter.messagebox as messagebox import tkinter.filedialog as filedialog from pickle import FALSE, TRUE from pathlib import Path from GenYamlCfg import CGenYamlCfg, bytes_to_value, \ bytes_to_bracket_str, value_to_bytes, array_str_to_value from ctypes import sizeof, Structure, ARRAY, c_uint8, c_uint64, c_char, \ c_uint32, c_uint16 from functools import reduce sys.path.insert(0, '..') from FspDscBsf2Yaml import bsf_to_dsc, dsc_to_yaml # noqa sys.dont_write_bytecode = True class create_tool_tip(object): ''' create a tooltip for a given widget ''' in_progress = False def __init__(self, widget, text=''): self.top_win = None self.widget = widget self.text = text self.widget.bind("<Enter>", self.enter) self.widget.bind("<Leave>", self.leave) def enter(self, event=None): if self.in_progress: return if self.widget.winfo_class() == 'Treeview': # Only show help when cursor is on row header. rowid = self.widget.identify_row(event.y) if rowid != '': return else: x, y, cx, cy = self.widget.bbox("insert") cursor = self.widget.winfo_pointerxy() x = self.widget.winfo_rootx() + 35 y = self.widget.winfo_rooty() + 20 if cursor[1] > y and cursor[1] < y + 20: y += 20 # creates a toplevel window self.top_win = tkinter.Toplevel(self.widget) # Leaves only the label and removes the app window self.top_win.wm_overrideredirect(True) self.top_win.wm_geometry("+%d+%d" % (x, y)) label = tkinter.Message(self.top_win, text=self.text, justify='left', background='bisque', relief='solid', borderwidth=1, font=("times", "10", "normal")) label.pack(ipadx=1) self.in_progress = True def leave(self, event=None): if self.top_win: self.top_win.destroy() self.in_progress = False class validating_entry(tkinter.Entry): def __init__(self, master, **kw): tkinter.Entry.__init__(*(self, master), **kw) self.parent = master self.old_value = '' self.last_value = '' self.variable = tkinter.StringVar() self.variable.trace("w", self.callback) self.config(textvariable=self.variable) self.config({"background": "#c0c0c0"}) self.bind("<Return>", self.move_next) self.bind("<Tab>", self.move_next) self.bind("<Escape>", self.cancel) for each in ['BackSpace', 'Delete']: self.bind("<%s>" % each, self.ignore) self.display(None) def ignore(self, even): return "break" def move_next(self, event): if self.row < 0: return row, col = self.row, self.col txt, row_id, col_id = self.parent.get_next_cell(row, col) self.display(txt, row_id, col_id) return "break" def cancel(self, event): self.variable.set(self.old_value) self.display(None) def display(self, txt, row_id='', col_id=''): if txt is None: self.row = -1 self.col = -1 self.place_forget() else: row = int('0x' + row_id[1:], 0) - 1 col = int(col_id[1:]) - 1 self.row = row self.col = col self.old_value = txt self.last_value = txt x, y, width, height = self.parent.bbox(row_id, col) self.place(x=x, y=y, w=width) self.variable.set(txt) self.focus_set() self.icursor(0) def callback(self, *Args): cur_val = self.variable.get() new_val = self.validate(cur_val) if new_val is not None and self.row >= 0: self.last_value = new_val self.parent.set_cell(self.row, self.col, new_val) self.variable.set(self.last_value) def validate(self, value): if len(value) > 0: try: int(value, 16) except Exception: return None # Normalize the cell format self.update() cell_width = self.winfo_width() max_len = 
custom_table.to_byte_length(cell_width) * 2 cur_pos = self.index("insert") if cur_pos == max_len + 1: value = value[-max_len:] else: value = value[:max_len] if value == '': value = '0' fmt = '%%0%dX' % max_len return fmt % int(value, 16) class custom_table(ttk.Treeview): _Padding = 20 _Char_width = 6 def __init__(self, parent, col_hdr, bins): cols = len(col_hdr) col_byte_len = [] for col in range(cols): # Columns col_byte_len.append(int(col_hdr[col].split(':')[1])) byte_len = sum(col_byte_len) rows = (len(bins) + byte_len - 1) // byte_len self.rows = rows self.cols = cols self.col_byte_len = col_byte_len self.col_hdr = col_hdr self.size = len(bins) self.last_dir = '' style = ttk.Style() style.configure("Custom.Treeview.Heading", font=('calibri', 10, 'bold'), foreground="blue") ttk.Treeview.__init__(self, parent, height=rows, columns=[''] + col_hdr, show='headings', style="Custom.Treeview", selectmode='none') self.bind("<Button-1>", self.click) self.bind("<FocusOut>", self.focus_out) self.entry = validating_entry(self, width=4, justify=tkinter.CENTER) self.heading(0, text='LOAD') self.column(0, width=60, stretch=0, anchor=tkinter.CENTER) for col in range(cols): # Columns text = col_hdr[col].split(':')[0] byte_len = int(col_hdr[col].split(':')[1]) self.heading(col+1, text=text) self.column(col+1, width=self.to_cell_width(byte_len), stretch=0, anchor=tkinter.CENTER) idx = 0 for row in range(rows): # Rows text = '%04X' % (row * len(col_hdr)) vals = ['%04X:' % (cols * row)] for col in range(cols): # Columns if idx >= len(bins): break byte_len = int(col_hdr[col].split(':')[1]) value = bytes_to_value(bins[idx:idx+byte_len]) hex = ("%%0%dX" % (byte_len * 2)) % value vals.append(hex) idx += byte_len self.insert('', 'end', values=tuple(vals)) if idx >= len(bins): break @staticmethod def to_cell_width(byte_len): return byte_len * 2 * custom_table._Char_width + custom_table._Padding @staticmethod def to_byte_length(cell_width): return(cell_width - custom_table._Padding) \ // (2 * custom_table._Char_width) def focus_out(self, event): self.entry.display(None) def refresh_bin(self, bins): if not bins: return # Reload binary into widget bin_len = len(bins) for row in range(self.rows): iid = self.get_children()[row] for col in range(self.cols): idx = row * sum(self.col_byte_len) + \ sum(self.col_byte_len[:col]) byte_len = self.col_byte_len[col] if idx + byte_len <= self.size: byte_len = int(self.col_hdr[col].split(':')[1]) if idx + byte_len > bin_len: val = 0 else: val = bytes_to_value(bins[idx:idx+byte_len]) hex_val = ("%%0%dX" % (byte_len * 2)) % val self.set(iid, col + 1, hex_val) def get_cell(self, row, col): iid = self.get_children()[row] txt = self.item(iid, 'values')[col] return txt def get_next_cell(self, row, col): rows = self.get_children() col += 1 if col > self.cols: col = 1 row += 1 cnt = row * sum(self.col_byte_len) + sum(self.col_byte_len[:col]) if cnt > self.size: # Reached the last cell, so roll back to beginning row = 0 col = 1 txt = self.get_cell(row, col) row_id = rows[row] col_id = '#%d' % (col + 1) return(txt, row_id, col_id) def set_cell(self, row, col, val): iid = self.get_children()[row] self.set(iid, col, val) def load_bin(self): # Load binary from file path = filedialog.askopenfilename( initialdir=self.last_dir, title="Load binary file", filetypes=(("Binary files", "*.bin"), ( "binary files", "*.bin"))) if path: self.last_dir = os.path.dirname(path) fd = open(path, 'rb') bins = bytearray(fd.read())[:self.size] fd.close() bins.extend(b'\x00' * (self.size - len(bins))) return bins 
return None def click(self, event): row_id = self.identify_row(event.y) col_id = self.identify_column(event.x) if row_id == '' and col_id == '#1': # Clicked on "LOAD" cell bins = self.load_bin() self.refresh_bin(bins) return if col_id == '#1': # Clicked on column 1(Offset column) return item = self.identify('item', event.x, event.y) if not item or not col_id: # Not clicked on valid cell return # Clicked cell row = int('0x' + row_id[1:], 0) - 1 col = int(col_id[1:]) - 1 if row * self.cols + col > self.size: return vals = self.item(item, 'values') if col < len(vals): txt = self.item(item, 'values')[col] self.entry.display(txt, row_id, col_id) def get(self): bins = bytearray() row_ids = self.get_children() for row_id in row_ids: row = int('0x' + row_id[1:], 0) - 1 for col in range(self.cols): idx = row * sum(self.col_byte_len) + \ sum(self.col_byte_len[:col]) byte_len = self.col_byte_len[col] if idx + byte_len > self.size: break hex = self.item(row_id, 'values')[col + 1] values = value_to_bytes(int(hex, 16) & ((1 << byte_len * 8) - 1), byte_len) bins.extend(values) return bins class c_uint24(Structure): """Little-Endian 24-bit Unsigned Integer""" _pack_ = 1 _fields_ = [('Data', (c_uint8 * 3))] def __init__(self, val=0): self.set_value(val) def __str__(self, indent=0): return '0x%.6x' % self.value def __int__(self): return self.get_value() def set_value(self, val): self.Data[0:3] = Val2Bytes(val, 3) def get_value(self): return Bytes2Val(self.Data[0:3]) value = property(get_value, set_value) class EFI_FIRMWARE_VOLUME_HEADER(Structure): _fields_ = [ ('ZeroVector', ARRAY(c_uint8, 16)), ('FileSystemGuid', ARRAY(c_uint8, 16)), ('FvLength', c_uint64), ('Signature', ARRAY(c_char, 4)), ('Attributes', c_uint32), ('HeaderLength', c_uint16), ('Checksum', c_uint16), ('ExtHeaderOffset', c_uint16), ('Reserved', c_uint8), ('Revision', c_uint8) ] class EFI_FIRMWARE_VOLUME_EXT_HEADER(Structure): _fields_ = [ ('FvName', ARRAY(c_uint8, 16)), ('ExtHeaderSize', c_uint32) ] class EFI_FFS_INTEGRITY_CHECK(Structure): _fields_ = [ ('Header', c_uint8), ('File', c_uint8) ] class EFI_FFS_FILE_HEADER(Structure): _fields_ = [ ('Name', ARRAY(c_uint8, 16)), ('IntegrityCheck', EFI_FFS_INTEGRITY_CHECK), ('Type', c_uint8), ('Attributes', c_uint8), ('Size', c_uint24), ('State', c_uint8) ] class EFI_COMMON_SECTION_HEADER(Structure): _fields_ = [ ('Size', c_uint24), ('Type', c_uint8) ] class EFI_SECTION_TYPE: """Enumeration of all valid firmware file section types.""" ALL = 0x00 COMPRESSION = 0x01 GUID_DEFINED = 0x02 DISPOSABLE = 0x03 PE32 = 0x10 PIC = 0x11 TE = 0x12 DXE_DEPEX = 0x13 VERSION = 0x14 USER_INTERFACE = 0x15 COMPATIBILITY16 = 0x16 FIRMWARE_VOLUME_IMAGE = 0x17 FREEFORM_SUBTYPE_GUID = 0x18 RAW = 0x19 PEI_DEPEX = 0x1b SMM_DEPEX = 0x1c class FSP_COMMON_HEADER(Structure): _fields_ = [ ('Signature', ARRAY(c_char, 4)), ('HeaderLength', c_uint32) ] class FSP_INFORMATION_HEADER(Structure): _fields_ = [ ('Signature', ARRAY(c_char, 4)), ('HeaderLength', c_uint32), ('Reserved1', c_uint16), ('SpecVersion', c_uint8), ('HeaderRevision', c_uint8), ('ImageRevision', c_uint32), ('ImageId', ARRAY(c_char, 8)), ('ImageSize', c_uint32), ('ImageBase', c_uint32), ('ImageAttribute', c_uint16), ('ComponentAttribute', c_uint16), ('CfgRegionOffset', c_uint32), ('CfgRegionSize', c_uint32), ('Reserved2', c_uint32), ('TempRamInitEntryOffset', c_uint32), ('Reserved3', c_uint32), ('NotifyPhaseEntryOffset', c_uint32), ('FspMemoryInitEntryOffset', c_uint32), ('TempRamExitEntryOffset', c_uint32), ('FspSiliconInitEntryOffset', c_uint32), 
('FspMultiPhaseSiInitEntryOffset', c_uint32), ('ExtendedImageRevision', c_uint16), ('Reserved4', c_uint16) ] class FSP_EXTENDED_HEADER(Structure): _fields_ = [ ('Signature', ARRAY(c_char, 4)), ('HeaderLength', c_uint32), ('Revision', c_uint8), ('Reserved', c_uint8), ('FspProducerId', ARRAY(c_char, 6)), ('FspProducerRevision', c_uint32), ('FspProducerDataSize', c_uint32) ] class FSP_PATCH_TABLE(Structure): _fields_ = [ ('Signature', ARRAY(c_char, 4)), ('HeaderLength', c_uint16), ('HeaderRevision', c_uint8), ('Reserved', c_uint8), ('PatchEntryNum', c_uint32) ] class Section: def __init__(self, offset, secdata): self.SecHdr = EFI_COMMON_SECTION_HEADER.from_buffer(secdata, 0) self.SecData = secdata[0:int(self.SecHdr.Size)] self.Offset = offset def AlignPtr(offset, alignment=8): return (offset + alignment - 1) & ~(alignment - 1) def Bytes2Val(bytes): return reduce(lambda x, y: (x << 8) | y, bytes[:: -1]) def Val2Bytes(value, blen): return [(value >> (i*8) & 0xff) for i in range(blen)] class FirmwareFile: def __init__(self, offset, filedata): self.FfsHdr = EFI_FFS_FILE_HEADER.from_buffer(filedata, 0) self.FfsData = filedata[0:int(self.FfsHdr.Size)] self.Offset = offset self.SecList = [] def ParseFfs(self): ffssize = len(self.FfsData) offset = sizeof(self.FfsHdr) if self.FfsHdr.Name != '\xff' * 16: while offset < (ffssize - sizeof(EFI_COMMON_SECTION_HEADER)): sechdr = EFI_COMMON_SECTION_HEADER.from_buffer( self.FfsData, offset) sec = Section( offset, self.FfsData[offset:offset + int(sechdr.Size)]) self.SecList.append(sec) offset += int(sechdr.Size) offset = AlignPtr(offset, 4) class FirmwareVolume: def __init__(self, offset, fvdata): self.FvHdr = EFI_FIRMWARE_VOLUME_HEADER.from_buffer(fvdata, 0) self.FvData = fvdata[0: self.FvHdr.FvLength] self.Offset = offset if self.FvHdr.ExtHeaderOffset > 0: self.FvExtHdr = EFI_FIRMWARE_VOLUME_EXT_HEADER.from_buffer( self.FvData, self.FvHdr.ExtHeaderOffset) else: self.FvExtHdr = None self.FfsList = [] def ParseFv(self): fvsize = len(self.FvData) if self.FvExtHdr: offset = self.FvHdr.ExtHeaderOffset + self.FvExtHdr.ExtHeaderSize else: offset = self.FvHdr.HeaderLength offset = AlignPtr(offset) while offset < (fvsize - sizeof(EFI_FFS_FILE_HEADER)): ffshdr = EFI_FFS_FILE_HEADER.from_buffer(self.FvData, offset) if (ffshdr.Name == '\xff' * 16) and \ (int(ffshdr.Size) == 0xFFFFFF): offset = fvsize else: ffs = FirmwareFile( offset, self.FvData[offset:offset + int(ffshdr.Size)]) ffs.ParseFfs() self.FfsList.append(ffs) offset += int(ffshdr.Size) offset = AlignPtr(offset) class FspImage: def __init__(self, offset, fih, fihoff, patch): self.Fih = fih self.FihOffset = fihoff self.Offset = offset self.FvIdxList = [] self.Type = "XTMSXXXXOXXXXXXX"[(fih.ComponentAttribute >> 12) & 0x0F] self.PatchList = patch self.PatchList.append(fihoff + 0x1C) def AppendFv(self, FvIdx): self.FvIdxList.append(FvIdx) def Patch(self, delta, fdbin): count = 0 applied = 0 for idx, patch in enumerate(self.PatchList): ptype = (patch >> 24) & 0x0F if ptype not in [0x00, 0x0F]: raise Exception('ERROR: Invalid patch type %d !' 
% ptype) if patch & 0x80000000: patch = self.Fih.ImageSize - (0x1000000 - (patch & 0xFFFFFF)) else: patch = patch & 0xFFFFFF if (patch < self.Fih.ImageSize) and \ (patch + sizeof(c_uint32) <= self.Fih.ImageSize): offset = patch + self.Offset value = Bytes2Val(fdbin[offset:offset+sizeof(c_uint32)]) value += delta fdbin[offset:offset+sizeof(c_uint32)] = Val2Bytes( value, sizeof(c_uint32)) applied += 1 count += 1 # Don't count the FSP base address patch entry appended at the end if count != 0: count -= 1 applied -= 1 return (count, applied) class FirmwareDevice: def __init__(self, offset, FdData): self.FvList = [] self.FspList = [] self.FspExtList = [] self.FihList = [] self.BuildList = [] self.OutputText = "" self.Offset = 0 self.FdData = FdData def ParseFd(self): offset = 0 fdsize = len(self.FdData) self.FvList = [] while offset < (fdsize - sizeof(EFI_FIRMWARE_VOLUME_HEADER)): fvh = EFI_FIRMWARE_VOLUME_HEADER.from_buffer(self.FdData, offset) if b'_FVH' != fvh.Signature: raise Exception("ERROR: Invalid FV header !") fv = FirmwareVolume( offset, self.FdData[offset:offset + fvh.FvLength]) fv.ParseFv() self.FvList.append(fv) offset += fv.FvHdr.FvLength def CheckFsp(self): if len(self.FspList) == 0: return fih = None for fsp in self.FspList: if not fih: fih = fsp.Fih else: newfih = fsp.Fih if (newfih.ImageId != fih.ImageId) or \ (newfih.ImageRevision != fih.ImageRevision): raise Exception( "ERROR: Inconsistent FSP ImageId or " "ImageRevision detected !") def ParseFsp(self): flen = 0 for idx, fv in enumerate(self.FvList): # Check if this FV contains FSP header if flen == 0: if len(fv.FfsList) == 0: continue ffs = fv.FfsList[0] if len(ffs.SecList) == 0: continue sec = ffs.SecList[0] if sec.SecHdr.Type != EFI_SECTION_TYPE.RAW: continue fihoffset = ffs.Offset + sec.Offset + sizeof(sec.SecHdr) fspoffset = fv.Offset offset = fspoffset + fihoffset fih = FSP_INFORMATION_HEADER.from_buffer(self.FdData, offset) self.FihList.append(fih) if b'FSPH' != fih.Signature: continue offset += fih.HeaderLength offset = AlignPtr(offset, 2) Extfih = FSP_EXTENDED_HEADER.from_buffer(self.FdData, offset) self.FspExtList.append(Extfih) offset = AlignPtr(offset, 4) plist = [] while True: fch = FSP_COMMON_HEADER.from_buffer(self.FdData, offset) if b'FSPP' != fch.Signature: offset += fch.HeaderLength offset = AlignPtr(offset, 4) else: fspp = FSP_PATCH_TABLE.from_buffer( self.FdData, offset) offset += sizeof(fspp) start_offset = offset + 32 end_offset = offset + 32 while True: end_offset += 1 if(self.FdData[ end_offset: end_offset + 1] == b'\xff'): break self.BuildList.append( self.FdData[start_offset:end_offset]) pdata = (c_uint32 * fspp.PatchEntryNum).from_buffer( self.FdData, offset) plist = list(pdata) break fsp = FspImage(fspoffset, fih, fihoffset, plist) fsp.AppendFv(idx) self.FspList.append(fsp) flen = fsp.Fih.ImageSize - fv.FvHdr.FvLength else: fsp.AppendFv(idx) flen -= fv.FvHdr.FvLength if flen < 0: raise Exception("ERROR: Incorrect FV size in image !") self.CheckFsp() def IsIntegerType(self, val): if sys.version_info[0] < 3: if type(val) in (int, long): return True else: if type(val) is int: return True return False def ConvertRevisionString(self, obj): for field in obj._fields_: key = field[0] val = getattr(obj, key) rep = '' if self.IsIntegerType(val): if (key == 'ImageRevision'): FspImageRevisionMajor = ((val >> 24) & 0xFF) FspImageRevisionMinor = ((val >> 16) & 0xFF) FspImageRevisionRevision = ((val >> 8) & 0xFF) FspImageRevisionBuildNumber = (val & 0xFF) rep = '0x%08X' % val elif (key == 
'ExtendedImageRevision'): FspImageRevisionRevision |= (val & 0xFF00) FspImageRevisionBuildNumber |= ((val << 8) & 0xFF00) rep = "0x%04X ('%02X.%02X.%04X.%04X')" % (val, FspImageRevisionMajor, FspImageRevisionMinor, FspImageRevisionRevision, FspImageRevisionBuildNumber) return rep def OutputFsp(self): def copy_text_to_clipboard(): window.clipboard_clear() window.clipboard_append(self.OutputText) window = tkinter.Tk() window.title("Fsp Headers") window.resizable(0, 0) # Window Size window.geometry("300x400+350+150") frame = tkinter.Frame(window) frame.pack(side=tkinter.BOTTOM) # Vertical (y) Scroll Bar scroll = tkinter.Scrollbar(window) scroll.pack(side=tkinter.RIGHT, fill=tkinter.Y) text = tkinter.Text(window, wrap=tkinter.NONE, yscrollcommand=scroll.set) i = 0 self.OutputText = self.OutputText + "Fsp Header Details \n\n" while i < len(self.FihList): try: # self.OutputText += str(self.BuildList[i].decode()) + "\n" self.OutputText += str(self.BuildList[i]) + "\n" except Exception: self.OutputText += "No description found\n" self.OutputText += "FSP Header :\n " self.OutputText += "Signature : " + \ str(self.FihList[i].Signature.decode('utf-8')) + "\n " self.OutputText += "Header Length : " + \ str(hex(self.FihList[i].HeaderLength)) + "\n " self.OutputText += "Reserved1 : " + \ str(hex(self.FihList[i].Reserved1)) + "\n " self.OutputText += "Header Revision : " + \ str(hex(self.FihList[i].HeaderRevision)) + "\n " self.OutputText += "Spec Version : " + \ str(hex(self.FihList[i].SpecVersion)) + "\n " self.OutputText += "Image Revision : " + \ str(hex(self.FihList[i].ImageRevision)) + "\n " self.OutputText += "Image Id : " + \ str(self.FihList[i].ImageId.decode('utf-8')) + "\n " self.OutputText += "Image Size : " + \ str(hex(self.FihList[i].ImageSize)) + "\n " self.OutputText += "Image Base : " + \ str(hex(self.FihList[i].ImageBase)) + "\n " self.OutputText += "Image Attribute : " + \ str(hex(self.FihList[i].ImageAttribute)) + "\n " self.OutputText += "Component Attribute : " + \ str(hex(self.FihList[i].ComponentAttribute)) + "\n " self.OutputText += "Cfg Region Offset : " + \ str(hex(self.FihList[i].CfgRegionOffset)) + "\n " self.OutputText += "Cfg Region Size : " + \ str(hex(self.FihList[i].CfgRegionSize)) + "\n " self.OutputText += "Reserved2 : " + \ str(hex(self.FihList[i].Reserved2)) + "\n " self.OutputText += "Temp Ram Init Entry : " + \ str(hex(self.FihList[i].TempRamInitEntryOffset)) + "\n " self.OutputText += "Reserved3 : " + \ str(hex(self.FihList[i].Reserved3)) + "\n " self.OutputText += "Notify Phase Entry : " + \ str(hex(self.FihList[i].NotifyPhaseEntryOffset)) + "\n " self.OutputText += "Fsp Memory Init Entry : " + \ str(hex(self.FihList[i].FspMemoryInitEntryOffset)) + "\n " self.OutputText += "Temp Ram Exit Entry : " + \ str(hex(self.FihList[i].TempRamExitEntryOffset)) + "\n " self.OutputText += "Fsp Silicon Init Entry : " + \ str(hex(self.FihList[i].FspSiliconInitEntryOffset)) + "\n " self.OutputText += "Fsp Multi Phase Si Init Entry : " + \ str(hex(self.FihList[i].FspMultiPhaseSiInitEntryOffset)) + "\n " # display ExtendedImageRevision & Reserved4 if HeaderRevision >= 6 for fsp in self.FihList: if fsp.HeaderRevision >= 6: Display_ExtndImgRev = TRUE else: Display_ExtndImgRev = FALSE self.OutputText += "\n" if Display_ExtndImgRev == TRUE: self.OutputText += "ExtendedImageRevision : " + \ str(self.ConvertRevisionString(self.FihList[i])) + "\n " self.OutputText += "Reserved4 : " + \ str(hex(self.FihList[i].Reserved4)) + "\n\n" self.OutputText += "FSP Extended Header:\n " 
self.OutputText += "Signature : " + \ str(self.FspExtList[i].Signature.decode('utf-8')) + "\n " self.OutputText += "Header Length : " + \ str(hex(self.FspExtList[i].HeaderLength)) + "\n " self.OutputText += "Header Revision : " + \ str(hex(self.FspExtList[i].Revision)) + "\n " self.OutputText += "Fsp Producer Id : " + \ str(self.FspExtList[i].FspProducerId.decode('utf-8')) + "\n " self.OutputText += "FspProducerRevision : " + \ str(hex(self.FspExtList[i].FspProducerRevision)) + "\n\n" i += 1 text.insert(tkinter.INSERT, self.OutputText) text.pack() # Configure the scrollbars scroll.config(command=text.yview) copy_button = tkinter.Button( window, text="Copy to Clipboard", command=copy_text_to_clipboard) copy_button.pack(in_=frame, side=tkinter.LEFT, padx=20, pady=10) exit_button = tkinter.Button( window, text="Close", command=window.destroy) exit_button.pack(in_=frame, side=tkinter.RIGHT, padx=20, pady=10) window.mainloop() class state: def __init__(self): self.state = False def set(self, value): self.state = value def get(self): return self.state class application(tkinter.Frame): def __init__(self, master=None): root = master self.debug = True self.mode = 'FSP' self.last_dir = '.' self.page_id = '' self.page_list = {} self.conf_list = {} self.cfg_page_dict = {} self.cfg_data_obj = None self.org_cfg_data_bin = None self.in_left = state() self.in_right = state() self.search_text = '' # Check if current directory contains a file with a .yaml extension # if not default self.last_dir to a Platform directory where it is # easier to locate *BoardPkg\CfgData\*Def.yaml files self.last_dir = '.' if not any(fname.endswith('.yaml') for fname in os.listdir('.')): platform_path = Path(os.path.realpath(__file__)).parents[2].\ joinpath('Platform') if platform_path.exists(): self.last_dir = platform_path tkinter.Frame.__init__(self, master, borderwidth=2) self.menu_string = [ 'Save Config Data to Binary', 'Load Config Data from Binary', 'Show Binary Information', 'Load Config Changes from Delta File', 'Save Config Changes to Delta File', 'Save Full Config Data to Delta File', 'Open Config BSF file' ] root.geometry("1200x800") # Search string fram = tkinter.Frame(root) # adding label to search box tkinter.Label(fram, text='Text to find:').pack(side=tkinter.LEFT) # adding of single line text box self.edit = tkinter.Entry(fram, width=30) # positioning of text box self.edit.pack( side=tkinter.LEFT, fill=tkinter.BOTH, expand=1, padx=(4, 4)) # setting focus self.edit.focus_set() # adding of search button butt = tkinter.Button(fram, text='Search', relief=tkinter.GROOVE, command=self.search_bar) butt.pack(side=tkinter.RIGHT, padx=(4, 4)) fram.pack(side=tkinter.TOP, anchor=tkinter.SE) paned = ttk.Panedwindow(root, orient=tkinter.HORIZONTAL) paned.pack(fill=tkinter.BOTH, expand=True, padx=(4, 4)) status = tkinter.Label(master, text="", bd=1, relief=tkinter.SUNKEN, anchor=tkinter.W) status.pack(side=tkinter.BOTTOM, fill=tkinter.X) frame_left = ttk.Frame(paned, height=800, relief="groove") self.left = ttk.Treeview(frame_left, show="tree") # Set up tree HScroller pady = (10, 10) self.tree_scroll = ttk.Scrollbar(frame_left, orient="vertical", command=self.left.yview) self.left.configure(yscrollcommand=self.tree_scroll.set) self.left.bind("<<TreeviewSelect>>", self.on_config_page_select_change) self.left.bind("<Enter>", lambda e: self.in_left.set(True)) self.left.bind("<Leave>", lambda e: self.in_left.set(False)) self.left.bind("<MouseWheel>", self.on_tree_scroll) self.left.pack(side='left', fill=tkinter.BOTH, 
expand=True, padx=(5, 0), pady=pady) self.tree_scroll.pack(side='right', fill=tkinter.Y, pady=pady, padx=(0, 5)) frame_right = ttk.Frame(paned, relief="groove") self.frame_right = frame_right self.conf_canvas = tkinter.Canvas(frame_right, highlightthickness=0) self.page_scroll = ttk.Scrollbar(frame_right, orient="vertical", command=self.conf_canvas.yview) self.right_grid = ttk.Frame(self.conf_canvas) self.conf_canvas.configure(yscrollcommand=self.page_scroll.set) self.conf_canvas.pack(side='left', fill=tkinter.BOTH, expand=True, pady=pady, padx=(5, 0)) self.page_scroll.pack(side='right', fill=tkinter.Y, pady=pady, padx=(0, 5)) self.conf_canvas.create_window(0, 0, window=self.right_grid, anchor='nw') self.conf_canvas.bind('<Enter>', lambda e: self.in_right.set(True)) self.conf_canvas.bind('<Leave>', lambda e: self.in_right.set(False)) self.conf_canvas.bind("<Configure>", self.on_canvas_configure) self.conf_canvas.bind_all("<MouseWheel>", self.on_page_scroll) paned.add(frame_left, weight=2) paned.add(frame_right, weight=10) style = ttk.Style() style.layout("Treeview", [('Treeview.treearea', {'sticky': 'nswe'})]) menubar = tkinter.Menu(root) file_menu = tkinter.Menu(menubar, tearoff=0) file_menu.add_command(label="Open Config YAML file", command=self.load_from_yaml) file_menu.add_command(label=self.menu_string[6], command=self.load_from_bsf_file) file_menu.add_command(label=self.menu_string[2], command=self.load_from_fd) file_menu.add_command(label=self.menu_string[0], command=self.save_to_bin, state='disabled') file_menu.add_command(label=self.menu_string[1], command=self.load_from_bin, state='disabled') file_menu.add_command(label=self.menu_string[3], command=self.load_from_delta, state='disabled') file_menu.add_command(label=self.menu_string[4], command=self.save_to_delta, state='disabled') file_menu.add_command(label=self.menu_string[5], command=self.save_full_to_delta, state='disabled') file_menu.add_command(label="About", command=self.about) menubar.add_cascade(label="File", menu=file_menu) self.file_menu = file_menu root.config(menu=menubar) if len(sys.argv) > 1: path = sys.argv[1] if not path.endswith('.yaml') and not path.endswith('.pkl'): messagebox.showerror('LOADING ERROR', "Unsupported file '%s' !" % path) return else: self.load_cfg_file(path) if len(sys.argv) > 2: path = sys.argv[2] if path.endswith('.dlt'): self.load_delta_file(path) elif path.endswith('.bin'): self.load_bin_file(path) else: messagebox.showerror('LOADING ERROR', "Unsupported file '%s' !" % path) return def search_bar(self): # get data from text box self.search_text = self.edit.get() # Clear the page and update it according to search value self.refresh_config_data_page() def set_object_name(self, widget, name): self.conf_list[id(widget)] = name def get_object_name(self, widget): if id(widget) in self.conf_list: return self.conf_list[id(widget)] else: return None def limit_entry_size(self, variable, limit): value = variable.get() if len(value) > limit: variable.set(value[:limit]) def on_canvas_configure(self, event): self.right_grid.grid_columnconfigure(0, minsize=event.width) def on_tree_scroll(self, event): if not self.in_left.get() and self.in_right.get(): # This prevents scroll event from being handled by both left and # right frame at the same time. 
self.on_page_scroll(event) return 'break' def on_page_scroll(self, event): if self.in_right.get(): # Only scroll when it is in active area min, max = self.page_scroll.get() if not((min == 0.0) and (max == 1.0)): self.conf_canvas.yview_scroll(-1 * int(event.delta / 120), 'units') def update_visibility_for_widget(self, widget, args): visible = True item = self.get_config_data_item_from_widget(widget, True) if item is None: return visible elif not item: return visible if self.cfg_data_obj.binseg_dict: str_split = item['path'].split('.') if str_split[-2] not in CGenYamlCfg.available_fv and \ str_split[-2] not in CGenYamlCfg.missing_fv: if self.cfg_data_obj.binseg_dict[str_split[-3]] == -1: visible = False widget.grid_remove() return visible else: if self.cfg_data_obj.binseg_dict[str_split[-2]] == -1: visible = False widget.grid_remove() return visible result = 1 if item['condition']: result = self.evaluate_condition(item) if result == 2: # Gray widget.configure(state='disabled') elif result == 0: # Hide visible = False widget.grid_remove() else: # Show widget.grid() widget.configure(state='normal') if visible and self.search_text != '': name = item['name'] if name.lower().find(self.search_text.lower()) == -1: visible = False widget.grid_remove() return visible def update_widgets_visibility_on_page(self): self.walk_widgets_in_layout(self.right_grid, self.update_visibility_for_widget) def combo_select_changed(self, event): self.update_config_data_from_widget(event.widget, None) self.update_widgets_visibility_on_page() def edit_num_finished(self, event): widget = event.widget item = self.get_config_data_item_from_widget(widget) if not item: return parts = item['type'].split(',') if len(parts) > 3: min = parts[2].lstrip()[1:] max = parts[3].rstrip()[:-1] min_val = array_str_to_value(min) max_val = array_str_to_value(max) text = widget.get() if ',' in text: text = '{ %s }' % text try: value = array_str_to_value(text) if value < min_val or value > max_val: raise Exception('Invalid input!') self.set_config_item_value(item, text) except Exception: pass text = item['value'].strip('{').strip('}').strip() widget.delete(0, tkinter.END) widget.insert(0, text) self.update_widgets_visibility_on_page() def update_page_scroll_bar(self): # Update scrollbar self.frame_right.update() self.conf_canvas.config(scrollregion=self.conf_canvas.bbox("all")) def on_config_page_select_change(self, event): self.update_config_data_on_page() sel = self.left.selection() if len(sel) > 0: page_id = sel[0] self.build_config_data_page(page_id) self.update_widgets_visibility_on_page() self.update_page_scroll_bar() def walk_widgets_in_layout(self, parent, callback_function, args=None): for widget in parent.winfo_children(): callback_function(widget, args) def clear_widgets_inLayout(self, parent=None): if parent is None: parent = self.right_grid for widget in parent.winfo_children(): widget.destroy() parent.grid_forget() self.conf_list.clear() def build_config_page_tree(self, cfg_page, parent): for page in cfg_page['child']: page_id = next(iter(page)) # Put CFG items into related page list self.page_list[page_id] = self.cfg_data_obj.get_cfg_list(page_id) self.page_list[page_id].sort(key=lambda x: x['order']) page_name = self.cfg_data_obj.get_page_title(page_id) child = self.left.insert( parent, 'end', iid=page_id, text=page_name, value=0) if len(page[page_id]) > 0: self.build_config_page_tree(page[page_id], child) def is_config_data_loaded(self): return True if len(self.page_list) else False def set_current_config_page(self, 
page_id): self.page_id = page_id def get_current_config_page(self): return self.page_id def get_current_config_data(self): page_id = self.get_current_config_page() if page_id in self.page_list: return self.page_list[page_id] else: return [] invalid_values = {} def build_config_data_page(self, page_id): self.clear_widgets_inLayout() self.set_current_config_page(page_id) disp_list = [] for item in self.get_current_config_data(): disp_list.append(item) row = 0 disp_list.sort(key=lambda x: x['order']) for item in disp_list: self.add_config_item(item, row) row += 2 if self.invalid_values: string = 'The following contails invalid options/values \n\n' for i in self.invalid_values: string += i + ": " + str(self.invalid_values[i]) + "\n" reply = messagebox.showwarning('Warning!', string) if reply == 'ok': self.invalid_values.clear() fsp_version = '' def load_config_data(self, file_name): gen_cfg_data = CGenYamlCfg() if file_name.endswith('.pkl'): with open(file_name, "rb") as pkl_file: gen_cfg_data.__dict__ = marshal.load(pkl_file) gen_cfg_data.prepare_marshal(False) elif file_name.endswith('.yaml'): if gen_cfg_data.load_yaml(file_name) != 0: raise Exception(gen_cfg_data.get_last_error()) else: raise Exception('Unsupported file "%s" !' % file_name) # checking fsp version if gen_cfg_data.detect_fsp(): self.fsp_version = '2.X' else: self.fsp_version = '1.X' return gen_cfg_data def about(self): msg = 'Configuration Editor\n--------------------------------\n \ Version 0.8\n2021' lines = msg.split('\n') width = 30 text = [] for line in lines: text.append(line.center(width, ' ')) messagebox.showinfo('Config Editor', '\n'.join(text)) def update_last_dir(self, path): self.last_dir = os.path.dirname(path) def get_open_file_name(self, ftype): if self.is_config_data_loaded(): if ftype == 'dlt': question = '' elif ftype == 'bin': question = 'All configuration will be reloaded from BIN file, \ continue ?' 
elif ftype == 'yaml': question = '' elif ftype == 'bsf': question = '' else: raise Exception('Unsupported file type !') if question: reply = messagebox.askquestion('', question, icon='warning') if reply == 'no': return None if ftype == 'yaml': if self.mode == 'FSP': file_type = 'YAML' file_ext = 'yaml' else: file_type = 'YAML or PKL' file_ext = 'pkl *.yaml' else: file_type = ftype.upper() file_ext = ftype path = filedialog.askopenfilename( initialdir=self.last_dir, title="Load file", filetypes=(("%s files" % file_type, "*.%s" % file_ext), ( "all files", "*.*"))) if path: self.update_last_dir(path) return path else: return None def load_from_delta(self): path = self.get_open_file_name('dlt') if not path: return self.load_delta_file(path) def load_delta_file(self, path): self.reload_config_data_from_bin(self.org_cfg_data_bin) try: self.cfg_data_obj.override_default_value(path) except Exception as e: messagebox.showerror('LOADING ERROR', str(e)) return self.update_last_dir(path) self.refresh_config_data_page() def load_from_bin(self): path = filedialog.askopenfilename( initialdir=self.last_dir, title="Load file", filetypes={("Binaries", "*.fv *.fd *.bin *.rom")}) if not path: return self.load_bin_file(path) def load_bin_file(self, path): with open(path, 'rb') as fd: bin_data = bytearray(fd.read()) if len(bin_data) < len(self.org_cfg_data_bin): messagebox.showerror('Binary file size is smaller than what \ YAML requires !') return try: self.reload_config_data_from_bin(bin_data) except Exception as e: messagebox.showerror('LOADING ERROR', str(e)) return def load_from_bsf_file(self): path = self.get_open_file_name('bsf') if not path: return self.load_bsf_file(path) def load_bsf_file(self, path): bsf_file = path dsc_file = os.path.splitext(bsf_file)[0] + '.dsc' yaml_file = os.path.splitext(bsf_file)[0] + '.yaml' bsf_to_dsc(bsf_file, dsc_file) dsc_to_yaml(dsc_file, yaml_file) self.load_cfg_file(yaml_file) return def load_from_fd(self): path = filedialog.askopenfilename( initialdir=self.last_dir, title="Load file", filetypes={("Binaries", "*.fv *.fd *.bin *.rom")}) if not path: return self.load_fd_file(path) def load_fd_file(self, path): with open(path, 'rb') as fd: bin_data = bytearray(fd.read()) fd = FirmwareDevice(0, bin_data) fd.ParseFd() fd.ParseFsp() fd.OutputFsp() def load_cfg_file(self, path): # Save current values in widget and clear database self.clear_widgets_inLayout() self.left.delete(*self.left.get_children()) self.cfg_data_obj = self.load_config_data(path) self.update_last_dir(path) self.org_cfg_data_bin = self.cfg_data_obj.generate_binary_array() self.build_config_page_tree(self.cfg_data_obj.get_cfg_page()['root'], '') msg_string = 'Click YES if it is FULL FSP '\ + self.fsp_version + ' Binary' reply = messagebox.askquestion('Form', msg_string) if reply == 'yes': self.load_from_bin() for menu in self.menu_string: self.file_menu.entryconfig(menu, state="normal") return 0 def load_from_yaml(self): path = self.get_open_file_name('yaml') if not path: return self.load_cfg_file(path) def get_save_file_name(self, extension): path = filedialog.asksaveasfilename( initialdir=self.last_dir, title="Save file", defaultextension=extension) if path: self.last_dir = os.path.dirname(path) return path else: return None def save_delta_file(self, full=False): path = self.get_save_file_name(".dlt") if not path: return self.update_config_data_on_page() new_data = self.cfg_data_obj.generate_binary_array() self.cfg_data_obj.generate_delta_file_from_bin(path, self.org_cfg_data_bin, new_data, full) def 
save_to_delta(self): self.save_delta_file() def save_full_to_delta(self): self.save_delta_file(True) def save_to_bin(self): path = self.get_save_file_name(".bin") if not path: return self.update_config_data_on_page() bins = self.cfg_data_obj.save_current_to_bin() with open(path, 'wb') as fd: fd.write(bins) def refresh_config_data_page(self): self.clear_widgets_inLayout() self.on_config_page_select_change(None) def set_config_data_page(self): page_id_list = [] for idx, page in enumerate( self.cfg_data_obj._cfg_page['root']['child']): page_id_list.append(list(page.keys())[0]) page_list = self.cfg_data_obj.get_cfg_list(page_id_list[idx]) self.cfg_page_dict[page_id_list[idx]] = 0 for item in page_list: str_split = item['path'].split('.') if str_split[-2] not in CGenYamlCfg.available_fv and \ str_split[-2] not in CGenYamlCfg.missing_fv: if self.cfg_data_obj.binseg_dict[str_split[-3]] != -1: self.cfg_page_dict[page_id_list[idx]] += 1 else: if self.cfg_data_obj.binseg_dict[str_split[-2]] != -1: self.cfg_page_dict[page_id_list[idx]] += 1 removed_page = 0 for idx, id in enumerate(page_id_list): if self.cfg_page_dict[id] == 0: del self.cfg_data_obj._cfg_page['root']['child'][idx-removed_page] # noqa: E501 removed_page += 1 def reload_config_data_from_bin(self, bin_dat): self.cfg_data_obj.load_default_from_bin(bin_dat) self.set_config_data_page() self.left.delete(*self.left.get_children()) self.build_config_page_tree(self.cfg_data_obj.get_cfg_page()['root'], '') self.refresh_config_data_page() def set_config_item_value(self, item, value_str): itype = item['type'].split(',')[0] if itype == "Table": new_value = value_str elif itype == "EditText": length = (self.cfg_data_obj.get_cfg_item_length(item) + 7) // 8 new_value = value_str[:length] if item['value'].startswith("'"): new_value = "'%s'" % new_value else: try: new_value = self.cfg_data_obj.reformat_value_str( value_str, self.cfg_data_obj.get_cfg_item_length(item), item['value']) except Exception: print("WARNING: Failed to format value string '%s' for '%s' !" % (value_str, item['path'])) new_value = item['value'] if item['value'] != new_value: if self.debug: print('Update %s from %s to %s !' % (item['cname'], item['value'], new_value)) item['value'] = new_value def get_config_data_item_from_widget(self, widget, label=False): name = self.get_object_name(widget) if not name or not len(self.page_list): return None if name.startswith('LABEL_'): if label: path = name[6:] else: return None else: path = name item = self.cfg_data_obj.get_item_by_path(path) return item def update_config_data_from_widget(self, widget, args): item = self.get_config_data_item_from_widget(widget) if item is None: return elif not item: if isinstance(widget, tkinter.Label): return raise Exception('Failed to find "%s" !' % self.get_object_name(widget)) itype = item['type'].split(',')[0] if itype == "Combo": opt_list = self.cfg_data_obj.get_cfg_item_options(item) tmp_list = [opt[0] for opt in opt_list] idx = widget.current() if idx != -1: self.set_config_item_value(item, tmp_list[idx]) elif itype in ["EditNum", "EditText"]: self.set_config_item_value(item, widget.get()) elif itype in ["Table"]: new_value = bytes_to_bracket_str(widget.get()) self.set_config_item_value(item, new_value) def evaluate_condition(self, item): try: result = self.cfg_data_obj.evaluate_condition(item) except Exception: print("WARNING: Condition '%s' is invalid for '%s' !" 
% (item['condition'], item['path'])) result = 1 return result def add_config_item(self, item, row): parent = self.right_grid name = tkinter.Label(parent, text=item['name'], anchor="w") parts = item['type'].split(',') itype = parts[0] widget = None if itype == "Combo": # Build opt_list = self.cfg_data_obj.get_cfg_item_options(item) current_value = self.cfg_data_obj.get_cfg_item_value(item, False) option_list = [] current = None for idx, option in enumerate(opt_list): option_str = option[0] try: option_value = self.cfg_data_obj.get_value( option_str, len(option_str), False) except Exception: option_value = 0 print('WARNING: Option "%s" has invalid format for "%s" !' % (option_str, item['path'])) if option_value == current_value: current = idx option_list.append(option[1]) widget = ttk.Combobox(parent, value=option_list, state="readonly") widget.bind("<<ComboboxSelected>>", self.combo_select_changed) widget.unbind_class("TCombobox", "<MouseWheel>") if current is None: print('WARNING: Value "%s" is an invalid option for "%s" !' % (current_value, item['path'])) self.invalid_values[item['path']] = current_value else: widget.current(current) elif itype in ["EditNum", "EditText"]: txt_val = tkinter.StringVar() widget = tkinter.Entry(parent, textvariable=txt_val) value = item['value'].strip("'") if itype in ["EditText"]: txt_val.trace( 'w', lambda *args: self.limit_entry_size (txt_val, (self.cfg_data_obj.get_cfg_item_length(item) + 7) // 8)) elif itype in ["EditNum"]: value = item['value'].strip("{").strip("}").strip() widget.bind("<FocusOut>", self.edit_num_finished) txt_val.set(value) elif itype in ["Table"]: bins = self.cfg_data_obj.get_cfg_item_value(item, True) col_hdr = item['option'].split(',') widget = custom_table(parent, col_hdr, bins) else: if itype and itype not in ["Reserved"]: print("WARNING: Type '%s' is invalid for '%s' !" % (itype, item['path'])) self.invalid_values[item['path']] = itype if widget: create_tool_tip(widget, item['help']) self.set_object_name(name, 'LABEL_' + item['path']) self.set_object_name(widget, item['path']) name.grid(row=row, column=0, padx=10, pady=5, sticky="nsew") widget.grid(row=row + 1, rowspan=1, column=0, padx=10, pady=5, sticky="nsew") def update_config_data_on_page(self): self.walk_widgets_in_layout(self.right_grid, self.update_config_data_from_widget) if __name__ == '__main__': root = tkinter.Tk() app = application(master=root) root.title("Config Editor") root.mainloop()
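# EFI_FFS_FILE_HEADER.Size above is a 24-bit little-endian field, modeled by
# the c_uint24 class. A standalone sketch of the same pattern; UInt24 is an
# illustrative stand-in for c_uint24, not the class used by the parser.

from ctypes import Structure, c_uint8

class UInt24(Structure):
    _pack_ = 1
    _fields_ = [('Data', c_uint8 * 3)]

    def get(self):
        return int.from_bytes(bytes(self.Data), 'little')

    def set(self, val):
        self.Data[0:3] = list(val.to_bytes(3, 'little'))

u = UInt24()
u.set(0x123456)
assert bytes(u.Data) == b'\x56\x34\x12'   # stored least-significant byte first
assert u.get() == 0x123456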
edk2-master
IntelFsp2Pkg/Tools/ConfigEditor/ConfigEditor.py
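# FirmwareDevice.ParseFd() above walks a flash image by overlaying an
# EFI_FIRMWARE_VOLUME_HEADER at each offset and checking for the '_FVH'
# signature, which sits at byte 40 (after the 16-byte ZeroVector, 16-byte
# FileSystemGuid, and 8-byte FvLength). A minimal sketch of that check on a
# synthetic buffer; FvHeader and all values here are made up for illustration.

from ctypes import ARRAY, Structure, c_char, c_uint8, c_uint16, c_uint32, c_uint64

class FvHeader(Structure):
    _fields_ = [('ZeroVector', ARRAY(c_uint8, 16)),
                ('FileSystemGuid', ARRAY(c_uint8, 16)),
                ('FvLength', c_uint64),
                ('Signature', ARRAY(c_char, 4)),
                ('Attributes', c_uint32),
                ('HeaderLength', c_uint16),
                ('Checksum', c_uint16),
                ('ExtHeaderOffset', c_uint16),
                ('Reserved', c_uint8),
                ('Revision', c_uint8)]

fake = bytearray(0x1000)
fake[32:40] = (0x1000).to_bytes(8, 'little')      # FvLength
fake[40:44] = b'_FVH'                             # signature follows FvLength
hdr = FvHeader.from_buffer(fake)
assert hdr.Signature == b'_FVH' and hdr.FvLength == 0x1000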
## @file
# Automate the process of building the various reset vector types
#
# Copyright (c) 2014, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

import glob
import os
import subprocess
import sys


def RunCommand(commandLine):
    # print(' '.join(commandLine))
    return subprocess.call(commandLine)


# Remove any previously generated raw binaries
for filename in glob.glob(os.path.join('Bin', '*.raw')):
    os.remove(filename)

arch = 'ia32'
debugType = None
output = os.path.join('Bin', 'ResetVec')
output += '.' + arch
if debugType is not None:
    output += '.' + debugType
output += '.raw'
commandLine = (
    'nasm',
    '-D', 'ARCH_%s' % arch.upper(),
    '-D', 'DEBUG_%s' % str(debugType).upper(),
    '-o', output,
    'ResetVectorCode.asm',
    )
ret = RunCommand(commandLine)
print('\tASM\t' + output)
if ret != 0:
    sys.exit(ret)

commandLine = (
    'python',
    'Tools/FixupForRawSection.py',
    output,
    )
print('\tFIXUP\t' + output)
ret = RunCommand(commandLine)
if ret != 0:
    sys.exit(ret)
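# The script above does a single ia32 pass. Related Vtf0 builders in edk2
# iterate over several (arch, debugType) combinations; a hedged sketch of that
# looped form is below. build_all() and the 'port80' debug type are
# assumptions for illustration, reusing the nasm flags already passed above.

import os
import sys

def build_all(run, archs=('ia32', 'x64'), debug_types=(None, 'port80')):
    for arch in archs:
        for debug_type in debug_types:
            output = os.path.join('Bin', 'ResetVec') + '.' + arch
            if debug_type is not None:
                output += '.' + debug_type
            output += '.raw'
            ret = run((
                'nasm',
                '-D', 'ARCH_%s' % arch.upper(),
                '-D', 'DEBUG_%s' % str(debug_type).upper(),
                '-o', output,
                'ResetVectorCode.asm',
            ))
            if ret != 0:
                sys.exit(ret)

# build_all(RunCommand)    # RunCommand as defined in the script above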
edk2-master
IntelFsp2Pkg/FspSecCore/Vtf0/Build.py
## @file
# Apply fixup to VTF binary image for FFS Raw section
#
# Copyright (c) 2014, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#

import sys

filename = sys.argv[1]

if filename.lower().find('ia32') >= 0:
    # IA32: pad the front of the image so the raw section stays 8-byte
    # aligned (4 bytes are consumed by the FFS raw section header).
    d = open(sys.argv[1], 'rb').read()
    c = ((len(d) + 4 + 7) & ~7) - 4
    if c > len(d):
        c -= len(d)
        f = open(sys.argv[1], 'wb')
        f.write(b'\x90' * c)          # 0x90 = x86 NOP
        f.write(d)
        f.close()
else:
    from struct import pack

    PAGE_PRESENT = 0x01
    PAGE_READ_WRITE = 0x02
    PAGE_USER_SUPERVISOR = 0x04
    PAGE_WRITE_THROUGH = 0x08
    PAGE_CACHE_DISABLE = 0x010
    PAGE_ACCESSED = 0x020
    PAGE_DIRTY = 0x040
    PAGE_PAT = 0x080
    PAGE_GLOBAL = 0x0100

    PAGE_2M_MBO = 0x080
    PAGE_2M_PAT = 0x01000

    def NopAlign4k(s):
        c = ((len(s) + 0xfff) & ~0xfff) - len(s)
        return (b'\x90' * c) + s

    def PageDirectoryEntries4GbOf2MbPages(baseAddress):
        s = b''
        for i in range(0x800):
            i = (
                baseAddress + (i << 21) +
                PAGE_2M_MBO +
                PAGE_CACHE_DISABLE +
                PAGE_ACCESSED +
                PAGE_DIRTY +
                PAGE_READ_WRITE +
                PAGE_PRESENT
                )
            s += pack('Q', i)
        return s

    def PageDirectoryPointerTable4GbOf2MbPages(pdeBase):
        s = b''
        for i in range(0x200):
            i = (
                pdeBase +
                (min(i, 3) << 12) +
                PAGE_CACHE_DISABLE +
                PAGE_ACCESSED +
                PAGE_READ_WRITE +
                PAGE_PRESENT
                )
            s += pack('Q', i)
        return s

    def PageMapLevel4Table4GbOf2MbPages(pdptBase):
        s = b''
        for i in range(0x200):
            i = (
                pdptBase +
                (min(i, 0) << 12) +
                PAGE_CACHE_DISABLE +
                PAGE_ACCESSED +
                PAGE_READ_WRITE +
                PAGE_PRESENT
                )
            s += pack('Q', i)
        return s

    def First4GbPageEntries(topAddress):
        # Page tables are stacked just below topAddress:
        # [PDE x 4 pages][PDPT][PML4T]
        PDE = PageDirectoryEntries4GbOf2MbPages(0)
        pml4tBase = topAddress - 0x1000
        pdptBase = pml4tBase - 0x1000
        pdeBase = pdptBase - len(PDE)
        PDPT = PageDirectoryPointerTable4GbOf2MbPages(pdeBase)
        PML4T = PageMapLevel4Table4GbOf2MbPages(pdptBase)
        return PDE + PDPT + PML4T

    def AlignAndAddPageTables():
        d = open(sys.argv[1], 'rb').read()
        code = NopAlign4k(d)
        topAddress = 0x100000000 - len(code)
        d = (b'\x90' * 4) + First4GbPageEntries(topAddress) + code
        f = open(sys.argv[1], 'wb')
        f.write(d)
        f.close()

    AlignAndAddPageTables()
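# Sanity check for the 2 MiB page-table entries built above: entry i maps the
# physical range [i << 21, (i + 1) << 21) and carries the MBO and flag bits.
# The constants below are the same ones defined in the script.

entry0 = (0 << 21) | 0x080 | 0x010 | 0x020 | 0x040 | 0x02 | 0x01
#         base       2M MBO  CD      A       D       RW     P
assert entry0 == 0x0F3
entry1 = (1 << 21) | 0x0F3
assert entry1 == 0x2000F3                 # second entry maps 2 MiB..4 MiB
# 0x800 such entries cover 0x800 << 21 == 4 GiB, matching the loop bound:
assert (0x800 << 21) == 0x100000000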
edk2-master
IntelFsp2Pkg/FspSecCore/Vtf0/Tools/FixupForRawSection.py
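Two bits of arithmetic above are worth checking by hand: the ia32 branch pads the image so that len(d) + 4 becomes 8-byte aligned, and the x64 branch packs page-table entries as 8-byte 'Q' values (0x800 PDEs mapping 4 GiB of 2 MiB pages, plus 0x200-entry PDPT and PML4 tables). A quick standalone verification:

# 8-byte alignment of (len + 4), as in the ia32 branch above.
for n in (10, 13, 16):
    c = ((n + 4 + 7) & ~7) - 4
    assert (c + 4) % 8 == 0 and c >= n
    print(n, '->', c - n, 'pad bytes')   # 2, 7 and 4 pad bytes respectively

# Page-table sizes from the x64 branch: entries are packed as 'Q' (8 bytes).
pde_bytes = 0x800 * 8    # 0x4000: four 4 KiB pages; 0x800 * 2 MiB = 4 GiB mapped
pdpt_bytes = 0x200 * 8   # 0x1000: one 4 KiB page
pml4_bytes = 0x200 * 8   # 0x1000: one 4 KiB page
print(hex(pde_bytes), hex(pdpt_bytes), hex(pml4_bytes))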
# @file # Script to Build OVMF UEFI firmware # # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) from PlatformBuildLib import SettingsManager from PlatformBuildLib import PlatformBuilder # ####################################################################################### # # Common Configuration # # ####################################################################################### # class CommonPlatform(): ''' Common settings for this platform. Define static data here and use for the different parts of stuart ''' PackagesSupported = ("ArmVirtPkg",) ArchSupported = ("AARCH64", "ARM") TargetsSupported = ("DEBUG", "RELEASE", "NOOPT") Scopes = ('armvirt', 'edk2-build') WorkspaceRoot = os.path.realpath(os.path.join( os.path.dirname(os.path.abspath(__file__)), "..", "..")) DscName = os.path.join("ArmVirtPkg", "ArmVirtQemuKernel.dsc") # this platform produces an executable image that is invoked using # the Linux/arm64 kernel boot protocol FvQemuArg = " -kernel " import PlatformBuildLib PlatformBuildLib.CommonPlatform = CommonPlatform
edk2-master
ArmVirtPkg/PlatformCI/QemuKernelBuild.py
# @file # Script to Build ArmVirtPkg UEFI firmware # # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import os import logging import io from edk2toolext.environment import shell_environment from edk2toolext.environment.uefi_build import UefiBuilder from edk2toolext.invocables.edk2_platform_build import BuildSettingsManager from edk2toolext.invocables.edk2_setup import SetupSettingsManager, RequiredSubmodule from edk2toolext.invocables.edk2_update import UpdateSettingsManager from edk2toolext.invocables.edk2_pr_eval import PrEvalSettingsManager from edk2toollib.utility_functions import RunCmd from edk2toollib.utility_functions import GetHostInfo # ####################################################################################### # # Configuration for Update & Setup # # ####################################################################################### # class SettingsManager(UpdateSettingsManager, SetupSettingsManager, PrEvalSettingsManager): def GetPackagesSupported(self): ''' return iterable of edk2 packages supported by this build. These should be edk2 workspace relative paths ''' return CommonPlatform.PackagesSupported def GetArchitecturesSupported(self): ''' return iterable of edk2 architectures supported by this build ''' return CommonPlatform.ArchSupported def GetTargetsSupported(self): ''' return iterable of edk2 target tags supported by this build ''' return CommonPlatform.TargetsSupported def GetRequiredSubmodules(self): ''' return iterable containing RequiredSubmodule objects. If no RequiredSubmodules return an empty iterable ''' rs = [] # intentionally declare this one with recursive false to avoid overhead rs.append(RequiredSubmodule( "CryptoPkg/Library/OpensslLib/openssl", False)) # To avoid maintenance of this file for every new submodule # lets just parse the .gitmodules and add each if not already in list. # The GetRequiredSubmodules is designed to allow a build to optimize # the desired submodules but it isn't necessary for this repository. result = io.StringIO() ret = RunCmd("git", "config --file .gitmodules --get-regexp path", workingdir=self.GetWorkspaceRoot(), outstream=result) # Cmd output is expected to look like: # submodule.CryptoPkg/Library/OpensslLib/openssl.path CryptoPkg/Library/OpensslLib/openssl # submodule.SoftFloat.path ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 if ret == 0: for line in result.getvalue().splitlines(): _, _, path = line.partition(" ") if path is not None: if path not in [x.path for x in rs]: rs.append(RequiredSubmodule(path, True)) # add it with recursive since we don't know return rs def SetArchitectures(self, list_of_requested_architectures): ''' Confirm the requests architecture list is valid and configure SettingsManager to run only the requested architectures. 
        Raise Exception if a list_of_requested_architectures is not supported
        '''
        unsupported = set(list_of_requested_architectures) - \
            set(self.GetArchitecturesSupported())
        if len(unsupported) > 0:
            errorString = (
                "Unsupported Architecture Requested: " + " ".join(unsupported))
            logging.critical(errorString)
            raise Exception(errorString)
        self.ActualArchitectures = list_of_requested_architectures

    def GetWorkspaceRoot(self):
        ''' get WorkspacePath '''
        return CommonPlatform.WorkspaceRoot

    def GetActiveScopes(self):
        ''' return tuple containing scopes that should be active for this process '''
        scopes = CommonPlatform.Scopes
        ActualToolChainTag = shell_environment.GetBuildVars().GetValue("TOOL_CHAIN_TAG", "")
        if GetHostInfo().os.upper() == "LINUX" and ActualToolChainTag.upper().startswith("GCC"):
            if "AARCH64" in self.ActualArchitectures:
                scopes += ("gcc_aarch64_linux",)
            if "ARM" in self.ActualArchitectures:
                scopes += ("gcc_arm_linux",)
        return scopes

    def FilterPackagesToTest(self, changedFilesList: list, potentialPackagesList: list) -> list:
        ''' Filter other cases that this package should be built
        based on changed files. This should cover things that can't
        be detected as dependencies. '''
        build_these_packages = []
        possible_packages = potentialPackagesList.copy()
        for f in changedFilesList:
            # BaseTools files that might change the build
            if "BaseTools" in f:
                # os.path.splitext returns a (root, ext) tuple, so compare only
                # the extension component against the ignore list.
                if os.path.splitext(f)[1] not in [".txt", ".md"]:
                    build_these_packages = possible_packages
                    break
            # if the azure pipeline platform template file changed
            if "platform-build-run-steps.yml" in f:
                build_these_packages = possible_packages
                break
        return build_these_packages

    def GetPlatformDscAndConfig(self) -> tuple:
        ''' If a platform desires to provide its DSC then Policy 4 will evaluate if
        any of the changes will be built in the dsc.

        The tuple should be (<workspace relative path to dsc file>, <input dictionary of dsc key value pairs>)
        '''
        return (CommonPlatform.DscName, {})

# ####################################################################################### #
#                         Actual Configuration for Platform Build                         #
# ####################################################################################### #
class PlatformBuilder(UefiBuilder, BuildSettingsManager):
    def __init__(self):
        UefiBuilder.__init__(self)

    def AddCommandLineOptions(self, parserObj):
        ''' Add command line options to the argparser '''
        parserObj.add_argument('-a', "--arch", dest="build_arch", type=str, default="AARCH64",
                               help="Optional - Architecture to build. 
Default = AARCH64") def RetrieveCommandLineOptions(self, args): ''' Retrieve command line options from the argparser ''' shell_environment.GetBuildVars().SetValue( "TARGET_ARCH", args.build_arch.upper(), "From CmdLine") shell_environment.GetBuildVars().SetValue( "ACTIVE_PLATFORM", CommonPlatform.DscName, "From CmdLine") def GetWorkspaceRoot(self): ''' get WorkspacePath ''' return CommonPlatform.WorkspaceRoot def GetPackagesPath(self): ''' Return a list of workspace relative paths that should be mapped as edk2 PackagesPath ''' return () def GetActiveScopes(self): ''' return tuple containing scopes that should be active for this process ''' scopes = CommonPlatform.Scopes ActualToolChainTag = shell_environment.GetBuildVars().GetValue("TOOL_CHAIN_TAG", "") Arch = shell_environment.GetBuildVars().GetValue("TARGET_ARCH", "") if GetHostInfo().os.upper() == "LINUX" and ActualToolChainTag.upper().startswith("GCC"): if "AARCH64" == Arch: scopes += ("gcc_aarch64_linux",) elif "ARM" == Arch: scopes += ("gcc_arm_linux",) return scopes def GetName(self): ''' Get the name of the repo, platform, or product being build ''' ''' Used for naming the log file, among others ''' # check the startup nsh flag and if set then rename the log file. # this helps in CI so we don't overwrite the build log since running # uses the stuart_build command. if(shell_environment.GetBuildVars().GetValue("MAKE_STARTUP_NSH", "FALSE") == "TRUE"): return "ArmVirtPkg_With_Run" return "ArmVirtPkg" def GetLoggingLevel(self, loggerType): ''' Get the logging level for a given type base == lowest logging level supported con == Screen logging txt == plain text file logging md == markdown file logging ''' return logging.DEBUG def SetPlatformEnv(self): logging.debug("PlatformBuilder SetPlatformEnv") self.env.SetValue("PRODUCT_NAME", "ArmVirtQemu", "Platform Hardcoded") self.env.SetValue("MAKE_STARTUP_NSH", "FALSE", "Default to false") self.env.SetValue("QEMU_HEADLESS", "FALSE", "Default to false") return 0 def PlatformPreBuild(self): return 0 def PlatformPostBuild(self): return 0 def FlashRomImage(self): VirtualDrive = os.path.join(self.env.GetValue( "BUILD_OUTPUT_BASE"), "VirtualDrive") os.makedirs(VirtualDrive, exist_ok=True) OutputPath_FV = os.path.join( self.env.GetValue("BUILD_OUTPUT_BASE"), "FV") Built_FV = os.path.join(OutputPath_FV, "QEMU_EFI.fd") # pad fd to 64mb with open(Built_FV, "ab") as fvfile: fvfile.seek(0, os.SEEK_END) additional = b'\0' * ((64 * 1024 * 1024)-fvfile.tell()) fvfile.write(additional) # QEMU must be on that path # Unique Command and Args parameters per ARCH if (self.env.GetValue("TARGET_ARCH").upper() == "AARCH64"): cmd = "qemu-system-aarch64" args = "-M virt" args += " -cpu cortex-a57" # emulate cpu elif(self.env.GetValue("TARGET_ARCH").upper() == "ARM"): cmd = "qemu-system-arm" args = "-M virt,highmem=off" args += " -cpu cortex-a15" # emulate cpu else: raise NotImplementedError() # Common Args args += CommonPlatform.FvQemuArg + Built_FV # path to fw args += " -m 1024" # 1gb memory # turn off network args += " -net none" # Serial messages out args += " -serial stdio" # Mount disk with startup.nsh args += f" -drive file=fat:rw:{VirtualDrive},format=raw,media=disk" # Conditional Args if (self.env.GetValue("QEMU_HEADLESS").upper() == "TRUE"): args += " -display none" # no graphics else: args += " -device virtio-gpu-pci" # add recommended QEMU graphics device args += " -device qemu-xhci,id=usb" # add USB support for below devices args += " -device usb-tablet,id=input0,bus=usb.0,port=1" # add a usb mouse args += " 
-device usb-kbd,id=input1,bus=usb.0,port=2" # add a usb keyboard if (self.env.GetValue("MAKE_STARTUP_NSH").upper() == "TRUE"): f = open(os.path.join(VirtualDrive, "startup.nsh"), "w") f.write("BOOT SUCCESS !!! \n") # add commands here f.write("reset -s\n") f.close() ret = RunCmd(cmd, args) if ret == 0xc0000005: # for some reason getting a c0000005 on successful return return 0 return ret
edk2-master
ArmVirtPkg/PlatformCI/PlatformBuildLib.py
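GetRequiredSubmodules above splits each line of git config --file .gitmodules --get-regexp path on its first space to recover the submodule path. The same parsing step, replayed on the sample output quoted in the code comment:

sample = (
    "submodule.CryptoPkg/Library/OpensslLib/openssl.path CryptoPkg/Library/OpensslLib/openssl\n"
    "submodule.SoftFloat.path ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3\n"
)
paths = []
for line in sample.splitlines():
    _, _, path = line.partition(" ")  # config key before the space, path after it
    if path and path not in paths:
        paths.append(path)
print(paths)
# ['CryptoPkg/Library/OpensslLib/openssl',
#  'ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3']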
# @file # Script to Build OVMF UEFI firmware # # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) from PlatformBuildLib import SettingsManager from PlatformBuildLib import PlatformBuilder # ####################################################################################### # # Common Configuration # # ####################################################################################### # class CommonPlatform(): ''' Common settings for this platform. Define static data here and use for the different parts of stuart ''' PackagesSupported = ("ArmVirtPkg",) ArchSupported = ("AARCH64", "ARM") TargetsSupported = ("DEBUG", "RELEASE", "NOOPT") Scopes = ('armvirt', 'edk2-build') WorkspaceRoot = os.path.realpath(os.path.join( os.path.dirname(os.path.abspath(__file__)), "..", "..")) DscName = os.path.join("ArmVirtPkg", "ArmVirtQemu.dsc") # this platform produces a bootable NOR flash image FvQemuArg = " -pflash " import PlatformBuildLib PlatformBuildLib.CommonPlatform = CommonPlatform
edk2-master
ArmVirtPkg/PlatformCI/QemuBuild.py
# @file # Script to Build ArmVirtPkg UEFI firmware # # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: BSD-2-Clause-Patent ## import os import sys sys.path.append(os.path.dirname(os.path.abspath(__file__))) from PlatformBuildLib import SettingsManager from PlatformBuildLib import PlatformBuilder # ####################################################################################### # # Common Configuration # # ####################################################################################### # class CommonPlatform(): ''' Common settings for this platform. Define static data here and use for the different parts of stuart ''' PackagesSupported = ("ArmVirtPkg",) ArchSupported = ("AARCH64", "ARM") TargetsSupported = ("DEBUG", "RELEASE") Scopes = ('armvirt', 'edk2-build') WorkspaceRoot = os.path.realpath(os.path.join( os.path.dirname(os.path.abspath(__file__)), "..", "..")) DscName = os.path.join("ArmVirtPkg", "ArmVirtKvmTool.dsc") FvQemuArg = "" # ignored import PlatformBuildLib PlatformBuildLib.CommonPlatform = CommonPlatform
edk2-master
ArmVirtPkg/PlatformCI/KvmToolBuild.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Setup script to build the TAO Toolkit package.""" import os import setuptools from release.python.utils import utils version_locals = utils.get_version_details() PACKAGE_LIST = [ "nvidia_tao_deploy" ] # Getting dependencies. def get_requirements(): """Simple function to get packages.""" package_root = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(package_root, "docker/requirements.txt"), 'r') as req_file: requirements = [r.replace('\n', '')for r in req_file.readlines()] return requirements setuptools_packages = [] for package_name in PACKAGE_LIST: setuptools_packages.extend(utils.find_packages(package_name)) setuptools.setup( name=version_locals['__package_name__'], version=version_locals['__version__'], description=version_locals['__description__'], author='NVIDIA Corporation', classifiers=[ 'Environment :: Console', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Operating System :: POSIX', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], license=version_locals['__license__'], keywords=version_locals['__keywords__'], packages=setuptools_packages, package_data={ '': ['*.pyc', "*.yaml", "*.so", '*.pdf'] }, include_package_data=True, python_requires=">=3.6.*", install_requires=get_requirements(), zip_safe=False, entry_points={ 'console_scripts': [ 'classification_pyt=nvidia_tao_deploy.cv.classification_pyt.entrypoint.classification_pyt:main', 'classification_tf1=nvidia_tao_deploy.cv.classification_tf1.entrypoint.classification_tf1:main', 'classification_tf2=nvidia_tao_deploy.cv.classification_tf2.entrypoint.classification_tf2:main', 'deformable_detr=nvidia_tao_deploy.cv.deformable_detr.entrypoint.deformable_detr:main', 'detectnet_v2=nvidia_tao_deploy.cv.detectnet_v2.entrypoint.detectnet_v2:main', 'dino=nvidia_tao_deploy.cv.dino.entrypoint.dino:main', 'dssd=nvidia_tao_deploy.cv.ssd.entrypoint.ssd:main', 'efficientdet_tf1=nvidia_tao_deploy.cv.efficientdet_tf1.entrypoint.efficientdet_tf1:main', 'efficientdet_tf2=nvidia_tao_deploy.cv.efficientdet_tf2.entrypoint.efficientdet_tf2:main', 'faster_rcnn=nvidia_tao_deploy.cv.faster_rcnn.entrypoint.faster_rcnn:main', 'lprnet=nvidia_tao_deploy.cv.lprnet.entrypoint.lprnet:main', 'mask_rcnn=nvidia_tao_deploy.cv.mask_rcnn.entrypoint.mask_rcnn:main', 'ml_recog=nvidia_tao_deploy.cv.metric_learning_recognition.entrypoint.metric_learning_recognition:main', 'multitask_classification=nvidia_tao_deploy.cv.multitask_classification.entrypoint.multitask_classification:main', 'ocdnet=nvidia_tao_deploy.cv.ocdnet.entrypoint.ocdnet:main', 'ocrnet=nvidia_tao_deploy.cv.ocrnet.entrypoint.ocrnet:main', 'optical_inspection=nvidia_tao_deploy.cv.optical_inspection.entrypoint.optical_inspection:main', 'retinanet=nvidia_tao_deploy.cv.retinanet.entrypoint.retinanet:main', 
'ssd=nvidia_tao_deploy.cv.ssd.entrypoint.ssd:main', 'segformer=nvidia_tao_deploy.cv.segformer.entrypoint.segformer:main', 'unet=nvidia_tao_deploy.cv.unet.entrypoint.unet:main', 'yolo_v3=nvidia_tao_deploy.cv.yolo_v3.entrypoint.yolo_v3:main', 'yolo_v4=nvidia_tao_deploy.cv.yolo_v4.entrypoint.yolo_v4:main', 'yolo_v4_tiny=nvidia_tao_deploy.cv.yolo_v4.entrypoint.yolo_v4:main', ] } )
tao_deploy-main
setup.py
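Each console_scripts entry above is a standard setuptools "name=module:function" spec; on install, pip generates a small launcher that imports the module and calls the function. A rough sketch of what such a launcher does (the real wrapper is generated by setuptools, so this is illustrative only):

import sys
from importlib import import_module

def launch(spec, argv):
    # spec looks like 'nvidia_tao_deploy.cv.ocrnet.entrypoint.ocrnet:main'
    module_name, func_name = spec.split(":")
    func = getattr(import_module(module_name), func_name)
    sys.argv = [spec.split(":")[0]] + list(argv)
    sys.exit(func())

# launch('nvidia_tao_deploy.cv.ocrnet.entrypoint.ocrnet:main', sys.argv[1:])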
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TAO Deploy runner"""
tao_deploy-main
runner/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Instantiate the TAO-Deploy docker container for developers."""

import argparse
from distutils.version import LooseVersion
import json
import os
import subprocess
import sys

ROOT_DIR = os.getenv("NV_TAO_DEPLOY_TOP", os.path.dirname(os.path.dirname(os.getcwd())))
print(f"Current root directory {ROOT_DIR}")

with open(os.path.join(ROOT_DIR, "docker/manifest.json"), "r") as m_file:
    docker_config = json.load(m_file)

DOCKER_REGISTRY = docker_config["registry"]
DOCKER_REPOSITORY = docker_config["repository"]
DOCKER_TAG = docker_config["tag"]
DOCKER_COMMAND = "docker"
HOME_PATH = os.path.expanduser("~")
MOUNTS_PATH = os.path.join(HOME_PATH, ".tao_mounts.json")


def get_docker_mounts_from_file(mounts_file=MOUNTS_PATH):
    """Check for docker mounts in ~/.tao_mounts.json."""
    if not os.path.exists(mounts_file):
        return []
    with open(mounts_file, 'r') as mfile:
        data = json.load(mfile)
    assert "Mounts" in list(data.keys()), "Invalid json file. Requires Mounts key."
    return data["Mounts"]


def format_mounts(mount_points):
    """Format mount points to docker recognizable commands."""
    formatted_mounts = []
    # Traverse through mount points and format them for the docker command.
    for mount_point in mount_points:
        # Both keys must be present; the original assert only checked "source"
        # and used the "destination" test as the assertion message.
        assert "source" in mount_point and "destination" in mount_point, \
            "Mount point requires both source and destination keys."
        mount = "{}:{}".format(mount_point["source"], mount_point["destination"])
        formatted_mounts.append(mount)
    return formatted_mounts


def check_image_exists(docker_image):
    """Check if the image exists locally."""
    check_command = '{} images | grep "\\<{}\\>" | grep "{}" >/dev/null 2>&1'.format(DOCKER_COMMAND, docker_image, DOCKER_TAG)
    rc = subprocess.call(check_command, stdout=sys.stderr, shell=True)
    return rc == 0


def pull_base_container(docker_image):
    """Pull the default base container."""
    pull_command = "{} pull {}:{}".format(DOCKER_COMMAND, docker_image, DOCKER_TAG)
    rc = subprocess.call(pull_command, stdout=sys.stderr, shell=True)
    return rc == 0


def get_formatted_mounts(mount_file):
    """Simple function to get default mount points."""
    default_mounts = get_docker_mounts_from_file(mount_file)
    return format_mounts(default_mounts)


def check_mounts(formatted_mounts):
    """Check the formatted mount commands."""
    assert isinstance(formatted_mounts, list)
    for mounts in formatted_mounts:
        source_path = mounts.split(":")[0]
        if not os.path.exists(source_path):
            raise ValueError("Path doesn't exist: {}".format(source_path))
    return True


def get_docker_gpus_prefix(gpus):
    """Get the docker command gpu's prefix."""
    docker_version = (
        subprocess.check_output(
            ["docker", "version", "--format={{ .Server.APIVersion }}"]
        )
        .strip()
        .decode()
    )
    if LooseVersion(docker_version) >= LooseVersion("1.40"):
        # You are using the latest version of docker using
        # --gpus instead of the nvidia runtime.
gpu_string = "--gpus " if gpus == "all": gpu_string += "all" else: gpu_string += "\'\"device={}\"\'".format(gpus) else: # Stick to the older version of getting the gpu's using runtime=nvidia gpu_string = "--runtime=nvidia -e NVIDIA_DRIVER_CAPABILITIES=all " if gpus != "none": gpu_string += "-e NVIDIA_VISIBLE_DEVICES={}".format(gpus) return gpu_string def create_base_docker(): """Function to create the base docker.""" create_command = "bash {}/docker/build.sh --build".format(ROOT_DIR) try: subprocess.run(create_command, stdout=sys.stderr, shell=True, check=True) except subprocess.CalledProcessError as e: raise RuntimeError(f"Container build failed with error {e}") def instantiate_dev_docker(gpus, mount_file, mount_cli_list, env_var_list, command, ulimit=None, shm_size="16G", run_as_user=False): """Instiate the docker container.""" docker_image = "{}/{}:{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY, DOCKER_TAG) # Invoking the nvidia docker. gpu_string = get_docker_gpus_prefix(gpus) # Prefix for the run command. run_command = "{} run -it --rm".format(DOCKER_COMMAND) # get default mount points. formatted_mounts = get_formatted_mounts(MOUNTS_PATH) # get mounts from cli mount file. formatted_mounts += get_formatted_mounts(mount_file) if mount_cli_list is not None: formatted_mounts.extend(mount_cli_list) assert check_mounts(formatted_mounts), "Mounts don't exists, Please make sure the paths all exist." mount_string = "-v {}:/workspace/tao-deploy ".format(os.getenv("NV_TAO_DEPLOY_TOP", os.getcwd())) # Defining env variables. env_variables = "-e PYTHONPATH={}:$PYTHONPATH ".format("/workspace/tao-deploy") for env in env_var_list: if "=" not in env: print("invalid env variable definition. skipping this {}".format(env)) continue env_variables += "-e {} ".format(env) for path in formatted_mounts: mount_string += "-v {} ".format(path) # Setting shared memory. shm_option = "--shm-size {}".format(shm_size) # Setting ulimits for host ulimit_options = "" if ulimit is not None: for param in ulimit: ulimit_options += "--ulimit {} ".format(param) user_option = "" if run_as_user: user_option = "--user {}:{}".format(os.getuid(), os.getgid()) working_directory = "/workspace/tao-deploy" working_dir_option = f"-w {working_directory}" final_command = "{} {} {} {} {} {} {} {} {} {}".format( run_command, gpu_string, mount_string, env_variables, shm_option, ulimit_options, user_option, working_dir_option, docker_image, " ".join(command) ) print(final_command) return subprocess.check_call(final_command, stdout=sys.stderr, shell=True) def parse_cli_args(args=None): """Parse run container command line.""" parser = argparse.ArgumentParser( prog="tao_deploy", description="Tool to run the TAO Toolkit TensorFlow2 container.", add_help=True) parser.add_argument( "--gpus", default="all", type=str, help="Comma separated GPU indices to be exposed to the docker." ) parser.add_argument( "--volume", action="append", type=str, default=[], help="Volumes to bind." ) parser.add_argument( "--env", action="append", type=str, default=[], help="Environment variables to bind." ) parser.add_argument( "--mounts_file", help="Path to the mounts file.", default="", type=str ) parser.add_argument( "--shm_size", help="Shared memory size for docker", default="16G", type=str ) parser.add_argument( "--run_as_user", help="Flag to run as user", action="store_true", default=False ) parser.add_argument( "--ulimit", action='append', help="Docker ulimits for the host machine." 
) args = vars(parser.parse_args(args)) return args def main(cl_args=None): """Start docker container.""" if "--" in cl_args: index = cl_args.index("--") # Split args to the tlt docker wrapper and the command to be run inside the docker. runner_args = cl_args[:index] command_args = cl_args[index + 1:] else: runner_args = cl_args command_args = "" # parse command line args. args = parse_cli_args(runner_args) docker_image = "{}/{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY) if not check_image_exists(docker_image): if not pull_base_container(docker_image): print("The base container doesn't exist locally and the pull failed. Hence creating the base container") create_base_docker() try: instantiate_dev_docker( args["gpus"], args["mounts_file"], args["volume"], args["env"], command_args, args["ulimit"], args["shm_size"], args["run_as_user"] ) except subprocess.CalledProcessError: # Do nothing - the errors are printed in entrypoint launch. pass if __name__ == "__main__": main(sys.argv[1:])
tao_deploy-main
runner/tao_deploy.py
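The mount plumbing above expects ~/.tao_mounts.json to hold a top-level Mounts list of source/destination pairs, which format_mounts turns into docker -v arguments. An illustrative file and the flags it produces (the paths are made up for the example):

import json

sample = json.loads("""
{
  "Mounts": [
    {"source": "/home/user/datasets", "destination": "/workspace/datasets"},
    {"source": "/home/user/results", "destination": "/workspace/results"}
  ]
}
""")
flags = ["-v {}:{}".format(m["source"], m["destination"]) for m in sample["Mounts"]]
print(" ".join(flags))
# -v /home/user/datasets:/workspace/datasets -v /home/user/results:/workspace/results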
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Internal script to encrypt an onnx file to .etlt.""" import argparse import os from nvidia_tao_deploy.utils.decoding import encode_etlt def main(args=None): """Encrypt an onnx file.""" args = parse_command_line_arguments(args) if not os.path.exists(args.model_path): raise FileNotFoundError(f"{args.model_path} does not exist") dir_name = os.path.dirname(args.output_path) os.makedirs(dir_name, exist_ok=True) encode_etlt(args.model_path, args.output_path, "", args.key) print(f"Model encrypted at {args.output_path}") def build_command_line_parser(parser=None): """Build the command line parser using argparse. Args: parser (subparser): Provided from the wrapper script to build a chained parser mechanism. Returns: parser """ if parser is None: parser = argparse.ArgumentParser(prog='encrypt_onnx', description='Encrypt an onnx file.') parser.add_argument( '-m', '--model_path', type=str, required=True, help='Path to an onnx model file.' ) parser.add_argument( '-k', '--key', type=str, required=True, help='Key to save a .etlt model.' ) parser.add_argument( '-o', '--output_path', type=str, required=True, help="Output .etlt file path." ) return parser def parse_command_line_arguments(args=None): """Simple function to parse command line arguments.""" parser = build_command_line_parser(args) return parser.parse_args(args) if __name__ == '__main__': main()
tao_deploy-main
internal/encrypt_onnx.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Internal script to decrypt an .etlt file to onnx.""" import argparse import os import shutil from nvidia_tao_deploy.utils.decoding import decode_etlt def main(args=None): """decrypt an etlt file.""" args = parse_command_line_arguments(args) if not os.path.exists(args.model_path): raise FileNotFoundError(f"{args.model_path} does not exist") dir_name = os.path.dirname(args.output_path) os.makedirs(dir_name, exist_ok=True) tmp_decrypted_model, backend = decode_etlt(args.model_path, args.key) print(f"Model is decrypted to it's original '{backend}' backend") shutil.copy(tmp_decrypted_model, args.output_path) print(f"Model decrypted at {args.output_path}") def build_command_line_parser(parser=None): """Build the command line parser using argparse. Args: parser (subparser): Provided from the wrapper script to build a chained parser mechanism. Returns: parser """ if parser is None: parser = argparse.ArgumentParser(prog='decrypt_onnx', description='Decrypt an etlt file.') parser.add_argument( '-m', '--model_path', type=str, required=True, help='Path to an etlt model file.' ) parser.add_argument( '-k', '--key', type=str, required=True, help='Key to save a .etlt model.' ) parser.add_argument( '-o', '--output_path', type=str, required=True, help="Output onnx file path." ) return parser def parse_command_line_arguments(args=None): """Simple function to parse command line arguments.""" parser = build_command_line_parser(args) return parser.parse_args(args) if __name__ == '__main__': main()
tao_deploy-main
internal/decrypt_onnx.py
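Since encrypt_onnx.py and decrypt_onnx.py are both plain argparse CLIs whose main() accepts an argument list, they can be driven in-process as well as from the shell. A usage sketch, assuming the repository root is on sys.path; the file paths and key are placeholders:

def roundtrip(onnx_in, etlt_path, onnx_out, key):
    """Encrypt then decrypt with the same key; a key mismatch fails on decode."""
    from internal.encrypt_onnx import main as encrypt_main
    from internal.decrypt_onnx import main as decrypt_main
    encrypt_main(["-m", onnx_in, "-k", key, "-o", etlt_path])
    decrypt_main(["-m", etlt_path, "-k", key, "-o", onnx_out])

# roundtrip("model.onnx", "out/model.etlt", "out/model.onnx", "<key>")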
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module containing implementation of release packaging."""
tao_deploy-main
release/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Version string for the TAO Toolkit Deploy AI models/tasks.""" MAJOR = "5" MINOR = "0.0" PATCH = "01" PRE_RELEASE = '' # Getting the build number. def get_build_info(): """Get the build version number.""" # required since setup.py runs a version string and global imports aren't executed. import os # noqa pylint: disable=import-outside-toplevel build_file = "build.info" if not os.path.exists(build_file): raise FileNotFoundError("Build file doesn't exist.") patch = 0 with open(build_file, 'r') as bfile: patch = bfile.read().strip() assert bfile.closed, "Build file wasn't closed properly." return patch try: PATCH = get_build_info() except FileNotFoundError: pass # Use the following formatting: (major, minor, patch, pre-release) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) # Version of the library. __version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:]) # Version of the file format. __format_version__ = 2 # Other package info. __package_name__ = "nvidia-tao-deploy" __description__ = "NVIDIA's package for deploying models from TAO Toolkit." __keywords__ = "nvidia, tao, tensorrt" __contact_names__ = "Sean Cha" __contact_emails__ = "[email protected]" __license__ = "NVIDIA Proprietary Software"
tao_deploy-main
release/python/version.py
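Tracing the string assembly above with the default components shows how MINOR's embedded dot yields a four-field version, and how build.info (when present) swaps in the patch number:

MAJOR, MINOR, PATCH, PRE_RELEASE = "5", "0.0", "01", ""
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
version = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
print(version)  # 5.0.0.01  (MINOR itself contains a dot)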
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Packaging modules for TAO Toolkit."""
tao_deploy-main
release/python/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module containing utility functions required for packaging TAO Toolkit modules."""
tao_deploy-main
release/python/utils/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper utils for packaging.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import glob import os import setuptools # Rename all .py files to .py_tmp temporarily. ignore_list = ['__init__.py', '__version__.py'] LOCAL_DIR = os.path.dirname(os.path.abspath(__file__)) def up_directory(dir_path, n=1): """Go up n directories from dir_path.""" dir_up = dir_path for _ in range(n): dir_up = os.path.split(dir_up)[0] return dir_up TOP_LEVEL_DIR = up_directory(LOCAL_DIR, 3) def remove_prefix(dir_path): """Remove a certain prefix from path.""" max_path = 8 prefix = dir_path while max_path > 0: prefix = os.path.split(prefix)[0] if prefix.endswith('ai_infra'): return dir_path[len(prefix) + 1:] max_path -= 1 return dir_path def get_subdirs(path): """Get all subdirs of given path.""" dirs = os.walk(path) return [remove_prefix(x[0]) for x in dirs] def rename_py_files(path, ext, new_ext, ignore_files): """Rename all .ext files in a path to .new_ext except __init__ files.""" files = glob.glob(path + '/*' + ext) for ignore_file in ignore_files: files = [f for f in files if ignore_file not in f] for filename in files: os.rename(filename, filename.replace(ext, new_ext)) def get_version_details(): """Simple function to get packages for setup.py.""" # Define env paths. LAUNCHER_SDK_PATH = os.path.join(TOP_LEVEL_DIR, "release/python") # Get current __version__. version_locals = {} with open(os.path.join(LAUNCHER_SDK_PATH, 'version.py')) as version_file: exec(version_file.read(), {}, version_locals) return version_locals def cleanup(): """Cleanup directories after the build process.""" req_subdirs = get_subdirs(TOP_LEVEL_DIR) # Cleanup. Rename all .py_tmp files back to .py and delete pyc files for dir_path in req_subdirs: dir_path = os.path.join(TOP_LEVEL_DIR, dir_path) # TODO: @vpraveen Think about removing python files before the final # release. rename_py_files(dir_path, '.py_tmp', '.py', ignore_list) pyc_list = glob.glob(dir_path + '/*.pyc') for pyc_file in pyc_list: os.remove(pyc_file) def find_packages(package_name): """List of packages. Args: package_name (str): Name of the package. Returns: packages (list): List of packages. """ packages = setuptools.find_packages(package_name) packages = [f"{package_name}.{f}" for f in packages] packages.append(package_name) return packages
tao_deploy-main
release/python/utils/utils.py
#!/usr/bin/python

import sys
import re

from submodules.rules import rules


def main():
    with open(sys.argv[1], "r") as fp:
        lines = fp.readlines()

        for idx, line in enumerate(lines):

            if line.strip() == "# ------------------------ >8 ------------------------":
                break

            if line[0] == "#":
                continue

            if not line_valid(idx, line):
                print(f"line# {idx} failed")
                show_rules()
                sys.exit(1)

    sys.exit(0)


def line_valid(idx, line):
    if idx == 0:
        # return re.match("^[A-Z].{,48}[0-9A-z \t]$", line)
        # Note: the trailing class is [0-9A-Za-z \t]; the original [0-9A-z]
        # accidentally admitted the ASCII punctuation between 'Z' and 'a'.
        return re.match(r"^\[((?!\s*$).{0,15})\][ \t].*?[A-Z].{0,48}[0-9A-Za-z \t]$", line)
    else:
        return len(line.strip()) <= 72


def show_rules():
    print(rules)


if __name__ == "__main__":
    main()
tao_deploy-main
scripts/git-hooks/commit-msg.py
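The first-line rule above requires a bracketed module tag of up to 15 characters, a space or tab, and a capitalized subject of at most 50 characters that does not end in a period. A few probes against the corrected pattern:

import re

PATTERN = r"^\[((?!\s*$).{0,15})\][ \t].*?[A-Z].{0,48}[0-9A-Za-z \t]$"

good = "[metrics] Add IoU unit tests"
bad_tag = "metrics: Add IoU unit tests"     # missing the [module] tag
bad_end = "[metrics] Add IoU unit tests."   # ends with a period

for line in (good, bad_tag, bad_end):
    print(bool(re.match(PATTERN, line)), repr(line))
# True  '[metrics] Add IoU unit tests'
# False 'metrics: Add IoU unit tests'
# False '[metrics] Add IoU unit tests.'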
tao_deploy-main
scripts/git-hooks/submodules/__init__.py
rules = """ # Failing to add message in the mentioned format will # cause your local commit fail. # # Please follow these rules for commit messages: # ============================================== # 1. Commit message format - first line is mandatory # [YOUR_MODULE_NAME] Subject line here not exceeding 50 characters # * Optional line entry with detail not exceeding 72 characters # * Optional line entry with detail not exceeding 72 characters # * Optional line entry with detail not exceeding 72 characters # 2. Limit the module name (YOUR_MODULE_NAME) to 15 characters length # 3. Limit the subject(Text part after [YOUR_MODULE_NAME]) line to max # 50 characters # 4. Start subject (Text part after [YOUR_MODULE_NAME]) with a Capital # letter and don't end with a period '.' # 5. Wrap the body lines (if any) at 72 characters """
tao_deploy-main
scripts/git-hooks/submodules/rules.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TAO Deploy Metrics"""
tao_deploy-main
nvidia_tao_deploy/metrics/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Semantic Segmentation mIoU calculation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import logging from tqdm.auto import tqdm import math import numpy as np logging.getLogger('matplotlib').setLevel(logging.WARNING) logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s', level="INFO") logger = logging.getLogger(__name__) def getScoreAverage(scoreList): """Compute the average score of all classes.""" validScores = 0 scoreSum = 0.0 for score in scoreList: if not math.isnan(score): validScores += 1 scoreSum += score if validScores == 0: return float('nan') avg_score = scoreSum / validScores return avg_score class SemSegMetric(object): """Semantic segmentation evaluation metric class.""" def __init__(self, train_id_name_mapping, label_id_train_id_mapping, num_classes): """Constructs SemSeg evaluation class. Args: train_id_name_mapping (dict): dict of list with key being the label id and value a list of class names. num_classes (int): number of classes to evaluate """ self.train_id_name_mapping = train_id_name_mapping self.label_id_train_id_mapping = label_id_train_id_mapping self.num_classes = num_classes assert self.num_classes == len(self.train_id_name_mapping), "Invalid size for class mapping!" def get_evaluation_metrics(self, ground_truths, predictions): """Generates semantic segmentation metrics. Args: ground_truths(list): List of ground truths numpy arrays. predictions(list): List of prediction numpy arrays. """ metrices = self.compute_metrics_masks(ground_truths, predictions) recall_str = "Recall : " + str(metrices["rec"]) precision_str = "Precision: " + str(metrices["prec"]) f1_score_str = "F1 score: " + str(metrices["fmes"]) mean_iou_str = "Mean IOU: " + str(metrices["mean_iou_index"]) results_str = [recall_str, precision_str, f1_score_str, mean_iou_str] metrices_str_categorical = {} metrices_str = collections.defaultdict(dict) for k, v in metrices["results_dic"].items(): class_name = str(k) for metric_type, val in v.items(): metrices_str[str(metric_type)][class_name] = str(val) metrices_str_categorical["categorical"] = metrices_str for result in results_str: # This will print the results to the stdout print(f"{result}\n") return metrices def compute_metrics_masks(self, ground_truths, predictions): """Compute metrics for semantic segmentation. Args: ground_truths(list): List of ground truths numpy arrays. predictions(list): List of prediction numpy arrays. 
""" conf_mat = np.zeros([self.num_classes, self.num_classes], dtype=np.float32) for pred, gt in tqdm(zip(predictions, ground_truths), desc="Calculating confusion matrix"): pred = pred.flatten() gt = gt.flatten() gt = np.vectorize(self.label_id_train_id_mapping.get)(gt) result = np.zeros((self.num_classes, self.num_classes)) for i in range(len(gt)): result[gt[i]][pred[i]] += 1 conf_mat += np.matrix(result) metrices = {} perclass_tp = np.diagonal(conf_mat).astype(np.float32) perclass_fp = conf_mat.sum(axis=0) - perclass_tp perclass_fn = conf_mat.sum(axis=1) - perclass_tp iou_per_class = perclass_tp / (perclass_fp + perclass_tp + perclass_fn) precision_per_class = perclass_tp / (perclass_fp + perclass_tp) recall_per_class = perclass_tp / (perclass_tp + perclass_fn) f1_per_class = [] final_results_dic = {} for num_class in range(self.num_classes): name_class = "/".join(self.train_id_name_mapping[num_class]) per_class_metric = {} prec = precision_per_class[num_class] rec = recall_per_class[num_class] iou = iou_per_class[num_class] f1 = (2 * prec * rec) / float((prec + rec)) f1_per_class.append(f1) per_class_metric["precision"] = prec per_class_metric["Recall"] = rec per_class_metric["F1 Score"] = f1 per_class_metric["iou"] = iou final_results_dic[name_class] = per_class_metric mean_iou_index = getScoreAverage(iou_per_class) mean_rec = getScoreAverage(recall_per_class) mean_precision = getScoreAverage(precision_per_class) mean_f1_score = getScoreAverage(f1_per_class) metrices["rec"] = mean_rec metrices["prec"] = mean_precision metrices["fmes"] = mean_f1_score metrices["mean_iou_index"] = mean_iou_index metrices["results_dic"] = final_results_dic return metrices
tao_deploy-main
nvidia_tao_deploy/metrics/semantic_segmentation_metric.py
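compute_metrics_masks above reduces everything to a confusion matrix whose diagonal holds per-class true positives; column sums minus the diagonal give false positives, and row sums minus the diagonal give false negatives. The same formulas on a hand-checkable two-class matrix:

import numpy as np

# rows = ground-truth class, cols = predicted class
conf_mat = np.array([[6., 2.],
                     [1., 3.]])
tp = np.diagonal(conf_mat)
fp = conf_mat.sum(axis=0) - tp
fn = conf_mat.sum(axis=1) - tp
iou = tp / (tp + fp + fn)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
print(iou)        # [0.66666667 0.5       ]  e.g. class 0: 6 / (6 + 1 + 2)
print(precision)  # [0.85714286 0.6       ]
print(recall)     # [0.75       0.75      ]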
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """mAP calculation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from functools import partial import logging import os import numpy as np # Suppress logging from matplotlib from matplotlib import pyplot as plt # noqa: E402 logging.getLogger('matplotlib').setLevel(logging.WARNING) logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s', level="INFO") logger = logging.getLogger(__name__) def batch_iou(box, box_list): """element-wise IOU to perform on a batch (box_list). Args: box: np array of shape (4,): the target box box_list: np array of shape (N, 4): a batch of boxes to match the box. Returns: np array of shape (N,). The IOU between target box and each single box in box_list """ if box.ndim == 1: box = np.expand_dims(box, axis=0) if box_list.ndim == 1: box_list = np.expand_dims(box_list, axis=0) # Compute the IoU. min_xy = np.maximum(box[:, :2], box_list[:, :2]) max_xy = np.minimum(box[:, 2:], box_list[:, 2:]) interx = np.maximum(0, max_xy - min_xy) interx = interx[:, 0] * interx[:, 1] box_area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]) box_list_areas = (box_list[:, 2] - box_list[:, 0]) * (box_list[:, 3] - box_list[:, 1]) union_areas = box_area + box_list_areas - interx return interx / union_areas def _per_img_match(x, n_classes, sorting_algorithm, matching_iou_threshold): """Helper function for multithreading matching. Do not call this function from outside. It's outside the class definition purely due to python pickle issue. Arguments: x (tuple): (gt_box, pred_box) n_classes (int): number of classes sorting_algorithm (str): Which sorting algorithm the matching algorithm should use. This argument accepts any valid sorting algorithm for Numpy's `argsort()` function. You will usually want to choose between 'quicksort' (fastest and most memory efficient, but not stable) and 'mergesort' (slight slower and less memory efficient, but stable). The official Matlab evaluation algorithm uses a stable sorting algorithm, so this algorithm is only guaranteed to behave identically if you choose 'mergesort' as the sorting algorithm, but it will almost always behave identically even if you choose 'quicksort' (but no guarantees). matching_iou_threshold (float): A prediction will be considered a true positive if it has a Jaccard overlap of at least `matching_iou_threshold` with any ground truth bounding box of the same class. 
""" gt = x[0] pred = x[1] T = [[] for _ in range(n_classes)] P = [[] for _ in range(n_classes)] gt_cls = [gt[gt[:, 0].astype(np.int) == i, 1:] for i in range(n_classes)] gt_cls_valid = [np.ones((len(i), )) for i in gt_cls] gt_hard_count = [i[:, 0].sum() for i in gt_cls] desc_inds = np.argsort(-pred[:, 1], kind=sorting_algorithm) pred = pred[desc_inds] for pred_box in pred: pred_cls = int(pred_box[0]) # if no GT in this class, simply recognize as FP if len(gt_cls[pred_cls]) == 0: T[pred_cls].append(0) P[pred_cls].append(pred_box[1]) continue overlaps = batch_iou(box_list=gt_cls[pred_cls][:, -4:], box=pred_box[-4:]) overlaps_unmatched = overlaps * gt_cls_valid[pred_cls] if np.max(overlaps_unmatched) >= matching_iou_threshold: # invalidate the matched gt matched_gt_idx = np.argmax(overlaps_unmatched) gt_cls_valid[pred_cls][matched_gt_idx] = 0.0 if gt_cls[pred_cls][matched_gt_idx, 0] < 0.5: # this is not a hard box. We should append GT T[pred_cls].append(1) P[pred_cls].append(pred_box[1]) else: logger.warning("Got label marked as difficult(occlusion > 0), " "please set occlusion field in KITTI label to 0, " "if you want to include it in mAP calculation " "during validation/evaluation.") # this hard box is already processed. Deduct from gt_hard_cnt gt_hard_count[pred_cls] = gt_hard_count[pred_cls] - 1 else: T[pred_cls].append(0) P[pred_cls].append(pred_box[1]) for idx, cls_valid in enumerate(gt_cls_valid): non_match_count = int(round(cls_valid.sum() - gt_hard_count[idx])) T[idx].extend([1] * non_match_count) P[idx].extend([0.0] * non_match_count) return (T, P) class KITTIMetric: """Computes the mean average precision of the given lists of pred and GT.""" def __init__(self, n_classes, conf_thres=0.01, matching_iou_threshold=0.5, average_precision_mode='sample', num_recall_points=11): """Initializes Keras / TensorRT objects needed for model inference. Args: n_classes (integer): Number of classes conf_thres (float): confidence threshold to consider a bbox. matching_iou_threshold (float, optional): A prediction will be considered a true positive if it has a Jaccard overlap of at least `matching_iou_threshold` with any ground truth bounding box of the same class. average_precision_mode (str, optional): Can be either 'sample' or 'integrate'. In the case of 'sample', the average precision will be computed according to the Pascal VOC formula that was used up until VOC 2009, where the precision will be sampled for `num_recall_points` recall values. In the case of 'integrate', the average precision will be computed according to the Pascal VOC formula that was used from VOC 2010 onward, where the average precision will be computed by numerically integrating over the whole preciscion-recall curve instead of sampling individual points from it. 'integrate' mode is basically just the limit case of 'sample' mode as the number of sample points increases. num_recall_points (int, optional): The number of points to sample from the precision-recall-curve to compute the average precisions. In other words, this is the number of equidistant recall values for which the resulting precision will be computed. 11 points is the value used in the official Pascal VOC 2007 detection evaluation algorithm. 
""" self.n_classes = n_classes self.conf_thres = conf_thres self.matching_iou_threshold = matching_iou_threshold self.average_precision_mode = average_precision_mode self.num_recall_points = num_recall_points self.gt_labels = None self.pred_labels = None self.T = None self.P = None self.ap = None def __call__(self, gt, pred, verbose=True, class_names=None, vis_path=None): """Compute AP of each classes and mAP. Arguments: gt (list of numpy arrays): A list of length n_eval_images. Each element is a numpy array of shape (n_bbox, 6). n_bbox is the number of boxes inside the image and 6 elements for the bbox is [class_id, is_difficult, xmin, ymin, xmax, ymax]. Note: is_difficult is 0 if the bbox is not difficult. 1 otherwise. Always set is_difficult to 0 if you don't have this field in your GT label. pred (list of numpy arrays): A list of length n_eval_images. Each element is a numpy array of shape (n_bbox, 6). n_bbox is the number of boxes inside the image and 6 elements for the bbox is [class_id, confidence, xmin, ymin, xmax, ymax] verbose (bool, optional): If `True`, will print out the progress during runtime. class_name(list): Name of object classes for vis. vis_path(string): Path to save vis image. Note: the class itself supports both normalized / un-normalized coords. As long as the coords is_normalized for gt and pred identical, the class gives correct results. Returns: A float, the mean average precision. A list of length n_classes. AP for each class """ self.gt_labels = gt self.pred_labels = pred self.matching(sorting_algorithm='quicksort', matching_iou_threshold=self.matching_iou_threshold, verbose=verbose) if verbose: print('Start to calculate AP for each class') # Calc AP and plot PR curves self._calc_ap(sorting_algorithm='quicksort', average_precision_mode=self.average_precision_mode, num_recall_points=self.num_recall_points, class_names=class_names, vis_path=vis_path) # Save plots to image if vis_path is not None: plt.legend() plt.title("Precision-Recall curve") plt.xlabel("Recall") plt.ylabel("Precision") plt.grid() save_path = os.path.join(vis_path, "PR_curve.png") plt.savefig(save_path) print(f"PR-curve image saved to {save_path}") plt.clf() # release memory self.gt_labels = None self.pred_labels = None return np.mean(self.ap), self.ap def matching(self, sorting_algorithm, matching_iou_threshold, verbose): """Generate T, P list for AP calculation. Arguments: T: 0 - negative match, 1 - positive match P: confidence of this prediction """ if (self.gt_labels is None) or (self.pred_labels is None): raise ValueError("Matching cannot be called before the completion of prediction!") if len(self.gt_labels) != len(self.pred_labels): raise ValueError("Image count mismatch between ground truth and prediction!") T = [[] for _ in range(self.n_classes)] P = [[] for _ in range(self.n_classes)] per_img_match = partial(_per_img_match, n_classes=self.n_classes, sorting_algorithm=sorting_algorithm, matching_iou_threshold=matching_iou_threshold) results = [] for x in zip(self.gt_labels, self.pred_labels): results.append(per_img_match(x)) for t, p in results: for i in range(self.n_classes): T[i] += t[i] P[i] += p[i] self.T = T self.P = P def __voc_ap( self, rec, prec, average_precision_mode, num_recall_points, class_name=None, vis_path=None ): if average_precision_mode == 'sample': ap = 0. 
for t in np.linspace(0., 1.0, num_recall_points): if np.sum(rec >= t) == 0: p = 0 else: p = np.max(prec[rec >= t]) ap = ap + p / float(num_recall_points) if class_name and vis_path: rec_arr = np.array(rec) prec_arr = np.array(prec) plt.plot(rec_arr, prec_arr, label=class_name) elif average_precision_mode == 'integrate': # correct AP calculation # first append sentinel values at the end mrec = np.concatenate(([0.], rec, [1.])) mpre = np.concatenate(([0.], prec, [0.])) # compute the precision envelope for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) # to calculate area under PR curve, look for points # where X axis (recall) changes value i = np.where(mrec[1:] != mrec[:-1])[0] # and sum (\Delta recall) * prec ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) if class_name and vis_path: if class_name != "bg": plt.plot(mrec, mpre, label=class_name) else: raise ValueError("average_precision_mode should be either sample or integrate") return ap def _calc_ap( self, sorting_algorithm, average_precision_mode, num_recall_points, class_names=None, vis_path=None ): """compute the AP for classes.""" if (self.T is None) or (self.P is None): raise ValueError("Matching must be done first!") self.ap = [] class_idx = 0 for T, P in zip(self.T, self.P): if class_names is not None: class_name = class_names[class_idx] else: class_name = None prec = [] rec = [] TP = 0. FP = 0. FN = 0. # sort according to prob. Ta = np.array(T) Pa = np.array(P) s_idx = np.argsort(-Pa, kind=sorting_algorithm) P = Pa[s_idx].tolist() T = Ta[s_idx].tolist() npos = np.sum(Ta) for t, p in zip(T, P): if t == 1 and p >= self.conf_thres: TP += 1 elif t == 1 and p < self.conf_thres: FN += 1 elif t == 0 and p >= self.conf_thres: FP += 1 if TP + FP == 0.: precision = 0. else: precision = float(TP) / (TP + FP) if npos > 0: recall = float(TP) / float(npos) else: recall = 0.0 prec.append(precision) rec.append(recall) ap = self.__voc_ap( np.array(rec), np.array(prec), average_precision_mode, num_recall_points, class_name, vis_path ) self.ap.append(ap) class_idx += 1
tao_deploy-main
nvidia_tao_deploy/metrics/kitti_metric.py
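batch_iou above intersects one [xmin, ymin, xmax, ymax] box with a batch of boxes and divides by the union area. A two-box worked check of the formula:

import numpy as np

box = np.array([0., 0., 10., 10.])          # area 100
batch = np.array([[5., 5., 15., 15.],       # overlap region is 5x5 = 25
                  [20., 20., 30., 30.]])    # disjoint from box

min_xy = np.maximum(box[:2], batch[:, :2])
max_xy = np.minimum(box[2:], batch[:, 2:])
inter = np.prod(np.maximum(0, max_xy - min_xy), axis=1)
union = 100 + 100 - inter                   # both batch boxes also have area 100
print(inter / union)                        # [0.14285714 0.        ]  (25/175, 0/200)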
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""COCO-style evaluation metrics.

Implements the interface of COCO API and metric_fn in tf.TPUEstimator.

COCO API: github.com/cocodataset/cocoapi/
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import copy
import cv2
import logging
import numpy as np

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import pycocotools.mask as maskUtils


class MaskCOCO(COCO):
    """COCO object for mask evaluation."""

    def reset(self, dataset):
        """Reset the dataset and groundtruth data index in this object.

        Args:
            dataset: dict of groundtruth data. It should have a similar structure
                to the COCO groundtruth JSON file and must contain three keys:
                {'images', 'annotations', 'categories'}.
                'images': list of image information dictionary. Required keys:
                    'id', 'width' and 'height'.
                'annotations': list of dict. Bounding boxes and segmentations
                    related information. Required keys: {'id', 'image_id',
                    'category_id', 'bbox', 'iscrowd', 'area', 'segmentation'}.
                'categories': list of dict of the category information.
                    Required key: 'id'.
                Refer to http://cocodataset.org/#format-data for more details.

        Raises:
            AttributeError: If the dataset is empty or not a dict.
        """
        assert dataset, 'Groundtruth should not be empty.'
        assert isinstance(dataset, dict), \
            f'annotation file format {type(dataset)} not supported'
        self.anns, self.cats, self.imgs = {}, {}, {}
        self.dataset = copy.deepcopy(dataset)
        self.createIndex()

    def loadRes(self, detection_results, include_mask, is_image_mask=False):
        """Load result file and return a result api object.

        Args:
            detection_results: a dictionary containing predictions results.
            include_mask: a boolean, whether to include mask in detection results.
            is_image_mask: a boolean, whether the predicted mask is a whole-image mask.
Returns: res: result MaskCOCO api object """ res = MaskCOCO() res.dataset['images'] = list(self.dataset['images']) logging.info('Loading and preparing results...') predictions = self.load_predictions(detection_results, include_mask=include_mask, is_image_mask=is_image_mask) assert isinstance(predictions, list), 'results in not an array of objects' if predictions: image_ids = [pred['image_id'] for pred in predictions] assert set(image_ids) == (set(image_ids) & set(self.getImgIds())), \ 'Results do not correspond to current coco set' if (predictions and 'bbox' in predictions[0] and predictions[0]['bbox']): res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for idx, pred in enumerate(predictions): bb = pred['bbox'] x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]] if 'segmentation' not in pred: pred['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]] pred['area'] = bb[2] * bb[3] pred['id'] = idx + 1 pred['iscrowd'] = 0 elif 'segmentation' in predictions[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for idx, pred in enumerate(predictions): # now only support compressed RLE format as segmentation results pred['area'] = maskUtils.area(pred['segmentation']) if 'bbox' not in pred: pred['bbox'] = maskUtils.toBbox(pred['segmentation']) pred['id'] = idx + 1 pred['iscrowd'] = 0 res.dataset['annotations'] = predictions res.createIndex() return res def load_predictions(self, detection_results, include_mask, is_image_mask=False): """Create prediction dictionary list from detection and mask results. Args: detection_results: a dictionary containing numpy arrays which corresponds to prediction results. include_mask: a boolean, whether to include mask in detection results. is_image_mask: a boolean, where the predict mask is a whole image mask. Returns: a list of dictionary including different prediction results from the model in numpy form. """ predictions = [] num_detections = detection_results['detection_scores'].size current_index = 0 for i, image_id in enumerate(detection_results['source_id']): if include_mask: box_coorindates_in_image = detection_results['detection_boxes'][i] segments = generate_segmentation_from_masks( detection_results['detection_masks'][i], box_coorindates_in_image, int(detection_results['image_info'][i][3]), int(detection_results['image_info'][i][4]), is_image_mask=is_image_mask) # Convert the mask to uint8 and then to fortranarray for RLE encoder. encoded_masks = [ maskUtils.encode(np.asfortranarray(instance_mask.astype(np.uint8))) for instance_mask in segments] for box_index in range(int(detection_results['num_detections'][i])): if current_index % 1000 == 0: logging.info('%s/%s', current_index, num_detections) current_index += 1 prediction = {'image_id': int(image_id), 'bbox': detection_results['detection_boxes'][i][box_index].tolist(), 'score': detection_results['detection_scores'][i][box_index], 'category_id': int( detection_results['detection_classes'][i][box_index])} if include_mask: prediction['segmentation'] = encoded_masks[box_index] predictions.append(prediction) return predictions def generate_segmentation_from_masks(masks, detected_boxes, image_height, image_width, is_image_mask=False): """Generates segmentation result from instance masks. Args: masks: a numpy array of shape [N, mask_height, mask_width] representing the instance masks w.r.t. the `detected_boxes`. detected_boxes: a numpy array of shape [N, 4] representing the reference bounding boxes. image_height: an integer representing the height of the image. 
image_width: an integer representing the width of the image. is_image_mask: bool. True: input masks are whole-image masks. False: input masks are bounding-box level masks. Returns: segms: a numpy array of shape [N, image_height, image_width] representing the instance masks *pasted* on the image canvas. """ def expand_boxes(boxes, scale): """Expands an array of boxes by a given scale.""" # Reference: # https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227 # The `boxes` in the reference implementation is in [x1, y1, x2, y2] form, # whereas `boxes` here is in [x1, y1, w, h] form w_half = boxes[:, 2] * .5 h_half = boxes[:, 3] * .5 x_c = boxes[:, 0] + w_half y_c = boxes[:, 1] + h_half w_half *= scale h_half *= scale boxes_exp = np.zeros(boxes.shape) boxes_exp[:, 0] = x_c - w_half boxes_exp[:, 2] = x_c + w_half boxes_exp[:, 1] = y_c - h_half boxes_exp[:, 3] = y_c + h_half return boxes_exp # Reference: # https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812 # To work around an issue with cv2.resize (it seems to automatically pad # with repeated border values), we manually zero-pad the masks by 1 pixel # prior to resizing back to the original image resolution. This prevents # "top hat" artifacts. We therefore need to expand the reference boxes by an # appropriate factor. _, mask_height, mask_width = masks.shape scale = max((mask_width + 2.0) / mask_width, (mask_height + 2.0) / mask_height) ref_boxes = expand_boxes(detected_boxes, scale) ref_boxes = ref_boxes.astype(np.int32) padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32) segms = [] for mask_ind, mask in enumerate(masks): im_mask = np.zeros((image_height, image_width), dtype=np.uint8) if is_image_mask: # Process whole-image masks. im_mask[:, :] = mask[:, :] else: # Process mask inside bounding boxes. padded_mask[1:-1, 1:-1] = mask[:, :] ref_box = ref_boxes[mask_ind, :] w = ref_box[2] - ref_box[0] + 1 h = ref_box[3] - ref_box[1] + 1 w = np.maximum(w, 1) h = np.maximum(h, 1) mask = cv2.resize(padded_mask, (w, h)) mask = np.array(mask > 0.5, dtype=np.uint8) x_0 = max(ref_box[0], 0) x_1 = min(ref_box[2] + 1, image_width) y_0 = max(ref_box[1], 0) y_1 = min(ref_box[3] + 1, image_height) im_mask[y_0:y_1, x_0:x_1] = \ mask[(y_0 - ref_box[1]):(y_1 - ref_box[1]), (x_0 - ref_box[0]):(x_1 - ref_box[0])] segms.append(im_mask) segms = np.array(segms) assert masks.shape[0] == segms.shape[0] return segms class EvaluationMetric(object): """COCO evaluation metric class.""" def __init__(self, filename, include_mask, eval_class_ids=None): """Constructs COCO evaluation class. The class provides the interface to metrics_fn in TPUEstimator. The _evaluate() loads a JSON file in COCO annotation format as the groundtruths and runs COCO evaluation. Args: filename (str): Ground truth JSON file name. If filename is None, use groundtruth data passed from the dataloader for evaluation. include_mask (bool): boolean to indicate whether or not to include mask eval. eval_class_ids (list): class ids to evaluate on. 
""" self.filename = filename self.coco_gt = MaskCOCO(filename) self.metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1', 'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl'] self._include_mask = include_mask if self._include_mask: mask_metric_names = ['mask_' + x for x in self.metric_names] self.metric_names.extend(mask_metric_names) self.eval_class_ids = eval_class_ids self._reset() def _reset(self): """Reset COCO API object.""" if self.filename is None and not hasattr(self, 'coco_gt'): self.coco_gt = MaskCOCO() def predict_metric_fn(self, predictions, is_predict_image_mask=False, groundtruth_data=None): """Generates COCO metrics.""" image_ids = list(set(predictions['source_id'])) if groundtruth_data is not None: self.coco_gt.reset(groundtruth_data) coco_dt = self.coco_gt.loadRes(predictions, self._include_mask, is_image_mask=is_predict_image_mask) coco_eval = COCOeval(self.coco_gt, coco_dt, iouType='bbox') coco_eval.params.imgIds = image_ids if self.eval_class_ids is not None: coco_eval.params.catIds = self.eval_class_ids coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() coco_metrics = coco_eval.stats if self._include_mask: # Create another object for instance segmentation metric evaluation. mcoco_eval = COCOeval(self.coco_gt, coco_dt, iouType='segm') mcoco_eval.params.imgIds = image_ids if self.eval_class_ids is not None: mcoco_eval.params.catIds = self.eval_class_ids mcoco_eval.evaluate() mcoco_eval.accumulate() mcoco_eval.summarize() mask_coco_metrics = mcoco_eval.stats if self._include_mask: metrics = np.hstack((coco_metrics, mask_coco_metrics)) else: metrics = coco_metrics # clean up after evaluation is done. self._reset() metrics = metrics.astype(np.float32) metrics_dict = {} for i, name in enumerate(self.metric_names): metrics_dict[name] = metrics[i] return metrics_dict
tao_deploy-main
nvidia_tao_deploy/metrics/coco_metric.py
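A hedged usage sketch for EvaluationMetric above; 'instances_val.json' and the array contents are placeholders for illustration, not files or values shipped with the repository. It shows the prediction-dictionary layout that predict_metric_fn consumes.

# Usage sketch; the groundtruth file path and detections are placeholders.
import numpy as np
from nvidia_tao_deploy.metrics.coco_metric import EvaluationMetric

metric = EvaluationMetric(filename="instances_val.json", include_mask=False)
predictions = {
    'source_id': np.array([1]),                             # COCO image ids
    'num_detections': np.array([1]),
    'detection_boxes': np.array([[[10., 20., 50., 60.]]]),  # [x, y, w, h]
    'detection_scores': np.array([[0.9]]),
    'detection_classes': np.array([[1]]),
}
metrics_dict = metric.predict_metric_fn(predictions)
print(metrics_dict['AP'], metrics_dict['AP50'])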
# Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.

"""EULA for TAO DEPLOY."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
tao_deploy-main
nvidia_tao_deploy/license/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Util class for creating image batches."""

import os
import random
import sys
from pathlib import Path

import numpy as np
from PIL import Image

from nvidia_tao_deploy.cv.common.constants import VALID_IMAGE_EXTENSIONS
from nvidia_tao_deploy.inferencer.preprocess_input import preprocess_input
from nvidia_tao_deploy.cv.deformable_detr.dataloader import resize
from nvidia_tao_deploy.cv.metric_learning_recognition.dataloader import center_crop


class ImageBatcher:
    """Creates batches of pre-processed images."""

    def __init__(self, input, shape, dtype,  # noqa pylint: disable=W0622
                 max_num_images=None,
                 exact_batches=False,
                 preprocessor="EfficientDet",
                 img_std=[0.229, 0.224, 0.225],
                 img_mean=[0.485, 0.456, 0.406]):
        """Initialize.

        Args:
            input: The input directory to read images from. (list or str)
            shape: The tensor shape of the batch to prepare, either in channels_first
                or channels_last format.
            dtype: The (numpy) datatype to cast the batched data to.
            max_num_images: The maximum number of images to read from the directory.
            exact_batches: This defines how to handle a number of images that is not
                an exact multiple of the batch size. If false, it will pad the final
                batch with zeros to reach the batch size. If true, it will *remove*
                the last few images in excess of a batch size multiple, to guarantee
                batches are exact (useful for calibration).
            preprocessor: Set the preprocessor to use, depending on which network is
                being used.
            img_std: Set the image std for the DDETR use case.
            img_mean: Set the image mean for the DDETR use case.
        """
        self.images = []

        def is_image(path):
            return os.path.isfile(path) and path.lower().endswith(VALID_IMAGE_EXTENSIONS)

        if isinstance(input, list):
            # Multiple directories
            for image_dir in input:
                self.images.extend(
                    str(p.resolve()) for p in Path(image_dir).glob("**/*")
                    if p.suffix in VALID_IMAGE_EXTENSIONS)
            # Shuffle so that we sample uniformly from the sequence
            random.shuffle(self.images)
        else:
            if os.path.isdir(input):
                self.images = [
                    str(p.resolve()) for p in Path(input).glob("**/*")
                    if p.suffix in VALID_IMAGE_EXTENSIONS]
                self.images.sort()
            elif os.path.isfile(input):
                if is_image(input):
                    self.images.append(input)

        self.num_images = len(self.images)
        if self.num_images < 1:
            print(f"No valid {'/'.join(VALID_IMAGE_EXTENSIONS)} images found in {input}")
            sys.exit(1)

        # Handle Tensor Shape
        self.dtype = dtype
        self.shape = shape
        assert len(self.shape) == 4
        self.batch_size = shape[0]
        assert self.batch_size > 0
        self.format = None
        self.width = -1
        self.height = -1
        if self.shape[1] == 3:
            self.format = "channels_first"
            self.height = self.shape[2]
            self.width = self.shape[3]
        elif self.shape[3] == 3:
            self.format = "channels_last"
            self.height = self.shape[1]
            self.width = self.shape[2]
        assert all([self.format, self.width > 0, self.height > 0])

        # Adapt the number of images as needed
        if max_num_images and 0 < max_num_images < len(self.images):
            self.num_images = max_num_images
        if exact_batches:
            self.num_images = self.batch_size * (self.num_images // self.batch_size)
        if self.num_images < 1:
            raise ValueError("Not enough images to create batches")
        self.images = self.images[0:self.num_images]

        # Subdivide the list of images into batches
        self.num_batches = 1 + int((self.num_images - 1) / self.batch_size)
        self.batches = []
        for i in range(self.num_batches):
            start = i * self.batch_size
            end = min(start + self.batch_size, self.num_images)
            self.batches.append(self.images[start:end])

        # Indices
        self.image_index = 0
        self.batch_index = 0

        self.preprocessor = preprocessor
        self.img_std = img_std
        self.img_mean = img_mean

    def preprocess_image(self, image_path):
        """The image preprocessor loads an image from disk and prepares it as needed for batching.

        This includes padding, resizing, normalization, data type casting, and
        transposing. This Image Batcher implements a few algorithms for now:
        * EfficientDet: Resizes and pads the image to fit the input size.
        * MRCNN: Resizes, pads, and normalizes the image to fit the input size.
        * DetectNetv2: Resizes and normalizes the image to fit the input size.

        Args:
            image_path (str): The path to the image on disk to load.

        Returns:
            Two values: A numpy array holding the image sample, ready to be
            concatenated into the rest of the batch, and the resize scale used, if any.
        """
        def resize_pad(image, pad_color=(0, 0, 0)):
            """Resize and Pad.

            A subroutine to implement padding and resizing. This will resize the
            image to fit fully within the input size, and pads the remaining
            bottom-right portions with the value provided.

            Args:
                image: The PIL image object.
                pad_color: The RGB values to use for the padded area. Default: Black/Zeros.

            Returns:
                Two values: The PIL image object already padded and cropped, and the
                resize scale used.
            """
            width, height = image.size
            width_scale = width / self.width
            height_scale = height / self.height
            scale = 1.0 / max(width_scale, height_scale)
            image = image.resize(
                (round(width * scale), round(height * scale)),
                resample=Image.BILINEAR)
            pad = Image.new("RGB", (self.width, self.height))
            pad.paste(pad_color, [0, 0, self.width, self.height])
            pad.paste(image)
            return pad, scale

        scale = None
        image = Image.open(image_path)
        image = image.convert(mode='RGB')
        if self.preprocessor == "EfficientDet":
            # For EfficientNet V2: Resize & Pad with ImageNet mean values
            # and keep as [0,255] Normalization
            image, scale = resize_pad(image, (124, 116, 104))
            image = np.asarray(image, dtype=self.dtype)
            # [0-1] Normalization, Mean subtraction and Std Dev scaling are
            # part of the EfficientDet graph, so no need to do it during preprocessing here
        elif self.preprocessor == "DetectNetv2":
            image = image.resize((self.width, self.height), Image.LANCZOS)
            image = np.asarray(image, dtype=self.dtype)
            image = image / 255.0
            scale = 1.0
        elif self.preprocessor == "MRCNN":
            image, scale = resize_pad(image, (124, 116, 104))
            image = np.asarray(image, dtype=self.dtype)
            image = preprocess_input(image, data_format="channels_last", mode="torch")
        elif self.preprocessor == 'DDETR':
            image = np.asarray(image, dtype=self.dtype)
            orig_h, orig_w, _ = image.shape
            image, _ = resize(image, None, size=(self.height, self.width))
            image = preprocess_input(image,
                                     data_format='channels_last',
                                     img_std=self.img_std,
                                     mode='torch')
            new_h, new_w, _ = image.shape
            scale = (orig_h / new_h, orig_w / new_w)
        elif self.preprocessor == "OCDNet":
            image = image.resize((self.width, self.height), Image.LANCZOS)
            # Convert to a float array before mean subtraction; subtracting a
            # numpy array from a PIL image directly would fail.
            image = np.asarray(image, dtype=self.dtype)
            rgb_mean = np.array([122.67891434, 116.66876762, 104.00698793])
            image -= rgb_mean
            image /= 255.
        elif self.preprocessor == "MLRecog":
            init_size = (int(self.width * 1.14), int(self.height * 1.14))
            image = image.resize(init_size, Image.BILINEAR)
            image = center_crop(image, self.width, self.height)
            image = np.asarray(image, dtype=self.dtype)
            image = preprocess_input(image,
                                     data_format='channels_first',
                                     img_mean=self.img_mean,
                                     img_std=self.img_std,
                                     mode='torch')
        else:
            raise NotImplementedError(f"Preprocessing method {self.preprocessor} not supported")
        if self.format == "channels_first":
            image = np.transpose(image, (2, 0, 1))
        return image, scale

    def get_batch(self):
        """Retrieve the batches.

        This is a generator object, so you can use it within a loop as:
            for batch, images, scales in batcher.get_batch():
                ...
        Or outside of a loop with the next() function.

        Returns:
            A generator yielding three items per iteration: a numpy array holding a
            batch of images, the list of paths to the images loaded within this batch,
            and the list of resize scales for each image in the batch.
        """
        for batch_images in self.batches:
            batch_data = np.zeros(self.shape, dtype=self.dtype)
            batch_scales = [None] * len(batch_images)
            for i, image in enumerate(batch_images):
                self.image_index += 1
                batch_data[i], batch_scales[i] = self.preprocess_image(image)
            self.batch_index += 1
            yield batch_data, batch_images, batch_scales
tao_deploy-main
nvidia_tao_deploy/utils/image_batcher.py
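A minimal usage sketch for ImageBatcher above; './val_images' and the 544x960 input shape are placeholder values chosen for illustration.

# Usage sketch; the directory and tensor shape are placeholders.
import numpy as np
from nvidia_tao_deploy.utils.image_batcher import ImageBatcher

batcher = ImageBatcher("./val_images",
                       shape=(8, 3, 544, 960),  # NCHW: batches of 8 RGB images
                       dtype=np.float32,
                       preprocessor="DetectNetv2")
for batch, image_paths, scales in batcher.get_batch():
    # batch is ready to be copied into a TensorRT input buffer.
    print(batch.shape, len(image_paths), scales[0])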
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""TAO Deploy utils."""
tao_deploy-main
nvidia_tao_deploy/utils/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities to decode and encode encrypted TAO model files (.etlt/.eff)."""

import logging
import os
import struct
import tempfile
from zipfile import ZipFile

from eff.core import Archive
from eff_tao_encryption.tao_codec import decrypt_stream, encrypt_stream

logging.basicConfig(
    format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
    level="INFO"
)
logger = logging.getLogger(__name__)


def decode_model(model_path, key=None):
    """Decrypt the model."""
    if model_path.endswith('etlt'):
        return decode_etlt(model_path, key)
    if model_path.endswith('onnx'):
        return model_path, 'onnx'
    if model_path.endswith('uff'):
        return model_path, 'uff'
    raise NotImplementedError(
        "TAO Deploy gen_trt_engine action accepts either .onnx, .uff "
        "or .etlt file extension"
    )


def decode_etlt(etlt_model_path, key):
    """Decrypt ETLT model."""
    _handle, decrypted_model = tempfile.mkstemp()
    os.close(_handle)
    with open(decrypted_model, 'wb') as temp_file, open(etlt_model_path, 'rb') as encoded_file:
        size = encoded_file.read(4)
        size = struct.unpack("<i", size)[0]
        input_node_name = encoded_file.read(size)
        if size:
            # ETLT is in UFF format
            logging.info("The provided .etlt file is in UFF format.")
            logging.info("Input name: %s", input_node_name)
            file_format = "uff"
        else:
            # ETLT is in ONNX format
            logging.info("The provided .etlt file is in ONNX format.")
            file_format = "onnx"
        decrypt_stream(encoded_file, temp_file,
                       key.encode(), encryption=True, rewind=False)
    return decrypted_model, file_format


def decode_eff(eff_model_path, key):
    """Decrypt EFF."""
    eff_filename = os.path.basename(eff_model_path)
    eff_art = Archive.restore_artifact(
        restore_path=eff_model_path,
        artifact_name=eff_filename,
        passphrase=key)
    zip_path = eff_art.get_handle()
    # Unzip
    ckpt_path = os.path.dirname(zip_path)
    # TODO(@yuw): try catch?
    with ZipFile(zip_path, "r") as zip_file:
        zip_file.extractall(ckpt_path)
    extracted_files = os.listdir(ckpt_path)
    # TODO(@yuw): get onnx path
    ckpt_name = None
    for f in extracted_files:
        if 'ckpt' in f:
            ckpt_name = f.split('.')[0]
    return ckpt_path, ckpt_name


def encode_etlt(tmp_file_name, output_file_name, input_tensor_name, key):
    """Encrypt ETLT model."""
    # Encode temporary uff to output file
    with open(tmp_file_name, "rb") as open_temp_file, \
            open(output_file_name, "wb") as open_encoded_file:
        # TODO: @vpraveen: Remove this hack to support multiple input nodes.
        # This will require an update to tlt_converter and DS. Postponing this for now.
        if isinstance(input_tensor_name, list):
            input_tensor_name = input_tensor_name[0]
        open_encoded_file.write(struct.pack("<i", len(input_tensor_name)))
        open_encoded_file.write(input_tensor_name.encode())
        encrypt_stream(open_temp_file, open_encoded_file,
                       key, encryption=True, rewind=False)
tao_deploy-main
nvidia_tao_deploy/utils/decoding.py
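A hedged sketch of the decode path above; 'model.etlt' and the key value are placeholders, not artifacts shipped with the repository. decode_model dispatches on the file extension and, for .etlt, returns a decrypted temporary file plus the detected format.

# Usage sketch; the model path and key are placeholders.
from nvidia_tao_deploy.utils.decoding import decode_model

decrypted_path, file_format = decode_model("model.etlt", key="nvidia_tlt")
if file_format == "onnx":
    print("Parse", decrypted_path, "with the ONNX parser")
else:
    print("Parse", decrypted_path, "with the UFF parser")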
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""TAO common path utils used across all apps."""

import os


def expand_path(path):
    """Expand and absolutize a path.

    This function takes in a path and returns the absolute path of that path
    after expanding the tilde (~) character to the user's home directory. This
    is done to prevent any path traversal vulnerability.

    Args:
        path (str): The path to expand and make absolute.

    Returns:
        str: The absolute path with expanded tilde.
    """
    return os.path.abspath(os.path.expanduser(path))
tao_deploy-main
nvidia_tao_deploy/utils/path_utils.py
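A quick illustration of expand_path above; the path argument is an arbitrary example and the printed result depends on the current user's home directory.

# Usage sketch; the input path is a placeholder.
from nvidia_tao_deploy.utils.path_utils import expand_path

print(expand_path("~/models/../models/resnet18.onnx"))
# e.g. /home/user/models/resnet18.onnx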
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""CV root module."""
tao_deploy-main
nvidia_tao_deploy/cv/__init__.py