| rem (string, length 0–322k) | add (string, length 0–2.05M) | context (string, length 8–228k) |
|---|---|---|
pass | self._update() | def write_char(self, char): if self.exercise.is_character_match(char): self.exercise.get_current_sequence().write_char(char) self._update() self.set_can_save(True) else: pass |
except Exception as e: | except Exception, e: | def install_app(self, remove=False): if self.noapp: return |
from optparse import OptionParser | def main(): from optparse import OptionParser p = OptionParser(usage="usage: %prog [options] [name] [version]", description=__doc__) p.add_option("--config", action="store_true", help="display the configuration and exit") p.add_option('-f', "--force", action="store_true", help="force install the main package " "(not it's dependencies, see --forceall)") p.add_option("--forceall", action="store_true", help="force install of all packages " "(i.e. including dependencies)") p.add_option('-i', "--info", action="store_true", help="show information about a package") p.add_option('-l', "--list", action="store_true", help="list the packages currently installed on the system") p.add_option('-n', "--dry-run", action="store_true", help="show what would have been downloaded/removed/installed") p.add_option('-N', "--no-deps", action="store_true", help="neither download nor install dependencies") p.add_option("--path", action="store_true", help="based on the configuration, display how to set the " "PATH and PYTHONPATH environment variables") p.add_option("--prefix", action="store", help="install prefix (disregarding of any settings in " "the config file)", metavar='PATH') p.add_option("--proxy", action="store", help="use a proxy for downloads", metavar='URL') p.add_option("--remove", action="store_true", help="remove a package") p.add_option('-s', "--search", action="store_true", help="search the index in the repo (chain) of packages " "and display versions available.") p.add_option("--sys-prefix", action="store_true", help="use sys.prefix as the install prefix") p.add_option('-v', "--verbose", action="store_true") p.add_option('--version', action="store_true") p.add_option("--whats-new", action="store_true", help="display to which installed packages updates are " "available") opts, args = p.parse_args() if len(args) > 0 and (opts.config or opts.path): p.error("Option takes no arguments") if opts.prefix and opts.sys_prefix: p.error("Options --prefix and --sys-prefix exclude each ohter") if opts.force and opts.forceall: p.error("Options --force and --forceall exclude each ohter") pat = None if (opts.list or opts.search) and args: pat = re.compile(args[0], re.I) if opts.version: # --version from enstaller import __version__ print "Enstaller version:", __version__ return if opts.config: # --config config.print_config() return if opts.proxy: # --proxy from proxy.api import setup_proxy setup_proxy(opts.proxy) if config.get_path() is None: # create config file if it dosn't exist config.write(opts.proxy) conf = config.read() # conf if (not opts.proxy) and conf['proxy']: from proxy.api import setup_proxy setup_proxy(conf['proxy']) global prefix, dry_run, noapp, version # set globals if opts.sys_prefix: prefix = sys.prefix elif opts.prefix: prefix = opts.prefix else: prefix = conf['prefix'] dry_run = opts.dry_run noapp = conf['noapp'] version = opts.version if opts.path: # --path print_path() return if opts.list: # --list list_option(pat) return c = Chain(conf['IndexedRepos'], verbose) # init chain if opts.search: # --search search(c, pat) return if opts.info: # --info if len(args) != 1: p.error("Option requires one argument (name of package)") info_option(conf['info_url'], c, canonical(args[0])) return if opts.whats_new: # --whats-new if args: p.error("Option requires no arguments") whats_new(c) return if len(args) == 0: p.error("Requirement (name and optional version) missing") if len(args) > 2: p.error("A requirement is a name and an optional version") req = Req(' 
'.join(args)) print "prefix:", prefix check_write() if opts.remove: # --remove remove_req(req) return dists = get_dists(c, req, # dists recur=not opts.no_deps) # Warn the user about packages which depend on what will be updated depend_warn([filename_dist(d) for d in dists]) # Packages which are installed currently sys_inst = set(egginst.get_installed(sys.prefix)) if prefix == sys.prefix: prefix_inst = sys_inst else: prefix_inst = set(egginst.get_installed(prefix)) all_inst = sys_inst | prefix_inst # These are the packahes which are being excluded from being installed if opts.forceall: exclude = set() else: exclude = all_inst if opts.force: exclude.discard(filename_dist(dists[-1])) # Fetch distributions if not isdir(conf['local']): os.makedirs(conf['local']) for dist, fn in iter_dists_excl(dists, exclude): c.fetch_dist(dist, conf['local'], check_md5=opts.force or opts.forceall, dry_run=dry_run) # Remove packages (in reverse install order) for dist in dists[::-1]: fn = filename_dist(dist) if fn in all_inst: # if the distribution (which needs to be installed) is already # installed don't remove it continue cname = cname_fn(fn) # Only remove packages installed in prefix for fn_inst in prefix_inst: if cname == cname_fn(fn_inst): call_egginst(fn_inst, remove=True) # Install packages installed_something = False for dist, fn in iter_dists_excl(dists, exclude): installed_something = True call_egginst(join(conf['local'], fn)) if not installed_something: print "No update necessary, %s is up-to-date." % req print_installed_info(req.name) |
|
if opts.proxy: from proxy.api import setup_proxy setup_proxy(opts.proxy) | def main(): from optparse import OptionParser p = OptionParser(usage="usage: %prog [options] [name] [version]", description=__doc__) p.add_option("--config", action="store_true", help="display the configuration and exit") p.add_option('-f', "--force", action="store_true", help="force install the main package " "(not it's dependencies, see --forceall)") p.add_option("--forceall", action="store_true", help="force install of all packages " "(i.e. including dependencies)") p.add_option('-i', "--info", action="store_true", help="show information about a package") p.add_option('-l', "--list", action="store_true", help="list the packages currently installed on the system") p.add_option('-n', "--dry-run", action="store_true", help="show what would have been downloaded/removed/installed") p.add_option('-N', "--no-deps", action="store_true", help="neither download nor install dependencies") p.add_option("--path", action="store_true", help="based on the configuration, display how to set the " "PATH and PYTHONPATH environment variables") p.add_option("--prefix", action="store", help="install prefix (disregarding of any settings in " "the config file)", metavar='PATH') p.add_option("--proxy", action="store", help="use a proxy for downloads", metavar='URL') p.add_option("--remove", action="store_true", help="remove a package") p.add_option('-s', "--search", action="store_true", help="search the index in the repo (chain) of packages " "and display versions available.") p.add_option("--sys-prefix", action="store_true", help="use sys.prefix as the install prefix") p.add_option('-v', "--verbose", action="store_true") p.add_option('--version', action="store_true") p.add_option("--whats-new", action="store_true", help="display to which installed packages updates are " "available") opts, args = p.parse_args() if len(args) > 0 and (opts.config or opts.path): p.error("Option takes no arguments") if opts.prefix and opts.sys_prefix: p.error("Options --prefix and --sys-prefix exclude each ohter") if opts.force and opts.forceall: p.error("Options --force and --forceall exclude each ohter") pat = None if (opts.list or opts.search) and args: pat = re.compile(args[0], re.I) if opts.version: # --version from enstaller import __version__ print "Enstaller version:", __version__ return if opts.config: # --config config.print_config() return if opts.proxy: # --proxy from proxy.api import setup_proxy setup_proxy(opts.proxy) if config.get_path() is None: # create config file if it dosn't exist config.write(opts.proxy) conf = config.read() # conf if (not opts.proxy) and conf['proxy']: from proxy.api import setup_proxy setup_proxy(conf['proxy']) global prefix, dry_run, noapp, version # set globals if opts.sys_prefix: prefix = sys.prefix elif opts.prefix: prefix = opts.prefix else: prefix = conf['prefix'] dry_run = opts.dry_run noapp = conf['noapp'] version = opts.version if opts.path: # --path print_path() return if opts.list: # --list list_option(pat) return c = Chain(conf['IndexedRepos'], verbose) # init chain if opts.search: # --search search(c, pat) return if opts.info: # --info if len(args) != 1: p.error("Option requires one argument (name of package)") info_option(conf['info_url'], c, canonical(args[0])) return if opts.whats_new: # --whats-new if args: p.error("Option requires no arguments") whats_new(c) return if len(args) == 0: p.error("Requirement (name and optional version) missing") if len(args) > 2: p.error("A requirement is a name and an 
optional version") req = Req(' '.join(args)) print "prefix:", prefix check_write() if opts.remove: # --remove remove_req(req) return dists = get_dists(c, req, # dists recur=not opts.no_deps) # Warn the user about packages which depend on what will be updated depend_warn([filename_dist(d) for d in dists]) # Packages which are installed currently sys_inst = set(egginst.get_installed(sys.prefix)) if prefix == sys.prefix: prefix_inst = sys_inst else: prefix_inst = set(egginst.get_installed(prefix)) all_inst = sys_inst | prefix_inst # These are the packahes which are being excluded from being installed if opts.forceall: exclude = set() else: exclude = all_inst if opts.force: exclude.discard(filename_dist(dists[-1])) # Fetch distributions if not isdir(conf['local']): os.makedirs(conf['local']) for dist, fn in iter_dists_excl(dists, exclude): c.fetch_dist(dist, conf['local'], check_md5=opts.force or opts.forceall, dry_run=dry_run) # Remove packages (in reverse install order) for dist in dists[::-1]: fn = filename_dist(dist) if fn in all_inst: # if the distribution (which needs to be installed) is already # installed don't remove it continue cname = cname_fn(fn) # Only remove packages installed in prefix for fn_inst in prefix_inst: if cname == cname_fn(fn_inst): call_egginst(fn_inst, remove=True) # Install packages installed_something = False for dist, fn in iter_dists_excl(dists, exclude): installed_something = True call_egginst(join(conf['local'], fn)) if not installed_something: print "No update necessary, %s is up-to-date." % req print_installed_info(req.name) |
|
if (not opts.proxy) and conf['proxy']: from proxy.api import setup_proxy | if opts.proxy: setup_proxy(opts.proxy) elif conf['proxy']: | def main(): from optparse import OptionParser p = OptionParser(usage="usage: %prog [options] [name] [version]", description=__doc__) p.add_option("--config", action="store_true", help="display the configuration and exit") p.add_option('-f', "--force", action="store_true", help="force install the main package " "(not it's dependencies, see --forceall)") p.add_option("--forceall", action="store_true", help="force install of all packages " "(i.e. including dependencies)") p.add_option('-i', "--info", action="store_true", help="show information about a package") p.add_option('-l', "--list", action="store_true", help="list the packages currently installed on the system") p.add_option('-n', "--dry-run", action="store_true", help="show what would have been downloaded/removed/installed") p.add_option('-N', "--no-deps", action="store_true", help="neither download nor install dependencies") p.add_option("--path", action="store_true", help="based on the configuration, display how to set the " "PATH and PYTHONPATH environment variables") p.add_option("--prefix", action="store", help="install prefix (disregarding of any settings in " "the config file)", metavar='PATH') p.add_option("--proxy", action="store", help="use a proxy for downloads", metavar='URL') p.add_option("--remove", action="store_true", help="remove a package") p.add_option('-s', "--search", action="store_true", help="search the index in the repo (chain) of packages " "and display versions available.") p.add_option("--sys-prefix", action="store_true", help="use sys.prefix as the install prefix") p.add_option('-v', "--verbose", action="store_true") p.add_option('--version', action="store_true") p.add_option("--whats-new", action="store_true", help="display to which installed packages updates are " "available") opts, args = p.parse_args() if len(args) > 0 and (opts.config or opts.path): p.error("Option takes no arguments") if opts.prefix and opts.sys_prefix: p.error("Options --prefix and --sys-prefix exclude each ohter") if opts.force and opts.forceall: p.error("Options --force and --forceall exclude each ohter") pat = None if (opts.list or opts.search) and args: pat = re.compile(args[0], re.I) if opts.version: # --version from enstaller import __version__ print "Enstaller version:", __version__ return if opts.config: # --config config.print_config() return if opts.proxy: # --proxy from proxy.api import setup_proxy setup_proxy(opts.proxy) if config.get_path() is None: # create config file if it dosn't exist config.write(opts.proxy) conf = config.read() # conf if (not opts.proxy) and conf['proxy']: from proxy.api import setup_proxy setup_proxy(conf['proxy']) global prefix, dry_run, noapp, version # set globals if opts.sys_prefix: prefix = sys.prefix elif opts.prefix: prefix = opts.prefix else: prefix = conf['prefix'] dry_run = opts.dry_run noapp = conf['noapp'] version = opts.version if opts.path: # --path print_path() return if opts.list: # --list list_option(pat) return c = Chain(conf['IndexedRepos'], verbose) # init chain if opts.search: # --search search(c, pat) return if opts.info: # --info if len(args) != 1: p.error("Option requires one argument (name of package)") info_option(conf['info_url'], c, canonical(args[0])) return if opts.whats_new: # --whats-new if args: p.error("Option requires no arguments") whats_new(c) return if len(args) == 0: p.error("Requirement (name and optional version) missing") 
if len(args) > 2: p.error("A requirement is a name and an optional version") req = Req(' '.join(args)) print "prefix:", prefix check_write() if opts.remove: # --remove remove_req(req) return dists = get_dists(c, req, # dists recur=not opts.no_deps) # Warn the user about packages which depend on what will be updated depend_warn([filename_dist(d) for d in dists]) # Packages which are installed currently sys_inst = set(egginst.get_installed(sys.prefix)) if prefix == sys.prefix: prefix_inst = sys_inst else: prefix_inst = set(egginst.get_installed(prefix)) all_inst = sys_inst | prefix_inst # These are the packahes which are being excluded from being installed if opts.forceall: exclude = set() else: exclude = all_inst if opts.force: exclude.discard(filename_dist(dists[-1])) # Fetch distributions if not isdir(conf['local']): os.makedirs(conf['local']) for dist, fn in iter_dists_excl(dists, exclude): c.fetch_dist(dist, conf['local'], check_md5=opts.force or opts.forceall, dry_run=dry_run) # Remove packages (in reverse install order) for dist in dists[::-1]: fn = filename_dist(dist) if fn in all_inst: # if the distribution (which needs to be installed) is already # installed don't remove it continue cname = cname_fn(fn) # Only remove packages installed in prefix for fn_inst in prefix_inst: if cname == cname_fn(fn_inst): call_egginst(fn_inst, remove=True) # Install packages installed_something = False for dist, fn in iter_dists_excl(dists, exclude): installed_something = True call_egginst(join(conf['local'], fn)) if not installed_something: print "No update necessary, %s is up-to-date." % req print_installed_info(req.name) |
_placehold_pat = re.compile('/PLACEHOLD' * 5 + '([^\0]*)\0') | _placehold_pat = re.compile('/PLACEHOLD' * 5 + '([^\0\\s]*)\0') | def find_lib(fn): for tgt in _targets: dst = abspath(join(tgt, fn)) if exists(dst): return dst print "ERROR: library %r not found" % fn return join('/ERROR/path/not/found', fn) |
import platform | def get_arch(): import platform if '64' in platform.architecture()[0]: return 'amd64' else: return 'x86' |
|
self.installed_size = size if size == 0: return | def extract(self): cur = n = 0 size = sum(self.z.getinfo(name).file_size for name in self.arcnames) self.installed_size = size if size == 0: return |
|
rat = float(n) / size | if size == 0: rat = 1 else: rat = float(n) / size | def extract(self): cur = n = 0 size = sum(self.z.getinfo(name).file_size for name in self.arcnames) self.installed_size = size if size == 0: return |
size = sum(self.z.getinfo(name).file_size for name in self.arcnames) | size = sum(self.z.getinfo(name).file_size for name in self.arcnames) self.installed_size = size if size == 0: return | def extract(self): cur = n = 0 size = sum(self.z.getinfo(name).file_size for name in self.arcnames) sys.stdout.write('%9s [' % human_bytes(size)) for name in self.arcnames: n += self.z.getinfo(name).file_size rat = float(n) / size if rat * 64 >= cur: sys.stdout.write('.') sys.stdout.flush() cur += 1 self.write_arcname(name) |
self.installed_size = size | def extract(self): cur = n = 0 size = sum(self.z.getinfo(name).file_size for name in self.arcnames) sys.stdout.write('%9s [' % human_bytes(size)) for name in self.arcnames: n += self.z.getinfo(name).file_size rat = float(n) / size if rat * 64 >= cur: sys.stdout.write('.') sys.stdout.flush() cur += 1 self.write_arcname(name) |
|
egg_pat = re.compile(r'([\w.]+)-([\w.]+)-(\d+).egg$') | egg_pat = re.compile(r'([\w.]+)-([\w.]+)-(\d+)\.egg$') | def filename_dist(dist): return split_dist(dist)[1] |
help="show information about package(s)") | help="show information about a package") | def main(): from optparse import OptionParser p = OptionParser(usage="usage: %prog [options] [name] [version]", description=__doc__) p.add_option("--config", action="store_true", help="display the configuration and exit") p.add_option('-f', "--force", action="store_true", help="force install the main package " "(not it's dependencies, see --forceall)") p.add_option("--forceall", action="store_true", help="force install of all packages " "(i.e. including dependencies)") p.add_option('-i', "--info", action="store_true", help="show information about package(s)") p.add_option('-l', "--list", action="store_true", help="list the packages currently installed on the system") p.add_option('-n', "--dry-run", action="store_true", help="show what would have been downloaded/removed/installed") p.add_option('-N', "--no-deps", action="store_true", help="neither download nor install dependencies") p.add_option("--path", action="store_true", help="based on the configuration, display how to set the " "PATH and PYTHONPATH environment variables") p.add_option("--prefix", action="store", help="install prefix (disregarding of any settings in " "the config file)", metavar='PATH') p.add_option("--proxy", action="store", help="use a proxy for downloads", metavar='URL') p.add_option("--remove", action="store_true", help="remove a package") p.add_option('-s', "--search", action="store_true", help="search the index in the repo (chain) of packages " "and display versions available.") p.add_option("--sys-prefix", action="store_true", help="use sys.prefix as the install prefix") p.add_option('-v', "--verbose", action="store_true") p.add_option('--version', action="store_true") opts, args = p.parse_args() if len(args) > 0 and (opts.config or opts.path): p.error("Option takes no arguments") if opts.prefix and opts.sys_prefix: p.error("Options --prefix and --sys-prefix exclude each ohter") if opts.force and opts.forceall: p.error("Options --force and --forceall exclude each ohter") pat = None if (opts.list or opts.search) and args: pat = re.compile(args[0], re.I) if opts.version: # --version from enstaller import __version__ print "Enstaller version:", __version__ return if opts.config: # --config config.print_config() return if opts.proxy: # --proxy from proxy.api import setup_proxy setup_proxy(opts.proxy) if config.get_path() is None: # create config file if it dosn't exist config.write(opts.proxy) conf = config.read() # conf if (not opts.proxy) and conf['proxy']: from proxy.api import setup_proxy setup_proxy(conf['proxy']) global prefix, dry_run, noapp, version # set globals if opts.sys_prefix: prefix = sys.prefix elif opts.prefix: prefix = opts.prefix else: prefix = conf['prefix'] dry_run = opts.dry_run noapp = conf['noapp'] version = opts.version if opts.path: # --path print_path() return if opts.list: # --list list_option(pat) return c = Chain(conf['IndexedRepos'], verbose) # init chain if opts.search: # --search search(c, pat) return if len(args) == 0: p.error("Requirement (name and optional version) missing") if len(args) > 2: p.error("A requirement is a name and an optional version") req = Req(' '.join(args)) if opts.info: # --info if len(args) != 1: p.error("Option requires one argument (the name)") info_option(conf['info_url'], c, req.name) return print "prefix:", prefix check_write() if opts.remove: # --remove remove_req(req) return dists = get_dists(c, req, # dists recur=not opts.no_deps) # Warn the user about packages which 
depend on what will be updated depend_warn([filename_dist(d) for d in dists]) # Packages which are installed currently sys_inst = set(egginst.get_installed(sys.prefix)) if prefix == sys.prefix: prefix_inst = sys_inst else: prefix_inst = set(egginst.get_installed(prefix)) all_inst = sys_inst | prefix_inst # These are the packahes which are being excluded from being installed if opts.forceall: exclude = set() else: exclude = all_inst if opts.force: exclude.discard(filename_dist(dists[-1])) # Fetch distributions if not isdir(conf['local']): os.makedirs(conf['local']) for dist, fn in iter_dists_excl(dists, exclude): c.fetch_dist(dist, conf['local'], check_md5=opts.force or opts.forceall, dry_run=dry_run) # Remove packages (in reverse install order) for dist in dists[::-1]: fn = filename_dist(dist) if fn in all_inst: # if the distribution (which needs to be installed) is already # installed don't remove it continue cname = cname_fn(fn) # Only remove packages installed in prefix for fn_inst in prefix_inst: if cname == cname_fn(fn_inst): call_egginst(fn_inst, remove=True) # Install packages installed_something = False for dist, fn in iter_dists_excl(dists, exclude): installed_something = True call_egginst(join(conf['local'], fn)) if not installed_something: print "No update necessary, %s is up-to-date." % req print_installed_info(req.name) |
if opts.info: if len(args) != 1: p.error("Option requires one argument (the name)") info_option(conf['info_url'], c, req.name) return | def main(): from optparse import OptionParser p = OptionParser(usage="usage: %prog [options] [name] [version]", description=__doc__) p.add_option("--config", action="store_true", help="display the configuration and exit") p.add_option('-f', "--force", action="store_true", help="force install the main package " "(not it's dependencies, see --forceall)") p.add_option("--forceall", action="store_true", help="force install of all packages " "(i.e. including dependencies)") p.add_option('-i', "--info", action="store_true", help="show information about package(s)") p.add_option('-l', "--list", action="store_true", help="list the packages currently installed on the system") p.add_option('-n', "--dry-run", action="store_true", help="show what would have been downloaded/removed/installed") p.add_option('-N', "--no-deps", action="store_true", help="neither download nor install dependencies") p.add_option("--path", action="store_true", help="based on the configuration, display how to set the " "PATH and PYTHONPATH environment variables") p.add_option("--prefix", action="store", help="install prefix (disregarding of any settings in " "the config file)", metavar='PATH') p.add_option("--proxy", action="store", help="use a proxy for downloads", metavar='URL') p.add_option("--remove", action="store_true", help="remove a package") p.add_option('-s', "--search", action="store_true", help="search the index in the repo (chain) of packages " "and display versions available.") p.add_option("--sys-prefix", action="store_true", help="use sys.prefix as the install prefix") p.add_option('-v', "--verbose", action="store_true") p.add_option('--version', action="store_true") opts, args = p.parse_args() if len(args) > 0 and (opts.config or opts.path): p.error("Option takes no arguments") if opts.prefix and opts.sys_prefix: p.error("Options --prefix and --sys-prefix exclude each ohter") if opts.force and opts.forceall: p.error("Options --force and --forceall exclude each ohter") pat = None if (opts.list or opts.search) and args: pat = re.compile(args[0], re.I) if opts.version: # --version from enstaller import __version__ print "Enstaller version:", __version__ return if opts.config: # --config config.print_config() return if opts.proxy: # --proxy from proxy.api import setup_proxy setup_proxy(opts.proxy) if config.get_path() is None: # create config file if it dosn't exist config.write(opts.proxy) conf = config.read() # conf if (not opts.proxy) and conf['proxy']: from proxy.api import setup_proxy setup_proxy(conf['proxy']) global prefix, dry_run, noapp, version # set globals if opts.sys_prefix: prefix = sys.prefix elif opts.prefix: prefix = opts.prefix else: prefix = conf['prefix'] dry_run = opts.dry_run noapp = conf['noapp'] version = opts.version if opts.path: # --path print_path() return if opts.list: # --list list_option(pat) return c = Chain(conf['IndexedRepos'], verbose) # init chain if opts.search: # --search search(c, pat) return if len(args) == 0: p.error("Requirement (name and optional version) missing") if len(args) > 2: p.error("A requirement is a name and an optional version") req = Req(' '.join(args)) if opts.info: # --info if len(args) != 1: p.error("Option requires one argument (the name)") info_option(conf['info_url'], c, req.name) return print "prefix:", prefix check_write() if opts.remove: # --remove remove_req(req) return dists = get_dists(c, req, # dists recur=not 
opts.no_deps) # Warn the user about packages which depend on what will be updated depend_warn([filename_dist(d) for d in dists]) # Packages which are installed currently sys_inst = set(egginst.get_installed(sys.prefix)) if prefix == sys.prefix: prefix_inst = sys_inst else: prefix_inst = set(egginst.get_installed(prefix)) all_inst = sys_inst | prefix_inst # These are the packahes which are being excluded from being installed if opts.forceall: exclude = set() else: exclude = all_inst if opts.force: exclude.discard(filename_dist(dists[-1])) # Fetch distributions if not isdir(conf['local']): os.makedirs(conf['local']) for dist, fn in iter_dists_excl(dists, exclude): c.fetch_dist(dist, conf['local'], check_md5=opts.force or opts.forceall, dry_run=dry_run) # Remove packages (in reverse install order) for dist in dists[::-1]: fn = filename_dist(dist) if fn in all_inst: # if the distribution (which needs to be installed) is already # installed don't remove it continue cname = cname_fn(fn) # Only remove packages installed in prefix for fn_inst in prefix_inst: if cname == cname_fn(fn_inst): call_egginst(fn_inst, remove=True) # Install packages installed_something = False for dist, fn in iter_dists_excl(dists, exclude): installed_something = True call_egginst(join(conf['local'], fn)) if not installed_something: print "No update necessary, %s is up-to-date." % req print_installed_info(req.name) |
|
placehold_pat = re.compile('(/PLACEHOLD){5,}([^\0\\s]*)\0') | placehold_pat = re.compile(5 * '/PLACEHOLD' + '([^\0\\s]*)\0') | def find_lib(fn): for tgt in _targets: dst = abspath(join(tgt, fn)) if exists(dst): return dst print "ERROR: library %r not found" % fn return join('/ERROR/path/not/found', fn) |
data = f.read() | data = f.read(262144) | def fix_object_code(path): tp = get_object_type(path) if tp is None: return f = open(path, 'r+b') data = f.read() matches = list(placehold_pat.finditer(data)) if not matches: f.close() return if verbose: print "Fixing placeholders in:", path for m in matches: gr2 = m.group(2) # this should not be necessary as the regular expression is # evaluated from left to right, meaning that greediness of # the placeholder repetition comes before the greedy group2 while gr2.startswith('/PLACEHOLD'): gr2 = gr2[10:] if tp.startswith('MachO-') and gr2.startswith('/'): # deprecated: because we now use rpath on OSX as well r = find_lib(gr2[1:]) else: assert gr2 == '' or gr2.startswith(':') rpaths = list(_targets) # extend the list with rpath which were already in the binary, # if any rpaths.extend(p for p in gr2.split(':') if p) r = ':'.join(rpaths) if alt_replace_func is not None: r = alt_replace_func(r) padding = len(m.group(0)) - len(r) if padding < 1: # we need at least one null-character raise Exception("placeholder %r too short" % m.group(0)) r += padding * '\0' assert m.start() + len(r) == m.end() f.seek(m.start()) f.write(r) f.close() |
gr2 = m.group(2) | rest = m.group(1) while rest.startswith('/PLACEHOLD'): rest = rest[10:] | def fix_object_code(path): tp = get_object_type(path) if tp is None: return f = open(path, 'r+b') data = f.read() matches = list(placehold_pat.finditer(data)) if not matches: f.close() return if verbose: print "Fixing placeholders in:", path for m in matches: gr2 = m.group(2) # this should not be necessary as the regular expression is # evaluated from left to right, meaning that greediness of # the placeholder repetition comes before the greedy group2 while gr2.startswith('/PLACEHOLD'): gr2 = gr2[10:] if tp.startswith('MachO-') and gr2.startswith('/'): # deprecated: because we now use rpath on OSX as well r = find_lib(gr2[1:]) else: assert gr2 == '' or gr2.startswith(':') rpaths = list(_targets) # extend the list with rpath which were already in the binary, # if any rpaths.extend(p for p in gr2.split(':') if p) r = ':'.join(rpaths) if alt_replace_func is not None: r = alt_replace_func(r) padding = len(m.group(0)) - len(r) if padding < 1: # we need at least one null-character raise Exception("placeholder %r too short" % m.group(0)) r += padding * '\0' assert m.start() + len(r) == m.end() f.seek(m.start()) f.write(r) f.close() |
while gr2.startswith('/PLACEHOLD'): gr2 = gr2[10:] if tp.startswith('MachO-') and gr2.startswith('/'): | if tp.startswith('MachO-') and rest.startswith('/'): | def fix_object_code(path): tp = get_object_type(path) if tp is None: return f = open(path, 'r+b') data = f.read() matches = list(placehold_pat.finditer(data)) if not matches: f.close() return if verbose: print "Fixing placeholders in:", path for m in matches: gr2 = m.group(2) # this should not be necessary as the regular expression is # evaluated from left to right, meaning that greediness of # the placeholder repetition comes before the greedy group2 while gr2.startswith('/PLACEHOLD'): gr2 = gr2[10:] if tp.startswith('MachO-') and gr2.startswith('/'): # deprecated: because we now use rpath on OSX as well r = find_lib(gr2[1:]) else: assert gr2 == '' or gr2.startswith(':') rpaths = list(_targets) # extend the list with rpath which were already in the binary, # if any rpaths.extend(p for p in gr2.split(':') if p) r = ':'.join(rpaths) if alt_replace_func is not None: r = alt_replace_func(r) padding = len(m.group(0)) - len(r) if padding < 1: # we need at least one null-character raise Exception("placeholder %r too short" % m.group(0)) r += padding * '\0' assert m.start() + len(r) == m.end() f.seek(m.start()) f.write(r) f.close() |
r = find_lib(gr2[1:]) | r = find_lib(rest[1:]) | def fix_object_code(path): tp = get_object_type(path) if tp is None: return f = open(path, 'r+b') data = f.read() matches = list(placehold_pat.finditer(data)) if not matches: f.close() return if verbose: print "Fixing placeholders in:", path for m in matches: gr2 = m.group(2) # this should not be necessary as the regular expression is # evaluated from left to right, meaning that greediness of # the placeholder repetition comes before the greedy group2 while gr2.startswith('/PLACEHOLD'): gr2 = gr2[10:] if tp.startswith('MachO-') and gr2.startswith('/'): # deprecated: because we now use rpath on OSX as well r = find_lib(gr2[1:]) else: assert gr2 == '' or gr2.startswith(':') rpaths = list(_targets) # extend the list with rpath which were already in the binary, # if any rpaths.extend(p for p in gr2.split(':') if p) r = ':'.join(rpaths) if alt_replace_func is not None: r = alt_replace_func(r) padding = len(m.group(0)) - len(r) if padding < 1: # we need at least one null-character raise Exception("placeholder %r too short" % m.group(0)) r += padding * '\0' assert m.start() + len(r) == m.end() f.seek(m.start()) f.write(r) f.close() |
assert gr2 == '' or gr2.startswith(':') | assert rest == '' or rest.startswith(':') | def fix_object_code(path): tp = get_object_type(path) if tp is None: return f = open(path, 'r+b') data = f.read() matches = list(placehold_pat.finditer(data)) if not matches: f.close() return if verbose: print "Fixing placeholders in:", path for m in matches: gr2 = m.group(2) # this should not be necessary as the regular expression is # evaluated from left to right, meaning that greediness of # the placeholder repetition comes before the greedy group2 while gr2.startswith('/PLACEHOLD'): gr2 = gr2[10:] if tp.startswith('MachO-') and gr2.startswith('/'): # deprecated: because we now use rpath on OSX as well r = find_lib(gr2[1:]) else: assert gr2 == '' or gr2.startswith(':') rpaths = list(_targets) # extend the list with rpath which were already in the binary, # if any rpaths.extend(p for p in gr2.split(':') if p) r = ':'.join(rpaths) if alt_replace_func is not None: r = alt_replace_func(r) padding = len(m.group(0)) - len(r) if padding < 1: # we need at least one null-character raise Exception("placeholder %r too short" % m.group(0)) r += padding * '\0' assert m.start() + len(r) == m.end() f.seek(m.start()) f.write(r) f.close() |
rpaths.extend(p for p in gr2.split(':') if p) | rpaths.extend(p for p in rest.split(':') if p) | def fix_object_code(path): tp = get_object_type(path) if tp is None: return f = open(path, 'r+b') data = f.read() matches = list(placehold_pat.finditer(data)) if not matches: f.close() return if verbose: print "Fixing placeholders in:", path for m in matches: gr2 = m.group(2) # this should not be necessary as the regular expression is # evaluated from left to right, meaning that greediness of # the placeholder repetition comes before the greedy group2 while gr2.startswith('/PLACEHOLD'): gr2 = gr2[10:] if tp.startswith('MachO-') and gr2.startswith('/'): # deprecated: because we now use rpath on OSX as well r = find_lib(gr2[1:]) else: assert gr2 == '' or gr2.startswith(':') rpaths = list(_targets) # extend the list with rpath which were already in the binary, # if any rpaths.extend(p for p in gr2.split(':') if p) r = ':'.join(rpaths) if alt_replace_func is not None: r = alt_replace_func(r) padding = len(m.group(0)) - len(r) if padding < 1: # we need at least one null-character raise Exception("placeholder %r too short" % m.group(0)) r += padding * '\0' assert m.start() + len(r) == m.end() f.seek(m.start()) f.write(r) f.close() |
except: print("Warning: An error occurred while %sinstalling application " "item" % ('un' if remove else '')) | except Exception as e: print("Warning (%sinstalling application item):\n%r" % ('un' if remove else '', e)) | def install_app(self, remove=False): if self.noapp: return |
if opts.proxy: proxy = opts.proxy else: proxy = conf['proxy'] if proxy: from proxy.api import setup_proxy setup_proxy(proxy) | def main(): from optparse import OptionParser p = OptionParser( usage="usage: %prog [options] [name] [version]", description=("download and install eggs ...")) p.add_option("--config", action="store_true", help="display the configuration and exit") p.add_option('-f', "--force", action="store_true", help="force install the main package " "(not it's dependencies, see --forceall)") p.add_option("--forceall", action="store_true", help="force install of all packages " "(i.e. including dependencies)") p.add_option('-l', "--list", action="store_true", help="list the packages currently installed on the system") p.add_option('-n', "--dry-run", action="store_true", help="show what would have been downloaded/removed/installed") p.add_option('-N', "--no-deps", action="store_true", help="neither download nor install dependencies") p.add_option("--path", action="store_true", help="based on the configuration, display how to set the " "PATH and PYTHONPATH environment variables") p.add_option("--prefix", action="store", help="install prefix (disregarding of any settings in " "the config file)", metavar='PATH') p.add_option("--sys-prefix", action="store_true", help="use sys.prefix as the install prefix") p.add_option("--proxy", action="store", help="use a proxy for downloads", metavar='URL') p.add_option("--remove", action="store_true", help="remove a package") p.add_option('-s', "--search", action="store_true", help="search the index in the repo (chain) of packages " "and display versions available.") p.add_option("--test", action="store_true", help="perform some internal tests (for development only)") p.add_option('-v', "--verbose", action="store_true") p.add_option('--version', action="store_true") opts, args = p.parse_args() if len(args) > 0 and (opts.test or opts.config or opts.path): p.error("Option takes no arguments") if opts.prefix and opts.sys_prefix: p.error("Options --prefix and --sys-prefix exclude each ohter") if opts.force and opts.forceall: p.error("Options --force and --forceall exclude each ohter") pat = None if (opts.list or opts.search) and args: try: pat = re.compile(args[0], re.I) except: pass if opts.version: # --version from enstaller import __version__ print "Enstaller version:", __version__ return if opts.config: # --config print_config() return conf = get_config() # conf global prefix, dry_run, noapp, version # set globals if opts.sys_prefix: prefix = sys.prefix elif opts.prefix: prefix = opts.prefix else: prefix = conf['prefix'] dry_run = opts.dry_run noapp = conf['noapp'] version = opts.version if opts.path: # --path print_path() return if opts.list: # --list list_option(pat) return if opts.proxy: # --proxy proxy = opts.proxy else: proxy = conf['proxy'] # Only import the proxy API if some we have a proxy. 
if proxy: from proxy.api import setup_proxy setup_proxy(proxy) c = Chain(conf['IndexedRepos'], verbose) # init chain if opts.search: # --search search(c, pat) return if opts.test: # --test c.test() return if len(args) == 0: p.error("Requirement (name and optional version) missing") if len(args) > 2: p.error("A requirement is a name and an optional version") req = Req(' '.join(args)) print "prefix:", prefix check_write() if opts.remove: # --remove remove_req(req) return dists = get_dists(c, req, # dists recur=not opts.no_deps) # Warn the user about packages which depend on what will be updated depend_warn([filename_dist(d) for d in dists]) # Packages which are installed currently sys_inst = set(egginst.get_installed(sys.prefix)) if prefix == sys.prefix: prefix_inst = sys_inst else: prefix_inst = set(egginst.get_installed(prefix)) all_inst = sys_inst | prefix_inst # These are the packahes which are being excluded from being installed if opts.forceall: exclude = set() else: exclude = all_inst if opts.force: exclude.discard(filename_dist(dists[-1])) # Fetch distributions if not isdir(conf['local']): os.makedirs(conf['local']) for dist, fn in iter_dists_excl(dists, exclude): c.fetch_dist(dist, conf['local'], check_md5=opts.force or opts.forceall, dry_run=dry_run) # Remove packages (in reverse install order) for dist in dists[::-1]: fn = filename_dist(dist) if fn in all_inst: # if the distribution (which needs to be installed) is already # installed don't remove it continue cname = cname_fn(fn) # Only remove packages installed in prefix for fn_inst in prefix_inst: if cname == cname_fn(fn_inst): call_egginst(fn_inst, remove=True) # Install packages for dist, fn in iter_dists_excl(dists, exclude): call_egginst(join(conf['local'], fn)) |
|
password2 = getpass('Confirm passowrd: ') | password2 = getpass('Confirm password: ') | def input_userpass(): from getpass import getpass print """\ |
return getUtility(IAuthentication).getPrincipal(self.__name__) | return getPrincipal(self.__name__) | def principal(self): try: return getUtility(IAuthentication).getPrincipal(self.__name__) except PrincipalLookupError: return None |
curr_x = curr_x - 100 if (curr_x <= 50): curr_x = 50 | curr_x = curr_x - 20 if (curr_x <= 10): curr_x = 10 | def move_left(self): (curr_x, curr_y) = self.pos curr_x = curr_x - 100 if (curr_x <= 50): curr_x = 50 self.pos = (curr_x, curr_y) self.rect = self.image.get_rect(center = self.pos) |
curr_x = curr_x + 100 if (curr_x >= 650): curr_x = 650 | curr_x = curr_x + 20 if (curr_x >= 690): curr_x = 690 | def move_right(self): (curr_x, curr_y) = self.pos curr_x = curr_x + 100 if (curr_x >= 650): curr_x = 650 self.pos = (curr_x, curr_y) self.rect = self.image.get_rect(center = self.pos) |
print curr_y | def fire(self): (curr_x, curr_y) = self.pos print curr_y Projectile(self.pos,self.angle,self.velocity) |
|
tx = self.t/10.0 | tx = self.t/50.0 | def update(self): if self.alive: # FIXME - Need to figure out how to get time into this formula for y #print "projectile y: " + str(proj_y) (curr_x, curr_y) = self.pos tx = self.t/10.0 proj_y = self.h0 + (tx * self.velocity * math.sin(self.rad_angle)) - (self.gravity * tx * tx) / 2 size = ((proj_y / 20) + self.min_size) self.image = pygame.Surface((size,size)) self.image.fill(self.color) proj_x = self.velocity * math.cos(self.rad_angle) * tx if proj_y < 0: print "proj_x:" + str(proj_x) self.hit_ground() if (curr_y >= 500 and curr_y <= 600): if (proj_y < 10): self.bounce = True print proj_y if (self.bounce == False): self.pos = (curr_x, (SCREEN_WIDTH - ((proj_x * 20)) + 20 )) else: self.pos = (curr_x, (curr_y + (tx*10))) self.rect.center = self.pos self.t = self.t + 1 |
size = ((proj_y / 20) + self.min_size) | size = ((proj_y / 2) + self.min_size) | def update(self): if self.alive: # FIXME - Need to figure out how to get time into this formula for y #print "projectile y: " + str(proj_y) (curr_x, curr_y) = self.pos tx = self.t/10.0 proj_y = self.h0 + (tx * self.velocity * math.sin(self.rad_angle)) - (self.gravity * tx * tx) / 2 size = ((proj_y / 20) + self.min_size) self.image = pygame.Surface((size,size)) self.image.fill(self.color) proj_x = self.velocity * math.cos(self.rad_angle) * tx if proj_y < 0: print "proj_x:" + str(proj_x) self.hit_ground() if (curr_y >= 500 and curr_y <= 600): if (proj_y < 10): self.bounce = True print proj_y if (self.bounce == False): self.pos = (curr_x, (SCREEN_WIDTH - ((proj_x * 20)) + 20 )) else: self.pos = (curr_x, (curr_y + (tx*10))) self.rect.center = self.pos self.t = self.t + 1 |
print "proj_x:" + str(proj_x) | def update(self): if self.alive: # FIXME - Need to figure out how to get time into this formula for y #print "projectile y: " + str(proj_y) (curr_x, curr_y) = self.pos tx = self.t/10.0 proj_y = self.h0 + (tx * self.velocity * math.sin(self.rad_angle)) - (self.gravity * tx * tx) / 2 size = ((proj_y / 20) + self.min_size) self.image = pygame.Surface((size,size)) self.image.fill(self.color) proj_x = self.velocity * math.cos(self.rad_angle) * tx if proj_y < 0: print "proj_x:" + str(proj_x) self.hit_ground() if (curr_y >= 500 and curr_y <= 600): if (proj_y < 10): self.bounce = True print proj_y if (self.bounce == False): self.pos = (curr_x, (SCREEN_WIDTH - ((proj_x * 20)) + 20 )) else: self.pos = (curr_x, (curr_y + (tx*10))) self.rect.center = self.pos self.t = self.t + 1 |
|
if (curr_y >= 500 and curr_y <= 600): | if (proj_x > 11 and proj_x < 12): | def update(self): if self.alive: # FIXME - Need to figure out how to get time into this formula for y #print "projectile y: " + str(proj_y) (curr_x, curr_y) = self.pos tx = self.t/10.0 proj_y = self.h0 + (tx * self.velocity * math.sin(self.rad_angle)) - (self.gravity * tx * tx) / 2 size = ((proj_y / 20) + self.min_size) self.image = pygame.Surface((size,size)) self.image.fill(self.color) proj_x = self.velocity * math.cos(self.rad_angle) * tx if proj_y < 0: print "proj_x:" + str(proj_x) self.hit_ground() if (curr_y >= 500 and curr_y <= 600): if (proj_y < 10): self.bounce = True print proj_y if (self.bounce == False): self.pos = (curr_x, (SCREEN_WIDTH - ((proj_x * 20)) + 20 )) else: self.pos = (curr_x, (curr_y + (tx*10))) self.rect.center = self.pos self.t = self.t + 1 |
print proj_y | print "proj_y: " + str(proj_y) | def update(self): if self.alive: # FIXME - Need to figure out how to get time into this formula for y #print "projectile y: " + str(proj_y) (curr_x, curr_y) = self.pos tx = self.t/10.0 proj_y = self.h0 + (tx * self.velocity * math.sin(self.rad_angle)) - (self.gravity * tx * tx) / 2 size = ((proj_y / 20) + self.min_size) self.image = pygame.Surface((size,size)) self.image.fill(self.color) proj_x = self.velocity * math.cos(self.rad_angle) * tx if proj_y < 0: print "proj_x:" + str(proj_x) self.hit_ground() if (curr_y >= 500 and curr_y <= 600): if (proj_y < 10): self.bounce = True print proj_y if (self.bounce == False): self.pos = (curr_x, (SCREEN_WIDTH - ((proj_x * 20)) + 20 )) else: self.pos = (curr_x, (curr_y + (tx*10))) self.rect.center = self.pos self.t = self.t + 1 |
self.pos = (curr_x, (curr_y + (tx*10))) | self.pos = (curr_x,curr_y) | def update(self): if self.alive: # FIXME - Need to figure out how to get time into this formula for y #print "projectile y: " + str(proj_y) (curr_x, curr_y) = self.pos tx = self.t/10.0 proj_y = self.h0 + (tx * self.velocity * math.sin(self.rad_angle)) - (self.gravity * tx * tx) / 2 size = ((proj_y / 20) + self.min_size) self.image = pygame.Surface((size,size)) self.image.fill(self.color) proj_x = self.velocity * math.cos(self.rad_angle) * tx if proj_y < 0: print "proj_x:" + str(proj_x) self.hit_ground() if (curr_y >= 500 and curr_y <= 600): if (proj_y < 10): self.bounce = True print proj_y if (self.bounce == False): self.pos = (curr_x, (SCREEN_WIDTH - ((proj_x * 20)) + 20 )) else: self.pos = (curr_x, (curr_y + (tx*10))) self.rect.center = self.pos self.t = self.t + 1 |
items = re.split("\-|\*", " ".join([item.strip() for item in entry.desc if item])) | raw_items = [item.strip().replace('"', '"') for item in entry.desc if item] string = "" items = [] for item in raw_items: if item.startswith('-') or item.startswith('*'): if string: items.append(string) string = item.lstrip('-* ') else: string += item if string: items.append(string) | def write_index(): """Create markup for a new form submit with all tests. """ entries = [] for entry in get_tests(): if entry.title: title = "".join(entry.title) entries.append(TR_TD_COLS_4 % title) if entry.url: url = "".join(entry.url) entries.append(TR_TD_URL % (url, url)) if entry.label: id = "id-%s" % entry.id[0].strip() label = "".join(entry.label) items = re.split("\-|\*", " ".join([item.strip() for item in entry.desc if item])) desc = "\n ".join([""] + items + [""]) entries.append(TR_TEST % (desc, label, id, id)) return entries |
u = u.replace('./', '../') | u = u.replace('./', '../' * len(e.repo)) | def test(): load_templates() entries = tests2singledocs() """ label: Export mode: DOM tabs: DOM urls: http://dev.opera.com repo: dom index: 0002 file_name: 0002.export.html desc: - Press the Export button.- Verify that the current view is displayed in a new tab. """ if not os.path.exists(PAPA): os.makedirs(PAPA) index = [] for e in entries: content = [HTML_HEAD % (("../" * len(e.repo)) + STYLESHEET_NAME)] urls = [] for u in e.urls: u = u.replace('./', '../') urls.append(HTML_URL % (u, u)) raw_items = [item2html(item) for item in e.desc if item] string = "" items = [] for item in raw_items: if item.startswith('-') or item.startswith('*'): if string: items.append(string) string = item.lstrip('-* ') else: string += ' ' + item if string: items.append(string) content.append(HTML_TITLE % ("".join(e.label), e.deprecated and HTML_DEPRECATED or "", e.mode, e.tabs, "".join(urls), e.index, "".join([HTML_ITEM % item for item in items]))) repo = PAPA for dir in e.repo: repo = os.path.join(repo, dir) if not os.path.exists(repo): os.makedirs(repo) with open(os.path.join(repo, e.file_name), 'wb') as f: f.write("".join(content)) index.append((e.mode, e.tabs, "".join(e.label), "./%s/%s" % ('/'.join(e.repo), e.file_name))) print_index(index) print_stylesheet() if not os.path.exists(os.path.join(PAPA, TESTCASES)): shutil.copytree(TESTCASES, os.path.join(PAPA, TESTCASES)) |
id_count = DEFAULT_ID_DELTA | id_count = 0 | def add_ids_test_index(): """Add an id to all tests which are missing one. """ import shutil import tempfile ID = 1 LABEL = 2 DESC = 3 ERROR = 4 state = DESC in_file = open(TESTS, 'rb') lines = in_file.readlines() in_file.close() id_count = DEFAULT_ID_DELTA tmpfd, tmppath = tempfile.mkstemp(".tmp", "dftests.") tmpfile = os.fdopen(tmpfd, "w") # state order: ID, LABEL, DESC # title resets the id_count (counting restarts in each repo) for index, line in enumerate(lines): if line.startswith('***'): id_count = DEFAULT_ID_DELTA elif line.startswith('id:'): if not state == DESC: state = ERROR break state = ID id_count = int(line[3:]) elif line.startswith('label:'): if state == DESC: id = get_next_id(id_count, lines, index) tmpfile.write("id: %#05i\n" % id_count) id_count = id state = ID if not state == ID: state = ERROR break state = LABEL elif line.startswith('desc:'): if not state == LABEL: state = ERROR break state = DESC tmpfile.write(line) tmpfile.close() if state == ERROR: raise AssertionError("Not well formed entry!") shutil.copy(tmppath, TESTS) os.unlink(tmppath) |
self.index = '' | self.index_count = 0 | def __init__(self): self.title = [] self.url = [] self.desc = [] self.label = [] self.id = [] self.buffer = [] self.index = 0 self.mode = '' self.tabs = '' self.urls = '' self.repo = '' self.index = '' self.file_name = '' |
def get_ids(): """Parse the IDS file. Parse the IDS file and return a list of the id's. Includes all tests and other attributes of a protocol like tester and changeset to check if a new submitted protocol is complete. """ f_ids = open(IDS, 'r') ids = [id.strip() for id in f_ids.readlines()] f_ids.close() return ids | def is_empty(self): return bool(self.title or self.url or self.desc or self.label) |
|
index = 0 | def tests2singledocs(): entries = get_tests() for e in entries: e.normalize() entries = filter(lambda e: e.is_empty(), entries) cur = Entry() type = '' index = 0 for entry in entries: if entry.title: cur.mode, ts = parse_title(''.join(entry.title)) cur.repo = cur.mode.lower() cur.tabs = ', '.join(ts) type = 'title' index = 1 elif entry.url: if type == 'url': cur.urls.extend(entry.url) else: cur.urls = entry.url[:] type = 'url' if entry.label: type = 'label' entry.mode = cur.mode entry.tabs = cur.tabs entry.urls = entry.url or cur.urls entry.repo = cur.repo entry.index = "%#04i" % index file_name = ''.join(entry.label).strip().replace(' ', '-').replace(',', '').lower() entry.file_name = "%s.%s.html" % (entry.index, file_name) index += 1 return filter(lambda e: e.label , entries) |
|
cur.index_count += 1 | def tests2singledocs(): entries = get_tests() for e in entries: e.normalize() entries = filter(lambda e: e.is_empty(), entries) cur = Entry() type = '' for entry in entries: if entry.title: cur.mode, ts = parse_title(''.join(entry.title)) cur.repo = [label2filename(cur.mode)] if ts: cur.repo.append(label2filename(ts[0])) cur.tabs = ', '.join(ts) type = 'title' index = 1 elif entry.url: if type == 'url': cur.urls.extend(entry.url) else: cur.urls = entry.url[:] type = 'url' if entry.label: type = 'label' cur.index_count += 1 entry.mode = cur.mode entry.tabs = cur.tabs entry.urls = entry.url or cur.urls entry.repo = cur.repo[0:] entry.index = "%#04i" % cur.index_count file_name = label2filename(entry.label) entry.file_name = "%s.%s.html" % (entry.index, file_name) index += 1 return filter(lambda e: e.label , entries) |
|
entry.index = "% | entry.index = ''.join(entry.id) | def tests2singledocs(): entries = get_tests() for e in entries: e.normalize() entries = filter(lambda e: e.is_empty(), entries) cur = Entry() type = '' for entry in entries: if entry.title: cur.mode, ts = parse_title(''.join(entry.title)) cur.repo = [label2filename(cur.mode)] if ts: cur.repo.append(label2filename(ts[0])) cur.tabs = ', '.join(ts) type = 'title' index = 1 elif entry.url: if type == 'url': cur.urls.extend(entry.url) else: cur.urls = entry.url[:] type = 'url' if entry.label: type = 'label' cur.index_count += 1 entry.mode = cur.mode entry.tabs = cur.tabs entry.urls = entry.url or cur.urls entry.repo = cur.repo[0:] entry.index = "%#04i" % cur.index_count file_name = label2filename(entry.label) entry.file_name = "%s.%s.html" % (entry.index, file_name) index += 1 return filter(lambda e: e.label , entries) |
content = [HTML_HEAD % ("../" + STYLESHEET_NAME)] | content = [HTML_HEAD % (("../" * len(e.repo)) + STYLESHEET_NAME)] | def test(): load_templates() entries = tests2singledocs() """ label: Export mode: DOM tabs: DOM urls: http://dev.opera.com repo: dom index: 0002 file_name: 0002.export.html desc: - Press the Export button.- Verify that the current view is displayed in a new tab. """ if not os.path.exists(PAPA): os.makedirs(PAPA) index = [] for e in entries: content = [HTML_HEAD % ("../" + STYLESHEET_NAME)] urls = [] for u in e.urls: u = u.replace('./', '../') urls.append(HTML_URL % (u, u)) raw_items = [item2html(item) for item in e.desc if item] string = "" items = [] for item in raw_items: if item.startswith('-') or item.startswith('*'): if string: items.append(string) string = item.lstrip('-* ') else: string += ' ' + item if string: items.append(string) content.append(HTML_TITLE % ("".join(e.label), e.deprecated and HTML_DEPRECATED or "", e.mode, e.tabs, "".join(urls), e.index, "".join([HTML_ITEM % item for item in items]))) repo = PAPA for dir in e.repo: repo = os.path.join(repo, dir) if not os.path.exists(repo): os.makedirs(repo) with open(os.path.join(repo, e.file_name), 'wb') as f: f.write("".join(content)) index.append((e.mode, e.tabs, "".join(e.label), "./%s/%s" % ('/'.join(e.repo), e.file_name))) print_index(index) print_stylesheet() if not os.path.exists(os.path.join(PAPA, TESTCASES)): shutil.copytree(TESTCASES, os.path.join(PAPA, TESTCASES)) |
file_name = ''.join(entry.label).strip().replace(' ', '-').replace(',', '').lower() | file_name = label2filename(entry.label) | def tests2singledocs(): entries = get_tests() for e in entries: e.normalize() entries = filter(lambda e: e.is_empty(), entries) cur = Entry() type = '' for entry in entries: if entry.title: cur.mode, ts = parse_title(''.join(entry.title)) cur.repo = cur.mode.lower() cur.tabs = ', '.join(ts) type = 'title' index = 1 elif entry.url: if type == 'url': cur.urls.extend(entry.url) else: cur.urls = entry.url[:] type = 'url' if entry.label: type = 'label' cur.index_count += 1 entry.mode = cur.mode entry.tabs = cur.tabs entry.urls = entry.url or cur.urls entry.repo = cur.repo entry.index = "%#04i" % cur.index_count file_name = ''.join(entry.label).strip().replace(' ', '-').replace(',', '').lower() entry.file_name = "%s.%s.html" % (entry.index, file_name) index += 1 return filter(lambda e: e.label , entries) |
self.sum_status = {'Preparation': 0, 'Production': 0, 'Maintenance': 0, 'Process': 0, 'W-up': 0} | self.sum_status = {'Preparation': 0, 'Production': 0, 'Maintenance': 0, 'Process': 0, 'W-up': 0, 'JobEnd': 0} | def __init__(self, bdefilename=None): """ Initialize the class and read the bde file is it is provided. """ self.content = [] self.dbname = 'oeebde.db' self.recordcode = 'recordcode' self.nfilelines = 0 if bdefilename is not None: self.bdefilename = bdefilename self.readfile(bdefilename) # Variables related to sumup # The status code of a Sum-up to indicate whether it is currently happening self.sum_status = {'Preparation': 0, 'Production': 0, 'Maintenance': 0, 'Process': 0, 'W-up': 0} # The Sum-Up results dictionary self.sumups = {} self.output = {} # Constant to indicate whether a Sum-up is significant self.SUM_UNKNOWN = -1 self.SUM_TRIVIAL = 0 self.SUM_SIGNIFICANT = 1 self.SUM_CONCATENATE = 2 # significant duration is 5 min (convert to unit hour) self.SIG_DURATION = 5.0/60.0 # significant Impreesion Count is 20 self.SIG_IMPCOUNT = 20 |
sys.exit(0) | return False | def data_sumup(self): """ Perform Sum-ups for Preparation and Production. The Sum-ups will be performed in two stages: 1. Simple Sum-ups to process through all selected lines and record the start and end lines of the Sum-ups. The results are saved in a intermediate dictionary. 2. Process dictionary to decide whether a Sum-up is significant or not. Use Preparation Sum-up for an example. If a MR entry is read and Preparation sum-up is not currently happening, the Preparation sum-up is triggered. The (line number, 'Preparation', JobID, ActivityCode) tuple will be used as a dictionary key to record the corresponding line. But as for now, we do not know if the this Sum-up will be signifcant or not. So an additional status code will also be recorded to indicated that we do not know if the Sum-up is significant. This status code will be updated in later process on the dictionary to reflect the Sum-up's true nature. """ |
print "Error: Preparation started before previous Preparation ends" | self.report_error(917, self.sumups[key][1]) | def data_sumup(self): """ Perform Sum-ups for Preparation and Production. The Sum-ups will be performed in two stages: 1. Simple Sum-ups to process through all selected lines and record the start and end lines of the Sum-ups. The results are saved in a intermediate dictionary. 2. Process dictionary to decide whether a Sum-up is significant or not. Use Preparation Sum-up for an example. If a MR entry is read and Preparation sum-up is not currently happening, the Preparation sum-up is triggered. The (line number, 'Preparation', JobID, ActivityCode) tuple will be used as a dictionary key to record the corresponding line. But as for now, we do not know if the this Sum-up will be signifcant or not. So an additional status code will also be recorded to indicated that we do not know if the Sum-up is significant. This status code will be updated in later process on the dictionary to reflect the Sum-up's true nature. """ |
print "Error: Production started before previous Production ends" | self.report_error(918, self.sumups[key][1]) | def data_sumup(self): """ Perform Sum-ups for Preparation and Production. The Sum-ups will be performed in two stages: 1. Simple Sum-ups to process through all selected lines and record the start and end lines of the Sum-ups. The results are saved in a intermediate dictionary. 2. Process dictionary to decide whether a Sum-up is significant or not. Use Preparation Sum-up for an example. If a MR entry is read and Preparation sum-up is not currently happening, the Preparation sum-up is triggered. The (line number, 'Preparation', JobID, ActivityCode) tuple will be used as a dictionary key to record the corresponding line. But as for now, we do not know if the this Sum-up will be signifcant or not. So an additional status code will also be recorded to indicated that we do not know if the Sum-up is significant. This status code will be updated in later process on the dictionary to reflect the Sum-up's true nature. """ |
elif key[1] == 'Maintenance': | elif key[1] in ['Maintenance', 'JobEnd']: | def data_sumup(self): """ Perform Sum-ups for Preparation and Production. The Sum-ups will be performed in two stages: 1. Simple Sum-ups to process through all selected lines and record the start and end lines of the Sum-ups. The results are saved in a intermediate dictionary. 2. Process dictionary to decide whether a Sum-up is significant or not. Use Preparation Sum-up for an example. If a MR entry is read and Preparation sum-up is not currently happening, the Preparation sum-up is triggered. The (line number, 'Preparation', JobID, ActivityCode) tuple will be used as a dictionary key to record the corresponding line. But as for now, we do not know if the this Sum-up will be signifcant or not. So an additional status code will also be recorded to indicated that we do not know if the Sum-up is significant. This status code will be updated in later process on the dictionary to reflect the Sum-up's true nature. """ |
print "Warning: Preparation without Production" | self.report_error(804) | def data_sumup(self): """ Perform Sum-ups for Preparation and Production. The Sum-ups will be performed in two stages: 1. Simple Sum-ups to process through all selected lines and record the start and end lines of the Sum-ups. The results are saved in a intermediate dictionary. 2. Process dictionary to decide whether a Sum-up is significant or not. Use Preparation Sum-up for an example. If a MR entry is read and Preparation sum-up is not currently happening, the Preparation sum-up is triggered. The (line number, 'Preparation', JobID, ActivityCode) tuple will be used as a dictionary key to record the corresponding line. But as for now, we do not know if the this Sum-up will be signifcant or not. So an additional status code will also be recorded to indicated that we do not know if the Sum-up is significant. This status code will be updated in later process on the dictionary to reflect the Sum-up's true nature. """ |
self.report_error(803) print badids | thelines = [] for theid in badids: idx = [line[4] for line in self.content].index(theid) thelines += [(idx+1,) + (self.content[idx])] self.report_error(803, thelines) | def data_sumup(self): """ Perform Sum-ups for Preparation and Production. The Sum-ups will be performed in two stages: 1. Simple Sum-ups to process through all selected lines and record the start and end lines of the Sum-ups. The results are saved in a intermediate dictionary. 2. Process dictionary to decide whether a Sum-up is significant or not. Use Preparation Sum-up for an example. If a MR entry is read and Preparation sum-up is not currently happening, the Preparation sum-up is triggered. The (line number, 'Preparation', JobID, ActivityCode) tuple will be used as a dictionary key to record the corresponding line. But as for now, we do not know if the this Sum-up will be signifcant or not. So an additional status code will also be recorded to indicated that we do not know if the Sum-up is significant. This status code will be updated in later process on the dictionary to reflect the Sum-up's true nature. """ |
prekey == self.get_key_for_concatenate(prekey) | prekey = self.get_key_for_concatenate(prekey) | def gen_output_for_key(self, keys, idx): |
print 'Warning: Trivial Sum-up with nothing to concatenate.' | self.report_error(916, self.sumups[key][1]) self.sumups[key][0] = self.SUM_TRIVIAL_BUT_NEEDED self.output[key] = (lnum, stime, jobid, sumup_name, duration, impcount) | def gen_output_for_key(self, keys, idx): |
print 'Warning: Trivial Sum-up with nothing to concatenate.' else: | self.report_error(916, self.sumups[key][1]) self.sumups[key][0] = self.SUM_TRIVIAL_BUT_NEEDED self.output[key] = (lnum, stime, jobid, sumup_name, duration, impcount) elif key[1] in ['Maintenance', 'Process', 'W-up', 'JobEnd']: | def gen_output_for_key(self, keys, idx): |
print "%10d %s %6s %15s %0.2f %10d" % line | print "%10d %s %6s %15s %6.2f %10d" % line | def report_output(self): # Print output print "" keys = self.output.keys() keys.sort() for key in keys: line = self.output[key] print "%10d %s %6s %15s %0.2f %10d" % line |
if not bde.readfile("tmp.bde"): | if not bde.readfile("good.bde"): | def report_output(self): # Print output print "" keys = self.output.keys() keys.sort() for key in keys: line = self.output[key] print "%10d %s %6s %15s %0.2f %10d" % line |
interspersed=True) | interspersed=False) | def __init__(self,args=None): description=""" |
self.parser.add_option("--test",action="store_true",dest="test",default=False,help="Doesn't write to the file, but outputs the result on stdout") self.parser.add_option("--evaluate",action="store_false",dest="verbatim",default=True,help="Interpret the string as a python expression before assigning it") | self.parser.add_option("--test", action="store_true", dest="test", default=False, help="Doesn't write to the file, but outputs the result on stdout") self.parser.add_option("--strip-quotes-from-value", action="store_true", dest="stripQuotes", default=False, help="Strip the quotes from the value if they had to be defined") self.parser.add_option("--evaluate", action="store_false", dest="verbatim", default=True, help="Interpret the string as a python expression before assigning it") | def addOptions(self): self.parser.add_option("--test",action="store_true",dest="test",default=False,help="Doesn't write to the file, but outputs the result on stdout") self.parser.add_option("--evaluate",action="store_false",dest="verbatim",default=True,help="Interpret the string as a python expression before assigning it") |
if self.opts.stripQuotes: if val[0]=='"': val=val[1:] if val[-1]=='"': val=val[:-1] | def run(self): fName=self.parser.getArgs()[0] all=self.parser.getArgs()[1] if all[0]=='"': all=all[1:] if all[-1]=='"': all=all[:-1] val=self.parser.getArgs()[2] |
|
name,value pairs or mapping, if applicable. If """ | name,value pairs or mapping, if applicable.""" | def from_params(cls, params): """Returns a list of MultipartParam objects from a sequence of name, value pairs, MultipartParam instances, or from a mapping of names to values |
command += '--chained-input=output \\\n' | command += '--chained-input=%s \\\n' % ( steps[step]['inputModule']) | def main(argv) : """ prepareRelValworkflows prepare workflows for chained processing of RelVal samples - parse file holding cmsDriver commands for 1st and 2nd steps - prepare workflows - prepare WorkflowInjector:Input script - prepare ForceMerge script - prepare DBSMigrationToGlobal script - prepare PhEDExInjection script - prepare local DBS query script required parameters --samples <textfile> : list of RelVal sample parameter-sets in plain text file, one sample per line, # marks comment --version <processing version> : processing version (v1, v2, ... ) --DBSURL <URL> : URL of the local DBS (http://cmsdbsprod.cern.ch/cms_dbs_prod_local_07/servlet/DBSServlet | http://cmssrv46.fnal.gov:8080/DBS208/servlet/DBSServlet) --only-sites : Site where dataset is going to be processed or where the input dataset is taken from. Usually srm-cms.cern.ch and cmssrm.fnal.gov optional parameters --pileupdataset : input pileup dataset. It must be provided if the <samples> txt file contains PilepUp samples --lumi <number> : initial run for generation (default: 666666), set it to 777777 for high statistics samples --event <number> : initial event number (default: 1) --store-fail : store output files for failed jobs in chain processing. --read-dbs : DBS URL used for obtaining the list of available blocks for real data. Default: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet --scripts-dir : Path to workflow creation scripts (default: $PUTIL) --skip-config : Is the configuration file was already created, it will skip cmsDriver command execution --extra-label : Extra label for identifying the datasets: /RelVal*/CMSSW_X_Y_Z-<Conditions>_<SpecialTag>_<ExtraLabel>_<FilterName>-<version>/TIER --workflow-label : Label for the workflows. --help (-h) : help --debug (-d) : debug statements """ start_total_time = time.time() # default version = os.environ.get("CMSSW_VERSION") if version is None: print '' print 'CMSSW version cannot be determined from $CMSSW_VERSION' sys.exit(2) architecture = os.environ.get("SCRAM_ARCH") if architecture is None: print '' print 'CMSSW architecture cannot be determined from $SCRAM_ARCH' sys.exit(2) try: from ProdCommon.DataMgmt.DBS.DBSReader import DBSReader except ImportError, ex: print ex print 'Please load prodAgent libraries (point $PYTHONPATH to the right path).' 
sys.exit(2) samplesFile = None processing_version = None initial_run = "666666" initial_event = "1" debug = False DBSURL = None pileup_dataset = None storeFail = False readDBS = 'http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet' onlySites = None scriptsDir = '$PUTIL' #os.path.expandvars(os.environ.get('PUTIL', None)) skip_config = False extra_label = '' workflow_label = '' try: opts, args = getopt.getopt(argv, "", ["help", "debug", "samples=", "version=", "DBSURL=", "event=", "lumi=", "pileupdataset=", "store-fail", "read-dbs=", "only-sites=", "scripts-dir=", "skip-config", "extra-label=", "workflow-label="]) except getopt.GetoptError: print main.__doc__ sys.exit(2) # check command line parameter for opt, arg in opts : if opt == "--help" : print main.__doc__ sys.exit() elif opt == "--debug" : debug = True elif opt == "--samples" : samplesFile = arg elif opt == "--version" : processing_version = arg elif opt == "--lumi" : initial_run = arg elif opt == "--event" : initial_event = arg elif opt == "--DBSURL" : DBSURL = arg elif opt == "--pileupdataset" : pileup_dataset = arg print arg elif opt == '--store-fail': storeFail = True elif opt == '--read-dbs': readDBS = arg elif opt == '--only-sites': onlySites = arg elif opt == '--scripts-dir': if arg.endswith('/') : scriptsDir = arg[:-1] else: scriptsDir = arg scriptsDirTemp = scriptsDir # There's no need to expand the shell variables anymore #if scriptsDir.startswith('$') : # scriptsDirTemp = os.environ.get(scriptsDir[1:],None) # scriptsDir = os.path.expandvars(scriptsDirTemp) if scriptsDirTemp != None: if not os.path.exists(scriptsDirTemp): print "--scripts-dir argument does not exist, please verify." sys.exit(6) else: print "--scripts-dir argument does not exist, please verify." sys.exit(6) elif opt == "--skip-config": skip_config = True elif opt == "--extra-label": extra_label = arg elif opt == "--workflow-label": workflow_label = arg if samplesFile == None or processing_version == None or DBSURL == None : print main.__doc__ sys.exit(2) if debug: print "\nprepareRelValWorkflows.py was started with the following arguments: %s" % \ " ".join(argv) print "\n" samples = [] steps = {} primary_prefix = 'RelVal' max_step = 1 try: file = open(samplesFile) except IOError: print 'file with list of parameter-sets cannot be opened!' sys.exit(1) n_line = 0 print 'Parsing input file...' start_parse_time = time.time() for line in file.readlines(): n_line += 1 # Skipping lines with no info if line.strip() != '' and line.strip() != '\n' and \ not line.strip().startswith("#") and \ line.find('//') != 0: # I don't know what's the last condition for line_parts = [part.strip() for part in line.split('@@@') if part] dqmData = {} # Keys: Scenario, Run # // # // Parsing first step #// if not line.strip().startswith('STEP'): command = '' array = [] special_tag = '' conditions = None total_events = None events_per_job = None pile_up = False output_name = '' input_data = {} input_blocks = "" acq_era = version sample_info = line_parts[0].strip() # // # // Filling up sample's details #// sample_info_parts = [part.strip() for part in \ sample_info.split('++') if part] sample_number = sample_info_parts[0] #We might need this later sample_name = sample_info_parts[1] sample_steps = [i.strip() for i in \ sample_info_parts[2].split(',') if i] primary = primary_prefix + sample_name # // # // Is it a real data processing sample? According to this #// we assign or not the command variable. 
#\\ if line_parts[0].find('REALDATA') > -1: is_real_data = True else: is_real_data = False command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Parsing number of events #// if '--relval' in array : total_events = array[array.index('--relval')+1\ ].split(',')[0].strip() events_per_job = array[array.index('--relval')+1\ ].split(',')[1].strip() # // # // Special tag #// # FastSim if command.find('FASTSIM') > -1: special_tag = 'FastSim' # PileUp (at the same time with FastSim) if '--pileup' in array : # // # // Will use whatever argument of --pileup option is #// pileup_arg = array[array.index('--pileup') + 1] if pileup_arg.lower().strip() != 'nopileup': if special_tag: special_tag = "_".join( [special_tag, "PU", pileup_arg.strip()]) else: special_tag = "_".join(["PU", pileup_arg.strip()]) pile_up = True if pileup_dataset is None : print "You have to provide a pileup dataset." print "Usually it is a MinBias (RAW)." print "Use option --pileupdataset" sys.exit(5) # // # // Sort of custom tag #// if '--beamspot' in array: beamspot_arg = \ array[array.index('--beamspot') + 1].strip() if special_tag: special_tag = "_".join( [special_tag, beamspot_arg]) else: special_tag = beamspot_arg # // # // Cfg file's output name #// output_name = "_".join( [x for x in [primary, conditions, special_tag] if x] ) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # Filling up DQM information dqmData['Runs'] = '1' dqmData['Scenario'] = getDQMScenario(command) # // # // Collecting info for real data samples #// if is_real_data: # // # // Parsing dataset details. The following details are #// supported: REALDATA, RUN, LABEL, FILES, EVENTS, PDNAME #\\ # Producing tuples from the input options. data_options = [tuple(x.split(':')) \ for x in sample_info_parts[3].split(',') if x.strip()] # Parsing tuples for arg_v in data_options: if len(arg_v) == 2: input_data[arg_v[0].strip()] = arg_v[1].strip() elif len(arg_v) == 1: input_data[arg_v[0].strip()] = None else: print "Line %s has an extra ','." % (line) sys.exit(7) # // # // Verifiying optional arguments: RUN, LABEL, FILE, EVENTS, #// PRIMARY #\\ data_run = input_data.get('RUN', '') data_label = input_data.get('LABEL', '') data_files = input_data.get('FILES', '') data_events = input_data.get('EVENTS', '') data_pname = input_data.get('PRIMARY', None) if data_events: data_events = int(data_events) if data_files: data_files = int(data_events) # // # // Looking for best matching dataset. It should be just #// one, otherwise the script will exit. 
#\\ reader = DBSReader(readDBS) query = "find dataset where dataset like %s" % ( input_data['REALDATA']) result_xml = reader.dbs.executeQuery(query) # XML Handler result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found. if len(target_datasets) > 1: # Is this an input relval dataset produced in the # current release? query = "find dataset where dataset like %s " % ( input_data['REALDATA']) query += "and release=%s" % version result_xml = reader.dbs.executeQuery(query) result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found, match the processing # version if len(target_datasets) > 1: find_version = \ lambda x: x.find(processing_version) != -1 target_datasets = filter(find_version, target_datasets) if len(target_datasets) > 1: msg = "Dataset pattern in line %s is too broad." % line msg += "These datasets were found: %s" % ( " ".join(target_datasets)) print msg sys.exit(8) if not target_datasets: msg = "Dataset pattern produced no match in line %s" % ( line) print msg sys.exit(8) # Now I can look up the blocks for this dataset. target_dataset = target_datasets[0] input_data['REALDATA'] = target_dataset # // # // Looking up the blocks for a given Dataset and the #// provided list of runs #\\ runs_list = \ [x.strip() for x in data_run.split('|') if x.strip()] runs_in_dbs = [x['RunNumber'] for x in \ reader.dbs.listRuns(target_dataset)] runs_in_dbs.sort() # Creating lambda function for filtering runs. # Do filtering only if a run list was requested if runs_list: expr = '' # First a string expression to evaluate is_the_first = True for run in runs_list: if is_the_first: expr += "(" is_the_first = False else: expr += " or " # Run range: XXXXXX-XXXXXX if run.count("-"): run_limits = \ [x.strip() for x in run.split('-') if x.strip()] expr += "(x >= %s and x <= %s)" % ( run_limits[0], run_limits[1]) else: expr += "x == %s" % run if not is_the_first: expr += ")" # Here comes the lambda funtion runs_filter = lambda x: eval(expr) # Filtering runs in DBS using the list provided in the # input file. target_runs = filter(runs_filter, runs_in_dbs) else: target_runs = runs_in_dbs # Pulling up input files from DBS (including run info). 
input_files = reader.dbs.listFiles( path=target_dataset, retriveList=['retrive_run']) # // # // Parsing input blocks #// blocks = {} for input_file in input_files: # Skip files with no events # A block will be skipped if all its files have 0 # events if input_file['NumberOfEvents'] == 0: continue runs = \ [int(x['RunNumber']) for x in input_file['RunsList']] for run in runs: if run in target_runs: break else: continue # skip file if it's not in the target_runs cur_files = \ blocks.setdefault(input_file['Block']['Name'], {}).setdefault('Files', 0) cur_events = \ blocks[input_file['Block']['Name']].setdefault( 'Events', 0) cur_runs = \ blocks[input_file['Block']['Name']].setdefault( 'Runs', set()) blocks[input_file['Block']['Name']]['Files'] += 1 blocks[input_file['Block']['Name']]['Events'] += \ input_file['NumberOfEvents'] blocks[input_file['Block']['Name']]['Runs'] = \ cur_runs.union(runs) # // # // Truncating blocks list #// total_events = 0 total_files = 0 blocks_to_process = [] runs_to_process = set() for block in blocks: blocks_to_process.append(block) runs_to_process = runs_to_process.union(blocks[block]['Runs']) total_events += blocks[block]['Events'] total_files += blocks[block]['Files'] if data_events and (data_events < total_events): break if data_files and (data_files < total_files): break input_blocks = ",".join(blocks_to_process) # // # // If PRIMARY is true, then it will use the #// sample_name value as primary dataset name, else it #\\ will use the input primary dataset name. # \\ if data_pname is not None and \ data_pname.lower() in ('y', 't', 'true'): primary = "".join([primary_prefix, sample_name]) else: primary = \ [x for x in input_data['REALDATA'].split("/") if x][0] # // # // Seting special tag #// special_tag_parts = [] # Add RelVal tag if not present. if target_dataset.find(primary_prefix) == -1: special_tag_parts.append(primary_prefix) # Add LABEL if data_label: special_tag_parts.append(data_label) special_tag = "_".join(special_tag_parts) # // # // Setting Acq. 
Era #// #processed_dataset = target_dataset.split('/')[2] #dataset_acq_era = processed_dataset.split("-")[0] #if dataset_acq_era.startswith(version): # acq_era = version #else: # acq_era = dataset_acq_era # Filling up DQM information dqmData['Runs'] = \ ",".join([str(x) for x in list(runs_to_process)]) # // # // Composing a dictionary per sample #// dict = {} dict['sampleName'] = sample_name dict['command'] = command dict['primary'] = primary dict['outputName'] = output_name dict['conditions'] = conditions dict['specialTag'] = special_tag dict['totalEvents'] = total_events dict['eventsPerJob'] = events_per_job dict['pileUp'] = pile_up dict['isRealData'] = is_real_data dict['inputData'] = input_data dict['inputBlocks'] = input_blocks dict['steps'] = sample_steps dict['AcqEra'] = acq_era dict['DQMData'] = dqmData samples.append(dict) if debug: print 'Parsing' print 'Sample:', sample_name print 'Command:', command print 'Conditions:', conditions print 'Special tag:', special_tag print 'Total events:', total_events print 'Events per job:', events_per_job print 'Steps:', sample_steps print 'PileUp:', pile_up print 'Input data:', input_data print 'Input blocks:', input_blocks print 'DQMData:', dqmData print '' # // # // No a first step command (second HLT table, RECO, ALCA, etc) #// else: step_number = int(line_parts[0].split('++')[0].strip()[-1]) step_name = line_parts[0].split('++')[1].strip() command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Cfg file's output name #// output_name = "_".join([step_name, conditions]) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # // # // Second trigger table? This may be changed, right now I am #// assuming that all 4 steps workflows are like this. #\\ stage_previous = True if step_number == 2: if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].find('RECO') < 0: stage_previous = False if step_number > max_step: max_step = step_number # // # // HARVESTING cmsDriver commands should be ignored. RelVals #// should not run any HARVESTING configuration. Harvestings #\\ run independently after the datasets are produced. 
# \\ skip_step = False if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].count('HARVESTING') > 0: skip_step = True # // # // Composing a dictionary per step #// dict = {} dict['stepNumber'] = step_number dict['command'] = command dict['outputName'] = output_name dict['conditions'] = conditions dict['stagePrevious'] = stage_previous dict['DQMData'] = {'Scenario': getDQMScenario(command)} dict['skipStep'] = skip_step # // # // Step name should be unique #// if step_name not in steps: steps[step_name] = dict else: print "Label %s is repeated!!!" % step_name sys.exit(1) if debug: print 'Parsing' print 'Step name:', step_name print 'Step number:', step_number print 'Command:', command print 'Conditions:', conditions print 'Stage previous:', stage_previous print 'DQM Data:', dict['DQMData'] print '' parse_time = time.time() - start_parse_time file.close() if debug: print "Collected information step 1" for sample in samples: print 'Sample name:', sample['sampleName'] print 'Command', sample['command'] print 'Real data:', sample['isRealData'] print 'Input data:', sample['inputData'] print 'Input blocks', sample['inputBlocks'] print 'Conditions:', sample['conditions'] print 'Total events:', sample['totalEvents'] print 'Events per job:', sample['eventsPerJob'] print 'Output name:', sample['outputName'] print 'Steps:', sample['steps'] print 'PileUp:', sample['pileUp'] print 'Special tag:', sample['specialTag'] print 'Acq. Era:', sample['AcqEra'] print 'DQM data:', sample['DQMData'] print '' for i in range(2, max_step+1): print 'Collected information step %s' % i for step in steps: if steps[step]['stepNumber'] == i: print 'Step name:', step print 'Command:', steps[step]['command'] print 'Conditions:', steps[step]['conditions'] print 'Stage previous:', steps[step]['stagePrevious'] print 'DQM Data:', steps[step]['DQMData'] print '' # // # // Execute cmsDriver command #// print '' print 'Executing cmsDriver commands for step 1 configurations' print '' start_cmsDriver_time = time.time() for sample in samples: if not sample['isRealData']: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), sample['outputName']])) and skip_config: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'was already issued, skipping.' continue exitCode, output, error = executeCommand(sample['command']) if exitCode == 0: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'exited with ExitCode:', exitCode else : print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'failed with ExitCode:', exitCode sys.exit(1) else : msg = 'Real Data:\n' msg += 'Input dataset: %s\n' % (sample['inputData']['REALDATA']) msg += 'Run: %s\n' % (sample['inputData'].get('RUN', 'All')) msg += 'Input blocks: %s' % (sample['inputBlocks']) print msg for i in range(2, max_step+1): print '' print 'Executing cmsDriver commands for step %s configurations' % i print '' for step in steps: if steps[step]['stepNumber'] == i: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), steps[step]['outputName']])) and skip_config: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'],'was already issued, skipping.' continue # // # // Skip HARVESTING cmsDriver commands #// if steps[step]['skipStep']: print 'This is a HARVESTING cmsDriver command, skipping. 
' continue exitCode, output, error = executeCommand(steps[step]['command']) if exitCode == 0: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'exited with ExitCode:', exitCode else: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'failed with ExitCode:', exitCode sys.exit(1) cmsDriver_time = time.time() - start_cmsDriver_time print '' print 'Workflow creation' print '' start_workflow_time = time.time() datasets = [] unmergedDatasets = [] mergedDatasets = [] workflows = {} # // # // Create workflows #// for sample in samples: command = 'python ' + scriptsDir conditions = '' # Conditions -> processingString # // # // In case we are processing data #// if sample['isRealData']: command += '/createProcessingWorkflow.py \\\n' # Not changing the primary dataset name for real data. command += '--override-channel=' + sample['primary'] + ' \\\n' command += '--dataset=' + sample['inputData']['REALDATA'] + ' \\\n' command += '--only-blocks=' + sample['inputBlocks'] + ' \\\n' command += '--dbs-url=' + readDBS + ' \\\n' conditions = steps[sample['steps'][0]]['conditions'] command += '--split-type=file \\\n' command += '--split-size=1 \\\n' # // # // MC workflows #// else: command += '/createProductionWorkflow.py \\\n' command += '--channel=' + sample['primary'] + ' \\\n' conditions = sample['conditions'] command += '--starting-run=' + initial_run + ' \\\n' command += '--starting-event=' + initial_event + ' \\\n' command += '--totalevents=' + sample['totalEvents'] + ' \\\n' command += '--eventsperjob=' + sample['eventsPerJob'] + ' \\\n' if sample['pileUp']: command += '--pileup-dataset=' + pileup_dataset + ' \\\n' if storeFail: command += '--store-fail=True \\\n' # // # // First step #// command += '--version=' + version + ' \\\n' command += '--py-cfg=' + sample['outputName'] + ' \\\n' # // # // Input configurations (Second step and further) #// if sample['steps'][0].lower().strip() != 'none': i = 0 for step in sample['steps']: # Is this a HARVESTING step? If so, skip it! if steps[step]['skipStep']: continue # Not a HARVESTING step, continue normally. command += '--version=' + version + ' \\\n' command += '--py-cfg=' + steps[step]['outputName'] + ' \\\n' if i != 0 or not sample['isRealData']: command += '--stageout-intermediates=%s \\\n' % ( steps[step]['stagePrevious']) command += '--chained-input=output \\\n' else: dqmScenario = steps[step]['DQMData']['Scenario'] # // # // If a two-hlt tables workflow, will take conditions from #// the second step information #\\ if not steps[step]['stagePrevious'] and \ i == 0: conditions = steps[step]['conditions'] i += 1 # // # // Common options #// command += '--group=RelVal \\\n' command += '--category=relval \\\n' command += '--activity=RelVal \\\n' command += '--acquisition_era=' + sample['AcqEra'] + ' \\\n' command += '--only-sites=' + onlySites + ' \\\n' command += '--processing_version=' + processing_version + ' \\\n' # Workflow label if workflow_label: command += '--workflow_tag=' + workflow_label + ' \\\n' # // # // processingString="CMSSWVersion"_"Conditions"_"specialTag"_"extra-label" #// CMSSWVersion is appended only when the input dataset does not have it. 
#\\ processing_string_parts = [] if sample['AcqEra'] != version: processing_string_parts.append(version) processing_string_parts.append(conditions) if sample['specialTag']: processing_string_parts.append(sample['specialTag']) if extra_label: processing_string_parts.append(extra_label) command += '--processing_string=' + "_".join(processing_string_parts) if debug: print command print '' start_command_time = time.time() exitCode, output, error = executeCommand(command) command_time = time.time() - start_command_time if debug: print output print '' output = [x for x in output.split('\n') if x] if exitCode == 0: #parse output tmp = [] index = FindIndex(output,'Output Datasets') for dataset in output[index+1:]: tmp.append(dataset.strip()) # DQM Data dqmInfo = {} dqmInfo['Runs'] = sample['DQMData']['Runs'] if sample['isRealData']: dqmInfo['Scenario'] = dqmScenario else: dqmInfo['Scenario'] = sample['DQMData']['Scenario'] datasets.append({'unmerged': tmp, 'totalEvents': sample['totalEvents'], 'merged': [x.replace('-unmerged','') for x in tmp], 'DQMData': dqmInfo }) unmergedDatasets.append(tmp) index = FindIndex(output,'Created') if index == -1: print "No workflow was created by create*workflow.py" sys.exit(1) workflow = output[index].split()[1].strip() workflows.setdefault(workflow, {})['isRealData'] = sample['isRealData'] workflows[workflow]['time'] = command_time print 'workflow creation command for workflow:', workflow, \ 'exited with ExitCode:', exitCode else : print 'workflow creation command:' print command print 'failed: %s' % error sys.exit(1) if debug: print 'Created workflows:' print workflows.keys() print '' print "Unmerged datasets:" print unmergedDatasets # extract merged datasets for sample in unmergedDatasets: tmp = [] for dataset in sample: tmp.append(dataset.replace('-unmerged','')) mergedDatasets.append(tmp) workflow_time = time.time() - start_workflow_time print '' print 'Write helper scripts' print '' # WorkflowInjector:Input script inputScript = open('input.sh','w') inputScript.write('#!/bin/bash\n') feeder = 'None' for workflow in workflows.keys(): if workflows[workflow]['isRealData']: if feeder.find('ReReco') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin BlockFeeder\n') feeder = 'ReReco' else : if feeder.find('Request') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin RequestFeeder\n') feeder = 'Request' inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:Input ' + os.path.join(os.getcwd(), workflow) + '\n') inputScript.close() os.chmod('input.sh',0755) print 'Wrote WorkflowInjector:Input script to:',os.path.join(os.getcwd(),'input.sh') # ForceMerge forceMergeScript = open('forceMerge.sh','w') forceMergeScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : forceMergeScript.write('python $PRODAGENT_ROOT/util/publish.py ForceMerge ' + dataset + '\n') forceMergeScript.close() os.chmod('forceMerge.sh',0755) print 'Wrote ForceMerge script to:',os.path.join(os.getcwd(),'forceMerge.sh') # MigrateDatasetToGlobal migrateScript = open('migrateToGlobal.sh','w') migrateScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : migrateScript.write('python $PRODAGENT_ROOT/util/publish.py DBSInterface:MigrateDatasetToGlobal ' + dataset + '\n') migrateScript.close() os.chmod('migrateToGlobal.sh',0755) print 'Wrote DBSInterface:MigrateDatasetToGlobal script to:',os.path.join(os.getcwd(),'migrateToGlobal.sh') # 
PhEDExInjectDataset phedexScript = open('injectIntoPhEDEx.sh','w') phedexScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : phedexScript.write('python $PRODAGENT_ROOT/util/publish.py PhEDExInjectDataset ' + dataset + '\n') phedexScript.close() os.chmod('injectIntoPhEDEx.sh',0755) print 'Wrote PhEDExInjectDataset script to:',os.path.join(os.getcwd(),'injectIntoPhEDEx.sh') # DBS: query unmerged datasets queryUnmergedScript = open('queryUnmerged.sh','w') queryUnmergedScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryUnmergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryUnmergedScript.close() os.chmod('queryUnmerged.sh',0755) print 'Wrote DBS query script for unmerged datasets to:',os.path.join(os.getcwd(),'queryUnmerged.sh') # DBS: query merged datasets queryMergedScript = open('queryMerged.sh','w') queryMergedScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryMergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryMergedScript.close() os.chmod('queryMerged.sh',0755) print 'Wrote DBS query script for merged datasets to:',os.path.join(os.getcwd(),'queryMerged.sh') # DQMHarvesting DQMinputScript = open('DQMinput.sh','w') DQMinputScript.write("#!/bin/bash\n") reHarvest = re.compile(r'/.*/.*/(RECO|.*-RECO)') # Only RECO datasets for now. for sample in datasets: for dataset in sample['merged']: if reHarvest.match(dataset): for run in sample['DQMData']['Runs'].split(","): DQMinputScript.write('python $PRODAGENT_ROOT/util/harvestDQM.py --run=%s --path=%s --scenario=%s\n' % ( run, dataset, sample['DQMData']['Scenario'])) os.chmod('DQMinput.sh',0755) print 'Wrote DQMHarvesting script for merged datasets to:', os.path.join(os.getcwd(),'DQMinput.sh') # Output datasets list outputList = open('outputDatasets.txt','w') for sample in mergedDatasets : for dataset in sample : outputList.write(dataset + "\n") print 'Wrote output datasets list to:', os.path.join(os.getcwd(),'outputDatasets.txt') # File with expected number of events numberOfEvents = open('eventsExpected.txt','w') for sample in datasets: for dataset in sample['merged']: numberOfEvents.write("%s %s\n" % (sample['totalEvents'],dataset)) numberOfEvents.close() print 'Wrote events per dataset to:', os.path.join(os.getcwd(),'eventsExpected.txt') total_time = time.time() - start_total_time # File with timing report (Parsing, cmsDriver comands, workflow creation) timingInfo = open('timingInfo.txt', 'w') timingInfo.write('Total time: %s s\n' % total_time) timingInfo.write('Cofigs. creation time: %s s\n' % cmsDriver_time) timingInfo.write('Workflows creation time: %s s\n' % workflow_time) output_text = [] sum = 0 for workflow in workflows: if sum == 0: min = [workflow, workflows[workflow]['time']] max = [workflow, workflows[workflow]['time']] sum += workflows[workflow]['time'] output_text.append("%s: %s s" % (workflow, workflows[workflow]['time'])) if max[1] < workflows[workflow]['time']: max = [workflow, workflows[workflow]['time']] if min[1] > workflows[workflow]['time']: min = [workflow, workflows[workflow]['time']] timingInfo.write('Average time per workflow: %s s\n' % (int(sum) / int(len(workflows)))) timingInfo.write('Max. 
time on %s: %s s\n' % tuple(max)) timingInfo.write('Min. time on %s: %s s\n' % tuple(min)) timingInfo.write('=' * 10) timingInfo.write('Details of time per workflow:\n%s\n' % "\n".join(output_text)) |
executable = os.path.join(self.workingDir, bossJob['executable'] ) if not os.path.exists( executable ): | executable = bossJob['executable'] executablePath = os.path.join(self.workingDir, executable) updateJob = False if not self.isBulk and not executable.count(self.singleSpecName): executable = self.singleSpecName + '-submit' executablePath = os.path.join(self.workingDir, executable) msg = "This job %s was originally sumbitted as part of a Bulk " \ % self.singleSpecName msg += "Submission. Making a new wrapping script %s for single submission." \ % executablePath logging.info(msg) self.makeWrapperScript( executablePath, "$1" ) msg = "I also need to update the input sandbox in the BossLite DB" msg += " for this job..." logging.info(msg) self.jobInputFiles = [ self.specFiles[self.mainJobSpecName], self.mainSandbox ] inpSandbox = ','.join( self.jobInputFiles ) updateJob = True if not os.path.exists( executablePath ): | def prepareResubmission(self, bossJob): """ __prepareResubmission__ |
% executable) self.makeWrapperScript( executable, "$1" ) | % executablePath) self.makeWrapperScript( executablePath, "$1" ) | def prepareResubmission(self, bossJob): """ __prepareResubmission__ |
self.report.exitCode = 60312 self.report.status = "Failed" self.report.addError(msg, "StageOutError") | self.report.exitCode = 60314 self.report.status = "Failed" newError = self.report.addError(60314, "StageOutError") newError['Description'] = msg | def __call__(self): """ copy logs to local file system, tar, stage out to storage and delete originals """ #first copy logs locally logs = [] fileInfo = { 'LFN' : None, 'PFN' : None, 'SEName' : None, 'GUID' : None, } try: stagein = StageInMgr() stageout = StageOutMgr(**self.overrideParams) delete = DeleteMgr() except StandardError, ex: msg = "Unable to load StageIn/Out/Delete Impl: %s" % str(ex) print msg self.report.exitCode = 60312 self.report.status = "Failed" self.report.addError(msg, "StageOutError") self.saveFjr() return #for log, se in self.logsToCollect.items(): for log in self.logsToCollect: file = fileInfo file['LFN'] = log try: file = stagein(**file) logs.append(file) except StageOutFailure, ex: msg = "Unable to StageIn %s" % file['LFN'] print msg self.report.addSkippedFile(file['PFN'], file['LFN']) if not logs: print "No logs collected" self.report.exitCode = 60312 self.report.status = "Failed" self.report.addError(msg, "StageOutError") self.saveFjr() return tarPFN = self.createArchive(logs) # now stage out tar file fileInfo = { 'LFN' : "%s/%s" % (self.lfnBase, os.path.basename(tarPFN)), 'PFN' : tarPFN, 'SEName' : None, 'GUID' : None, } try: fileInfo = stageout(**fileInfo) exitCode = 0 except Exception, ex: msg = "Unable to stage out log archive:\n" msg += str(ex) print msg self.report.exitCode = 60312 self.report.status = "Failed" #self.report.addError(60312, "StageOutError") self.saveFjr() return # delete file - ignore failures if exitCode == 0: for file in logs: try: delete(**file) #exitCode = 0 #ignore error here except Exception, ex: msg = "Unable to delete log:\n" msg += str(ex) print msg #exitCode = 60312 #ignore error here # write successful fjr and merge with top level self.report.exitCode = exitCode if exitCode == 0 : self.report.status = "Success" self.report.addLogFile(fileInfo['LFN'], fileInfo['SEName']) else: # at the moment do nothing with failures self.report.status = "Failed" self.saveFjr() return |
self.report.addError(msg, "StageOutError") | newError = self.report.addError(60312, "StageInError") newError['Description'] = msg | def __call__(self): """ copy logs to local file system, tar, stage out to storage and delete originals """ #first copy logs locally logs = [] fileInfo = { 'LFN' : None, 'PFN' : None, 'SEName' : None, 'GUID' : None, } try: stagein = StageInMgr() stageout = StageOutMgr(**self.overrideParams) delete = DeleteMgr() except StandardError, ex: msg = "Unable to load StageIn/Out/Delete Impl: %s" % str(ex) print msg self.report.exitCode = 60312 self.report.status = "Failed" self.report.addError(msg, "StageOutError") self.saveFjr() return #for log, se in self.logsToCollect.items(): for log in self.logsToCollect: file = fileInfo file['LFN'] = log try: file = stagein(**file) logs.append(file) except StageOutFailure, ex: msg = "Unable to StageIn %s" % file['LFN'] print msg self.report.addSkippedFile(file['PFN'], file['LFN']) if not logs: print "No logs collected" self.report.exitCode = 60312 self.report.status = "Failed" self.report.addError(msg, "StageOutError") self.saveFjr() return tarPFN = self.createArchive(logs) # now stage out tar file fileInfo = { 'LFN' : "%s/%s" % (self.lfnBase, os.path.basename(tarPFN)), 'PFN' : tarPFN, 'SEName' : None, 'GUID' : None, } try: fileInfo = stageout(**fileInfo) exitCode = 0 except Exception, ex: msg = "Unable to stage out log archive:\n" msg += str(ex) print msg self.report.exitCode = 60312 self.report.status = "Failed" #self.report.addError(60312, "StageOutError") self.saveFjr() return # delete file - ignore failures if exitCode == 0: for file in logs: try: delete(**file) #exitCode = 0 #ignore error here except Exception, ex: msg = "Unable to delete log:\n" msg += str(ex) print msg #exitCode = 60312 #ignore error here # write successful fjr and merge with top level self.report.exitCode = exitCode if exitCode == 0 : self.report.status = "Success" self.report.addLogFile(fileInfo['LFN'], fileInfo['SEName']) else: # at the moment do nothing with failures self.report.status = "Failed" self.saveFjr() return |
self.report.exitCode = 60312 | self.report.exitCode = 60314 | def __call__(self): """ copy logs to local file system, tar, stage out to storage and delete originals """ #first copy logs locally logs = [] fileInfo = { 'LFN' : None, 'PFN' : None, 'SEName' : None, 'GUID' : None, } try: stagein = StageInMgr() stageout = StageOutMgr(**self.overrideParams) delete = DeleteMgr() except StandardError, ex: msg = "Unable to load StageIn/Out/Delete Impl: %s" % str(ex) print msg self.report.exitCode = 60312 self.report.status = "Failed" self.report.addError(msg, "StageOutError") self.saveFjr() return #for log, se in self.logsToCollect.items(): for log in self.logsToCollect: file = fileInfo file['LFN'] = log try: file = stagein(**file) logs.append(file) except StageOutFailure, ex: msg = "Unable to StageIn %s" % file['LFN'] print msg self.report.addSkippedFile(file['PFN'], file['LFN']) if not logs: print "No logs collected" self.report.exitCode = 60312 self.report.status = "Failed" self.report.addError(msg, "StageOutError") self.saveFjr() return tarPFN = self.createArchive(logs) # now stage out tar file fileInfo = { 'LFN' : "%s/%s" % (self.lfnBase, os.path.basename(tarPFN)), 'PFN' : tarPFN, 'SEName' : None, 'GUID' : None, } try: fileInfo = stageout(**fileInfo) exitCode = 0 except Exception, ex: msg = "Unable to stage out log archive:\n" msg += str(ex) print msg self.report.exitCode = 60312 self.report.status = "Failed" #self.report.addError(60312, "StageOutError") self.saveFjr() return # delete file - ignore failures if exitCode == 0: for file in logs: try: delete(**file) #exitCode = 0 #ignore error here except Exception, ex: msg = "Unable to delete log:\n" msg += str(ex) print msg #exitCode = 60312 #ignore error here # write successful fjr and merge with top level self.report.exitCode = exitCode if exitCode == 0 : self.report.status = "Success" self.report.addLogFile(fileInfo['LFN'], fileInfo['SEName']) else: # at the moment do nothing with failures self.report.status = "Failed" self.saveFjr() return |
runs_to_process = [] | runs_to_process = set() | def endElement(self, name): global is_dataset if name == 'dataset': is_dataset = False |
runs_to_process.extend(list(blocks[block]['Runs'])) | runs_to_process = runs_to_process.union(blocks[block]['Runs']) | def endElement(self, name): global is_dataset if name == 'dataset': is_dataset = False |
dqmData['Runs'] = ",".join(runs_to_process) | dqmData['Runs'] = ",".join(list(runs_to_process)) | def endElement(self, name): global is_dataset if name == 'dataset': is_dataset = False |
def localCustomization(self, config, merge = False): """ Apply site specific customizations to the config """ site_config = self.taskState.getSiteConfig() self.ioCustomization(config, site_config.io_config, merge) def ioCustomization(self, config, custom_config, merge = False): """ Apply site specific io customizations """ if not custom_config or (merge is False and not config.inputFiles): return config import re version = lambda x: tuple(int(x) for x in re.compile('(\d+)').findall(x)) cmssw_version = version(os.environ['CMSSW_VERSION']) if cmssw_version < (2, 1, 8): return config print "Site specific IO parameters will be used:" cache_size = custom_config.get('cacheSize', None) if cache_size: if merge: from ProdCommon.CMSConfigTools.ConfigAPI.InputSource import InputSource InputSource(config.source) config.sourceParams['cacheSize'] = cache_size if merge: from FWCore.ParameterSet.Modules import Service config.add_(Service('AdaptorConfig')) for param in custom_config: print " %s %s" % (param, custom_config[param]) if param == 'cacheSize': continue if merge: import FWCore.ParameterSet.Types as CfgTypes adaptor = config.services['AdaptorConfig'] setattr(adaptor, param, CfgTypes.untracked(CfgTypes.string(str(custom_config[param])))) else: config.tFileAdaptorConfig[param] = custom_config[param] return config | def localCustomization(self, config, merge = False): """ Apply site specific customizations to the config """ site_config = self.taskState.getSiteConfig() |
|
self.localCustomization(self.jobSpecNode.cfgInterface) | def createPSet(self): """ _createPSet_ |
|
self.localCustomization(self.jobSpecNode.cfgInterface, merge = True) | def createMergePSet(self): """ _createMergePSet_ |
|
timingInfo.write('Min. time on %s: %s s\n' % tuple(max)) | timingInfo.write('Min. time on %s: %s s\n' % tuple(min)) | def main(argv) : """ prepareRelValworkflows prepare workflows for chained processing of RelVal samples - parse file holding cmsDriver commands for 1st and 2nd steps - prepare workflows - prepare WorkflowInjector:Input script - prepare ForceMerge script - prepare DBSMigrationToGlobal script - prepare PhEDExInjection script - prepare local DBS query script required parameters --samples <textfile> : list of RelVal sample parameter-sets in plain text file, one sample per line, # marks comment --version <processing version> : processing version (v1, v2, ... ) --DBSURL <URL> : URL of the local DBS (http://cmsdbsprod.cern.ch/cms_dbs_prod_local_07/servlet/DBSServlet | http://cmssrv46.fnal.gov:8080/DBS208/servlet/DBSServlet) --only-sites : Site where dataset is going to be processed or where the input dataset is taken from. Usually srm-cms.cern.ch and cmssrm.fnal.gov optional parameters --pileupdataset : input pileup dataset. It must be provided if the <samples> txt file contains PilepUp samples --lumi <number> : initial run for generation (default: 666666), set it to 777777 for high statistics samples --event <number> : initial event number (default: 1) --store-fail : store output files for failed jobs in chain processing. --read-dbs : DBS URL used for obtaining the list of available blocks for real data. Default: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet --scripts-dir : Path to workflow creation scripts (default: $PUTIL) --skip-config : Is the configuration file was already created, it will skip cmsDriver command execution --extra-label : Extra label for identifying the datasets: /RelVal*/CMSSW_X_Y_Z-<Conditions>_<SpecialTag>_<ExtraLabel>_<FilterName>-<version>/TIER --workflow-label : Label for the workflows. --help (-h) : help --debug (-d) : debug statements """ start_total_time = time.time() # default version = os.environ.get("CMSSW_VERSION") if version is None: print '' print 'CMSSW version cannot be determined from $CMSSW_VERSION' sys.exit(2) architecture = os.environ.get("SCRAM_ARCH") if architecture is None: print '' print 'CMSSW architecture cannot be determined from $SCRAM_ARCH' sys.exit(2) try: from ProdCommon.DataMgmt.DBS.DBSReader import DBSReader except ImportError, ex: print ex print 'Please load prodAgent libraries (point $PYTHONPATH to the right path).' 
sys.exit(2) samplesFile = None processing_version = None initial_run = "666666" initial_event = "1" debug = False DBSURL = None pileup_dataset = None storeFail = False readDBS = 'http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet' onlySites = None scriptsDir = '$PUTIL' #os.path.expandvars(os.environ.get('PUTIL', None)) skip_config = False extra_label = '' workflow_label = '' try: opts, args = getopt.getopt(argv, "", ["help", "debug", "samples=", "version=", "DBSURL=", "event=", "lumi=", "pileupdataset=", "store-fail", "read-dbs=", "only-sites=", "scripts-dir=", "skip-config", "extra-label=", "workflow-label="]) except getopt.GetoptError: print main.__doc__ sys.exit(2) # check command line parameter for opt, arg in opts : if opt == "--help" : print main.__doc__ sys.exit() elif opt == "--debug" : debug = True elif opt == "--samples" : samplesFile = arg elif opt == "--version" : processing_version = arg elif opt == "--lumi" : initial_run = arg elif opt == "--event" : initial_event = arg elif opt == "--DBSURL" : DBSURL = arg elif opt == "--pileupdataset" : pileup_dataset = arg print arg elif opt == '--store-fail': storeFail = True elif opt == '--read-dbs': readDBS = arg elif opt == '--only-sites': onlySites = arg elif opt == '--scripts-dir': if arg.endswith('/') : scriptsDir = arg[:-1] else: scriptsDir = arg scriptsDirTemp = scriptsDir # There's no need to expand the shell variables anymore #if scriptsDir.startswith('$') : # scriptsDirTemp = os.environ.get(scriptsDir[1:],None) # scriptsDir = os.path.expandvars(scriptsDirTemp) if scriptsDirTemp != None: if not os.path.exists(scriptsDirTemp): print "--scripts-dir argument does not exist, please verify." sys.exit(6) else: print "--scripts-dir argument does not exist, please verify." sys.exit(6) elif opt == "--skip-config": skip_config = True elif opt == "--extra-label": extra_label = arg elif opt == "--workflow-label": workflow_label = arg if samplesFile == None or processing_version == None or DBSURL == None : print main.__doc__ sys.exit(2) if debug: print "\nprepareRelValWorkflows.py was started with the following arguments: %s" % \ " ".join(argv) print "\n" samples = [] steps = {} primary_prefix = 'RelVal' max_step = 1 try: file = open(samplesFile) except IOError: print 'file with list of parameter-sets cannot be opened!' sys.exit(1) n_line = 0 print 'Parsing input file...' start_parse_time = time.time() for line in file.readlines(): n_line += 1 # Skipping lines with no info if line.strip() != '' and line.strip() != '\n' and \ not line.strip().startswith("#") and \ line.find('//') != 0: # I don't know what's the last condition for line_parts = [part.strip() for part in line.split('@@@') if part] dqmData = {} # Keys: Scenario, Run # // # // Parsing first step #// if not line.strip().startswith('STEP'): command = '' array = [] special_tag = '' conditions = None total_events = None events_per_job = None pile_up = False output_name = '' input_data = {} input_blocks = "" acq_era = version sample_info = line_parts[0].strip() # // # // Filling up sample's details #// sample_info_parts = [part.strip() for part in \ sample_info.split('++') if part] sample_number = sample_info_parts[0] #We might need this later sample_name = sample_info_parts[1] sample_steps = [i.strip() for i in \ sample_info_parts[2].split(',') if i] primary = primary_prefix + sample_name # // # // Is it a real data processing sample? According to this #// we assign or not the command variable. 
#\\ if line_parts[0].find('REALDATA') > -1: is_real_data = True else: is_real_data = False command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Parsing number of events #// if '--relval' in array : total_events = array[array.index('--relval')+1\ ].split(',')[0].strip() events_per_job = array[array.index('--relval')+1\ ].split(',')[1].strip() # // # // Special tag #// # FastSim if command.find('FASTSIM') > -1: special_tag = 'FastSim' # PileUp (at the same time with FastSim) if '--pileup' in array : # // # // Will use whatever argument of --pileup option is #// pileup_arg = array[array.index('--pileup') + 1] if pileup_arg.lower().strip() != 'nopileup': if special_tag: special_tag = "_".join( [special_tag, "PU", pileup_arg.strip()]) else: special_tag = "_".join(["PU", pileup_arg.strip()]) pile_up = True if pileup_dataset is None : print "You have to provide a pileup dataset." print "Usually it is a MinBias (RAW)." print "Use option --pileupdataset" sys.exit(5) # // # // Sort of custom tag #// if '--beamspot' in array: beamspot_arg = \ array[array.index('--beamspot') + 1].strip() if special_tag: special_tag = "_".join( [special_tag, beamspot_arg]) else: special_tag = beamspot_arg # // # // Cfg file's output name #// output_name = "_".join( [x for x in [primary, conditions, special_tag] if x] ) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # Filling up DQM information dqmData['Runs'] = '1' dqmData['Scenario'] = getDQMScenario(command) # // # // Collecting info for real data samples #// if is_real_data: # // # // Parsing dataset details. The following details are #// supported: REALDATA, RUN, LABEL, FILES, EVENTS, PDNAME #\\ # Producing tuples from the input options. data_options = [tuple(x.split(':')) \ for x in sample_info_parts[3].split(',') if x.strip()] # Parsing tuples for arg_v in data_options: if len(arg_v) == 2: input_data[arg_v[0].strip()] = arg_v[1].strip() elif len(arg_v) == 1: input_data[arg_v[0].strip()] = None else: print "Line %s has an extra ','." % (line) sys.exit(7) # // # // Verifiying optional arguments: RUN, LABEL, FILE, EVENTS, #// PRIMARY #\\ data_run = input_data.get('RUN', '') data_label = input_data.get('LABEL', '') data_files = input_data.get('FILES', '') data_events = input_data.get('EVENTS', '') data_pname = input_data.get('PRIMARY', None) if data_events: data_events = int(data_events) if data_files: data_files = int(data_events) # // # // Looking for best matching dataset. It should be just #// one, otherwise the script will exit. 
#\\ reader = DBSReader(readDBS) query = "find dataset where dataset like %s" % ( input_data['REALDATA']) result_xml = reader.dbs.executeQuery(query) # XML Handler result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found. if len(target_datasets) > 1: # Is this an input relval dataset produced in the # current release? query = "find dataset where dataset like %s " % ( input_data['REALDATA']) query += "and release=%s" % version result_xml = reader.dbs.executeQuery(query) result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found, match the processing # version if len(target_datasets) > 1: find_version = \ lambda x: x.find(processing_version) != -1 target_datasets = filter(find_version, target_datasets) if len(target_datasets) > 1: msg = "Dataset pattern in line %s is too broad." % line msg += "These datasets were found: %s" % ( " ".join(target_datasets)) print msg sys.exit(8) if not target_datasets: msg = "Dataset pattern produced no match in line %s" % ( line) print msg sys.exit(8) # Now I can look up the blocks for this dataset. target_dataset = target_datasets[0] input_data['REALDATA'] = target_dataset # // # // Looking up the blocks for a given Dataset and the #// provided list of runs #\\ runs_list = \ [x.strip() for x in data_run.split('|') if x.strip()] query = \ "find run where dataset = %s" % target_dataset is_the_first = True for run in runs_list: if is_the_first: query += " and (" is_the_first = False else: query += " or " # Run range: XXXXXX-XXXXXX if run.count("-"): run_limits = \ [x.strip() for x in run.split('-') if x.strip()] query += "(run >= %s and run <= %s)" % ( run_limits[0], run_limits[1]) else: query += "run = %s" % run if not is_the_first: query += ")" result_xml = reader.dbs.executeQuery(query) result_list = DBSXMLParser(result_xml) if not result_list: raise Exception, "query %s produced no results" % query target_runs = [x['run'] for x in result_list] input_files = reader.dbs.listFiles( path=target_dataset, retriveList=['retrive_run']) # // # // Parsing input blocks #// blocks = {} for input_file in input_files: # Skip files with no events # A block will be skipped if all its files have 0 # events if input_file['NumberOfEvents'] == 0: continue runs = \ [str(x['RunNumber']) for x in input_file['RunsList']] for run in runs: if run in target_runs: break else: continue # skip file if it's not in the target_runs cur_files = \ blocks.setdefault(input_file['Block']['Name'], {}).setdefault('Files', 0) cur_events = \ blocks[input_file['Block']['Name']].setdefault( 'Events', 0) cur_runs = \ blocks[input_file['Block']['Name']].setdefault( 'Runs', set()) blocks[input_file['Block']['Name']]['Files'] += 1 blocks[input_file['Block']['Name']]['Events'] += \ input_file['NumberOfEvents'] blocks[input_file['Block']['Name']]['Runs'] = \ cur_runs.union(runs) # // # // Truncating blocks list #// total_events = 0 total_files = 0 blocks_to_process = [] runs_to_process = set() for block in blocks: blocks_to_process.append(block) runs_to_process = runs_to_process.union(blocks[block]['Runs']) total_events += blocks[block]['Events'] total_files += blocks[block]['Files'] if data_events and (data_events < total_events): break if data_files and (data_files < total_files): break input_blocks = ",".join(blocks_to_process) # // # // If PRIMARY is true, then it will use the #// sample_name value as primary dataset name, else it #\\ will use the input primary 
dataset name. # \\ if data_pname is not None and \ data_pname.lower() in ('y', 't', 'true'): primary = "".join([primary_prefix, sample_name]) else: primary = \ [x for x in input_data['REALDATA'].split("/") if x][0] # // # // Seting special tag #// special_tag_parts = [] # Add RelVal tag if not present. if target_dataset.find(primary_prefix) == -1: special_tag_parts.append(primary_prefix) # Add LABEL if data_label: special_tag_parts.append(data_label) special_tag = "_".join(special_tag_parts) # // # // Setting Acq. Era #// #processed_dataset = target_dataset.split('/')[2] #dataset_acq_era = processed_dataset.split("-")[0] #if dataset_acq_era.startswith(version): # acq_era = version #else: # acq_era = dataset_acq_era # Filling up DQM information dqmData['Runs'] = ",".join(list(runs_to_process)) # // # // Composing a dictionary per sample #// dict = {} dict['sampleName'] = sample_name dict['command'] = command dict['primary'] = primary dict['outputName'] = output_name dict['conditions'] = conditions dict['specialTag'] = special_tag dict['totalEvents'] = total_events dict['eventsPerJob'] = events_per_job dict['pileUp'] = pile_up dict['isRealData'] = is_real_data dict['inputData'] = input_data dict['inputBlocks'] = input_blocks dict['steps'] = sample_steps dict['AcqEra'] = acq_era dict['DQMData'] = dqmData samples.append(dict) if debug: print 'Parsing' print 'Sample:', sample_name print 'Command:', command print 'Conditions:', conditions print 'Special tag:', special_tag print 'Total events:', total_events print 'Events per job:', events_per_job print 'Steps:', sample_steps print 'PileUp:', pile_up print 'Input data:', input_data print 'Input blocks:', input_blocks print 'DQMData:', dqmData print '' # // # // No a first step command (second HLT table, RECO, ALCA, etc) #// else: step_number = int(line_parts[0].split('++')[0].strip()[-1]) step_name = line_parts[0].split('++')[1].strip() command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Cfg file's output name #// output_name = "_".join([step_name, conditions]) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # // # // Second trigger table? This may be changed, right now I am #// assuming that all 4 steps workflows are like this. #\\ stage_previous = True if step_number == 2: if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].find('RECO') < 0: stage_previous = False if step_number > max_step: max_step = step_number # // # // HARVESTING cmsDriver commands should be ignored. RelVals #// should not run any HARVESTING configuration. 
Harvestings #\\ run independently after the datasets are produced. # \\ skip_step = False if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].count('HARVESTING') > 0: skip_step = True # // # // Composing a dictionary per step #// dict = {} dict['stepNumber'] = step_number dict['command'] = command dict['outputName'] = output_name dict['conditions'] = conditions dict['stagePrevious'] = stage_previous dict['DQMData'] = {'Scenario': getDQMScenario(command)} dict['skipStep'] = skip_step # // # // Step name should be unique #// if step_name not in steps: steps[step_name] = dict else: print "Label %s is repeated!!!" % step_name sys.exit(1) if debug: print 'Parsing' print 'Step name:', step_name print 'Step number:', step_number print 'Command:', command print 'Conditions:', conditions print 'Stage previous:', stage_previous print 'DQM Data:', dict['DQMData'] print '' parse_time = time.time() - start_parse_time file.close() if debug: print "Collected information step 1" for sample in samples: print 'Sample name:', sample['sampleName'] print 'Command', sample['command'] print 'Real data:', sample['isRealData'] print 'Input data:', sample['inputData'] print 'Input blocks', sample['inputBlocks'] print 'Conditions:', sample['conditions'] print 'Total events:', sample['totalEvents'] print 'Events per job:', sample['eventsPerJob'] print 'Output name:', sample['outputName'] print 'Steps:', sample['steps'] print 'PileUp:', sample['pileUp'] print 'Special tag:', sample['specialTag'] print 'Acq. Era:', sample['AcqEra'] print 'DQM data:', sample['DQMData'] print '' for i in range(2, max_step+1): print 'Collected information step %s' % i for step in steps: if steps[step]['stepNumber'] == i: print 'Step name:', step print 'Command:', steps[step]['command'] print 'Conditions:', steps[step]['conditions'] print 'Stage previous:', steps[step]['stagePrevious'] print 'DQM Data:', steps[step]['DQMData'] print '' # // # // Execute cmsDriver command #// print '' print 'Executing cmsDriver commands for step 1 configurations' print '' start_cmsDriver_time = time.time() for sample in samples: if not sample['isRealData']: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), sample['outputName']])) and skip_config: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'was already issued, skipping.' continue exitCode, output, error = executeCommand(sample['command']) if exitCode == 0: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'exited with ExitCode:', exitCode else : print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'failed with ExitCode:', exitCode sys.exit(1) else : msg = 'Real Data:\n' msg += 'Input dataset: %s\n' % (sample['inputData']['REALDATA']) msg += 'Run: %s\n' % (sample['inputData'].get('RUN', 'All')) msg += 'Input blocks: %s' % (sample['inputBlocks']) print msg for i in range(2, max_step+1): print '' print 'Executing cmsDriver commands for step %s configurations' % i print '' for step in steps: if steps[step]['stepNumber'] == i: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), steps[step]['outputName']])) and skip_config: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'],'was already issued, skipping.' 
continue # // # // Skip HARVESTING cmsDriver commands #// if steps[step]['skipStep']: print 'This is a HARVESTING cmsDriver command, skipping. ' continue exitCode, output, error = executeCommand(steps[step]['command']) if exitCode == 0: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'exited with ExitCode:', exitCode else: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'failed with ExitCode:', exitCode sys.exit(1) cmsDriver_time = time.time() - start_cmsDriver_time print '' print 'Workflow creation' print '' start_workflow_time = time.time() datasets = [] unmergedDatasets = [] mergedDatasets = [] workflows = {} # // # // Create workflows #// for sample in samples: command = 'python ' + scriptsDir conditions = '' # Conditions -> processingString # // # // In case we are processing data #// if sample['isRealData']: command += '/createProcessingWorkflow.py \\\n' # Not changing the primary dataset name for real data. command += '--override-channel=' + sample['primary'] + ' \\\n' command += '--dataset=' + sample['inputData']['REALDATA'] + ' \\\n' command += '--only-blocks=' + sample['inputBlocks'] + ' \\\n' command += '--dbs-url=' + readDBS + ' \\\n' conditions = steps[sample['steps'][0]]['conditions'] command += '--split-type=file \\\n' command += '--split-size=1 \\\n' # // # // MC workflows #// else: command += '/createProductionWorkflow.py \\\n' command += '--channel=' + sample['primary'] + ' \\\n' conditions = sample['conditions'] command += '--starting-run=' + initial_run + ' \\\n' command += '--starting-event=' + initial_event + ' \\\n' command += '--totalevents=' + sample['totalEvents'] + ' \\\n' command += '--eventsperjob=' + sample['eventsPerJob'] + ' \\\n' if sample['pileUp']: command += '--pileup-dataset=' + pileup_dataset + ' \\\n' if storeFail: command += '--store-fail=True \\\n' # // # // First step #// command += '--version=' + version + ' \\\n' command += '--py-cfg=' + sample['outputName'] + ' \\\n' # // # // Input configurations (Second step and further) #// if sample['steps'][0].lower().strip() != 'none': i = 0 for step in sample['steps']: # Is this a HARVESTING step? If so, skip it! if steps[step]['skipStep']: continue # Not a HARVESTING step, continue normally. command += '--version=' + version + ' \\\n' command += '--py-cfg=' + steps[step]['outputName'] + ' \\\n' if i != 0 or not sample['isRealData']: command += '--stageout-intermediates=%s \\\n' % ( steps[step]['stagePrevious']) command += '--chained-input=output \\\n' else: dqmScenario = steps[step]['DQMData']['Scenario'] # // # // If a two-hlt tables workflow, will take conditions from #// the second step information #\\ if not steps[step]['stagePrevious'] and \ i == 0: conditions = steps[step]['conditions'] i += 1 # // # // Common options #// command += '--group=RelVal \\\n' command += '--category=relval \\\n' command += '--activity=RelVal \\\n' command += '--acquisition_era=' + sample['AcqEra'] + ' \\\n' command += '--only-sites=' + onlySites + ' \\\n' command += '--processing_version=' + processing_version + ' \\\n' # Workflow label if workflow_label: command += '--workflow_tag=' + workflow_label + ' \\\n' # // # // processingString="CMSSWVersion"_"Conditions"_"specialTag"_"extra-label" #// CMSSWVersion is appended only when the input dataset does not have it. 
#\\ processing_string_parts = [] if sample['AcqEra'] != version: processing_string_parts.append(version) processing_string_parts.append(conditions) if sample['specialTag']: processing_string_parts.append(sample['specialTag']) if extra_label: processing_string_parts.append(extra_label) command += '--processing_string=' + "_".join(processing_string_parts) if debug: print command print '' start_command_time = time.time() exitCode, output, error = executeCommand(command) command_time = time.time() - start_command_time if debug: print output print '' output = [x for x in output.split('\n') if x] if exitCode == 0: #parse output tmp = [] index = FindIndex(output,'Output Datasets') for dataset in output[index+1:]: tmp.append(dataset.strip()) # DQM Data dqmInfo = {} dqmInfo['Runs'] = sample['DQMData']['Runs'] if sample['isRealData']: dqmInfo['Scenario'] = dqmScenario else: dqmInfo['Scenario'] = sample['DQMData']['Scenario'] datasets.append({'unmerged': tmp, 'totalEvents': sample['totalEvents'], 'merged': [x.replace('-unmerged','') for x in tmp], 'DQMData': dqmInfo }) unmergedDatasets.append(tmp) index = FindIndex(output,'Created') if index == -1: print "No workflow was created by create*workflow.py" sys.exit(1) workflow = output[index].split()[1].strip() workflows.setdefault(workflow, {})['isRealData'] = sample['isRealData'] workflows[workflow]['time'] = command_time print 'workflow creation command for workflow:', workflow, \ 'exited with ExitCode:', exitCode else : print 'workflow creation command:' print command print 'failed: %s' % error sys.exit(1) if debug: print 'Created workflows:' print workflows.keys() print '' print "Unmerged datasets:" print unmergedDatasets # extract merged datasets for sample in unmergedDatasets: tmp = [] for dataset in sample: tmp.append(dataset.replace('-unmerged','')) mergedDatasets.append(tmp) workflow_time = time.time() - start_workflow_time print '' print 'Write helper scripts' print '' # WorkflowInjector:Input script inputScript = open('input.sh','w') inputScript.write('#!/bin/bash\n') feeder = 'None' for workflow in workflows.keys(): if workflows[workflow]['isRealData']: if feeder.find('ReReco') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin BlockFeeder\n') feeder = 'ReReco' else : if feeder.find('Request') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin RequestFeeder\n') feeder = 'Request' inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:Input ' + os.path.join(os.getcwd(), workflow) + '\n') inputScript.close() os.chmod('input.sh',0755) print 'Wrote WorkflowInjector:Input script to:',os.path.join(os.getcwd(),'input.sh') # ForceMerge forceMergeScript = open('forceMerge.sh','w') forceMergeScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : forceMergeScript.write('python $PRODAGENT_ROOT/util/publish.py ForceMerge ' + dataset + '\n') forceMergeScript.close() os.chmod('forceMerge.sh',0755) print 'Wrote ForceMerge script to:',os.path.join(os.getcwd(),'forceMerge.sh') # MigrateDatasetToGlobal migrateScript = open('migrateToGlobal.sh','w') migrateScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : migrateScript.write('python $PRODAGENT_ROOT/util/publish.py DBSInterface:MigrateDatasetToGlobal ' + dataset + '\n') migrateScript.close() os.chmod('migrateToGlobal.sh',0755) print 'Wrote DBSInterface:MigrateDatasetToGlobal script to:',os.path.join(os.getcwd(),'migrateToGlobal.sh') # 
PhEDExInjectDataset phedexScript = open('injectIntoPhEDEx.sh','w') phedexScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : phedexScript.write('python $PRODAGENT_ROOT/util/publish.py PhEDExInjectDataset ' + dataset + '\n') phedexScript.close() os.chmod('injectIntoPhEDEx.sh',0755) print 'Wrote PhEDExInjectDataset script to:',os.path.join(os.getcwd(),'injectIntoPhEDEx.sh') # DBS: query unmerged datasets queryUnmergedScript = open('queryUnmerged.sh','w') queryUnmergedScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryUnmergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryUnmergedScript.close() os.chmod('queryUnmerged.sh',0755) print 'Wrote DBS query script for unmerged datasets to:',os.path.join(os.getcwd(),'queryUnmerged.sh') # DBS: query merged datasets queryMergedScript = open('queryMerged.sh','w') queryMergedScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryMergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryMergedScript.close() os.chmod('queryMerged.sh',0755) print 'Wrote DBS query script for merged datasets to:',os.path.join(os.getcwd(),'queryMerged.sh') # DQMHarvesting DQMinputScript = open('DQMinput.sh','w') DQMinputScript.write("#!/bin/bash\n") reHarvest = re.compile(r'/.*/.*/(RECO|.*-RECO)') # Only RECO datasets for now. for sample in datasets: for dataset in sample['merged']: if reHarvest.match(dataset): for run in sample['DQMData']['Runs'].split(","): DQMinputScript.write('python $PRODAGENT_ROOT/util/harvestDQM.py --run=%s --path=%s --scenario=%s\n' % ( run, dataset, sample['DQMData']['Scenario'])) os.chmod('DQMinput.sh',0755) print 'Wrote DQMHarvesting script for merged datasets to:', os.path.join(os.getcwd(),'DQMinput.sh') # Output datasets list outputList = open('outputDatasets.txt','w') for sample in mergedDatasets : for dataset in sample : outputList.write(dataset + "\n") print 'Wrote output datasets list to:', os.path.join(os.getcwd(),'outputDatasets.txt') # File with expected number of events numberOfEvents = open('eventsExpected.txt','w') for sample in datasets: for dataset in sample['merged']: numberOfEvents.write("%s %s\n" % (sample['totalEvents'],dataset)) numberOfEvents.close() print 'Wrote events per dataset to:', os.path.join(os.getcwd(),'eventsExpected.txt') total_time = time.time() - start_total_time # File with timing report (Parsing, cmsDriver comands, workflow creation) timingInfo = open('timingInfo.txt', 'w') timingInfo.write('Total time: %s s\n' % total_time) timingInfo.write('Cofigs. creation time: %s s\n' % cmsDriver_time) timingInfo.write('Workflows creation time: %s s\n' % workflow_time) output_text = [] sum = 0 for workflow in workflows: if sum == 0: min = [workflow, workflows[workflow]['time']] max = [workflow, workflows[workflow]['time']] sum += workflows[workflow]['time'] output_text.append("%s: %s s" % (workflow, workflows[workflow]['time'])) if max[1] < workflows[workflow]['time']: max = [workflow, workflows[workflow]['time']] if min[1] > workflows[workflow]['time']: min = [workflow, workflows[workflow]['time']] timingInfo.write('Average time per workflow: %s s\n' % (int(sum) / int(len(workflows)))) timingInfo.write('Max. 
time on %s: %s s\n' % tuple(max)) timingInfo.write('Min. time on %s: %s s\n' % tuple(min)) timingInfo.write('=' * 10) timingInfo.write('Details of time per workflow:\n%s\n' % "\n".join(output_text))
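The timing summary in the record above seeds min/max by hand inside the loop; as a reading aid, a minimal sketch of the same per-workflow statistics, assuming a workflows dict shaped like the one built above (workflow file name -> {'time': seconds, ...}). The helper name is illustrative, not part of the original script.

def summarize_workflow_times(workflows):
    # workflows: {workflow file name: {'time': seconds, ...}}, as built above
    times = dict((name, info['time']) for name, info in workflows.items())
    slowest = max(times, key=times.get)            # workflow with the largest creation time
    fastest = min(times, key=times.get)            # workflow with the smallest creation time
    average = sum(times.values()) / float(len(times))
    return {'average': average,
            'max': (slowest, times[slowest]),
            'min': (fastest, times[fastest])}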
self.setdefault("JSToolUI" , "ProdAgent") | self.setdefault("JSToolUI" , None) | def __init__(self): dict.__init__(self) self.task = None self.job = None self.destinations = {} self.publisher = None self.setdefault("Application", None) self.setdefault("ApplicationVersion", None) self.setdefault("GridJobID", None) self.setdefault("LocalBatchID", None) self.setdefault("GridUser", None) self.setdefault("User" , os.environ.get('USER', 'ProdAgent')) self.setdefault("JSTool","ProdAgent") self.setdefault("NbEvPerRun", 0) self.setdefault("NodeName", None) self.setdefault("Scheduler", None) self.setdefault("TaskType", "privateproduction") self.setdefault("NSteps", 0) self.setdefault("VO", "CMS") self.setdefault("TargetCE", None) self.setdefault("RBname", None) self.setdefault("JSToolUI" , "ProdAgent") |
elif err.message().find( "Job current status doesn" ) != -1: | elif err.message().find("Output not yet Ready") != -1 or \ err.message().find( "Job current status doesn" ) != -1: | def getOutput(cls, job, task, schedSession ): """ __getOutput__ |
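The change in the record above extends the substring test on the scheduler error so that "output not yet ready" conditions are retried rather than treated as failures; a small hedged sketch of that pattern (the two message fragments are copied from the record, the helper name and the tuple are illustrative):

RETRY_LATER_FRAGMENTS = (
    "Output not yet Ready",
    "Job current status doesn",   # deliberately truncated, matching the substring check above
)

def output_not_ready(error_message):
    # True when the scheduler error only means the output is not available yet,
    # so getOutput should be retried instead of marking the job as failed.
    return any(fragment in error_message for fragment in RETRY_LATER_FRAGMENTS)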
print "DEBUG 777 setting fileclass %s" % fileclass | print "DEBUG 777 creating %s" % targetDir | def createOutputDirectory(self, targetPFN): """ _createOutputDirectory_ |
if not self.isBulk and not executable.count(self.singleSpecName): | if not self.isBulk and (not executable.count(self.singleSpecName) or \ not oldGlobalSandbox.count(self.singleSpecName)): | def prepareResubmission(self, bossJob): """ __prepareResubmission__ |
msg += "Submission. Making a new wrapping script %s for single submission." \ | msg += "Submission." msg += " Making a new wrapping script %s for single submission." \ | def prepareResubmission(self, bossJob): """ __prepareResubmission__ |
self.bossTask = self.bossLiteSession.loadTask( bossJob['taskId'], bossJob['jobId'] ) | def prepareResubmission(self, bossJob): """ __prepareResubmission__ |
|
if len(splittedPayload) == 3 and splittedPayload[2] != 'all': jobsToKill = eval(str(splittedPayload[2])) | if len(splittedPayload) == 2 and splittedPayload[1] != 'all': jobsToKill = eval(str(splittedPayload[1])) | def killTask(self, taskSpecId): """ |
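The killTask payload above is a task spec id optionally followed by a job list that is recovered with eval; a hedged, eval-free sketch of parsing the same payload shape (the '::' separator and the 'all' convention are assumptions based on the record, and the function name is illustrative):

def parse_kill_payload(payload, separator="::"):
    # e.g. "MyTaskSpec" or "MyTaskSpec::all" or "MyTaskSpec::[1, 2, 3]"
    parts = payload.split(separator)
    task_spec_id = parts[0]
    jobs_to_kill = None                      # None / 'all' -> kill every job in the task
    if len(parts) == 2 and parts[1] != 'all':
        body = parts[1].strip().strip('[]')
        jobs_to_kill = [int(j) for j in body.split(',') if j.strip()]
    return task_spec_id, jobs_to_kill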
cfg = node.cfgInterface | cfgInter = node.cfgInterface | def findCfgFiles(node): """ _findCfgFiles_ Look for cms cfg file in payload node provided """ try: #hash = node._OutputDatasets[0]['PSetHash'] #cfg = node.configuration cfg = node.cfgInterface cfgInter = cfg.makeConfiguration() print node.name + ": Found cfg." except Exception, ex: # Not a cfg file print node.name + ": No cfg. found." return actOnCfg(node.name, cfgInter) return |
pfn = self.localStageOut(lfn, fileToStage['PFN'], fileToStage['Checksums']) | checksums = fileToStage.get('Checksums', None) pfn = self.localStageOut(lfn, fileToStage['PFN'], checksums) | def __call__(self, **fileToStage): """ _operator()_ |
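The stage-out change above switches to dict.get so callers that do not pass 'Checksums' keep working; a minimal sketch of that defensive lookup, with the real localStageOut call only stubbed out (the 'PFN' and 'Checksums' keys are from the record, the 'LFN' key and the function name are assumptions):

def prepare_stage_out(file_to_stage):
    # file_to_stage mirrors the **fileToStage dictionary used above
    lfn = file_to_stage['LFN']
    source_pfn = file_to_stage['PFN']
    checksums = file_to_stage.get('Checksums', None)   # optional, absent for older callers
    # a real implementation would now call localStageOut(lfn, source_pfn, checksums)
    return lfn, source_pfn, checksums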
toplevelReport = os.path.join(os.environ['PRODAGENT_JOB_DIR'], "FrameworkJobReport.xml") if state.jobSpecNode._InputLinks and \ state.jobSpecNode._InputLinks[0]["AppearStandalone"] and \ os.path.exists(toplevelReport): parentForward = True for link in state.jobSpecNode._InputLinks: if not link["AppearStandalone"]: parentForward = False break inputTaskNames = [ getTaskState(x['InputNode']).taskName() \ for x in state.jobSpecNode._InputLinks ] existingReports = readJobReport(toplevelReport) if parentForward: for existingReport in existingReports: if existingReport.name in inputTaskNames: print "Forwading input files from node: %s" % \ existingReport.name for outputFile in report.files: for inputFile in outputFile.inputFiles: foundParent = False inputFile['PFN'] = \ inputFile['PFN'].replace('file:', '') for previousFile in existingReport.files: previousFile['PFN'] = \ previousFile['PFN'].replace('file:', '') if inputFile['PFN'].count(previousFile['PFN']): print "Keeping file for forwarding: %s" % \ inputFile state.parentsForwarded.extend([inputFile]) foundParent = True msg = "These input files will be replaced by previous step's input" msg += " files: %s" % state.parentsForwarded print msg | def processFrameworkJobReport(): """ _processFrameworkJobReport_ Read the job report and insert external information such as datasets for each file entry. """ state = TaskState(os.getcwd()) state.loadRunResDB() state.loadJobSpecNode() state.jobSpecNode.loadConfiguration() state.dumpJobReport() badReport = False try: state.loadJobReport() except Exception, ex: # // # // Error reading report ==> it is corrupt. #// Setting it to None means that it will be converted into # //a diagnostic report in the following code # // #// print "Error Reading JobReport:" print str(ex) badReport = True state._JobReport = None # // # // If no report file is found, we create an empty one to #// make sure we report the failure implied by the missing report if state.getJobReport() == None: print "Generating Job Report by hand..." state._JobReport = FwkJobReport() report = state.getJobReport() exitCode = state.getExitStatus() reportStatus = "Success" if badReport: # // # // Unreadable report => make sure this gets logged #// exitCode = 50115 reportStatus = "Failed" if exitCode == None: print "WARNING: CANNOT FIND EXIT STATUS..." exitCode = 50116 reportStatus = "Failed" if exitCode != 0: reportStatus = "Failed" report.status = reportStatus report.exitCode = exitCode report.workflowSpecId = state.taskAttrs['WorkflowSpecID'] report.jobSpecId = state.jobSpec.parameters['JobName'] report.jobType = state.taskAttrs['JobType'] if report.name == None: taskName = state.taskAttrs['Name'] report.name = taskName # // # // filter zero event output files #// TODO: Make this configurable via ProdAgent config switch #[ report.files.remove(x) for x in report.files if x['TotalEvents'] == 0 ] # // # // Filter out input files that are not globally known - i.e. 
no LFN and #// should not be propagated to DBS (can be left by a previous cmsGen step) if state.configurationDict().has_key('DropNonLFNInputs') and \ state.configurationDict()['DropNonLFNInputs'][0] == 'True': [report.inputFiles.remove(x) for x in report.inputFiles if \ x['LFN'] in (None, '')] for outfile in report.files: [outfile.inputFiles.remove(x) for x in outfile.inputFiles if \ x['LFN'] in (None, '')] # // # // generate sizes and checksums #// try: state.generateFileStats() except Exception, ex: print "Error generating file stats: %s" % str(ex) report.status = "Failed" report.exitCode = 50998 # // # // match files to datasets. #// state.assignFilesToDatasets() # // # // Include site details in job report #// siteName = "Unknown" hostName = socket.gethostname() seName = "Unknown" ceName = getSyncCE() state.loadSiteConfig() siteCfg = state.getSiteConfig() if siteCfg != None: siteName = siteCfg.siteName if siteCfg.localStageOut.get('se-name', None) != None: seName = siteCfg.localStageOut['se-name'] report.siteDetails['SiteName'] = siteName report.siteDetails['HostName'] = hostName report.siteDetails['se-name'] = seName report.siteDetails['ce-name'] = ceName # // # // If available, include basic start/stop times in job report #// if os.path.exists("start.time"): report.timing['AppStartTime'] = file("start.time").read().strip() if os.path.exists("end.time"): report.timing['AppEndTime'] = file("end.time").read().strip() # // # // add dashboard id #// report.dashboardId = getDashboardId(state.jobSpec) # // # // Add Performance Report if logfiles are Available #// stderrLog = "%s-main.sh-stderr.log" % state.taskName() perfReport = "PerfReport.log" if not os.path.exists(stderrLog): stderrLog = None if not os.path.exists(perfReport): perfReport = None PerfReps.makePerfReports(report.performance, stderrLog, perfReport) report.performance.getInfoOnWorker() # // # // write out updated report #// localReport = os.path.join(os.getcwd(), "FrameworkJobReport.xml") toplevelReport = os.path.join(os.environ['PRODAGENT_JOB_DIR'], "FrameworkJobReport.xml") if state.jobSpecNode._InputLinks and \ state.jobSpecNode._InputLinks[0]["AppearStandalone"] and \ os.path.exists(toplevelReport): # // # // Combine with report from input node, save to toplevel and locally #// for link in state.jobSpecNode._InputLinks: if not link["AppearStandalone"]: msg = """Reports will only be combined when all input links have AppearStandalone set to true""" raise RuntimeError, msg inputTaskNames = [ getTaskState(x['InputNode']).taskName() \ for x in state.jobSpecNode._InputLinks ] print "Combining current report with %s" % str(inputTaskNames) report = combineReports(toplevelReport, inputTaskNames, report) report.write(localReport) else: # // # // Add this report to the job toplevel report #// This will create the toplevel job report if it doesnt # //exist, otherwise it will merge this report with whatever # // is in there already. #// print "Adding report to top level" report.write(localReport) mergeReports(toplevelReport, localReport) return |
|
x['LFN'] in (None, '')] | x['LFN'] in (None, '') and \ x['PFN'] not in [y['PFN'] for y in state.parentsForwarded]] | def processFrameworkJobReport(): """ _processFrameworkJobReport_ Read the job report and insert external information such as datasets for each file entry. """ state = TaskState(os.getcwd()) state.loadRunResDB() state.loadJobSpecNode() state.jobSpecNode.loadConfiguration() state.dumpJobReport() badReport = False try: state.loadJobReport() except Exception, ex: # // # // Error reading report ==> it is corrupt. #// Setting it to None means that it will be converted into # //a diagnostic report in the following code # // #// print "Error Reading JobReport:" print str(ex) badReport = True state._JobReport = None # // # // If no report file is found, we create an empty one to #// make sure we report the failure implied by the missing report if state.getJobReport() == None: print "Generating Job Report by hand..." state._JobReport = FwkJobReport() report = state.getJobReport() exitCode = state.getExitStatus() reportStatus = "Success" if badReport: # // # // Unreadable report => make sure this gets logged #// exitCode = 50115 reportStatus = "Failed" if exitCode == None: print "WARNING: CANNOT FIND EXIT STATUS..." exitCode = 50116 reportStatus = "Failed" if exitCode != 0: reportStatus = "Failed" report.status = reportStatus report.exitCode = exitCode report.workflowSpecId = state.taskAttrs['WorkflowSpecID'] report.jobSpecId = state.jobSpec.parameters['JobName'] report.jobType = state.taskAttrs['JobType'] if report.name == None: taskName = state.taskAttrs['Name'] report.name = taskName # // # // filter zero event output files #// TODO: Make this configurable via ProdAgent config switch #[ report.files.remove(x) for x in report.files if x['TotalEvents'] == 0 ] # // # // Filter out input files that are not globally known - i.e. no LFN and #// should not be propagated to DBS (can be left by a previous cmsGen step) if state.configurationDict().has_key('DropNonLFNInputs') and \ state.configurationDict()['DropNonLFNInputs'][0] == 'True': [report.inputFiles.remove(x) for x in report.inputFiles if \ x['LFN'] in (None, '')] for outfile in report.files: [outfile.inputFiles.remove(x) for x in outfile.inputFiles if \ x['LFN'] in (None, '')] # // # // generate sizes and checksums #// try: state.generateFileStats() except Exception, ex: print "Error generating file stats: %s" % str(ex) report.status = "Failed" report.exitCode = 50998 # // # // match files to datasets. 
#// state.assignFilesToDatasets() # // # // Include site details in job report #// siteName = "Unknown" hostName = socket.gethostname() seName = "Unknown" ceName = getSyncCE() state.loadSiteConfig() siteCfg = state.getSiteConfig() if siteCfg != None: siteName = siteCfg.siteName if siteCfg.localStageOut.get('se-name', None) != None: seName = siteCfg.localStageOut['se-name'] report.siteDetails['SiteName'] = siteName report.siteDetails['HostName'] = hostName report.siteDetails['se-name'] = seName report.siteDetails['ce-name'] = ceName # // # // If available, include basic start/stop times in job report #// if os.path.exists("start.time"): report.timing['AppStartTime'] = file("start.time").read().strip() if os.path.exists("end.time"): report.timing['AppEndTime'] = file("end.time").read().strip() # // # // add dashboard id #// report.dashboardId = getDashboardId(state.jobSpec) # // # // Add Performance Report if logfiles are Available #// stderrLog = "%s-main.sh-stderr.log" % state.taskName() perfReport = "PerfReport.log" if not os.path.exists(stderrLog): stderrLog = None if not os.path.exists(perfReport): perfReport = None PerfReps.makePerfReports(report.performance, stderrLog, perfReport) report.performance.getInfoOnWorker() # // # // write out updated report #// localReport = os.path.join(os.getcwd(), "FrameworkJobReport.xml") toplevelReport = os.path.join(os.environ['PRODAGENT_JOB_DIR'], "FrameworkJobReport.xml") if state.jobSpecNode._InputLinks and \ state.jobSpecNode._InputLinks[0]["AppearStandalone"] and \ os.path.exists(toplevelReport): # // # // Combine with report from input node, save to toplevel and locally #// for link in state.jobSpecNode._InputLinks: if not link["AppearStandalone"]: msg = """Reports will only be combined when all input links have AppearStandalone set to true""" raise RuntimeError, msg inputTaskNames = [ getTaskState(x['InputNode']).taskName() \ for x in state.jobSpecNode._InputLinks ] print "Combining current report with %s" % str(inputTaskNames) report = combineReports(toplevelReport, inputTaskNames, report) report.write(localReport) else: # // # // Add this report to the job toplevel report #// This will create the toplevel job report if it doesnt # //exist, otherwise it will merge this report with whatever # // is in there already. #// print "Adding report to top level" report.write(localReport) mergeReports(toplevelReport, localReport) return |
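The parent-forwarding block added in the record above keeps an input file when its PFN (with any 'file:' prefix stripped) contains a PFN already listed in the previous step's report; a compact illustrative version of that matching rule, with hypothetical names:

def files_to_forward(current_inputs, previous_files):
    # both arguments are lists of dicts carrying a 'PFN' key,
    # like the file entries of the FrameworkJobReport structures above
    strip_prefix = lambda pfn: pfn.replace('file:', '')
    previous_pfns = [strip_prefix(f['PFN']) for f in previous_files]
    forwarded = []
    for infile in current_inputs:
        pfn = strip_prefix(infile['PFN'])
        if any(prev in pfn for prev in previous_pfns):
            forwarded.append(infile)
    return forwarded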
try: version = os.environ.get("CMSSW_VERSION") except: | version = os.environ.get("CMSSW_VERSION") if version is None: | def main(argv) : """ prepareRelValworkflows prepare workflows for chained processing of RelVal samples - parse file holding cmsDriver commands for 1st and 2nd steps - prepare workflows - prepare WorkflowInjector:Input script - prepare ForceMerge script - prepare DBSMigrationToGlobal script - prepare PhEDExInjection script - prepare local DBS query script required parameters --samples <textfile> : list of RelVal sample parameter-sets in plain text file, one sample per line, # marks comment --version <processing version> : processing version (v1, v2, ... ) --DBSURL <URL> : URL of the local DBS (http://cmsdbsprod.cern.ch/cms_dbs_prod_local_07/servlet/DBSServlet | http://cmssrv46.fnal.gov:8080/DBS208/servlet/DBSServlet) --only-sites : Site where dataset is going to be processed or where the input dataset is taken from. Usually srm-cms.cern.ch and cmssrm.fnal.gov optional parameters --pileupdataset : input pileup dataset. It must be provided if the <samples> txt file contains PilepUp samples --lumi <number> : initial run for generation (default: 666666), set it to 777777 for high statistics samples --event <number> : initial event number (default: 1) --store-fail : store output files for failed jobs in chain processing. --read-dbs : DBS URL used for obtaining the list of available blocks for real data. Default: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet --scripts-dir : Path to workflow creation scripts (default: $PUTIL) --skip-config : Is the configuration file was already created, it will skip cmsDriver command execution --extra-label : Extra label for identifying the datasets: /RelVal*/CMSSW_X_Y_Z-<Conditions>_<SpecialTag>_<ExtraLabel>_<FilterName>-<version>/TIER --workflow-label : Label for the workflows. --help (-h) : help --debug (-d) : debug statements """ start_total_time = time.time() # default try: version = os.environ.get("CMSSW_VERSION") except: print '' print 'CMSSW version cannot be determined from $CMSSW_VERSION' sys.exit(2) try: architecture = os.environ.get("SCRAM_ARCH") except: print '' print 'CMSSW architecture cannot be determined from $SCRAM_ARCH' sys.exit(2) try: from ProdCommon.DataMgmt.DBS.DBSReader import DBSReader except ImportError, ex: print ex print 'Please load prodAgent libraries (point $PYTHONPATH to the right path).' 
sys.exit(2) samplesFile = None processing_version = None initial_run = "666666" initial_event = "1" debug = False DBSURL = None pileup_dataset = None storeFail = False readDBS = 'http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet' onlySites = None scriptsDir = '$PUTIL' skip_config = False extra_label = '' workflow_label = '' try: opts, args = getopt.getopt(argv, "", ["help", "debug", "samples=", "version=", "DBSURL=", "event=", "lumi=", "pileupdataset=", "store-fail", "read-dbs=", "only-sites=", "scripts-dir=", "skip-config", "extra-label=", "workflow-label="]) except getopt.GetoptError: print main.__doc__ sys.exit(2) # check command line parameter for opt, arg in opts : if opt == "--help" : print main.__doc__ sys.exit() elif opt == "--debug" : debug = True elif opt == "--samples" : samplesFile = arg elif opt == "--version" : processing_version = arg elif opt == "--lumi" : initial_run = arg elif opt == "--event" : initial_event = arg elif opt == "--DBSURL" : DBSURL = arg elif opt == "--pileupdataset" : pileup_dataset = arg print arg elif opt == '--store-fail': storeFail = True elif opt == '--read-dbs': readDBS = arg elif opt == '--only-sites': onlySites = arg elif opt == '--scripts-dir': if arg.endswith('/') : scriptsDir = arg[:-1] else: scriptsDir = arg scriptsDirTemp = scriptsDir if scriptsDir.startswith('$') : scriptsDirTemp = os.environ.get(scriptsDir[1:],None) scriptsDir = os.path.expandvars(scriptsDirTemp) if scriptsDirTemp != None: if not os.path.exists(scriptsDirTemp): print "--scripts-dir argument does not exist, please verify." sys.exit(6) else: print "--scripts-dir argument does not exist, please verify." sys.exit(6) elif opt == "--skip-config": skip_config = True elif opt == "--extra-label": extra_label = arg elif opt == "--workflow-label": workflow_label = arg if samplesFile == None or processing_version == None or DBSURL == None : print main.__doc__ sys.exit(2) if debug: print "\nprepareRelValWorkflows.py was started with the following arguments: %s" % \ " ".join(argv) print "\n" samples = [] steps = {} primary_prefix = 'RelVal' max_step = 1 try: file = open(samplesFile) except IOError: print 'file with list of parameter-sets cannot be opened!' sys.exit(1) n_line = 0 print 'Parsing input file...' start_parse_time = time.time() for line in file.readlines(): n_line += 1 # Skipping lines with no info if line.strip() != '' and line.strip() != '\n' and \ not line.strip().startswith("#") and \ line.find('//') != 0: # I don't know what's the last condition for line_parts = [part.strip() for part in line.split('@@@') if part] dqmData = {} # Keys: Scenario, Run # // # // Parsing first step #// if not line.strip().startswith('STEP'): command = '' array = [] special_tag = '' conditions = None total_events = None events_per_job = None pile_up = False output_name = '' input_data = {} input_blocks = "" acq_era = version sample_info = line_parts[0].strip() # // # // Filling up sample's details #// sample_info_parts = [part.strip() for part in \ sample_info.split('++') if part] sample_number = sample_info_parts[0] #We might need this later sample_name = sample_info_parts[1] sample_steps = [i.strip() for i in \ sample_info_parts[2].split(',') if i] primary = primary_prefix + sample_name # // # // Is it a real data processing sample? According to this #// we assign or not the command variable. 
#\\ if line_parts[0].find('REALDATA') > -1: is_real_data = True else: is_real_data = False command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Parsing number of events #// if '--relval' in array : total_events = array[array.index('--relval')+1\ ].split(',')[0].strip() events_per_job = array[array.index('--relval')+1\ ].split(',')[1].strip() # // # // Special tag #// # FastSim if command.find('FASTSIM') > -1: special_tag = 'FastSim' # PileUp (at the same time with FastSim) if '--pileup' in array : # // # // Will use whatever argument of --pileup option is #// pileup_arg = array[array.index('--pileup') + 1] if pileup_arg.lower().strip() != 'nopileup': if special_tag: special_tag = "_".join( [special_tag, pileup_arg.strip()]) else: special_tag = pileup_arg.strip() pile_up = True if pileup_dataset is None : print "You have to provide a pileup dataset." print "Usually it is a MinBias (RAW)." print "Use option --pileupdataset" sys.exit(5) # // # // Sort of custom tag #// if '--beamspot' in array: beamspot_arg = \ array[array.index('--beamspot') + 1].strip() if special_tag: special_tag = "_".join( [special_tag, beamspot_arg]) else: special_tag = beamspot_arg # // # // Cfg file's output name #// output_name = "_".join( [x for x in [primary, conditions, special_tag] if x] ) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # Filling up DQM information dqmData['Run'] = 1 dqmData['Scenario'] = getDQMScenario(command) # // # // Collecting info for real data samples #// if is_real_data: # // # // Parsing dataset details. The following details are #// supported: REALDATA, RUN, LABEL, FILES, EVENTS, PDNAME #\\ # Producing tuples from the input options. data_options = [tuple(x.split(':')) \ for x in sample_info_parts[3].split(',') if x.strip()] # Parsing tuples for arg_v in data_options: if len(arg_v) == 2: input_data[arg_v[0].strip()] = arg_v[1].strip() elif len(arg_v) == 1: input_data[arg_v[0].strip()] = None else: print "Line %s has an extra ','." % (line) sys.exit(7) # // # // Verifiying optional arguments: LABEL, FILE, EVENTS, #// PRIMARY #\\ data_label = input_data.get('LABEL', '') data_files = input_data.get('FILES', '') data_events = input_data.get('EVENTS', '') data_pname = input_data.get('PRIMARY', '') if data_events: data_events = int(data_events) if data_files: data_files = int(data_events) # // # // Looking for best matching dataset. It should be just #// one, otherwise the script will exit. 
#\\ reader = DBSReader(readDBS) query = "find dataset where dataset like %s" % ( input_data['REALDATA']) result_xml = reader.dbs.executeQuery(query) # XML Handler parsed_datasets = [] global is_dataset is_dataset = False class Handler(xml.sax.handler.ContentHandler): def startElement(self, name, attrs): global is_dataset if name == 'dataset': is_dataset = True def characters(self, content): global is_dataset if is_dataset: parsed_datasets.append(content) def endElement(self, name): global is_dataset if name == 'dataset': is_dataset = False xml.sax.parseString(result_xml, Handler()) target_datasets = parsed_datasets # If more than one dataset is found. if len(target_datasets) > 1: # Is this an input relval dataset produced in the # current release? query = "find dataset where dataset like %s " % ( input_data['REALDATA']) query += "and release=%s" % version parsed_datasets = [] result_xml = reader.dbs.executeQuery(query) xml.sax.parseString(result_xml, Handler()) find_version = lambda x: x in parsed_datasets target_datasets = filter(find_version, target_datasets) # If more than one dataset is found, match the processing # version if len(target_datasets) > 1: find_version = \ lambda x: x.find(processing_version) != -1 target_datasets = filter(find_version, target_datasets) if len(target_datasets) > 1: msg = "Dataset pattern in line %s is too broad." % line msg += "These datasets were found: %s" % ( " ".join(target_datasets)) print msg sys.exit(8) if not target_datasets: msg = "Dataset pattern produced no match in line %s" % ( line) print msg sys.exit(8) # Now I can look up the blocks for this dataset. target_dataset = target_datasets[0] input_data['REALDATA'] = target_dataset # // # // Looking up the blocks for a given Dataset and a given run #// reader = DBSReader(readDBS) input_files = reader.dbs.listFiles(path=target_dataset, \ runNumber=input_data['RUN']) blocks = {} # // # // Parsing input blocks #// for input_file in input_files: cur_files = \ blocks.setdefault(input_file['Block']['Name'], {}).setdefault('Files', 0) cur_events = \ blocks[input_file['Block']['Name']].setdefault( 'Events', 0) blocks[input_file['Block']['Name']]['Files'] += 1 blocks[input_file['Block']['Name']]['Events'] += \ input_file['NumberOfEvents'] # // # // Truncating blocks list #// total_events = 0 total_files = 0 blocks_to_process = [] for block in blocks: blocks_to_process.append(block) total_events += blocks[block]['Events'] total_files += blocks[block]['Files'] if data_events and (data_events < total_events): break if data_files and (data_files < total_files): break input_blocks = ",".join(blocks_to_process) # // # // If PRIMARY is present or true, then it will use the #// sample_name value as primary dataset name, else it #\\ will use the input primary dataset name. # \\ if data_pname is None or \ data_pname.lower() in ('y', 't', 'true'): primary = "".join([primary_prefix, sample_name]) else: primary = \ [x for x in input_data['REALDATA'].split("/") if x][0] # // # // Seting special tag #// special_tag_parts = [] # Add RelVal tag if not present. if target_dataset.find(primary_prefix) == -1: special_tag_parts.append(primary_prefix) # Add LABEL if data_label: special_tag_parts.append(data_label) special_tag = "_".join(special_tag_parts) # // # // Setting Acq. 
Era #// #processed_dataset = target_dataset.split('/')[2] #dataset_acq_era = processed_dataset.split("-")[0] #if dataset_acq_era.startswith(version): # acq_era = version #else: # acq_era = dataset_acq_era # Filling up DQM information dqmData['Run'] = input_data['RUN'] # // # // Composing a dictionary per sample #// dict = {} dict['sampleName'] = sample_name dict['command'] = command dict['primary'] = primary dict['outputName'] = output_name dict['conditions'] = conditions dict['specialTag'] = special_tag dict['totalEvents'] = total_events dict['eventsPerJob'] = events_per_job dict['pileUp'] = pile_up dict['isRealData'] = is_real_data dict['inputData'] = input_data dict['inputBlocks'] = input_blocks dict['steps'] = sample_steps dict['AcqEra'] = acq_era dict['DQMData'] = dqmData samples.append(dict) if debug: print 'Parsing' print 'Sample:', sample_name print 'Command:', command print 'Conditions:', conditions print 'Special tag:', special_tag print 'Total events:', total_events print 'Events per job:', events_per_job print 'Steps:', sample_steps print 'PileUp:', pile_up print 'Input data:', input_data print 'Input blocks:', input_blocks print 'DQMData:', dqmData print '' # // # // No a first step command (second HLT table, RECO, ALCA, etc) #// else: step_number = int(line_parts[0].split('++')[0].strip()[-1]) step_name = line_parts[0].split('++')[1].strip() command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Cfg file's output name #// output_name = "_".join([step_name, conditions]) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # // # // Second trigger table? This may be changed, right now I am #// assuming that all 4 steps workflows are like this. #\\ stage_previous = True if step_number == 2: if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].find('RECO') < 0: stage_previous = False if step_number > max_step: max_step = step_number # // # // Composing a dictionary per step #// dict = {} dict['stepNumber'] = step_number dict['command'] = command dict['outputName'] = output_name dict['conditions'] = conditions dict['stagePrevious'] = stage_previous dict['DQMData'] = {'Scenario': getDQMScenario(command)} # // # // Step name should be unique #// if step_name not in steps: steps[step_name] = dict else: print "Label %s is repeated!!!" 
% step_name sys.exit(1) if debug: print 'Parsing' print 'Step name:', step_name print 'Step number:', step_number print 'Command:', command print 'Conditions:', conditions print 'Stage previous:', stage_previous print 'DQM Data:', dict['DQMData'] print '' parse_time = time.time() - start_parse_time file.close() if debug: print "Collected information step 1" for sample in samples: print 'Sample name:', sample['sampleName'] print 'Command', sample['command'] print 'Real data:', sample['isRealData'] print 'Input data:', sample['inputData'] print 'Input blocks', sample['inputBlocks'] print 'Conditions:', sample['conditions'] print 'Total events:', sample['totalEvents'] print 'Events per job:', sample['eventsPerJob'] print 'Output name:', sample['outputName'] print 'Steps:', sample['steps'] print 'PileUp:', sample['pileUp'] print 'Special tag:', sample['specialTag'] print 'Acq. Era:', sample['AcqEra'] print 'DQM data:', sample['DQMData'] print '' for i in range(2, max_step+1): print 'Collected information step %s' % i for step in steps: if steps[step]['stepNumber'] == i: print 'Step name:', step print 'Command:', steps[step]['command'] print 'Conditions:', steps[step]['conditions'] print 'Stage previous:', steps[step]['stagePrevious'] print 'DQM Data:', steps[step]['DQMData'] print '' # // # // Execute cmsDriver command #// print '' print 'Executing cmsDriver commands for step 1 configurations' print '' start_cmsDriver_time = time.time() for sample in samples: if not sample['isRealData']: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), sample['outputName']])) and skip_config: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'was already issued, skipping.' continue exitCode, output, error = executeCommand(sample['command']) if exitCode == 0: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'exited with ExitCode:', exitCode else : print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'failed with ExitCode:', exitCode sys.exit(1) else : msg = 'Real Data:\n' msg += 'Input dataset: %s\n' % (sample['inputData']['REALDATA']) msg += 'Run: %s\n' % (sample['inputData']['RUN']) msg += 'Input blocks: %s' % (sample['inputBlocks']) print msg for i in range(2, max_step+1): print '' print 'Executing cmsDriver commands for step %s configurations' % i print '' for step in steps: if steps[step]['stepNumber'] == i: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), steps[step]['outputName']])) and skip_config: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'],'was already issued, skipping.' 
continue exitCode, output, error = executeCommand(steps[step]['command']) if exitCode == 0: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'exited with ExitCode:', exitCode else: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'failed with ExitCode:', exitCode sys.exit(1) cmsDriver_time = time.time() - start_cmsDriver_time print '' print 'Workflow creation' print '' start_workflow_time = time.time() datasets = [] unmergedDatasets = [] mergedDatasets = [] workflows = {} # // # // Create workflows #// for sample in samples: command = 'python ' + scriptsDir conditions = '' # Conditions -> processingString # // # // In case we are processing data #// if sample['isRealData']: command += '/createProcessingWorkflow.py \\\n' # Not changing the primary dataset name for real data. command += '--override-channel=' + sample['primary'] + ' \\\n' command += '--dataset=' + sample['inputData']['REALDATA'] + ' \\\n' command += '--only-blocks=' + sample['inputBlocks'] + ' \\\n' command += '--dbs-url=' + readDBS + ' \\\n' conditions = steps[sample['steps'][0]]['conditions'] command += '--split-type=file \\\n' command += '--split-size=1 \\\n' # // # // MC workflows #// else: command += '/createProductionWorkflow.py \\\n' command += '--channel=' + sample['primary'] + ' \\\n' conditions = sample['conditions'] command += '--starting-run=' + initial_run + ' \\\n' command += '--starting-event=' + initial_event + ' \\\n' command += '--totalevents=' + sample['totalEvents'] + ' \\\n' command += '--eventsperjob=' + sample['eventsPerJob'] + ' \\\n' if sample['pileUp']: command += '--pileup-dataset=' + pileup_dataset + ' \\\n' if storeFail: command += '--store-fail=True \\\n' # // # // First step #// command += '--version=' + version + ' \\\n' command += '--py-cfg=' + sample['outputName'] + ' \\\n' # // # // Input configurations (Second step and further) #// if sample['steps'][0].lower().strip() != 'none': for i, step in enumerate(sample['steps']): command += '--version=' + version + ' \\\n' command += '--py-cfg=' + steps[step]['outputName'] + ' \\\n' if i != 0 or not sample['isRealData']: command += '--stageout-intermediates=%s \\\n' % ( steps[step]['stagePrevious']) command += '--chained-input=output \\\n' else: dqmScenario = steps[step]['DQMData']['Scenario'] # // # // If a two-hlt tables workflow, will take conditions from #// the second step information #\\ if not steps[step]['stagePrevious'] and \ i == 0: conditions = steps[step]['conditions'] # // # // Common options #// command += '--group=RelVal \\\n' command += '--category=relval \\\n' command += '--activity=RelVal \\\n' command += '--acquisition_era=' + sample['AcqEra'] + ' \\\n' command += '--only-sites=' + onlySites + ' \\\n' command += '--processing_version=' + processing_version + ' \\\n' # Workflow label if workflow_label: command += '--workflow_tag=' + workflow_label + ' \\\n' # // # // processingString="CMSSWVersion"_"Conditions"_"specialTag"_"extra-label" #// CMSSWVersion is appended only when the input dataset does not have it. 
#\\ processing_string_parts = [] if sample['AcqEra'] != version: processing_string_parts.append(version) processing_string_parts.append(conditions) if sample['specialTag']: processing_string_parts.append(sample['specialTag']) if extra_label: processing_string_parts.append(extra_label) command += '--processing_string=' + "_".join(processing_string_parts) if debug: print command print '' start_command_time = time.time() exitCode, output, error = executeCommand(command) command_time = time.time() - start_command_time if debug: print output print '' if exitCode == 0: #parse output tmp = [] index = FindIndex(output,'Output Datasets') for dataset in output[index+1:]: tmp.append(dataset.strip()) # DQM Data dqmInfo = {} dqmInfo['Run'] = sample['DQMData']['Run'] if sample['isRealData']: dqmInfo['Scenario'] = dqmScenario else: dqmInfo['Scenario'] = sample['DQMData']['Scenario'] datasets.append({'unmerged': tmp, 'totalEvents': sample['totalEvents'], 'merged': [x.replace('-unmerged','') for x in tmp], 'DQMData': dqmInfo }) unmergedDatasets.append(tmp) index = FindIndex(output,'Created') if index == -1: print "No workflow was created by create*workflow.py" sys.exit(1) workflow = output[index].split()[1].strip() workflows.setdefault(workflow, {})['isRealData'] = sample['isRealData'] workflows[workflow]['time'] = command_time print 'workflow creation command for workflow:', workflow, \ 'exited with ExitCode:', exitCode else : print 'workflow creation command:' print command print 'failed' sys.exit(1) if debug: print 'Created workflows:' print workflows.keys() print '' print "Unmerged datasets:" print unmergedDatasets # extract merged datasets for sample in unmergedDatasets: tmp = [] for dataset in sample: tmp.append(dataset.replace('-unmerged','')) mergedDatasets.append(tmp) workflow_time = time.time() - start_workflow_time print '' print 'Write helper scripts' print '' # WorkflowInjector:Input script inputScript = open('input.sh','w') inputScript.write('#!/bin/bash\n') feeder = 'None' for workflow in workflows.keys(): if workflows[workflow]['isRealData']: if feeder.find('ReReco') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin BlockFeeder\n') feeder = 'ReReco' else : if feeder.find('Request') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin RequestFeeder\n') feeder = 'Request' inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:Input ' + os.path.join(os.getcwd(), workflow) + '\n') inputScript.close() os.chmod('input.sh',0755) print 'Wrote WorkflowInjector:Input script to:',os.path.join(os.getcwd(),'input.sh') # ForceMerge forceMergeScript = open('forceMerge.sh','w') forceMergeScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : forceMergeScript.write('python $PRODAGENT_ROOT/util/publish.py ForceMerge ' + dataset + '\n') forceMergeScript.close() os.chmod('forceMerge.sh',0755) print 'Wrote ForceMerge script to:',os.path.join(os.getcwd(),'forceMerge.sh') # MigrateDatasetToGlobal migrateScript = open('migrateToGlobal.sh','w') migrateScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : migrateScript.write('python $PRODAGENT_ROOT/util/publish.py DBSInterface:MigrateDatasetToGlobal ' + dataset + '\n') migrateScript.close() os.chmod('migrateToGlobal.sh',0755) print 'Wrote DBSInterface:MigrateDatasetToGlobal script to:',os.path.join(os.getcwd(),'migrateToGlobal.sh') # PhEDExInjectDataset phedexScript = open('injectIntoPhEDEx.sh','w') 
phedexScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : phedexScript.write('python $PRODAGENT_ROOT/util/publish.py PhEDExInjectDataset ' + dataset + '\n') phedexScript.close() os.chmod('injectIntoPhEDEx.sh',0755) print 'Wrote PhEDExInjectDataset script to:',os.path.join(os.getcwd(),'injectIntoPhEDEx.sh') # DBS: query unmerged datasets queryUnmergedScript = open('queryUnmerged.sh','w') queryUnmergedScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryUnmergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryUnmergedScript.close() os.chmod('queryUnmerged.sh',0755) print 'Wrote DBS query script for unmerged datasets to:',os.path.join(os.getcwd(),'queryUnmerged.sh') # DBS: query merged datasets queryMergedScript = open('queryMerged.sh','w') queryMergedScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryMergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryMergedScript.close() os.chmod('queryMerged.sh',0755) print 'Wrote DBS query script for merged datasets to:',os.path.join(os.getcwd(),'queryMerged.sh') # DQMHarvesting DQMinputScript = open('DQMinput.sh','w') DQMinputScript.write("#!/bin/bash\n") reHarvest = re.compile(r'/.*/.*/(RECO|.*-RECO)') # Only RECO datasets for now. for sample in datasets: for dataset in sample['merged']: if reHarvest.match(dataset): DQMinputScript.write('python $PRODAGENT_ROOT/util/harvestDQM.py --run=%s --path=%s --scenario=%s\n' % ( sample['DQMData']['Run'], dataset, sample['DQMData']['Scenario'])) os.chmod('DQMinput.sh',0755) print 'Wrote DQMHarvesting script for merged datasets to:', os.path.join(os.getcwd(),'DQMinput.sh') # Output datasets list outputList = open('outputDatasets.txt','w') for sample in mergedDatasets : for dataset in sample : outputList.write(dataset + "\n") print 'Wrote output datasets list to:', os.path.join(os.getcwd(),'outputDatasets.txt') # File with expected number of events numberOfEvents = open('eventsExpected.txt','w') for sample in datasets: for dataset in sample['merged']: numberOfEvents.write("%s %s\n" % (sample['totalEvents'],dataset)) numberOfEvents.close() print 'Wrote events per dataset to:', os.path.join(os.getcwd(),'eventsExpected.txt') total_time = time.time() - start_total_time # File with timing report (Parsing, cmsDriver comands, workflow creation) timingInfo = open('timingInfo.txt', 'w') timingInfo.write('Total time: %s s\n' % total_time) timingInfo.write('Cofigs. creation time: %s s\n' % cmsDriver_time) timingInfo.write('Workflows creation time: %s s\n' % workflow_time) output_text = [] sum = 0 for workflow in workflows: if sum == 0: min = [workflow, workflows[workflow]['time']] max = [workflow, workflows[workflow]['time']] sum += workflows[workflow]['time'] output_text.append("%s: %s s" % (workflow, workflows[workflow]['time'])) if max[1] < workflows[workflow]['time']: max = [workflow, workflows[workflow]['time']] if min[1] > workflows[workflow]['time']: min = [workflow, workflows[workflow]['time']] timingInfo.write('Average time per workflow: %s s\n' % (int(sum) / int(len(workflows)))) timingInfo.write('Max. time on %s: %s s\n' % tuple(max)) timingInfo.write('Min. 
time on %s: %s s\n' % tuple(min)) timingInfo.write('=' * 10 + '\n') timingInfo.write('Details of time per workflow:\n%s\n' % "\n".join(output_text)) timingInfo.close() |
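The tail of this context builds the timing report by tracking sum, min, and max by hand inside the loop over workflows. A minimal standalone sketch of the same summary over the workflows dictionary (the sample dict below is hypothetical; only the {'time': ...} layout mirrors the structure built in the context above):

    # Hypothetical sample of the per-workflow timing structure built above.
    workflows = {'wf_A': {'time': 12.3, 'isRealData': False},
                 'wf_B': {'time': 4.5, 'isRealData': True}}
    times = dict((name, info['time']) for name, info in workflows.items())
    total = sum(times.values())
    slowest = max(times, key=times.get)   # workflow with the largest creation time
    fastest = min(times, key=times.get)   # workflow with the smallest creation time
    summary = ['Average time per workflow: %s s' % (total / len(times)),
               'Max. time on %s: %s s' % (slowest, times[slowest]),
               'Min. time on %s: %s s' % (fastest, times[fastest])]
    print("\n".join(summary))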
try: architecture = os.environ.get("SCRAM_ARCH") except: | architecture = os.environ.get("SCRAM_ARCH") if architecture is None: | def main(argv) : """ prepareRelValworkflows prepare workflows for chained processing of RelVal samples - parse file holding cmsDriver commands for 1st and 2nd steps - prepare workflows - prepare WorkflowInjector:Input script - prepare ForceMerge script - prepare DBSMigrationToGlobal script - prepare PhEDExInjection script - prepare local DBS query script required parameters --samples <textfile> : list of RelVal sample parameter-sets in plain text file, one sample per line, # marks comment --version <processing version> : processing version (v1, v2, ... ) --DBSURL <URL> : URL of the local DBS (http://cmsdbsprod.cern.ch/cms_dbs_prod_local_07/servlet/DBSServlet | http://cmssrv46.fnal.gov:8080/DBS208/servlet/DBSServlet) --only-sites : Site where dataset is going to be processed or where the input dataset is taken from. Usually srm-cms.cern.ch and cmssrm.fnal.gov optional parameters --pileupdataset : input pileup dataset. It must be provided if the <samples> txt file contains PilepUp samples --lumi <number> : initial run for generation (default: 666666), set it to 777777 for high statistics samples --event <number> : initial event number (default: 1) --store-fail : store output files for failed jobs in chain processing. --read-dbs : DBS URL used for obtaining the list of available blocks for real data. Default: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet --scripts-dir : Path to workflow creation scripts (default: $PUTIL) --skip-config : Is the configuration file was already created, it will skip cmsDriver command execution --extra-label : Extra label for identifying the datasets: /RelVal*/CMSSW_X_Y_Z-<Conditions>_<SpecialTag>_<ExtraLabel>_<FilterName>-<version>/TIER --workflow-label : Label for the workflows. --help (-h) : help --debug (-d) : debug statements """ start_total_time = time.time() # default try: version = os.environ.get("CMSSW_VERSION") except: print '' print 'CMSSW version cannot be determined from $CMSSW_VERSION' sys.exit(2) try: architecture = os.environ.get("SCRAM_ARCH") except: print '' print 'CMSSW architecture cannot be determined from $SCRAM_ARCH' sys.exit(2) try: from ProdCommon.DataMgmt.DBS.DBSReader import DBSReader except ImportError, ex: print ex print 'Please load prodAgent libraries (point $PYTHONPATH to the right path).' 
sys.exit(2) samplesFile = None processing_version = None initial_run = "666666" initial_event = "1" debug = False DBSURL = None pileup_dataset = None storeFail = False readDBS = 'http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet' onlySites = None scriptsDir = '$PUTIL' skip_config = False extra_label = '' workflow_label = '' try: opts, args = getopt.getopt(argv, "", ["help", "debug", "samples=", "version=", "DBSURL=", "event=", "lumi=", "pileupdataset=", "store-fail", "read-dbs=", "only-sites=", "scripts-dir=", "skip-config", "extra-label=", "workflow-label="]) except getopt.GetoptError: print main.__doc__ sys.exit(2) # check command line parameter for opt, arg in opts : if opt == "--help" : print main.__doc__ sys.exit() elif opt == "--debug" : debug = True elif opt == "--samples" : samplesFile = arg elif opt == "--version" : processing_version = arg elif opt == "--lumi" : initial_run = arg elif opt == "--event" : initial_event = arg elif opt == "--DBSURL" : DBSURL = arg elif opt == "--pileupdataset" : pileup_dataset = arg print arg elif opt == '--store-fail': storeFail = True elif opt == '--read-dbs': readDBS = arg elif opt == '--only-sites': onlySites = arg elif opt == '--scripts-dir': if arg.endswith('/') : scriptsDir = arg[:-1] else: scriptsDir = arg scriptsDirTemp = scriptsDir if scriptsDir.startswith('$') : scriptsDirTemp = os.environ.get(scriptsDir[1:],None) scriptsDir = os.path.expandvars(scriptsDirTemp) if scriptsDirTemp != None: if not os.path.exists(scriptsDirTemp): print "--scripts-dir argument does not exist, please verify." sys.exit(6) else: print "--scripts-dir argument does not exist, please verify." sys.exit(6) elif opt == "--skip-config": skip_config = True elif opt == "--extra-label": extra_label = arg elif opt == "--workflow-label": workflow_label = arg if samplesFile == None or processing_version == None or DBSURL == None : print main.__doc__ sys.exit(2) if debug: print "\nprepareRelValWorkflows.py was started with the following arguments: %s" % \ " ".join(argv) print "\n" samples = [] steps = {} primary_prefix = 'RelVal' max_step = 1 try: file = open(samplesFile) except IOError: print 'file with list of parameter-sets cannot be opened!' sys.exit(1) n_line = 0 print 'Parsing input file...' start_parse_time = time.time() for line in file.readlines(): n_line += 1 # Skipping lines with no info if line.strip() != '' and line.strip() != '\n' and \ not line.strip().startswith("#") and \ line.find('//') != 0: # I don't know what's the last condition for line_parts = [part.strip() for part in line.split('@@@') if part] dqmData = {} # Keys: Scenario, Run # // # // Parsing first step #// if not line.strip().startswith('STEP'): command = '' array = [] special_tag = '' conditions = None total_events = None events_per_job = None pile_up = False output_name = '' input_data = {} input_blocks = "" acq_era = version sample_info = line_parts[0].strip() # // # // Filling up sample's details #// sample_info_parts = [part.strip() for part in \ sample_info.split('++') if part] sample_number = sample_info_parts[0] #We might need this later sample_name = sample_info_parts[1] sample_steps = [i.strip() for i in \ sample_info_parts[2].split(',') if i] primary = primary_prefix + sample_name # // # // Is it a real data processing sample? According to this #// we assign or not the command variable. 
#\\ if line_parts[0].find('REALDATA') > -1: is_real_data = True else: is_real_data = False command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Parsing number of events #// if '--relval' in array : total_events = array[array.index('--relval')+1\ ].split(',')[0].strip() events_per_job = array[array.index('--relval')+1\ ].split(',')[1].strip() # // # // Special tag #// # FastSim if command.find('FASTSIM') > -1: special_tag = 'FastSim' # PileUp (at the same time with FastSim) if '--pileup' in array : # // # // Will use whatever argument of --pileup option is #// pileup_arg = array[array.index('--pileup') + 1] if pileup_arg.lower().strip() != 'nopileup': if special_tag: special_tag = "_".join( [special_tag, pileup_arg.strip()]) else: special_tag = pileup_arg.strip() pile_up = True if pileup_dataset is None : print "You have to provide a pileup dataset." print "Usually it is a MinBias (RAW)." print "Use option --pileupdataset" sys.exit(5) # // # // Sort of custom tag #// if '--beamspot' in array: beamspot_arg = \ array[array.index('--beamspot') + 1].strip() if special_tag: special_tag = "_".join( [special_tag, beamspot_arg]) else: special_tag = beamspot_arg # // # // Cfg file's output name #// output_name = "_".join( [x for x in [primary, conditions, special_tag] if x] ) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # Filling up DQM information dqmData['Run'] = 1 dqmData['Scenario'] = getDQMScenario(command) # // # // Collecting info for real data samples #// if is_real_data: # // # // Parsing dataset details. The following details are #// supported: REALDATA, RUN, LABEL, FILES, EVENTS, PDNAME #\\ # Producing tuples from the input options. data_options = [tuple(x.split(':')) \ for x in sample_info_parts[3].split(',') if x.strip()] # Parsing tuples for arg_v in data_options: if len(arg_v) == 2: input_data[arg_v[0].strip()] = arg_v[1].strip() elif len(arg_v) == 1: input_data[arg_v[0].strip()] = None else: print "Line %s has an extra ','." % (line) sys.exit(7) # // # // Verifiying optional arguments: LABEL, FILE, EVENTS, #// PRIMARY #\\ data_label = input_data.get('LABEL', '') data_files = input_data.get('FILES', '') data_events = input_data.get('EVENTS', '') data_pname = input_data.get('PRIMARY', '') if data_events: data_events = int(data_events) if data_files: data_files = int(data_events) # // # // Looking for best matching dataset. It should be just #// one, otherwise the script will exit. 
#\\ reader = DBSReader(readDBS) query = "find dataset where dataset like %s" % ( input_data['REALDATA']) result_xml = reader.dbs.executeQuery(query) # XML Handler parsed_datasets = [] global is_dataset is_dataset = False class Handler(xml.sax.handler.ContentHandler): def startElement(self, name, attrs): global is_dataset if name == 'dataset': is_dataset = True def characters(self, content): global is_dataset if is_dataset: parsed_datasets.append(content) def endElement(self, name): global is_dataset if name == 'dataset': is_dataset = False xml.sax.parseString(result_xml, Handler()) target_datasets = parsed_datasets # If more than one dataset is found. if len(target_datasets) > 1: # Is this an input relval dataset produced in the # current release? query = "find dataset where dataset like %s " % ( input_data['REALDATA']) query += "and release=%s" % version parsed_datasets = [] result_xml = reader.dbs.executeQuery(query) xml.sax.parseString(result_xml, Handler()) find_version = lambda x: x in parsed_datasets target_datasets = filter(find_version, target_datasets) # If more than one dataset is found, match the processing # version if len(target_datasets) > 1: find_version = \ lambda x: x.find(processing_version) != -1 target_datasets = filter(find_version, target_datasets) if len(target_datasets) > 1: msg = "Dataset pattern in line %s is too broad." % line msg += "These datasets were found: %s" % ( " ".join(target_datasets)) print msg sys.exit(8) if not target_datasets: msg = "Dataset pattern produced no match in line %s" % ( line) print msg sys.exit(8) # Now I can look up the blocks for this dataset. target_dataset = target_datasets[0] input_data['REALDATA'] = target_dataset # // # // Looking up the blocks for a given Dataset and a given run #// reader = DBSReader(readDBS) input_files = reader.dbs.listFiles(path=target_dataset, \ runNumber=input_data['RUN']) blocks = {} # // # // Parsing input blocks #// for input_file in input_files: cur_files = \ blocks.setdefault(input_file['Block']['Name'], {}).setdefault('Files', 0) cur_events = \ blocks[input_file['Block']['Name']].setdefault( 'Events', 0) blocks[input_file['Block']['Name']]['Files'] += 1 blocks[input_file['Block']['Name']]['Events'] += \ input_file['NumberOfEvents'] # // # // Truncating blocks list #// total_events = 0 total_files = 0 blocks_to_process = [] for block in blocks: blocks_to_process.append(block) total_events += blocks[block]['Events'] total_files += blocks[block]['Files'] if data_events and (data_events < total_events): break if data_files and (data_files < total_files): break input_blocks = ",".join(blocks_to_process) # // # // If PRIMARY is present or true, then it will use the #// sample_name value as primary dataset name, else it #\\ will use the input primary dataset name. # \\ if data_pname is None or \ data_pname.lower() in ('y', 't', 'true'): primary = "".join([primary_prefix, sample_name]) else: primary = \ [x for x in input_data['REALDATA'].split("/") if x][0] # // # // Seting special tag #// special_tag_parts = [] # Add RelVal tag if not present. if target_dataset.find(primary_prefix) == -1: special_tag_parts.append(primary_prefix) # Add LABEL if data_label: special_tag_parts.append(data_label) special_tag = "_".join(special_tag_parts) # // # // Setting Acq. 
Era #// #processed_dataset = target_dataset.split('/')[2] #dataset_acq_era = processed_dataset.split("-")[0] #if dataset_acq_era.startswith(version): # acq_era = version #else: # acq_era = dataset_acq_era # Filling up DQM information dqmData['Run'] = input_data['RUN'] # // # // Composing a dictionary per sample #// dict = {} dict['sampleName'] = sample_name dict['command'] = command dict['primary'] = primary dict['outputName'] = output_name dict['conditions'] = conditions dict['specialTag'] = special_tag dict['totalEvents'] = total_events dict['eventsPerJob'] = events_per_job dict['pileUp'] = pile_up dict['isRealData'] = is_real_data dict['inputData'] = input_data dict['inputBlocks'] = input_blocks dict['steps'] = sample_steps dict['AcqEra'] = acq_era dict['DQMData'] = dqmData samples.append(dict) if debug: print 'Parsing' print 'Sample:', sample_name print 'Command:', command print 'Conditions:', conditions print 'Special tag:', special_tag print 'Total events:', total_events print 'Events per job:', events_per_job print 'Steps:', sample_steps print 'PileUp:', pile_up print 'Input data:', input_data print 'Input blocks:', input_blocks print 'DQMData:', dqmData print '' # // # // No a first step command (second HLT table, RECO, ALCA, etc) #// else: step_number = int(line_parts[0].split('++')[0].strip()[-1]) step_name = line_parts[0].split('++')[1].strip() command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Cfg file's output name #// output_name = "_".join([step_name, conditions]) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # // # // Second trigger table? This may be changed, right now I am #// assuming that all 4 steps workflows are like this. #\\ stage_previous = True if step_number == 2: if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].find('RECO') < 0: stage_previous = False if step_number > max_step: max_step = step_number # // # // Composing a dictionary per step #// dict = {} dict['stepNumber'] = step_number dict['command'] = command dict['outputName'] = output_name dict['conditions'] = conditions dict['stagePrevious'] = stage_previous dict['DQMData'] = {'Scenario': getDQMScenario(command)} # // # // Step name should be unique #// if step_name not in steps: steps[step_name] = dict else: print "Label %s is repeated!!!" 
% step_name sys.exit(1) if debug: print 'Parsing' print 'Step name:', step_name print 'Step number:', step_number print 'Command:', command print 'Conditions:', conditions print 'Stage previous:', stage_previous print 'DQM Data:', dict['DQMData'] print '' parse_time = time.time() - start_parse_time file.close() if debug: print "Collected information step 1" for sample in samples: print 'Sample name:', sample['sampleName'] print 'Command', sample['command'] print 'Real data:', sample['isRealData'] print 'Input data:', sample['inputData'] print 'Input blocks', sample['inputBlocks'] print 'Conditions:', sample['conditions'] print 'Total events:', sample['totalEvents'] print 'Events per job:', sample['eventsPerJob'] print 'Output name:', sample['outputName'] print 'Steps:', sample['steps'] print 'PileUp:', sample['pileUp'] print 'Special tag:', sample['specialTag'] print 'Acq. Era:', sample['AcqEra'] print 'DQM data:', sample['DQMData'] print '' for i in range(2, max_step+1): print 'Collected information step %s' % i for step in steps: if steps[step]['stepNumber'] == i: print 'Step name:', step print 'Command:', steps[step]['command'] print 'Conditions:', steps[step]['conditions'] print 'Stage previous:', steps[step]['stagePrevious'] print 'DQM Data:', steps[step]['DQMData'] print '' # // # // Execute cmsDriver command #// print '' print 'Executing cmsDriver commands for step 1 configurations' print '' start_cmsDriver_time = time.time() for sample in samples: if not sample['isRealData']: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), sample['outputName']])) and skip_config: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'was already issued, skipping.' continue exitCode, output, error = executeCommand(sample['command']) if exitCode == 0: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'exited with ExitCode:', exitCode else : print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'failed with ExitCode:', exitCode sys.exit(1) else : msg = 'Real Data:\n' msg += 'Input dataset: %s\n' % (sample['inputData']['REALDATA']) msg += 'Run: %s\n' % (sample['inputData']['RUN']) msg += 'Input blocks: %s' % (sample['inputBlocks']) print msg for i in range(2, max_step+1): print '' print 'Executing cmsDriver commands for step %s configurations' % i print '' for step in steps: if steps[step]['stepNumber'] == i: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), steps[step]['outputName']])) and skip_config: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'],'was already issued, skipping.' 
continue exitCode, output, error = executeCommand(steps[step]['command']) if exitCode == 0: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'exited with ExitCode:', exitCode else: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'failed with ExitCode:', exitCode sys.exit(1) cmsDriver_time = time.time() - start_cmsDriver_time print '' print 'Workflow creation' print '' start_workflow_time = time.time() datasets = [] unmergedDatasets = [] mergedDatasets = [] workflows = {} # // # // Create workflows #// for sample in samples: command = 'python ' + scriptsDir conditions = '' # Conditions -> processingString # // # // In case we are processing data #// if sample['isRealData']: command += '/createProcessingWorkflow.py \\\n' # Not changing the primary dataset name for real data. command += '--override-channel=' + sample['primary'] + ' \\\n' command += '--dataset=' + sample['inputData']['REALDATA'] + ' \\\n' command += '--only-blocks=' + sample['inputBlocks'] + ' \\\n' command += '--dbs-url=' + readDBS + ' \\\n' conditions = steps[sample['steps'][0]]['conditions'] command += '--split-type=file \\\n' command += '--split-size=1 \\\n' # // # // MC workflows #// else: command += '/createProductionWorkflow.py \\\n' command += '--channel=' + sample['primary'] + ' \\\n' conditions = sample['conditions'] command += '--starting-run=' + initial_run + ' \\\n' command += '--starting-event=' + initial_event + ' \\\n' command += '--totalevents=' + sample['totalEvents'] + ' \\\n' command += '--eventsperjob=' + sample['eventsPerJob'] + ' \\\n' if sample['pileUp']: command += '--pileup-dataset=' + pileup_dataset + ' \\\n' if storeFail: command += '--store-fail=True \\\n' # // # // First step #// command += '--version=' + version + ' \\\n' command += '--py-cfg=' + sample['outputName'] + ' \\\n' # // # // Input configurations (Second step and further) #// if sample['steps'][0].lower().strip() != 'none': for i, step in enumerate(sample['steps']): command += '--version=' + version + ' \\\n' command += '--py-cfg=' + steps[step]['outputName'] + ' \\\n' if i != 0 or not sample['isRealData']: command += '--stageout-intermediates=%s \\\n' % ( steps[step]['stagePrevious']) command += '--chained-input=output \\\n' else: dqmScenario = steps[step]['DQMData']['Scenario'] # // # // If a two-hlt tables workflow, will take conditions from #// the second step information #\\ if not steps[step]['stagePrevious'] and \ i == 0: conditions = steps[step]['conditions'] # // # // Common options #// command += '--group=RelVal \\\n' command += '--category=relval \\\n' command += '--activity=RelVal \\\n' command += '--acquisition_era=' + sample['AcqEra'] + ' \\\n' command += '--only-sites=' + onlySites + ' \\\n' command += '--processing_version=' + processing_version + ' \\\n' # Workflow label if workflow_label: command += '--workflow_tag=' + workflow_label + ' \\\n' # // # // processingString="CMSSWVersion"_"Conditions"_"specialTag"_"extra-label" #// CMSSWVersion is appended only when the input dataset does not have it. 
#\\ processing_string_parts = [] if sample['AcqEra'] != version: processing_string_parts.append(version) processing_string_parts.append(conditions) if sample['specialTag']: processing_string_parts.append(sample['specialTag']) if extra_label: processing_string_parts.append(extra_label) command += '--processing_string=' + "_".join(processing_string_parts) if debug: print command print '' start_command_time = time.time() exitCode, output, error = executeCommand(command) command_time = time.time() - start_command_time if debug: print output print '' if exitCode == 0: #parse output tmp = [] index = FindIndex(output,'Output Datasets') for dataset in output[index+1:]: tmp.append(dataset.strip()) # DQM Data dqmInfo = {} dqmInfo['Run'] = sample['DQMData']['Run'] if sample['isRealData']: dqmInfo['Scenario'] = dqmScenario else: dqmInfo['Scenario'] = sample['DQMData']['Scenario'] datasets.append({'unmerged': tmp, 'totalEvents': sample['totalEvents'], 'merged': [x.replace('-unmerged','') for x in tmp], 'DQMData': dqmInfo }) unmergedDatasets.append(tmp) index = FindIndex(output,'Created') if index == -1: print "No workflow was created by create*workflow.py" sys.exit(1) workflow = output[index].split()[1].strip() workflows.setdefault(workflow, {})['isRealData'] = sample['isRealData'] workflows[workflow]['time'] = command_time print 'workflow creation command for workflow:', workflow, \ 'exited with ExitCode:', exitCode else : print 'workflow creation command:' print command print 'failed' sys.exit(1) if debug: print 'Created workflows:' print workflows.keys() print '' print "Unmerged datasets:" print unmergedDatasets # extract merged datasets for sample in unmergedDatasets: tmp = [] for dataset in sample: tmp.append(dataset.replace('-unmerged','')) mergedDatasets.append(tmp) workflow_time = time.time() - start_workflow_time print '' print 'Write helper scripts' print '' # WorkflowInjector:Input script inputScript = open('input.sh','w') inputScript.write('#!/bin/bash\n') feeder = 'None' for workflow in workflows.keys(): if workflows[workflow]['isRealData']: if feeder.find('ReReco') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin BlockFeeder\n') feeder = 'ReReco' else : if feeder.find('Request') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin RequestFeeder\n') feeder = 'Request' inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:Input ' + os.path.join(os.getcwd(), workflow) + '\n') inputScript.close() os.chmod('input.sh',0755) print 'Wrote WorkflowInjector:Input script to:',os.path.join(os.getcwd(),'input.sh') # ForceMerge forceMergeScript = open('forceMerge.sh','w') forceMergeScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : forceMergeScript.write('python $PRODAGENT_ROOT/util/publish.py ForceMerge ' + dataset + '\n') forceMergeScript.close() os.chmod('forceMerge.sh',0755) print 'Wrote ForceMerge script to:',os.path.join(os.getcwd(),'forceMerge.sh') # MigrateDatasetToGlobal migrateScript = open('migrateToGlobal.sh','w') migrateScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : migrateScript.write('python $PRODAGENT_ROOT/util/publish.py DBSInterface:MigrateDatasetToGlobal ' + dataset + '\n') migrateScript.close() os.chmod('migrateToGlobal.sh',0755) print 'Wrote DBSInterface:MigrateDatasetToGlobal script to:',os.path.join(os.getcwd(),'migrateToGlobal.sh') # PhEDExInjectDataset phedexScript = open('injectIntoPhEDEx.sh','w') 
phedexScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : phedexScript.write('python $PRODAGENT_ROOT/util/publish.py PhEDExInjectDataset ' + dataset + '\n') phedexScript.close() os.chmod('injectIntoPhEDEx.sh',0755) print 'Wrote PhEDExInjectDataset script to:',os.path.join(os.getcwd(),'injectIntoPhEDEx.sh') # DBS: query unmerged datasets queryUnmergedScript = open('queryUnmerged.sh','w') queryUnmergedScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryUnmergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryUnmergedScript.close() os.chmod('queryUnmerged.sh',0755) print 'Wrote DBS query script for unmerged datasets to:',os.path.join(os.getcwd(),'queryUnmerged.sh') # DBS: query merged datasets queryMergedScript = open('queryMerged.sh','w') queryMergedScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryMergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryMergedScript.close() os.chmod('queryMerged.sh',0755) print 'Wrote DBS query script for merged datasets to:',os.path.join(os.getcwd(),'queryMerged.sh') # DQMHarvesting DQMinputScript = open('DQMinput.sh','w') DQMinputScript.write("#!/bin/bash\n") reHarvest = re.compile(r'/.*/.*/(RECO|.*-RECO)') # Only RECO datasets for now. for sample in datasets: for dataset in sample['merged']: if reHarvest.match(dataset): DQMinputScript.write('python $PRODAGENT_ROOT/util/harvestDQM.py --run=%s --path=%s --scenario=%s\n' % ( sample['DQMData']['Run'], dataset, sample['DQMData']['Scenario'])) os.chmod('DQMinput.sh',0755) print 'Wrote DQMHarvesting script for merged datasets to:', os.path.join(os.getcwd(),'DQMinput.sh') # Output datasets list outputList = open('outputDatasets.txt','w') for sample in mergedDatasets : for dataset in sample : outputList.write(dataset + "\n") print 'Wrote output datasets list to:', os.path.join(os.getcwd(),'outputDatasets.txt') # File with expected number of events numberOfEvents = open('eventsExpected.txt','w') for sample in datasets: for dataset in sample['merged']: numberOfEvents.write("%s %s\n" % (sample['totalEvents'],dataset)) numberOfEvents.close() print 'Wrote events per dataset to:', os.path.join(os.getcwd(),'eventsExpected.txt') total_time = time.time() - start_total_time # File with timing report (Parsing, cmsDriver comands, workflow creation) timingInfo = open('timingInfo.txt', 'w') timingInfo.write('Total time: %s s\n' % total_time) timingInfo.write('Cofigs. creation time: %s s\n' % cmsDriver_time) timingInfo.write('Workflows creation time: %s s\n' % workflow_time) output_text = [] sum = 0 for workflow in workflows: if sum == 0: min = [workflow, workflows[workflow]['time']] max = [workflow, workflows[workflow]['time']] sum += workflows[workflow]['time'] output_text.append("%s: %s s" % (workflow, workflows[workflow]['time'])) if max[1] < workflows[workflow]['time']: max = [workflow, workflows[workflow]['time']] if min[1] > workflows[workflow]['time']: min = [workflow, workflows[workflow]['time']] timingInfo.write('Average time per workflow: %s s\n' % (int(sum) / int(len(workflows)))) timingInfo.write('Max. time on %s: %s s\n' % tuple(max)) timingInfo.write('Min. 
time on %s: %s s\n' % tuple(min)) timingInfo.write('=' * 10 + '\n') timingInfo.write('Details of time per workflow:\n%s\n' % "\n".join(output_text)) timingInfo.close() |
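The rem/add pair that opens this row drops a try/except wrapped around os.environ.get("SCRAM_ARCH"): the call returns None for an unset variable instead of raising, so the except branch can never run. A minimal sketch of the explicit None check the add column switches to, reusing the message and exit code from the surrounding context:

    import os
    import sys

    # os.environ.get() yields None for a missing variable rather than raising,
    # so the unset case needs an explicit check instead of try/except.
    architecture = os.environ.get("SCRAM_ARCH")
    if architecture is None:
        print("CMSSW architecture cannot be determined from $SCRAM_ARCH")
        sys.exit(2)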
scriptsDir = '$PUTIL' | scriptsDir = os.path.expandvars(os.environ.get('PUTIL', None)) | def main(argv) : """ prepareRelValworkflows prepare workflows for chained processing of RelVal samples - parse file holding cmsDriver commands for 1st and 2nd steps - prepare workflows - prepare WorkflowInjector:Input script - prepare ForceMerge script - prepare DBSMigrationToGlobal script - prepare PhEDExInjection script - prepare local DBS query script required parameters --samples <textfile> : list of RelVal sample parameter-sets in plain text file, one sample per line, # marks comment --version <processing version> : processing version (v1, v2, ... ) --DBSURL <URL> : URL of the local DBS (http://cmsdbsprod.cern.ch/cms_dbs_prod_local_07/servlet/DBSServlet | http://cmssrv46.fnal.gov:8080/DBS208/servlet/DBSServlet) --only-sites : Site where dataset is going to be processed or where the input dataset is taken from. Usually srm-cms.cern.ch and cmssrm.fnal.gov optional parameters --pileupdataset : input pileup dataset. It must be provided if the <samples> txt file contains PilepUp samples --lumi <number> : initial run for generation (default: 666666), set it to 777777 for high statistics samples --event <number> : initial event number (default: 1) --store-fail : store output files for failed jobs in chain processing. --read-dbs : DBS URL used for obtaining the list of available blocks for real data. Default: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet --scripts-dir : Path to workflow creation scripts (default: $PUTIL) --skip-config : Is the configuration file was already created, it will skip cmsDriver command execution --extra-label : Extra label for identifying the datasets: /RelVal*/CMSSW_X_Y_Z-<Conditions>_<SpecialTag>_<ExtraLabel>_<FilterName>-<version>/TIER --workflow-label : Label for the workflows. --help (-h) : help --debug (-d) : debug statements """ start_total_time = time.time() # default try: version = os.environ.get("CMSSW_VERSION") except: print '' print 'CMSSW version cannot be determined from $CMSSW_VERSION' sys.exit(2) try: architecture = os.environ.get("SCRAM_ARCH") except: print '' print 'CMSSW architecture cannot be determined from $SCRAM_ARCH' sys.exit(2) try: from ProdCommon.DataMgmt.DBS.DBSReader import DBSReader except ImportError, ex: print ex print 'Please load prodAgent libraries (point $PYTHONPATH to the right path).' 
sys.exit(2) samplesFile = None processing_version = None initial_run = "666666" initial_event = "1" debug = False DBSURL = None pileup_dataset = None storeFail = False readDBS = 'http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet' onlySites = None scriptsDir = '$PUTIL' skip_config = False extra_label = '' workflow_label = '' try: opts, args = getopt.getopt(argv, "", ["help", "debug", "samples=", "version=", "DBSURL=", "event=", "lumi=", "pileupdataset=", "store-fail", "read-dbs=", "only-sites=", "scripts-dir=", "skip-config", "extra-label=", "workflow-label="]) except getopt.GetoptError: print main.__doc__ sys.exit(2) # check command line parameter for opt, arg in opts : if opt == "--help" : print main.__doc__ sys.exit() elif opt == "--debug" : debug = True elif opt == "--samples" : samplesFile = arg elif opt == "--version" : processing_version = arg elif opt == "--lumi" : initial_run = arg elif opt == "--event" : initial_event = arg elif opt == "--DBSURL" : DBSURL = arg elif opt == "--pileupdataset" : pileup_dataset = arg print arg elif opt == '--store-fail': storeFail = True elif opt == '--read-dbs': readDBS = arg elif opt == '--only-sites': onlySites = arg elif opt == '--scripts-dir': if arg.endswith('/') : scriptsDir = arg[:-1] else: scriptsDir = arg scriptsDirTemp = scriptsDir if scriptsDir.startswith('$') : scriptsDirTemp = os.environ.get(scriptsDir[1:],None) scriptsDir = os.path.expandvars(scriptsDirTemp) if scriptsDirTemp != None: if not os.path.exists(scriptsDirTemp): print "--scripts-dir argument does not exist, please verify." sys.exit(6) else: print "--scripts-dir argument does not exist, please verify." sys.exit(6) elif opt == "--skip-config": skip_config = True elif opt == "--extra-label": extra_label = arg elif opt == "--workflow-label": workflow_label = arg if samplesFile == None or processing_version == None or DBSURL == None : print main.__doc__ sys.exit(2) if debug: print "\nprepareRelValWorkflows.py was started with the following arguments: %s" % \ " ".join(argv) print "\n" samples = [] steps = {} primary_prefix = 'RelVal' max_step = 1 try: file = open(samplesFile) except IOError: print 'file with list of parameter-sets cannot be opened!' sys.exit(1) n_line = 0 print 'Parsing input file...' start_parse_time = time.time() for line in file.readlines(): n_line += 1 # Skipping lines with no info if line.strip() != '' and line.strip() != '\n' and \ not line.strip().startswith("#") and \ line.find('//') != 0: # I don't know what's the last condition for line_parts = [part.strip() for part in line.split('@@@') if part] dqmData = {} # Keys: Scenario, Run # // # // Parsing first step #// if not line.strip().startswith('STEP'): command = '' array = [] special_tag = '' conditions = None total_events = None events_per_job = None pile_up = False output_name = '' input_data = {} input_blocks = "" acq_era = version sample_info = line_parts[0].strip() # // # // Filling up sample's details #// sample_info_parts = [part.strip() for part in \ sample_info.split('++') if part] sample_number = sample_info_parts[0] #We might need this later sample_name = sample_info_parts[1] sample_steps = [i.strip() for i in \ sample_info_parts[2].split(',') if i] primary = primary_prefix + sample_name # // # // Is it a real data processing sample? According to this #// we assign or not the command variable. 
#\\ if line_parts[0].find('REALDATA') > -1: is_real_data = True else: is_real_data = False command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Parsing number of events #// if '--relval' in array : total_events = array[array.index('--relval')+1\ ].split(',')[0].strip() events_per_job = array[array.index('--relval')+1\ ].split(',')[1].strip() # // # // Special tag #// # FastSim if command.find('FASTSIM') > -1: special_tag = 'FastSim' # PileUp (at the same time with FastSim) if '--pileup' in array : # // # // Will use whatever argument of --pileup option is #// pileup_arg = array[array.index('--pileup') + 1] if pileup_arg.lower().strip() != 'nopileup': if special_tag: special_tag = "_".join( [special_tag, pileup_arg.strip()]) else: special_tag = pileup_arg.strip() pile_up = True if pileup_dataset is None : print "You have to provide a pileup dataset." print "Usually it is a MinBias (RAW)." print "Use option --pileupdataset" sys.exit(5) # // # // Sort of custom tag #// if '--beamspot' in array: beamspot_arg = \ array[array.index('--beamspot') + 1].strip() if special_tag: special_tag = "_".join( [special_tag, beamspot_arg]) else: special_tag = beamspot_arg # // # // Cfg file's output name #// output_name = "_".join( [x for x in [primary, conditions, special_tag] if x] ) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # Filling up DQM information dqmData['Run'] = 1 dqmData['Scenario'] = getDQMScenario(command) # // # // Collecting info for real data samples #// if is_real_data: # // # // Parsing dataset details. The following details are #// supported: REALDATA, RUN, LABEL, FILES, EVENTS, PDNAME #\\ # Producing tuples from the input options. data_options = [tuple(x.split(':')) \ for x in sample_info_parts[3].split(',') if x.strip()] # Parsing tuples for arg_v in data_options: if len(arg_v) == 2: input_data[arg_v[0].strip()] = arg_v[1].strip() elif len(arg_v) == 1: input_data[arg_v[0].strip()] = None else: print "Line %s has an extra ','." % (line) sys.exit(7) # // # // Verifiying optional arguments: LABEL, FILE, EVENTS, #// PRIMARY #\\ data_label = input_data.get('LABEL', '') data_files = input_data.get('FILES', '') data_events = input_data.get('EVENTS', '') data_pname = input_data.get('PRIMARY', '') if data_events: data_events = int(data_events) if data_files: data_files = int(data_events) # // # // Looking for best matching dataset. It should be just #// one, otherwise the script will exit. 
#\\ reader = DBSReader(readDBS) query = "find dataset where dataset like %s" % ( input_data['REALDATA']) result_xml = reader.dbs.executeQuery(query) # XML Handler parsed_datasets = [] global is_dataset is_dataset = False class Handler(xml.sax.handler.ContentHandler): def startElement(self, name, attrs): global is_dataset if name == 'dataset': is_dataset = True def characters(self, content): global is_dataset if is_dataset: parsed_datasets.append(content) def endElement(self, name): global is_dataset if name == 'dataset': is_dataset = False xml.sax.parseString(result_xml, Handler()) target_datasets = parsed_datasets # If more than one dataset is found. if len(target_datasets) > 1: # Is this an input relval dataset produced in the # current release? query = "find dataset where dataset like %s " % ( input_data['REALDATA']) query += "and release=%s" % version parsed_datasets = [] result_xml = reader.dbs.executeQuery(query) xml.sax.parseString(result_xml, Handler()) find_version = lambda x: x in parsed_datasets target_datasets = filter(find_version, target_datasets) # If more than one dataset is found, match the processing # version if len(target_datasets) > 1: find_version = \ lambda x: x.find(processing_version) != -1 target_datasets = filter(find_version, target_datasets) if len(target_datasets) > 1: msg = "Dataset pattern in line %s is too broad." % line msg += "These datasets were found: %s" % ( " ".join(target_datasets)) print msg sys.exit(8) if not target_datasets: msg = "Dataset pattern produced no match in line %s" % ( line) print msg sys.exit(8) # Now I can look up the blocks for this dataset. target_dataset = target_datasets[0] input_data['REALDATA'] = target_dataset # // # // Looking up the blocks for a given Dataset and a given run #// reader = DBSReader(readDBS) input_files = reader.dbs.listFiles(path=target_dataset, \ runNumber=input_data['RUN']) blocks = {} # // # // Parsing input blocks #// for input_file in input_files: cur_files = \ blocks.setdefault(input_file['Block']['Name'], {}).setdefault('Files', 0) cur_events = \ blocks[input_file['Block']['Name']].setdefault( 'Events', 0) blocks[input_file['Block']['Name']]['Files'] += 1 blocks[input_file['Block']['Name']]['Events'] += \ input_file['NumberOfEvents'] # // # // Truncating blocks list #// total_events = 0 total_files = 0 blocks_to_process = [] for block in blocks: blocks_to_process.append(block) total_events += blocks[block]['Events'] total_files += blocks[block]['Files'] if data_events and (data_events < total_events): break if data_files and (data_files < total_files): break input_blocks = ",".join(blocks_to_process) # // # // If PRIMARY is present or true, then it will use the #// sample_name value as primary dataset name, else it #\\ will use the input primary dataset name. # \\ if data_pname is None or \ data_pname.lower() in ('y', 't', 'true'): primary = "".join([primary_prefix, sample_name]) else: primary = \ [x for x in input_data['REALDATA'].split("/") if x][0] # // # // Seting special tag #// special_tag_parts = [] # Add RelVal tag if not present. if target_dataset.find(primary_prefix) == -1: special_tag_parts.append(primary_prefix) # Add LABEL if data_label: special_tag_parts.append(data_label) special_tag = "_".join(special_tag_parts) # // # // Setting Acq. 
Era #// #processed_dataset = target_dataset.split('/')[2] #dataset_acq_era = processed_dataset.split("-")[0] #if dataset_acq_era.startswith(version): # acq_era = version #else: # acq_era = dataset_acq_era # Filling up DQM information dqmData['Run'] = input_data['RUN'] # // # // Composing a dictionary per sample #// dict = {} dict['sampleName'] = sample_name dict['command'] = command dict['primary'] = primary dict['outputName'] = output_name dict['conditions'] = conditions dict['specialTag'] = special_tag dict['totalEvents'] = total_events dict['eventsPerJob'] = events_per_job dict['pileUp'] = pile_up dict['isRealData'] = is_real_data dict['inputData'] = input_data dict['inputBlocks'] = input_blocks dict['steps'] = sample_steps dict['AcqEra'] = acq_era dict['DQMData'] = dqmData samples.append(dict) if debug: print 'Parsing' print 'Sample:', sample_name print 'Command:', command print 'Conditions:', conditions print 'Special tag:', special_tag print 'Total events:', total_events print 'Events per job:', events_per_job print 'Steps:', sample_steps print 'PileUp:', pile_up print 'Input data:', input_data print 'Input blocks:', input_blocks print 'DQMData:', dqmData print '' # // # // No a first step command (second HLT table, RECO, ALCA, etc) #// else: step_number = int(line_parts[0].split('++')[0].strip()[-1]) step_name = line_parts[0].split('++')[1].strip() command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Cfg file's output name #// output_name = "_".join([step_name, conditions]) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # // # // Second trigger table? This may be changed, right now I am #// assuming that all 4 steps workflows are like this. #\\ stage_previous = True if step_number == 2: if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].find('RECO') < 0: stage_previous = False if step_number > max_step: max_step = step_number # // # // Composing a dictionary per step #// dict = {} dict['stepNumber'] = step_number dict['command'] = command dict['outputName'] = output_name dict['conditions'] = conditions dict['stagePrevious'] = stage_previous dict['DQMData'] = {'Scenario': getDQMScenario(command)} # // # // Step name should be unique #// if step_name not in steps: steps[step_name] = dict else: print "Label %s is repeated!!!" 
% step_name sys.exit(1) if debug: print 'Parsing' print 'Step name:', step_name print 'Step number:', step_number print 'Command:', command print 'Conditions:', conditions print 'Stage previous:', stage_previous print 'DQM Data:', dict['DQMData'] print '' parse_time = time.time() - start_parse_time file.close() if debug: print "Collected information step 1" for sample in samples: print 'Sample name:', sample['sampleName'] print 'Command', sample['command'] print 'Real data:', sample['isRealData'] print 'Input data:', sample['inputData'] print 'Input blocks', sample['inputBlocks'] print 'Conditions:', sample['conditions'] print 'Total events:', sample['totalEvents'] print 'Events per job:', sample['eventsPerJob'] print 'Output name:', sample['outputName'] print 'Steps:', sample['steps'] print 'PileUp:', sample['pileUp'] print 'Special tag:', sample['specialTag'] print 'Acq. Era:', sample['AcqEra'] print 'DQM data:', sample['DQMData'] print '' for i in range(2, max_step+1): print 'Collected information step %s' % i for step in steps: if steps[step]['stepNumber'] == i: print 'Step name:', step print 'Command:', steps[step]['command'] print 'Conditions:', steps[step]['conditions'] print 'Stage previous:', steps[step]['stagePrevious'] print 'DQM Data:', steps[step]['DQMData'] print '' # // # // Execute cmsDriver command #// print '' print 'Executing cmsDriver commands for step 1 configurations' print '' start_cmsDriver_time = time.time() for sample in samples: if not sample['isRealData']: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), sample['outputName']])) and skip_config: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'was already issued, skipping.' continue exitCode, output, error = executeCommand(sample['command']) if exitCode == 0: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'exited with ExitCode:', exitCode else : print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'failed with ExitCode:', exitCode sys.exit(1) else : msg = 'Real Data:\n' msg += 'Input dataset: %s\n' % (sample['inputData']['REALDATA']) msg += 'Run: %s\n' % (sample['inputData']['RUN']) msg += 'Input blocks: %s' % (sample['inputBlocks']) print msg for i in range(2, max_step+1): print '' print 'Executing cmsDriver commands for step %s configurations' % i print '' for step in steps: if steps[step]['stepNumber'] == i: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), steps[step]['outputName']])) and skip_config: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'],'was already issued, skipping.' 
continue exitCode, output, error = executeCommand(steps[step]['command']) if exitCode == 0: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'exited with ExitCode:', exitCode else: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'failed with ExitCode:', exitCode sys.exit(1) cmsDriver_time = time.time() - start_cmsDriver_time print '' print 'Workflow creation' print '' start_workflow_time = time.time() datasets = [] unmergedDatasets = [] mergedDatasets = [] workflows = {} # // # // Create workflows #// for sample in samples: command = 'python ' + scriptsDir conditions = '' # Conditions -> processingString # // # // In case we are processing data #// if sample['isRealData']: command += '/createProcessingWorkflow.py \\\n' # Not changing the primary dataset name for real data. command += '--override-channel=' + sample['primary'] + ' \\\n' command += '--dataset=' + sample['inputData']['REALDATA'] + ' \\\n' command += '--only-blocks=' + sample['inputBlocks'] + ' \\\n' command += '--dbs-url=' + readDBS + ' \\\n' conditions = steps[sample['steps'][0]]['conditions'] command += '--split-type=file \\\n' command += '--split-size=1 \\\n' # // # // MC workflows #// else: command += '/createProductionWorkflow.py \\\n' command += '--channel=' + sample['primary'] + ' \\\n' conditions = sample['conditions'] command += '--starting-run=' + initial_run + ' \\\n' command += '--starting-event=' + initial_event + ' \\\n' command += '--totalevents=' + sample['totalEvents'] + ' \\\n' command += '--eventsperjob=' + sample['eventsPerJob'] + ' \\\n' if sample['pileUp']: command += '--pileup-dataset=' + pileup_dataset + ' \\\n' if storeFail: command += '--store-fail=True \\\n' # // # // First step #// command += '--version=' + version + ' \\\n' command += '--py-cfg=' + sample['outputName'] + ' \\\n' # // # // Input configurations (Second step and further) #// if sample['steps'][0].lower().strip() != 'none': for i, step in enumerate(sample['steps']): command += '--version=' + version + ' \\\n' command += '--py-cfg=' + steps[step]['outputName'] + ' \\\n' if i != 0 or not sample['isRealData']: command += '--stageout-intermediates=%s \\\n' % ( steps[step]['stagePrevious']) command += '--chained-input=output \\\n' else: dqmScenario = steps[step]['DQMData']['Scenario'] # // # // If a two-hlt tables workflow, will take conditions from #// the second step information #\\ if not steps[step]['stagePrevious'] and \ i == 0: conditions = steps[step]['conditions'] # // # // Common options #// command += '--group=RelVal \\\n' command += '--category=relval \\\n' command += '--activity=RelVal \\\n' command += '--acquisition_era=' + sample['AcqEra'] + ' \\\n' command += '--only-sites=' + onlySites + ' \\\n' command += '--processing_version=' + processing_version + ' \\\n' # Workflow label if workflow_label: command += '--workflow_tag=' + workflow_label + ' \\\n' # // # // processingString="CMSSWVersion"_"Conditions"_"specialTag"_"extra-label" #// CMSSWVersion is appended only when the input dataset does not have it. 
#\\ processing_string_parts = [] if sample['AcqEra'] != version: processing_string_parts.append(version) processing_string_parts.append(conditions) if sample['specialTag']: processing_string_parts.append(sample['specialTag']) if extra_label: processing_string_parts.append(extra_label) command += '--processing_string=' + "_".join(processing_string_parts) if debug: print command print '' start_command_time = time.time() exitCode, output, error = executeCommand(command) command_time = time.time() - start_command_time if debug: print output print '' if exitCode == 0: #parse output tmp = [] index = FindIndex(output,'Output Datasets') for dataset in output[index+1:]: tmp.append(dataset.strip()) # DQM Data dqmInfo = {} dqmInfo['Run'] = sample['DQMData']['Run'] if sample['isRealData']: dqmInfo['Scenario'] = dqmScenario else: dqmInfo['Scenario'] = sample['DQMData']['Scenario'] datasets.append({'unmerged': tmp, 'totalEvents': sample['totalEvents'], 'merged': [x.replace('-unmerged','') for x in tmp], 'DQMData': dqmInfo }) unmergedDatasets.append(tmp) index = FindIndex(output,'Created') if index == -1: print "No workflow was created by create*workflow.py" sys.exit(1) workflow = output[index].split()[1].strip() workflows.setdefault(workflow, {})['isRealData'] = sample['isRealData'] workflows[workflow]['time'] = command_time print 'workflow creation command for workflow:', workflow, \ 'exited with ExitCode:', exitCode else : print 'workflow creation command:' print command print 'failed' sys.exit(1) if debug: print 'Created workflows:' print workflows.keys() print '' print "Unmerged datasets:" print unmergedDatasets # extract merged datasets for sample in unmergedDatasets: tmp = [] for dataset in sample: tmp.append(dataset.replace('-unmerged','')) mergedDatasets.append(tmp) workflow_time = time.time() - start_workflow_time print '' print 'Write helper scripts' print '' # WorkflowInjector:Input script inputScript = open('input.sh','w') inputScript.write('#!/bin/bash\n') feeder = 'None' for workflow in workflows.keys(): if workflows[workflow]['isRealData']: if feeder.find('ReReco') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin BlockFeeder\n') feeder = 'ReReco' else : if feeder.find('Request') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin RequestFeeder\n') feeder = 'Request' inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:Input ' + os.path.join(os.getcwd(), workflow) + '\n') inputScript.close() os.chmod('input.sh',0755) print 'Wrote WorkflowInjector:Input script to:',os.path.join(os.getcwd(),'input.sh') # ForceMerge forceMergeScript = open('forceMerge.sh','w') forceMergeScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : forceMergeScript.write('python $PRODAGENT_ROOT/util/publish.py ForceMerge ' + dataset + '\n') forceMergeScript.close() os.chmod('forceMerge.sh',0755) print 'Wrote ForceMerge script to:',os.path.join(os.getcwd(),'forceMerge.sh') # MigrateDatasetToGlobal migrateScript = open('migrateToGlobal.sh','w') migrateScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : migrateScript.write('python $PRODAGENT_ROOT/util/publish.py DBSInterface:MigrateDatasetToGlobal ' + dataset + '\n') migrateScript.close() os.chmod('migrateToGlobal.sh',0755) print 'Wrote DBSInterface:MigrateDatasetToGlobal script to:',os.path.join(os.getcwd(),'migrateToGlobal.sh') # PhEDExInjectDataset phedexScript = open('injectIntoPhEDEx.sh','w') 
phedexScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : phedexScript.write('python $PRODAGENT_ROOT/util/publish.py PhEDExInjectDataset ' + dataset + '\n') phedexScript.close() os.chmod('injectIntoPhEDEx.sh',0755) print 'Wrote PhEDExInjectDataset script to:',os.path.join(os.getcwd(),'injectIntoPhEDEx.sh') # DBS: query unmerged datasets queryUnmergedScript = open('queryUnmerged.sh','w') queryUnmergedScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryUnmergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryUnmergedScript.close() os.chmod('queryUnmerged.sh',0755) print 'Wrote DBS query script for unmerged datasets to:',os.path.join(os.getcwd(),'queryUnmerged.sh') # DBS: query merged datasets queryMergedScript = open('queryMerged.sh','w') queryMergedScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryMergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryMergedScript.close() os.chmod('queryMerged.sh',0755) print 'Wrote DBS query script for merged datasets to:',os.path.join(os.getcwd(),'queryMerged.sh') # DQMHarvesting DQMinputScript = open('DQMinput.sh','w') DQMinputScript.write("#!/bin/bash\n") reHarvest = re.compile(r'/.*/.*/(RECO|.*-RECO)') # Only RECO datasets for now. for sample in datasets: for dataset in sample['merged']: if reHarvest.match(dataset): DQMinputScript.write('python $PRODAGENT_ROOT/util/harvestDQM.py --run=%s --path=%s --scenario=%s\n' % ( sample['DQMData']['Run'], dataset, sample['DQMData']['Scenario'])) os.chmod('DQMinput.sh',0755) print 'Wrote DQMHarvesting script for merged datasets to:', os.path.join(os.getcwd(),'DQMinput.sh') # Output datasets list outputList = open('outputDatasets.txt','w') for sample in mergedDatasets : for dataset in sample : outputList.write(dataset + "\n") print 'Wrote output datasets list to:', os.path.join(os.getcwd(),'outputDatasets.txt') # File with expected number of events numberOfEvents = open('eventsExpected.txt','w') for sample in datasets: for dataset in sample['merged']: numberOfEvents.write("%s %s\n" % (sample['totalEvents'],dataset)) numberOfEvents.close() print 'Wrote events per dataset to:', os.path.join(os.getcwd(),'eventsExpected.txt') total_time = time.time() - start_total_time # File with timing report (Parsing, cmsDriver comands, workflow creation) timingInfo = open('timingInfo.txt', 'w') timingInfo.write('Total time: %s s\n' % total_time) timingInfo.write('Cofigs. creation time: %s s\n' % cmsDriver_time) timingInfo.write('Workflows creation time: %s s\n' % workflow_time) output_text = [] sum = 0 for workflow in workflows: if sum == 0: min = [workflow, workflows[workflow]['time']] max = [workflow, workflows[workflow]['time']] sum += workflows[workflow]['time'] output_text.append("%s: %s s" % (workflow, workflows[workflow]['time'])) if max[1] < workflows[workflow]['time']: max = [workflow, workflows[workflow]['time']] if min[1] > workflows[workflow]['time']: min = [workflow, workflows[workflow]['time']] timingInfo.write('Average time per workflow: %s s\n' % (int(sum) / int(len(workflows)))) timingInfo.write('Max. time on %s: %s s\n' % tuple(max)) timingInfo.write('Min. 
time on %s: %s s\n' % tuple(min)) timingInfo.write('=' * 10) timingInfo.write('Details of time per workflow:\n%s\n' % "\n".join(output_text))
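The timing summary above tracks min, max and sum by hand inside the loop. A minimal sketch of the same bookkeeping done with min()/max() over the workflows dict (Python 2, matching the script's style; summarizeTimes is an illustrative helper, not part of the script):

# workflows maps workflow name -> {'time': seconds, ...}, as built above
def summarizeTimes(workflows):
    times = dict((name, info['time']) for name, info in workflows.items())
    slowest = max(times.items(), key=lambda item: item[1])   # (name, seconds)
    fastest = min(times.items(), key=lambda item: item[1])
    average = sum(times.values()) / float(len(times))
    return slowest, fastest, average

slowest, fastest, average = summarizeTimes({'wfA': {'time': 12.0}, 'wfB': {'time': 4.5}})
print 'Average time per workflow: %.1f s' % average
print 'Max. time on %s: %s s' % slowest
print 'Min. time on %s: %s s' % fastest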
print 'failed' | print 'failed: %s' % error | def endElement(self, name): global is_dataset if name == 'dataset': is_dataset = False |
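The change from print 'failed' to print 'failed: %s' % error only helps if executeCommand really hands back the command's stderr. That helper is not shown in this dump, so the following is only a plausible sketch of such a function built on subprocess; the name executeCommand and the (exitCode, output, error) return order are taken from how the surrounding code calls it, everything else is an assumption:

import subprocess

def executeCommand(command):
    # Run a shell command and hand back (exitCode, stdout, stderr), the shape the callers above expect.
    process = subprocess.Popen(command, shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    output, error = process.communicate()
    return process.returncode, output, error

exitCode, output, error = executeCommand('echo hello')
if exitCode != 0:
    print 'failed: %s' % error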
msg = 'DBS URL of the old request is neither analysis_01 nor analysis_02. Please, check!' | msg = 'DBS URL of the old request is neither analysis_01, analysis_02 nor local_09. Please, check!' | def setDBSDropDown(self): ## Get DBS URL by Drop Down control = self.br.find_control("custom_sb4",type="select") dbs_url = self.DBSByValueDict[control.value[0]] |
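A sketch of keeping the accepted DBS instances in one whitelist instead of repeating them inside the error message. The instance names are the ones listed in the message above; the constant and function names are illustrative only:

# Hypothetical whitelist of local DBS instances accepted for old StoreResults requests.
ALLOWED_DBS_INSTANCES = ('analysis_01', 'analysis_02', 'local_09')

def checkDBSInstance(dbs_url):
    # Accept the URL only if it names one of the allowed instances.
    if not any(instance in dbs_url for instance in ALLOWED_DBS_INSTANCES):
        raise ValueError('DBS URL of the old request is not one of %s. Please, check!'
                         % ', '.join(ALLOWED_DBS_INSTANCES))
    return dbs_url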
group_squad = 'cms-storeresults-'+self.GroupByValueDict[group_id].replace("-","_") | group_squad = 'cms-storeresults-'+self.GroupByValueDict[group_id].replace("-","_").lower() | def getRequests(self,**kargs): requests = [] if self.isLoggedIn: self.selectQueryForm(**kargs) self.createValueDicts() self.br.select_form(name="bug_form") response = self.br.submit() |
new_dataset = "" dataset_prefix = "StoreResults"+dataset_version | if len(dataset_version)>0: dataset_prefix = "StoreResults-"+dataset_version else: dataset_prefix = "StoreResults" | def getRequests(self,**kargs): requests = [] if self.isLoggedIn: self.selectQueryForm(**kargs) self.createValueDicts() self.br.select_form(name="bug_form") response = self.br.submit() |
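The added branch builds the processed-dataset prefix with or without a version suffix. The same logic as a tiny helper, with a hypothetical name and example values:

def storeResultsPrefix(dataset_version):
    # "StoreResults-v2" when a version string is given, plain "StoreResults" otherwise.
    if dataset_version:
        return "StoreResults-" + dataset_version
    return "StoreResults"

assert storeResultsPrefix("v2") == "StoreResults-v2"
assert storeResultsPrefix("") == "StoreResults"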
infoDict["cmsswRelease"] = self.ReleaseByValueDict[release_id[0]] | try: infoDict["cmsswRelease"] = self.ReleaseByValueDict[release_id[0]] except: continue | def getRequests(self,**kargs): requests = [] if self.isLoggedIn: self.selectQueryForm(**kargs) self.createValueDicts() self.br.select_form(name="bug_form") response = self.br.submit() |
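The bare try/except added above silently skips requests whose release id is missing from ReleaseByValueDict. A narrower sketch of the same skip using dict.get (stand-in data, not the tool's real mapping):

release_map = {'123': 'CMSSW_3_8_4', '124': 'CMSSW_3_9_7'}   # stand-in for ReleaseByValueDict

def lookupRelease(release_id, release_map):
    # None for unknown ids, so the caller can skip the request explicitly instead of crashing.
    return release_map.get(release_id)

release = lookupRelease('999', release_map)
if release is None:
    print 'Skipping request with unknown release id'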
ds['DataTier'], \ ds['ProcessedDataset']) \ | ds['ProcessedDataset'], \ ds['DataTier']) \ | def closeRequest(self, workflowFile): """ _closeRequest_ |
expr = '' is_the_first = True for run in runs_list: if is_the_first: expr += "(" is_the_first = False else: expr += " or " if run.count("-"): run_limits = \ [x.strip() for x in run.split('-') if x.strip()] expr += "(x >= %s and x <= %s)" % ( run_limits[0], run_limits[1]) else: expr += "x == %s" % run if not is_the_first: expr += ")" runs_filter = lambda x: eval(expr) target_runs = filter(runs_filter, runs_in_dbs) | if runs_list: expr = '' is_the_first = True for run in runs_list: if is_the_first: expr += "(" is_the_first = False else: expr += " or " if run.count("-"): run_limits = \ [x.strip() for x in run.split('-') if x.strip()] expr += "(x >= %s and x <= %s)" % ( run_limits[0], run_limits[1]) else: expr += "x == %s" % run if not is_the_first: expr += ")" runs_filter = lambda x: eval(expr) target_runs = filter(runs_filter, runs_in_dbs) else: target_runs = runs_in_dbs | def main(argv) : """ prepareRelValworkflows prepare workflows for chained processing of RelVal samples - parse file holding cmsDriver commands for 1st and 2nd steps - prepare workflows - prepare WorkflowInjector:Input script - prepare ForceMerge script - prepare DBSMigrationToGlobal script - prepare PhEDExInjection script - prepare local DBS query script required parameters --samples <textfile> : list of RelVal sample parameter-sets in plain text file, one sample per line, # marks comment --version <processing version> : processing version (v1, v2, ... ) --DBSURL <URL> : URL of the local DBS (http://cmsdbsprod.cern.ch/cms_dbs_prod_local_07/servlet/DBSServlet | http://cmssrv46.fnal.gov:8080/DBS208/servlet/DBSServlet) --only-sites : Site where dataset is going to be processed or where the input dataset is taken from. Usually srm-cms.cern.ch and cmssrm.fnal.gov optional parameters --pileupdataset : input pileup dataset. It must be provided if the <samples> txt file contains PilepUp samples --lumi <number> : initial run for generation (default: 666666), set it to 777777 for high statistics samples --event <number> : initial event number (default: 1) --store-fail : store output files for failed jobs in chain processing. --read-dbs : DBS URL used for obtaining the list of available blocks for real data. Default: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet --scripts-dir : Path to workflow creation scripts (default: $PUTIL) --skip-config : Is the configuration file was already created, it will skip cmsDriver command execution --extra-label : Extra label for identifying the datasets: /RelVal*/CMSSW_X_Y_Z-<Conditions>_<SpecialTag>_<ExtraLabel>_<FilterName>-<version>/TIER --workflow-label : Label for the workflows. --help (-h) : help --debug (-d) : debug statements """ start_total_time = time.time() # default version = os.environ.get("CMSSW_VERSION") if version is None: print '' print 'CMSSW version cannot be determined from $CMSSW_VERSION' sys.exit(2) architecture = os.environ.get("SCRAM_ARCH") if architecture is None: print '' print 'CMSSW architecture cannot be determined from $SCRAM_ARCH' sys.exit(2) try: from ProdCommon.DataMgmt.DBS.DBSReader import DBSReader except ImportError, ex: print ex print 'Please load prodAgent libraries (point $PYTHONPATH to the right path).' 
sys.exit(2) samplesFile = None processing_version = None initial_run = "666666" initial_event = "1" debug = False DBSURL = None pileup_dataset = None storeFail = False readDBS = 'http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet' onlySites = None scriptsDir = '$PUTIL' #os.path.expandvars(os.environ.get('PUTIL', None)) skip_config = False extra_label = '' workflow_label = '' try: opts, args = getopt.getopt(argv, "", ["help", "debug", "samples=", "version=", "DBSURL=", "event=", "lumi=", "pileupdataset=", "store-fail", "read-dbs=", "only-sites=", "scripts-dir=", "skip-config", "extra-label=", "workflow-label="]) except getopt.GetoptError: print main.__doc__ sys.exit(2) # check command line parameter for opt, arg in opts : if opt == "--help" : print main.__doc__ sys.exit() elif opt == "--debug" : debug = True elif opt == "--samples" : samplesFile = arg elif opt == "--version" : processing_version = arg elif opt == "--lumi" : initial_run = arg elif opt == "--event" : initial_event = arg elif opt == "--DBSURL" : DBSURL = arg elif opt == "--pileupdataset" : pileup_dataset = arg print arg elif opt == '--store-fail': storeFail = True elif opt == '--read-dbs': readDBS = arg elif opt == '--only-sites': onlySites = arg elif opt == '--scripts-dir': if arg.endswith('/') : scriptsDir = arg[:-1] else: scriptsDir = arg scriptsDirTemp = scriptsDir # There's no need to expand the shell variables anymore #if scriptsDir.startswith('$') : # scriptsDirTemp = os.environ.get(scriptsDir[1:],None) # scriptsDir = os.path.expandvars(scriptsDirTemp) if scriptsDirTemp != None: if not os.path.exists(scriptsDirTemp): print "--scripts-dir argument does not exist, please verify." sys.exit(6) else: print "--scripts-dir argument does not exist, please verify." sys.exit(6) elif opt == "--skip-config": skip_config = True elif opt == "--extra-label": extra_label = arg elif opt == "--workflow-label": workflow_label = arg if samplesFile == None or processing_version == None or DBSURL == None : print main.__doc__ sys.exit(2) if debug: print "\nprepareRelValWorkflows.py was started with the following arguments: %s" % \ " ".join(argv) print "\n" samples = [] steps = {} primary_prefix = 'RelVal' max_step = 1 try: file = open(samplesFile) except IOError: print 'file with list of parameter-sets cannot be opened!' sys.exit(1) n_line = 0 print 'Parsing input file...' start_parse_time = time.time() for line in file.readlines(): n_line += 1 # Skipping lines with no info if line.strip() != '' and line.strip() != '\n' and \ not line.strip().startswith("#") and \ line.find('//') != 0: # I don't know what's the last condition for line_parts = [part.strip() for part in line.split('@@@') if part] dqmData = {} # Keys: Scenario, Run # // # // Parsing first step #// if not line.strip().startswith('STEP'): command = '' array = [] special_tag = '' conditions = None total_events = None events_per_job = None pile_up = False output_name = '' input_data = {} input_blocks = "" acq_era = version sample_info = line_parts[0].strip() # // # // Filling up sample's details #// sample_info_parts = [part.strip() for part in \ sample_info.split('++') if part] sample_number = sample_info_parts[0] #We might need this later sample_name = sample_info_parts[1] sample_steps = [i.strip() for i in \ sample_info_parts[2].split(',') if i] primary = primary_prefix + sample_name # // # // Is it a real data processing sample? According to this #// we assign or not the command variable. 
#\\ if line_parts[0].find('REALDATA') > -1: is_real_data = True else: is_real_data = False command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Parsing number of events #// if '--relval' in array : total_events = array[array.index('--relval')+1\ ].split(',')[0].strip() events_per_job = array[array.index('--relval')+1\ ].split(',')[1].strip() # // # // Special tag #// # FastSim if command.find('FASTSIM') > -1: special_tag = 'FastSim' # PileUp (at the same time with FastSim) if '--pileup' in array : # // # // Will use whatever argument of --pileup option is #// pileup_arg = array[array.index('--pileup') + 1] if pileup_arg.lower().strip() != 'nopileup': if special_tag: special_tag = "_".join( [special_tag, "PU", pileup_arg.strip()]) else: special_tag = "_".join(["PU", pileup_arg.strip()]) pile_up = True if pileup_dataset is None : print "You have to provide a pileup dataset." print "Usually it is a MinBias (RAW)." print "Use option --pileupdataset" sys.exit(5) # // # // Sort of custom tag #// if '--beamspot' in array: beamspot_arg = \ array[array.index('--beamspot') + 1].strip() if special_tag: special_tag = "_".join( [special_tag, beamspot_arg]) else: special_tag = beamspot_arg # // # // Cfg file's output name #// output_name = "_".join( [x for x in [primary, conditions, special_tag] if x] ) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # Filling up DQM information dqmData['Runs'] = '1' dqmData['Scenario'] = getDQMScenario(command) # // # // Collecting info for real data samples #// if is_real_data: # // # // Parsing dataset details. The following details are #// supported: REALDATA, RUN, LABEL, FILES, EVENTS, PDNAME #\\ # Producing tuples from the input options. data_options = [tuple(x.split(':')) \ for x in sample_info_parts[3].split(',') if x.strip()] # Parsing tuples for arg_v in data_options: if len(arg_v) == 2: input_data[arg_v[0].strip()] = arg_v[1].strip() elif len(arg_v) == 1: input_data[arg_v[0].strip()] = None else: print "Line %s has an extra ','." % (line) sys.exit(7) # // # // Verifiying optional arguments: RUN, LABEL, FILE, EVENTS, #// PRIMARY #\\ data_run = input_data.get('RUN', '') data_label = input_data.get('LABEL', '') data_files = input_data.get('FILES', '') data_events = input_data.get('EVENTS', '') data_pname = input_data.get('PRIMARY', None) if data_events: data_events = int(data_events) if data_files: data_files = int(data_events) # // # // Looking for best matching dataset. It should be just #// one, otherwise the script will exit. 
#\\ reader = DBSReader(readDBS) query = "find dataset where dataset like %s" % ( input_data['REALDATA']) result_xml = reader.dbs.executeQuery(query) # XML Handler result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found. if len(target_datasets) > 1: # Is this an input relval dataset produced in the # current release? query = "find dataset where dataset like %s " % ( input_data['REALDATA']) query += "and release=%s" % version result_xml = reader.dbs.executeQuery(query) result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found, match the processing # version if len(target_datasets) > 1: find_version = \ lambda x: x.find(processing_version) != -1 target_datasets = filter(find_version, target_datasets) if len(target_datasets) > 1: msg = "Dataset pattern in line %s is too broad." % line msg += "These datasets were found: %s" % ( " ".join(target_datasets)) print msg sys.exit(8) if not target_datasets: msg = "Dataset pattern produced no match in line %s" % ( line) print msg sys.exit(8) # Now I can look up the blocks for this dataset. target_dataset = target_datasets[0] input_data['REALDATA'] = target_dataset # // # // Looking up the blocks for a given Dataset and the #// provided list of runs #\\ runs_list = \ [x.strip() for x in data_run.split('|') if x.strip()] runs_in_dbs = [x['RunNumber'] for x in \ reader.dbs.listRuns(target_dataset)] runs_in_dbs.sort() # Creating lambda function for filtering runs. expr = '' # First a string expression to evaluate is_the_first = True for run in runs_list: if is_the_first: expr += "(" is_the_first = False else: expr += " or " # Run range: XXXXXX-XXXXXX if run.count("-"): run_limits = \ [x.strip() for x in run.split('-') if x.strip()] expr += "(x >= %s and x <= %s)" % ( run_limits[0], run_limits[1]) else: expr += "x == %s" % run if not is_the_first: expr += ")" # Here comes the lambda funtion runs_filter = lambda x: eval(expr) # Filtering runs in DBS using the list provided in the # input file. target_runs = filter(runs_filter, runs_in_dbs) # Pulling up input files from DBS (including run info). 
input_files = reader.dbs.listFiles( path=target_dataset, retriveList=['retrive_run']) # // # // Parsing input blocks #// blocks = {} for input_file in input_files: # Skip files with no events # A block will be skipped if all its files have 0 # events if input_file['NumberOfEvents'] == 0: continue runs = \ [int(x['RunNumber']) for x in input_file['RunsList']] for run in runs: if run in target_runs: break else: continue # skip file if it's not in the target_runs cur_files = \ blocks.setdefault(input_file['Block']['Name'], {}).setdefault('Files', 0) cur_events = \ blocks[input_file['Block']['Name']].setdefault( 'Events', 0) cur_runs = \ blocks[input_file['Block']['Name']].setdefault( 'Runs', set()) blocks[input_file['Block']['Name']]['Files'] += 1 blocks[input_file['Block']['Name']]['Events'] += \ input_file['NumberOfEvents'] blocks[input_file['Block']['Name']]['Runs'] = \ cur_runs.union(runs) # // # // Truncating blocks list #// total_events = 0 total_files = 0 blocks_to_process = [] runs_to_process = set() for block in blocks: blocks_to_process.append(block) runs_to_process = runs_to_process.union(blocks[block]['Runs']) total_events += blocks[block]['Events'] total_files += blocks[block]['Files'] if data_events and (data_events < total_events): break if data_files and (data_files < total_files): break input_blocks = ",".join(blocks_to_process) # // # // If PRIMARY is true, then it will use the #// sample_name value as primary dataset name, else it #\\ will use the input primary dataset name. # \\ if data_pname is not None and \ data_pname.lower() in ('y', 't', 'true'): primary = "".join([primary_prefix, sample_name]) else: primary = \ [x for x in input_data['REALDATA'].split("/") if x][0] # // # // Seting special tag #// special_tag_parts = [] # Add RelVal tag if not present. if target_dataset.find(primary_prefix) == -1: special_tag_parts.append(primary_prefix) # Add LABEL if data_label: special_tag_parts.append(data_label) special_tag = "_".join(special_tag_parts) # // # // Setting Acq. 
Era #// #processed_dataset = target_dataset.split('/')[2] #dataset_acq_era = processed_dataset.split("-")[0] #if dataset_acq_era.startswith(version): # acq_era = version #else: # acq_era = dataset_acq_era # Filling up DQM information dqmData['Runs'] = \ ",".join([str(x) for x in list(runs_to_process)]) # // # // Composing a dictionary per sample #// dict = {} dict['sampleName'] = sample_name dict['command'] = command dict['primary'] = primary dict['outputName'] = output_name dict['conditions'] = conditions dict['specialTag'] = special_tag dict['totalEvents'] = total_events dict['eventsPerJob'] = events_per_job dict['pileUp'] = pile_up dict['isRealData'] = is_real_data dict['inputData'] = input_data dict['inputBlocks'] = input_blocks dict['steps'] = sample_steps dict['AcqEra'] = acq_era dict['DQMData'] = dqmData samples.append(dict) if debug: print 'Parsing' print 'Sample:', sample_name print 'Command:', command print 'Conditions:', conditions print 'Special tag:', special_tag print 'Total events:', total_events print 'Events per job:', events_per_job print 'Steps:', sample_steps print 'PileUp:', pile_up print 'Input data:', input_data print 'Input blocks:', input_blocks print 'DQMData:', dqmData print '' # // # // No a first step command (second HLT table, RECO, ALCA, etc) #// else: step_number = int(line_parts[0].split('++')[0].strip()[-1]) step_name = line_parts[0].split('++')[1].strip() command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Cfg file's output name #// output_name = "_".join([step_name, conditions]) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # // # // Second trigger table? This may be changed, right now I am #// assuming that all 4 steps workflows are like this. #\\ stage_previous = True if step_number == 2: if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].find('RECO') < 0: stage_previous = False if step_number > max_step: max_step = step_number # // # // HARVESTING cmsDriver commands should be ignored. RelVals #// should not run any HARVESTING configuration. Harvestings #\\ run independently after the datasets are produced. 
# \\ skip_step = False if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].count('HARVESTING') > 0: skip_step = True # // # // Composing a dictionary per step #// dict = {} dict['stepNumber'] = step_number dict['command'] = command dict['outputName'] = output_name dict['conditions'] = conditions dict['stagePrevious'] = stage_previous dict['DQMData'] = {'Scenario': getDQMScenario(command)} dict['skipStep'] = skip_step # // # // Step name should be unique #// if step_name not in steps: steps[step_name] = dict else: print "Label %s is repeated!!!" % step_name sys.exit(1) if debug: print 'Parsing' print 'Step name:', step_name print 'Step number:', step_number print 'Command:', command print 'Conditions:', conditions print 'Stage previous:', stage_previous print 'DQM Data:', dict['DQMData'] print '' parse_time = time.time() - start_parse_time file.close() if debug: print "Collected information step 1" for sample in samples: print 'Sample name:', sample['sampleName'] print 'Command', sample['command'] print 'Real data:', sample['isRealData'] print 'Input data:', sample['inputData'] print 'Input blocks', sample['inputBlocks'] print 'Conditions:', sample['conditions'] print 'Total events:', sample['totalEvents'] print 'Events per job:', sample['eventsPerJob'] print 'Output name:', sample['outputName'] print 'Steps:', sample['steps'] print 'PileUp:', sample['pileUp'] print 'Special tag:', sample['specialTag'] print 'Acq. Era:', sample['AcqEra'] print 'DQM data:', sample['DQMData'] print '' for i in range(2, max_step+1): print 'Collected information step %s' % i for step in steps: if steps[step]['stepNumber'] == i: print 'Step name:', step print 'Command:', steps[step]['command'] print 'Conditions:', steps[step]['conditions'] print 'Stage previous:', steps[step]['stagePrevious'] print 'DQM Data:', steps[step]['DQMData'] print '' # // # // Execute cmsDriver command #// print '' print 'Executing cmsDriver commands for step 1 configurations' print '' start_cmsDriver_time = time.time() for sample in samples: if not sample['isRealData']: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), sample['outputName']])) and skip_config: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'was already issued, skipping.' continue exitCode, output, error = executeCommand(sample['command']) if exitCode == 0: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'exited with ExitCode:', exitCode else : print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'failed with ExitCode:', exitCode sys.exit(1) else : msg = 'Real Data:\n' msg += 'Input dataset: %s\n' % (sample['inputData']['REALDATA']) msg += 'Run: %s\n' % (sample['inputData'].get('RUN', 'All')) msg += 'Input blocks: %s' % (sample['inputBlocks']) print msg for i in range(2, max_step+1): print '' print 'Executing cmsDriver commands for step %s configurations' % i print '' for step in steps: if steps[step]['stepNumber'] == i: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), steps[step]['outputName']])) and skip_config: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'],'was already issued, skipping.' continue # // # // Skip HARVESTING cmsDriver commands #// if steps[step]['skipStep']: print 'This is a HARVESTING cmsDriver command, skipping. 
' continue exitCode, output, error = executeCommand(steps[step]['command']) if exitCode == 0: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'exited with ExitCode:', exitCode else: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'failed with ExitCode:', exitCode sys.exit(1) cmsDriver_time = time.time() - start_cmsDriver_time print '' print 'Workflow creation' print '' start_workflow_time = time.time() datasets = [] unmergedDatasets = [] mergedDatasets = [] workflows = {} # // # // Create workflows #// for sample in samples: command = 'python ' + scriptsDir conditions = '' # Conditions -> processingString # // # // In case we are processing data #// if sample['isRealData']: command += '/createProcessingWorkflow.py \\\n' # Not changing the primary dataset name for real data. command += '--override-channel=' + sample['primary'] + ' \\\n' command += '--dataset=' + sample['inputData']['REALDATA'] + ' \\\n' command += '--only-blocks=' + sample['inputBlocks'] + ' \\\n' command += '--dbs-url=' + readDBS + ' \\\n' conditions = steps[sample['steps'][0]]['conditions'] command += '--split-type=file \\\n' command += '--split-size=1 \\\n' # // # // MC workflows #// else: command += '/createProductionWorkflow.py \\\n' command += '--channel=' + sample['primary'] + ' \\\n' conditions = sample['conditions'] command += '--starting-run=' + initial_run + ' \\\n' command += '--starting-event=' + initial_event + ' \\\n' command += '--totalevents=' + sample['totalEvents'] + ' \\\n' command += '--eventsperjob=' + sample['eventsPerJob'] + ' \\\n' if sample['pileUp']: command += '--pileup-dataset=' + pileup_dataset + ' \\\n' if storeFail: command += '--store-fail=True \\\n' # // # // First step #// command += '--version=' + version + ' \\\n' command += '--py-cfg=' + sample['outputName'] + ' \\\n' # // # // Input configurations (Second step and further) #// if sample['steps'][0].lower().strip() != 'none': i = 0 for step in sample['steps']: # Is this a HARVESTING step? If so, skip it! if steps[step]['skipStep']: continue # Not a HARVESTING step, continue normally. command += '--version=' + version + ' \\\n' command += '--py-cfg=' + steps[step]['outputName'] + ' \\\n' if i != 0 or not sample['isRealData']: command += '--stageout-intermediates=%s \\\n' % ( steps[step]['stagePrevious']) command += '--chained-input=output \\\n' else: dqmScenario = steps[step]['DQMData']['Scenario'] # // # // If a two-hlt tables workflow, will take conditions from #// the second step information #\\ if not steps[step]['stagePrevious'] and \ i == 0: conditions = steps[step]['conditions'] i += 1 # // # // Common options #// command += '--group=RelVal \\\n' command += '--category=relval \\\n' command += '--activity=RelVal \\\n' command += '--acquisition_era=' + sample['AcqEra'] + ' \\\n' command += '--only-sites=' + onlySites + ' \\\n' command += '--processing_version=' + processing_version + ' \\\n' # Workflow label if workflow_label: command += '--workflow_tag=' + workflow_label + ' \\\n' # // # // processingString="CMSSWVersion"_"Conditions"_"specialTag"_"extra-label" #// CMSSWVersion is appended only when the input dataset does not have it. 
#\\ processing_string_parts = [] if sample['AcqEra'] != version: processing_string_parts.append(version) processing_string_parts.append(conditions) if sample['specialTag']: processing_string_parts.append(sample['specialTag']) if extra_label: processing_string_parts.append(extra_label) command += '--processing_string=' + "_".join(processing_string_parts) if debug: print command print '' start_command_time = time.time() exitCode, output, error = executeCommand(command) command_time = time.time() - start_command_time if debug: print output print '' output = [x for x in output.split('\n') if x] if exitCode == 0: #parse output tmp = [] index = FindIndex(output,'Output Datasets') for dataset in output[index+1:]: tmp.append(dataset.strip()) # DQM Data dqmInfo = {} dqmInfo['Runs'] = sample['DQMData']['Runs'] if sample['isRealData']: dqmInfo['Scenario'] = dqmScenario else: dqmInfo['Scenario'] = sample['DQMData']['Scenario'] datasets.append({'unmerged': tmp, 'totalEvents': sample['totalEvents'], 'merged': [x.replace('-unmerged','') for x in tmp], 'DQMData': dqmInfo }) unmergedDatasets.append(tmp) index = FindIndex(output,'Created') if index == -1: print "No workflow was created by create*workflow.py" sys.exit(1) workflow = output[index].split()[1].strip() workflows.setdefault(workflow, {})['isRealData'] = sample['isRealData'] workflows[workflow]['time'] = command_time print 'workflow creation command for workflow:', workflow, \ 'exited with ExitCode:', exitCode else : print 'workflow creation command:' print command print 'failed: %s' % error sys.exit(1) if debug: print 'Created workflows:' print workflows.keys() print '' print "Unmerged datasets:" print unmergedDatasets # extract merged datasets for sample in unmergedDatasets: tmp = [] for dataset in sample: tmp.append(dataset.replace('-unmerged','')) mergedDatasets.append(tmp) workflow_time = time.time() - start_workflow_time print '' print 'Write helper scripts' print '' # WorkflowInjector:Input script inputScript = open('input.sh','w') inputScript.write('#!/bin/bash\n') feeder = 'None' for workflow in workflows.keys(): if workflows[workflow]['isRealData']: if feeder.find('ReReco') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin BlockFeeder\n') feeder = 'ReReco' else : if feeder.find('Request') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin RequestFeeder\n') feeder = 'Request' inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:Input ' + os.path.join(os.getcwd(), workflow) + '\n') inputScript.close() os.chmod('input.sh',0755) print 'Wrote WorkflowInjector:Input script to:',os.path.join(os.getcwd(),'input.sh') # ForceMerge forceMergeScript = open('forceMerge.sh','w') forceMergeScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : forceMergeScript.write('python $PRODAGENT_ROOT/util/publish.py ForceMerge ' + dataset + '\n') forceMergeScript.close() os.chmod('forceMerge.sh',0755) print 'Wrote ForceMerge script to:',os.path.join(os.getcwd(),'forceMerge.sh') # MigrateDatasetToGlobal migrateScript = open('migrateToGlobal.sh','w') migrateScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : migrateScript.write('python $PRODAGENT_ROOT/util/publish.py DBSInterface:MigrateDatasetToGlobal ' + dataset + '\n') migrateScript.close() os.chmod('migrateToGlobal.sh',0755) print 'Wrote DBSInterface:MigrateDatasetToGlobal script to:',os.path.join(os.getcwd(),'migrateToGlobal.sh') # 
PhEDExInjectDataset phedexScript = open('injectIntoPhEDEx.sh','w') phedexScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : phedexScript.write('python $PRODAGENT_ROOT/util/publish.py PhEDExInjectDataset ' + dataset + '\n') phedexScript.close() os.chmod('injectIntoPhEDEx.sh',0755) print 'Wrote PhEDExInjectDataset script to:',os.path.join(os.getcwd(),'injectIntoPhEDEx.sh') # DBS: query unmerged datasets queryUnmergedScript = open('queryUnmerged.sh','w') queryUnmergedScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryUnmergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryUnmergedScript.close() os.chmod('queryUnmerged.sh',0755) print 'Wrote DBS query script for unmerged datasets to:',os.path.join(os.getcwd(),'queryUnmerged.sh') # DBS: query merged datasets queryMergedScript = open('queryMerged.sh','w') queryMergedScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryMergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryMergedScript.close() os.chmod('queryMerged.sh',0755) print 'Wrote DBS query script for merged datasets to:',os.path.join(os.getcwd(),'queryMerged.sh') # DQMHarvesting DQMinputScript = open('DQMinput.sh','w') DQMinputScript.write("#!/bin/bash\n") reHarvest = re.compile(r'/.*/.*/(RECO|.*-RECO)') # Only RECO datasets for now. for sample in datasets: for dataset in sample['merged']: if reHarvest.match(dataset): for run in sample['DQMData']['Runs'].split(","): DQMinputScript.write('python $PRODAGENT_ROOT/util/harvestDQM.py --run=%s --path=%s --scenario=%s\n' % ( run, dataset, sample['DQMData']['Scenario'])) os.chmod('DQMinput.sh',0755) print 'Wrote DQMHarvesting script for merged datasets to:', os.path.join(os.getcwd(),'DQMinput.sh') # Output datasets list outputList = open('outputDatasets.txt','w') for sample in mergedDatasets : for dataset in sample : outputList.write(dataset + "\n") print 'Wrote output datasets list to:', os.path.join(os.getcwd(),'outputDatasets.txt') # File with expected number of events numberOfEvents = open('eventsExpected.txt','w') for sample in datasets: for dataset in sample['merged']: numberOfEvents.write("%s %s\n" % (sample['totalEvents'],dataset)) numberOfEvents.close() print 'Wrote events per dataset to:', os.path.join(os.getcwd(),'eventsExpected.txt') total_time = time.time() - start_total_time # File with timing report (Parsing, cmsDriver comands, workflow creation) timingInfo = open('timingInfo.txt', 'w') timingInfo.write('Total time: %s s\n' % total_time) timingInfo.write('Cofigs. creation time: %s s\n' % cmsDriver_time) timingInfo.write('Workflows creation time: %s s\n' % workflow_time) output_text = [] sum = 0 for workflow in workflows: if sum == 0: min = [workflow, workflows[workflow]['time']] max = [workflow, workflows[workflow]['time']] sum += workflows[workflow]['time'] output_text.append("%s: %s s" % (workflow, workflows[workflow]['time'])) if max[1] < workflows[workflow]['time']: max = [workflow, workflows[workflow]['time']] if min[1] > workflows[workflow]['time']: min = [workflow, workflows[workflow]['time']] timingInfo.write('Average time per workflow: %s s\n' % (int(sum) / int(len(workflows)))) timingInfo.write('Max. 
time on %s: %s s\n' % tuple(max)) timingInfo.write('Min. time on %s: %s s\n' % tuple(min)) timingInfo.write('=' * 10) timingInfo.write('Details of time per workflow:\n%s\n' % "\n".join(output_text)) |
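The guard added in this row (if runs_list:) means an empty run list falls back to every run known to DBS. The same selection can also be written without assembling a string for eval; below is a minimal sketch of that behaviour using plain comparisons (selectRuns and the run numbers are illustrative, not the script's code):

def selectRuns(runs_list, runs_in_dbs):
    # runs_list holds strings like "140120" or "140120-140130"; an empty list keeps every run.
    if not runs_list:
        return list(runs_in_dbs)
    selected = []
    for run in runs_in_dbs:
        for entry in runs_list:
            if '-' in entry:
                low, high = [int(x.strip()) for x in entry.split('-') if x.strip()]
                if low <= run <= high:
                    selected.append(run)
                    break
            elif run == int(entry):
                selected.append(run)
                break
    return selected

print selectRuns(['140120-140125', '140130'], [140119, 140121, 140130])   # [140121, 140130]
print selectRuns([], [140119, 140121])                                    # everything kept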
[str(x['RunNumber']) for x in input_file['RunsList']] | [int(x['RunNumber']) for x in input_file['RunsList']] | def main(argv) : """ prepareRelValworkflows prepare workflows for chained processing of RelVal samples - parse file holding cmsDriver commands for 1st and 2nd steps - prepare workflows - prepare WorkflowInjector:Input script - prepare ForceMerge script - prepare DBSMigrationToGlobal script - prepare PhEDExInjection script - prepare local DBS query script required parameters --samples <textfile> : list of RelVal sample parameter-sets in plain text file, one sample per line, # marks comment --version <processing version> : processing version (v1, v2, ... ) --DBSURL <URL> : URL of the local DBS (http://cmsdbsprod.cern.ch/cms_dbs_prod_local_07/servlet/DBSServlet | http://cmssrv46.fnal.gov:8080/DBS208/servlet/DBSServlet) --only-sites : Site where dataset is going to be processed or where the input dataset is taken from. Usually srm-cms.cern.ch and cmssrm.fnal.gov optional parameters --pileupdataset : input pileup dataset. It must be provided if the <samples> txt file contains PilepUp samples --lumi <number> : initial run for generation (default: 666666), set it to 777777 for high statistics samples --event <number> : initial event number (default: 1) --store-fail : store output files for failed jobs in chain processing. --read-dbs : DBS URL used for obtaining the list of available blocks for real data. Default: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet --scripts-dir : Path to workflow creation scripts (default: $PUTIL) --skip-config : Is the configuration file was already created, it will skip cmsDriver command execution --extra-label : Extra label for identifying the datasets: /RelVal*/CMSSW_X_Y_Z-<Conditions>_<SpecialTag>_<ExtraLabel>_<FilterName>-<version>/TIER --workflow-label : Label for the workflows. --help (-h) : help --debug (-d) : debug statements """ start_total_time = time.time() # default version = os.environ.get("CMSSW_VERSION") if version is None: print '' print 'CMSSW version cannot be determined from $CMSSW_VERSION' sys.exit(2) architecture = os.environ.get("SCRAM_ARCH") if architecture is None: print '' print 'CMSSW architecture cannot be determined from $SCRAM_ARCH' sys.exit(2) try: from ProdCommon.DataMgmt.DBS.DBSReader import DBSReader except ImportError, ex: print ex print 'Please load prodAgent libraries (point $PYTHONPATH to the right path).' 
sys.exit(2) samplesFile = None processing_version = None initial_run = "666666" initial_event = "1" debug = False DBSURL = None pileup_dataset = None storeFail = False readDBS = 'http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet' onlySites = None scriptsDir = '$PUTIL' #os.path.expandvars(os.environ.get('PUTIL', None)) skip_config = False extra_label = '' workflow_label = '' try: opts, args = getopt.getopt(argv, "", ["help", "debug", "samples=", "version=", "DBSURL=", "event=", "lumi=", "pileupdataset=", "store-fail", "read-dbs=", "only-sites=", "scripts-dir=", "skip-config", "extra-label=", "workflow-label="]) except getopt.GetoptError: print main.__doc__ sys.exit(2) # check command line parameter for opt, arg in opts : if opt == "--help" : print main.__doc__ sys.exit() elif opt == "--debug" : debug = True elif opt == "--samples" : samplesFile = arg elif opt == "--version" : processing_version = arg elif opt == "--lumi" : initial_run = arg elif opt == "--event" : initial_event = arg elif opt == "--DBSURL" : DBSURL = arg elif opt == "--pileupdataset" : pileup_dataset = arg print arg elif opt == '--store-fail': storeFail = True elif opt == '--read-dbs': readDBS = arg elif opt == '--only-sites': onlySites = arg elif opt == '--scripts-dir': if arg.endswith('/') : scriptsDir = arg[:-1] else: scriptsDir = arg scriptsDirTemp = scriptsDir # There's no need to expand the shell variables anymore #if scriptsDir.startswith('$') : # scriptsDirTemp = os.environ.get(scriptsDir[1:],None) # scriptsDir = os.path.expandvars(scriptsDirTemp) if scriptsDirTemp != None: if not os.path.exists(scriptsDirTemp): print "--scripts-dir argument does not exist, please verify." sys.exit(6) else: print "--scripts-dir argument does not exist, please verify." sys.exit(6) elif opt == "--skip-config": skip_config = True elif opt == "--extra-label": extra_label = arg elif opt == "--workflow-label": workflow_label = arg if samplesFile == None or processing_version == None or DBSURL == None : print main.__doc__ sys.exit(2) if debug: print "\nprepareRelValWorkflows.py was started with the following arguments: %s" % \ " ".join(argv) print "\n" samples = [] steps = {} primary_prefix = 'RelVal' max_step = 1 try: file = open(samplesFile) except IOError: print 'file with list of parameter-sets cannot be opened!' sys.exit(1) n_line = 0 print 'Parsing input file...' start_parse_time = time.time() for line in file.readlines(): n_line += 1 # Skipping lines with no info if line.strip() != '' and line.strip() != '\n' and \ not line.strip().startswith("#") and \ line.find('//') != 0: # I don't know what's the last condition for line_parts = [part.strip() for part in line.split('@@@') if part] dqmData = {} # Keys: Scenario, Run # // # // Parsing first step #// if not line.strip().startswith('STEP'): command = '' array = [] special_tag = '' conditions = None total_events = None events_per_job = None pile_up = False output_name = '' input_data = {} input_blocks = "" acq_era = version sample_info = line_parts[0].strip() # // # // Filling up sample's details #// sample_info_parts = [part.strip() for part in \ sample_info.split('++') if part] sample_number = sample_info_parts[0] #We might need this later sample_name = sample_info_parts[1] sample_steps = [i.strip() for i in \ sample_info_parts[2].split(',') if i] primary = primary_prefix + sample_name # // # // Is it a real data processing sample? According to this #// we assign or not the command variable. 
#\\ if line_parts[0].find('REALDATA') > -1: is_real_data = True else: is_real_data = False command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Parsing number of events #// if '--relval' in array : total_events = array[array.index('--relval')+1\ ].split(',')[0].strip() events_per_job = array[array.index('--relval')+1\ ].split(',')[1].strip() # // # // Special tag #// # FastSim if command.find('FASTSIM') > -1: special_tag = 'FastSim' # PileUp (at the same time with FastSim) if '--pileup' in array : # // # // Will use whatever argument of --pileup option is #// pileup_arg = array[array.index('--pileup') + 1] if pileup_arg.lower().strip() != 'nopileup': if special_tag: special_tag = "_".join( [special_tag, "PU", pileup_arg.strip()]) else: special_tag = "_".join(["PU", pileup_arg.strip()]) pile_up = True if pileup_dataset is None : print "You have to provide a pileup dataset." print "Usually it is a MinBias (RAW)." print "Use option --pileupdataset" sys.exit(5) # // # // Sort of custom tag #// if '--beamspot' in array: beamspot_arg = \ array[array.index('--beamspot') + 1].strip() if special_tag: special_tag = "_".join( [special_tag, beamspot_arg]) else: special_tag = beamspot_arg # // # // Cfg file's output name #// output_name = "_".join( [x for x in [primary, conditions, special_tag] if x] ) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # Filling up DQM information dqmData['Runs'] = '1' dqmData['Scenario'] = getDQMScenario(command) # // # // Collecting info for real data samples #// if is_real_data: # // # // Parsing dataset details. The following details are #// supported: REALDATA, RUN, LABEL, FILES, EVENTS, PDNAME #\\ # Producing tuples from the input options. data_options = [tuple(x.split(':')) \ for x in sample_info_parts[3].split(',') if x.strip()] # Parsing tuples for arg_v in data_options: if len(arg_v) == 2: input_data[arg_v[0].strip()] = arg_v[1].strip() elif len(arg_v) == 1: input_data[arg_v[0].strip()] = None else: print "Line %s has an extra ','." % (line) sys.exit(7) # // # // Verifiying optional arguments: RUN, LABEL, FILE, EVENTS, #// PRIMARY #\\ data_run = input_data.get('RUN', '') data_label = input_data.get('LABEL', '') data_files = input_data.get('FILES', '') data_events = input_data.get('EVENTS', '') data_pname = input_data.get('PRIMARY', None) if data_events: data_events = int(data_events) if data_files: data_files = int(data_events) # // # // Looking for best matching dataset. It should be just #// one, otherwise the script will exit. 
#\\ reader = DBSReader(readDBS) query = "find dataset where dataset like %s" % ( input_data['REALDATA']) result_xml = reader.dbs.executeQuery(query) # XML Handler result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found. if len(target_datasets) > 1: # Is this an input relval dataset produced in the # current release? query = "find dataset where dataset like %s " % ( input_data['REALDATA']) query += "and release=%s" % version result_xml = reader.dbs.executeQuery(query) result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found, match the processing # version if len(target_datasets) > 1: find_version = \ lambda x: x.find(processing_version) != -1 target_datasets = filter(find_version, target_datasets) if len(target_datasets) > 1: msg = "Dataset pattern in line %s is too broad." % line msg += "These datasets were found: %s" % ( " ".join(target_datasets)) print msg sys.exit(8) if not target_datasets: msg = "Dataset pattern produced no match in line %s" % ( line) print msg sys.exit(8) # Now I can look up the blocks for this dataset. target_dataset = target_datasets[0] input_data['REALDATA'] = target_dataset # // # // Looking up the blocks for a given Dataset and the #// provided list of runs #\\ runs_list = \ [x.strip() for x in data_run.split('|') if x.strip()] runs_in_dbs = [x['RunNumber'] for x in \ reader.dbs.listRuns(target_dataset)] runs_in_dbs.sort() # Creating lambda function for filtering runs. expr = '' # First a string expression to evaluate is_the_first = True for run in runs_list: if is_the_first: expr += "(" is_the_first = False else: expr += " or " # Run range: XXXXXX-XXXXXX if run.count("-"): run_limits = \ [x.strip() for x in run.split('-') if x.strip()] expr += "(x >= %s and x <= %s)" % ( run_limits[0], run_limits[1]) else: expr += "x == %s" % run if not is_the_first: expr += ")" # Here comes the lambda funtion runs_filter = lambda x: eval(expr) # Filtering runs in DBS using the list provided in the # input file. target_runs = filter(runs_filter, runs_in_dbs) # Pulling up input files from DBS (including run info). 
input_files = reader.dbs.listFiles( path=target_dataset, retriveList=['retrive_run']) # // # // Parsing input blocks #// blocks = {} for input_file in input_files: # Skip files with no events # A block will be skipped if all its files have 0 # events if input_file['NumberOfEvents'] == 0: continue runs = \ [str(x['RunNumber']) for x in input_file['RunsList']] for run in runs: if run in target_runs: break else: continue # skip file if it's not in the target_runs cur_files = \ blocks.setdefault(input_file['Block']['Name'], {}).setdefault('Files', 0) cur_events = \ blocks[input_file['Block']['Name']].setdefault( 'Events', 0) cur_runs = \ blocks[input_file['Block']['Name']].setdefault( 'Runs', set()) blocks[input_file['Block']['Name']]['Files'] += 1 blocks[input_file['Block']['Name']]['Events'] += \ input_file['NumberOfEvents'] blocks[input_file['Block']['Name']]['Runs'] = \ cur_runs.union(runs) # // # // Truncating blocks list #// total_events = 0 total_files = 0 blocks_to_process = [] runs_to_process = set() for block in blocks: blocks_to_process.append(block) runs_to_process = runs_to_process.union(blocks[block]['Runs']) total_events += blocks[block]['Events'] total_files += blocks[block]['Files'] if data_events and (data_events < total_events): break if data_files and (data_files < total_files): break input_blocks = ",".join(blocks_to_process) # // # // If PRIMARY is true, then it will use the #// sample_name value as primary dataset name, else it #\\ will use the input primary dataset name. # \\ if data_pname is not None and \ data_pname.lower() in ('y', 't', 'true'): primary = "".join([primary_prefix, sample_name]) else: primary = \ [x for x in input_data['REALDATA'].split("/") if x][0] # // # // Seting special tag #// special_tag_parts = [] # Add RelVal tag if not present. if target_dataset.find(primary_prefix) == -1: special_tag_parts.append(primary_prefix) # Add LABEL if data_label: special_tag_parts.append(data_label) special_tag = "_".join(special_tag_parts) # // # // Setting Acq. 
Era #// #processed_dataset = target_dataset.split('/')[2] #dataset_acq_era = processed_dataset.split("-")[0] #if dataset_acq_era.startswith(version): # acq_era = version #else: # acq_era = dataset_acq_era # Filling up DQM information dqmData['Runs'] = ",".join(list(runs_to_process)) # // # // Composing a dictionary per sample #// dict = {} dict['sampleName'] = sample_name dict['command'] = command dict['primary'] = primary dict['outputName'] = output_name dict['conditions'] = conditions dict['specialTag'] = special_tag dict['totalEvents'] = total_events dict['eventsPerJob'] = events_per_job dict['pileUp'] = pile_up dict['isRealData'] = is_real_data dict['inputData'] = input_data dict['inputBlocks'] = input_blocks dict['steps'] = sample_steps dict['AcqEra'] = acq_era dict['DQMData'] = dqmData samples.append(dict) if debug: print 'Parsing' print 'Sample:', sample_name print 'Command:', command print 'Conditions:', conditions print 'Special tag:', special_tag print 'Total events:', total_events print 'Events per job:', events_per_job print 'Steps:', sample_steps print 'PileUp:', pile_up print 'Input data:', input_data print 'Input blocks:', input_blocks print 'DQMData:', dqmData print '' # // # // No a first step command (second HLT table, RECO, ALCA, etc) #// else: step_number = int(line_parts[0].split('++')[0].strip()[-1]) step_name = line_parts[0].split('++')[1].strip() command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Cfg file's output name #// output_name = "_".join([step_name, conditions]) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # // # // Second trigger table? This may be changed, right now I am #// assuming that all 4 steps workflows are like this. #\\ stage_previous = True if step_number == 2: if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].find('RECO') < 0: stage_previous = False if step_number > max_step: max_step = step_number # // # // HARVESTING cmsDriver commands should be ignored. RelVals #// should not run any HARVESTING configuration. Harvestings #\\ run independently after the datasets are produced. 
# \\ skip_step = False if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].count('HARVESTING') > 0: skip_step = True # // # // Composing a dictionary per step #// dict = {} dict['stepNumber'] = step_number dict['command'] = command dict['outputName'] = output_name dict['conditions'] = conditions dict['stagePrevious'] = stage_previous dict['DQMData'] = {'Scenario': getDQMScenario(command)} dict['skipStep'] = skip_step # // # // Step name should be unique #// if step_name not in steps: steps[step_name] = dict else: print "Label %s is repeated!!!" % step_name sys.exit(1) if debug: print 'Parsing' print 'Step name:', step_name print 'Step number:', step_number print 'Command:', command print 'Conditions:', conditions print 'Stage previous:', stage_previous print 'DQM Data:', dict['DQMData'] print '' parse_time = time.time() - start_parse_time file.close() if debug: print "Collected information step 1" for sample in samples: print 'Sample name:', sample['sampleName'] print 'Command', sample['command'] print 'Real data:', sample['isRealData'] print 'Input data:', sample['inputData'] print 'Input blocks', sample['inputBlocks'] print 'Conditions:', sample['conditions'] print 'Total events:', sample['totalEvents'] print 'Events per job:', sample['eventsPerJob'] print 'Output name:', sample['outputName'] print 'Steps:', sample['steps'] print 'PileUp:', sample['pileUp'] print 'Special tag:', sample['specialTag'] print 'Acq. Era:', sample['AcqEra'] print 'DQM data:', sample['DQMData'] print '' for i in range(2, max_step+1): print 'Collected information step %s' % i for step in steps: if steps[step]['stepNumber'] == i: print 'Step name:', step print 'Command:', steps[step]['command'] print 'Conditions:', steps[step]['conditions'] print 'Stage previous:', steps[step]['stagePrevious'] print 'DQM Data:', steps[step]['DQMData'] print '' # // # // Execute cmsDriver command #// print '' print 'Executing cmsDriver commands for step 1 configurations' print '' start_cmsDriver_time = time.time() for sample in samples: if not sample['isRealData']: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), sample['outputName']])) and skip_config: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'was already issued, skipping.' continue exitCode, output, error = executeCommand(sample['command']) if exitCode == 0: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'exited with ExitCode:', exitCode else : print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'failed with ExitCode:', exitCode sys.exit(1) else : msg = 'Real Data:\n' msg += 'Input dataset: %s\n' % (sample['inputData']['REALDATA']) msg += 'Run: %s\n' % (sample['inputData'].get('RUN', 'All')) msg += 'Input blocks: %s' % (sample['inputBlocks']) print msg for i in range(2, max_step+1): print '' print 'Executing cmsDriver commands for step %s configurations' % i print '' for step in steps: if steps[step]['stepNumber'] == i: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), steps[step]['outputName']])) and skip_config: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'],'was already issued, skipping.' continue # // # // Skip HARVESTING cmsDriver commands #// if steps[step]['skipStep']: print 'This is a HARVESTING cmsDriver command, skipping. 
' continue exitCode, output, error = executeCommand(steps[step]['command']) if exitCode == 0: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'exited with ExitCode:', exitCode else: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'failed with ExitCode:', exitCode sys.exit(1) cmsDriver_time = time.time() - start_cmsDriver_time print '' print 'Workflow creation' print '' start_workflow_time = time.time() datasets = [] unmergedDatasets = [] mergedDatasets = [] workflows = {} # // # // Create workflows #// for sample in samples: command = 'python ' + scriptsDir conditions = '' # Conditions -> processingString # // # // In case we are processing data #// if sample['isRealData']: command += '/createProcessingWorkflow.py \\\n' # Not changing the primary dataset name for real data. command += '--override-channel=' + sample['primary'] + ' \\\n' command += '--dataset=' + sample['inputData']['REALDATA'] + ' \\\n' command += '--only-blocks=' + sample['inputBlocks'] + ' \\\n' command += '--dbs-url=' + readDBS + ' \\\n' conditions = steps[sample['steps'][0]]['conditions'] command += '--split-type=file \\\n' command += '--split-size=1 \\\n' # // # // MC workflows #// else: command += '/createProductionWorkflow.py \\\n' command += '--channel=' + sample['primary'] + ' \\\n' conditions = sample['conditions'] command += '--starting-run=' + initial_run + ' \\\n' command += '--starting-event=' + initial_event + ' \\\n' command += '--totalevents=' + sample['totalEvents'] + ' \\\n' command += '--eventsperjob=' + sample['eventsPerJob'] + ' \\\n' if sample['pileUp']: command += '--pileup-dataset=' + pileup_dataset + ' \\\n' if storeFail: command += '--store-fail=True \\\n' # // # // First step #// command += '--version=' + version + ' \\\n' command += '--py-cfg=' + sample['outputName'] + ' \\\n' # // # // Input configurations (Second step and further) #// if sample['steps'][0].lower().strip() != 'none': i = 0 for step in sample['steps']: # Is this a HARVESTING step? If so, skip it! if steps[step]['skipStep']: continue # Not a HARVESTING step, continue normally. command += '--version=' + version + ' \\\n' command += '--py-cfg=' + steps[step]['outputName'] + ' \\\n' if i != 0 or not sample['isRealData']: command += '--stageout-intermediates=%s \\\n' % ( steps[step]['stagePrevious']) command += '--chained-input=output \\\n' else: dqmScenario = steps[step]['DQMData']['Scenario'] # // # // If a two-hlt tables workflow, will take conditions from #// the second step information #\\ if not steps[step]['stagePrevious'] and \ i == 0: conditions = steps[step]['conditions'] i += 1 # // # // Common options #// command += '--group=RelVal \\\n' command += '--category=relval \\\n' command += '--activity=RelVal \\\n' command += '--acquisition_era=' + sample['AcqEra'] + ' \\\n' command += '--only-sites=' + onlySites + ' \\\n' command += '--processing_version=' + processing_version + ' \\\n' # Workflow label if workflow_label: command += '--workflow_tag=' + workflow_label + ' \\\n' # // # // processingString="CMSSWVersion"_"Conditions"_"specialTag"_"extra-label" #// CMSSWVersion is appended only when the input dataset does not have it. 
#\\ processing_string_parts = [] if sample['AcqEra'] != version: processing_string_parts.append(version) processing_string_parts.append(conditions) if sample['specialTag']: processing_string_parts.append(sample['specialTag']) if extra_label: processing_string_parts.append(extra_label) command += '--processing_string=' + "_".join(processing_string_parts) if debug: print command print '' start_command_time = time.time() exitCode, output, error = executeCommand(command) command_time = time.time() - start_command_time if debug: print output print '' output = [x for x in output.split('\n') if x] if exitCode == 0: #parse output tmp = [] index = FindIndex(output,'Output Datasets') for dataset in output[index+1:]: tmp.append(dataset.strip()) # DQM Data dqmInfo = {} dqmInfo['Runs'] = sample['DQMData']['Runs'] if sample['isRealData']: dqmInfo['Scenario'] = dqmScenario else: dqmInfo['Scenario'] = sample['DQMData']['Scenario'] datasets.append({'unmerged': tmp, 'totalEvents': sample['totalEvents'], 'merged': [x.replace('-unmerged','') for x in tmp], 'DQMData': dqmInfo }) unmergedDatasets.append(tmp) index = FindIndex(output,'Created') if index == -1: print "No workflow was created by create*workflow.py" sys.exit(1) workflow = output[index].split()[1].strip() workflows.setdefault(workflow, {})['isRealData'] = sample['isRealData'] workflows[workflow]['time'] = command_time print 'workflow creation command for workflow:', workflow, \ 'exited with ExitCode:', exitCode else : print 'workflow creation command:' print command print 'failed: %s' % error sys.exit(1) if debug: print 'Created workflows:' print workflows.keys() print '' print "Unmerged datasets:" print unmergedDatasets # extract merged datasets for sample in unmergedDatasets: tmp = [] for dataset in sample: tmp.append(dataset.replace('-unmerged','')) mergedDatasets.append(tmp) workflow_time = time.time() - start_workflow_time print '' print 'Write helper scripts' print '' # WorkflowInjector:Input script inputScript = open('input.sh','w') inputScript.write('#!/bin/bash\n') feeder = 'None' for workflow in workflows.keys(): if workflows[workflow]['isRealData']: if feeder.find('ReReco') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin BlockFeeder\n') feeder = 'ReReco' else : if feeder.find('Request') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin RequestFeeder\n') feeder = 'Request' inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:Input ' + os.path.join(os.getcwd(), workflow) + '\n') inputScript.close() os.chmod('input.sh',0755) print 'Wrote WorkflowInjector:Input script to:',os.path.join(os.getcwd(),'input.sh') # ForceMerge forceMergeScript = open('forceMerge.sh','w') forceMergeScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : forceMergeScript.write('python $PRODAGENT_ROOT/util/publish.py ForceMerge ' + dataset + '\n') forceMergeScript.close() os.chmod('forceMerge.sh',0755) print 'Wrote ForceMerge script to:',os.path.join(os.getcwd(),'forceMerge.sh') # MigrateDatasetToGlobal migrateScript = open('migrateToGlobal.sh','w') migrateScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : migrateScript.write('python $PRODAGENT_ROOT/util/publish.py DBSInterface:MigrateDatasetToGlobal ' + dataset + '\n') migrateScript.close() os.chmod('migrateToGlobal.sh',0755) print 'Wrote DBSInterface:MigrateDatasetToGlobal script to:',os.path.join(os.getcwd(),'migrateToGlobal.sh') # 
PhEDExInjectDataset phedexScript = open('injectIntoPhEDEx.sh','w') phedexScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : phedexScript.write('python $PRODAGENT_ROOT/util/publish.py PhEDExInjectDataset ' + dataset + '\n') phedexScript.close() os.chmod('injectIntoPhEDEx.sh',0755) print 'Wrote PhEDExInjectDataset script to:',os.path.join(os.getcwd(),'injectIntoPhEDEx.sh') # DBS: query unmerged datasets queryUnmergedScript = open('queryUnmerged.sh','w') queryUnmergedScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryUnmergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryUnmergedScript.close() os.chmod('queryUnmerged.sh',0755) print 'Wrote DBS query script for unmerged datasets to:',os.path.join(os.getcwd(),'queryUnmerged.sh') # DBS: query merged datasets queryMergedScript = open('queryMerged.sh','w') queryMergedScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryMergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryMergedScript.close() os.chmod('queryMerged.sh',0755) print 'Wrote DBS query script for merged datasets to:',os.path.join(os.getcwd(),'queryMerged.sh') # DQMHarvesting DQMinputScript = open('DQMinput.sh','w') DQMinputScript.write("#!/bin/bash\n") reHarvest = re.compile(r'/.*/.*/(RECO|.*-RECO)') # Only RECO datasets for now. for sample in datasets: for dataset in sample['merged']: if reHarvest.match(dataset): for run in sample['DQMData']['Runs'].split(","): DQMinputScript.write('python $PRODAGENT_ROOT/util/harvestDQM.py --run=%s --path=%s --scenario=%s\n' % ( run, dataset, sample['DQMData']['Scenario'])) os.chmod('DQMinput.sh',0755) print 'Wrote DQMHarvesting script for merged datasets to:', os.path.join(os.getcwd(),'DQMinput.sh') # Output datasets list outputList = open('outputDatasets.txt','w') for sample in mergedDatasets : for dataset in sample : outputList.write(dataset + "\n") print 'Wrote output datasets list to:', os.path.join(os.getcwd(),'outputDatasets.txt') # File with expected number of events numberOfEvents = open('eventsExpected.txt','w') for sample in datasets: for dataset in sample['merged']: numberOfEvents.write("%s %s\n" % (sample['totalEvents'],dataset)) numberOfEvents.close() print 'Wrote events per dataset to:', os.path.join(os.getcwd(),'eventsExpected.txt') total_time = time.time() - start_total_time # File with timing report (Parsing, cmsDriver comands, workflow creation) timingInfo = open('timingInfo.txt', 'w') timingInfo.write('Total time: %s s\n' % total_time) timingInfo.write('Cofigs. creation time: %s s\n' % cmsDriver_time) timingInfo.write('Workflows creation time: %s s\n' % workflow_time) output_text = [] sum = 0 for workflow in workflows: if sum == 0: min = [workflow, workflows[workflow]['time']] max = [workflow, workflows[workflow]['time']] sum += workflows[workflow]['time'] output_text.append("%s: %s s" % (workflow, workflows[workflow]['time'])) if max[1] < workflows[workflow]['time']: max = [workflow, workflows[workflow]['time']] if min[1] > workflows[workflow]['time']: min = [workflow, workflows[workflow]['time']] timingInfo.write('Average time per workflow: %s s\n' % (int(sum) / int(len(workflows)))) timingInfo.write('Max. 
time on %s: %s s\n' % tuple(max)) timingInfo.write('Min. time on %s: %s s\n' % tuple(min)) timingInfo.write('=' * 10) timingInfo.write('Details of time per workflow:\n%s\n' % "\n".join(output_text)) |
dqmData['Runs'] = ",".join(list(runs_to_process)) | dqmData['Runs'] = \ ",".join([str(x) for x in list(runs_to_process)]) | def main(argv) : """ prepareRelValworkflows prepare workflows for chained processing of RelVal samples - parse file holding cmsDriver commands for 1st and 2nd steps - prepare workflows - prepare WorkflowInjector:Input script - prepare ForceMerge script - prepare DBSMigrationToGlobal script - prepare PhEDExInjection script - prepare local DBS query script required parameters --samples <textfile> : list of RelVal sample parameter-sets in plain text file, one sample per line, # marks comment --version <processing version> : processing version (v1, v2, ... ) --DBSURL <URL> : URL of the local DBS (http://cmsdbsprod.cern.ch/cms_dbs_prod_local_07/servlet/DBSServlet | http://cmssrv46.fnal.gov:8080/DBS208/servlet/DBSServlet) --only-sites : Site where dataset is going to be processed or where the input dataset is taken from. Usually srm-cms.cern.ch and cmssrm.fnal.gov optional parameters --pileupdataset : input pileup dataset. It must be provided if the <samples> txt file contains PilepUp samples --lumi <number> : initial run for generation (default: 666666), set it to 777777 for high statistics samples --event <number> : initial event number (default: 1) --store-fail : store output files for failed jobs in chain processing. --read-dbs : DBS URL used for obtaining the list of available blocks for real data. Default: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet --scripts-dir : Path to workflow creation scripts (default: $PUTIL) --skip-config : Is the configuration file was already created, it will skip cmsDriver command execution --extra-label : Extra label for identifying the datasets: /RelVal*/CMSSW_X_Y_Z-<Conditions>_<SpecialTag>_<ExtraLabel>_<FilterName>-<version>/TIER --workflow-label : Label for the workflows. --help (-h) : help --debug (-d) : debug statements """ start_total_time = time.time() # default version = os.environ.get("CMSSW_VERSION") if version is None: print '' print 'CMSSW version cannot be determined from $CMSSW_VERSION' sys.exit(2) architecture = os.environ.get("SCRAM_ARCH") if architecture is None: print '' print 'CMSSW architecture cannot be determined from $SCRAM_ARCH' sys.exit(2) try: from ProdCommon.DataMgmt.DBS.DBSReader import DBSReader except ImportError, ex: print ex print 'Please load prodAgent libraries (point $PYTHONPATH to the right path).' 
sys.exit(2) samplesFile = None processing_version = None initial_run = "666666" initial_event = "1" debug = False DBSURL = None pileup_dataset = None storeFail = False readDBS = 'http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet' onlySites = None scriptsDir = '$PUTIL' #os.path.expandvars(os.environ.get('PUTIL', None)) skip_config = False extra_label = '' workflow_label = '' try: opts, args = getopt.getopt(argv, "", ["help", "debug", "samples=", "version=", "DBSURL=", "event=", "lumi=", "pileupdataset=", "store-fail", "read-dbs=", "only-sites=", "scripts-dir=", "skip-config", "extra-label=", "workflow-label="]) except getopt.GetoptError: print main.__doc__ sys.exit(2) # check command line parameter for opt, arg in opts : if opt == "--help" : print main.__doc__ sys.exit() elif opt == "--debug" : debug = True elif opt == "--samples" : samplesFile = arg elif opt == "--version" : processing_version = arg elif opt == "--lumi" : initial_run = arg elif opt == "--event" : initial_event = arg elif opt == "--DBSURL" : DBSURL = arg elif opt == "--pileupdataset" : pileup_dataset = arg print arg elif opt == '--store-fail': storeFail = True elif opt == '--read-dbs': readDBS = arg elif opt == '--only-sites': onlySites = arg elif opt == '--scripts-dir': if arg.endswith('/') : scriptsDir = arg[:-1] else: scriptsDir = arg scriptsDirTemp = scriptsDir # There's no need to expand the shell variables anymore #if scriptsDir.startswith('$') : # scriptsDirTemp = os.environ.get(scriptsDir[1:],None) # scriptsDir = os.path.expandvars(scriptsDirTemp) if scriptsDirTemp != None: if not os.path.exists(scriptsDirTemp): print "--scripts-dir argument does not exist, please verify." sys.exit(6) else: print "--scripts-dir argument does not exist, please verify." sys.exit(6) elif opt == "--skip-config": skip_config = True elif opt == "--extra-label": extra_label = arg elif opt == "--workflow-label": workflow_label = arg if samplesFile == None or processing_version == None or DBSURL == None : print main.__doc__ sys.exit(2) if debug: print "\nprepareRelValWorkflows.py was started with the following arguments: %s" % \ " ".join(argv) print "\n" samples = [] steps = {} primary_prefix = 'RelVal' max_step = 1 try: file = open(samplesFile) except IOError: print 'file with list of parameter-sets cannot be opened!' sys.exit(1) n_line = 0 print 'Parsing input file...' start_parse_time = time.time() for line in file.readlines(): n_line += 1 # Skipping lines with no info if line.strip() != '' and line.strip() != '\n' and \ not line.strip().startswith("#") and \ line.find('//') != 0: # I don't know what's the last condition for line_parts = [part.strip() for part in line.split('@@@') if part] dqmData = {} # Keys: Scenario, Run # // # // Parsing first step #// if not line.strip().startswith('STEP'): command = '' array = [] special_tag = '' conditions = None total_events = None events_per_job = None pile_up = False output_name = '' input_data = {} input_blocks = "" acq_era = version sample_info = line_parts[0].strip() # // # // Filling up sample's details #// sample_info_parts = [part.strip() for part in \ sample_info.split('++') if part] sample_number = sample_info_parts[0] #We might need this later sample_name = sample_info_parts[1] sample_steps = [i.strip() for i in \ sample_info_parts[2].split(',') if i] primary = primary_prefix + sample_name # // # // Is it a real data processing sample? According to this #// we assign or not the command variable. 
#\\ if line_parts[0].find('REALDATA') > -1: is_real_data = True else: is_real_data = False command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Parsing number of events #// if '--relval' in array : total_events = array[array.index('--relval')+1\ ].split(',')[0].strip() events_per_job = array[array.index('--relval')+1\ ].split(',')[1].strip() # // # // Special tag #// # FastSim if command.find('FASTSIM') > -1: special_tag = 'FastSim' # PileUp (at the same time with FastSim) if '--pileup' in array : # // # // Will use whatever argument of --pileup option is #// pileup_arg = array[array.index('--pileup') + 1] if pileup_arg.lower().strip() != 'nopileup': if special_tag: special_tag = "_".join( [special_tag, "PU", pileup_arg.strip()]) else: special_tag = "_".join(["PU", pileup_arg.strip()]) pile_up = True if pileup_dataset is None : print "You have to provide a pileup dataset." print "Usually it is a MinBias (RAW)." print "Use option --pileupdataset" sys.exit(5) # // # // Sort of custom tag #// if '--beamspot' in array: beamspot_arg = \ array[array.index('--beamspot') + 1].strip() if special_tag: special_tag = "_".join( [special_tag, beamspot_arg]) else: special_tag = beamspot_arg # // # // Cfg file's output name #// output_name = "_".join( [x for x in [primary, conditions, special_tag] if x] ) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # Filling up DQM information dqmData['Runs'] = '1' dqmData['Scenario'] = getDQMScenario(command) # // # // Collecting info for real data samples #// if is_real_data: # // # // Parsing dataset details. The following details are #// supported: REALDATA, RUN, LABEL, FILES, EVENTS, PDNAME #\\ # Producing tuples from the input options. data_options = [tuple(x.split(':')) \ for x in sample_info_parts[3].split(',') if x.strip()] # Parsing tuples for arg_v in data_options: if len(arg_v) == 2: input_data[arg_v[0].strip()] = arg_v[1].strip() elif len(arg_v) == 1: input_data[arg_v[0].strip()] = None else: print "Line %s has an extra ','." % (line) sys.exit(7) # // # // Verifiying optional arguments: RUN, LABEL, FILE, EVENTS, #// PRIMARY #\\ data_run = input_data.get('RUN', '') data_label = input_data.get('LABEL', '') data_files = input_data.get('FILES', '') data_events = input_data.get('EVENTS', '') data_pname = input_data.get('PRIMARY', None) if data_events: data_events = int(data_events) if data_files: data_files = int(data_events) # // # // Looking for best matching dataset. It should be just #// one, otherwise the script will exit. 
#\\ reader = DBSReader(readDBS) query = "find dataset where dataset like %s" % ( input_data['REALDATA']) result_xml = reader.dbs.executeQuery(query) # XML Handler result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found. if len(target_datasets) > 1: # Is this an input relval dataset produced in the # current release? query = "find dataset where dataset like %s " % ( input_data['REALDATA']) query += "and release=%s" % version result_xml = reader.dbs.executeQuery(query) result_list = DBSXMLParser(result_xml) target_datasets = [x['dataset'] for x in result_list] # If more than one dataset is found, match the processing # version if len(target_datasets) > 1: find_version = \ lambda x: x.find(processing_version) != -1 target_datasets = filter(find_version, target_datasets) if len(target_datasets) > 1: msg = "Dataset pattern in line %s is too broad." % line msg += "These datasets were found: %s" % ( " ".join(target_datasets)) print msg sys.exit(8) if not target_datasets: msg = "Dataset pattern produced no match in line %s" % ( line) print msg sys.exit(8) # Now I can look up the blocks for this dataset. target_dataset = target_datasets[0] input_data['REALDATA'] = target_dataset # // # // Looking up the blocks for a given Dataset and the #// provided list of runs #\\ runs_list = \ [x.strip() for x in data_run.split('|') if x.strip()] runs_in_dbs = [x['RunNumber'] for x in \ reader.dbs.listRuns(target_dataset)] runs_in_dbs.sort() # Creating lambda function for filtering runs. expr = '' # First a string expression to evaluate is_the_first = True for run in runs_list: if is_the_first: expr += "(" is_the_first = False else: expr += " or " # Run range: XXXXXX-XXXXXX if run.count("-"): run_limits = \ [x.strip() for x in run.split('-') if x.strip()] expr += "(x >= %s and x <= %s)" % ( run_limits[0], run_limits[1]) else: expr += "x == %s" % run if not is_the_first: expr += ")" # Here comes the lambda funtion runs_filter = lambda x: eval(expr) # Filtering runs in DBS using the list provided in the # input file. target_runs = filter(runs_filter, runs_in_dbs) # Pulling up input files from DBS (including run info). 
input_files = reader.dbs.listFiles( path=target_dataset, retriveList=['retrive_run']) # // # // Parsing input blocks #// blocks = {} for input_file in input_files: # Skip files with no events # A block will be skipped if all its files have 0 # events if input_file['NumberOfEvents'] == 0: continue runs = \ [str(x['RunNumber']) for x in input_file['RunsList']] for run in runs: if run in target_runs: break else: continue # skip file if it's not in the target_runs cur_files = \ blocks.setdefault(input_file['Block']['Name'], {}).setdefault('Files', 0) cur_events = \ blocks[input_file['Block']['Name']].setdefault( 'Events', 0) cur_runs = \ blocks[input_file['Block']['Name']].setdefault( 'Runs', set()) blocks[input_file['Block']['Name']]['Files'] += 1 blocks[input_file['Block']['Name']]['Events'] += \ input_file['NumberOfEvents'] blocks[input_file['Block']['Name']]['Runs'] = \ cur_runs.union(runs) # // # // Truncating blocks list #// total_events = 0 total_files = 0 blocks_to_process = [] runs_to_process = set() for block in blocks: blocks_to_process.append(block) runs_to_process = runs_to_process.union(blocks[block]['Runs']) total_events += blocks[block]['Events'] total_files += blocks[block]['Files'] if data_events and (data_events < total_events): break if data_files and (data_files < total_files): break input_blocks = ",".join(blocks_to_process) # // # // If PRIMARY is true, then it will use the #// sample_name value as primary dataset name, else it #\\ will use the input primary dataset name. # \\ if data_pname is not None and \ data_pname.lower() in ('y', 't', 'true'): primary = "".join([primary_prefix, sample_name]) else: primary = \ [x for x in input_data['REALDATA'].split("/") if x][0] # // # // Seting special tag #// special_tag_parts = [] # Add RelVal tag if not present. if target_dataset.find(primary_prefix) == -1: special_tag_parts.append(primary_prefix) # Add LABEL if data_label: special_tag_parts.append(data_label) special_tag = "_".join(special_tag_parts) # // # // Setting Acq. 
Era #// #processed_dataset = target_dataset.split('/')[2] #dataset_acq_era = processed_dataset.split("-")[0] #if dataset_acq_era.startswith(version): # acq_era = version #else: # acq_era = dataset_acq_era # Filling up DQM information dqmData['Runs'] = ",".join(list(runs_to_process)) # // # // Composing a dictionary per sample #// dict = {} dict['sampleName'] = sample_name dict['command'] = command dict['primary'] = primary dict['outputName'] = output_name dict['conditions'] = conditions dict['specialTag'] = special_tag dict['totalEvents'] = total_events dict['eventsPerJob'] = events_per_job dict['pileUp'] = pile_up dict['isRealData'] = is_real_data dict['inputData'] = input_data dict['inputBlocks'] = input_blocks dict['steps'] = sample_steps dict['AcqEra'] = acq_era dict['DQMData'] = dqmData samples.append(dict) if debug: print 'Parsing' print 'Sample:', sample_name print 'Command:', command print 'Conditions:', conditions print 'Special tag:', special_tag print 'Total events:', total_events print 'Events per job:', events_per_job print 'Steps:', sample_steps print 'PileUp:', pile_up print 'Input data:', input_data print 'Input blocks:', input_blocks print 'DQMData:', dqmData print '' # // # // No a first step command (second HLT table, RECO, ALCA, etc) #// else: step_number = int(line_parts[0].split('++')[0].strip()[-1]) step_name = line_parts[0].split('++')[1].strip() command = line_parts[1].strip() # // # // Clean cmsDriver command format #// if command.find('=') > -1: command = command.replace('=',' ') array = [i for i in command.split() if i] # // # // Remove --python_filename if present #// if '--python_filename' in array: del array[array.index('--python_filename'):\ array.index('--python_filename')+2] # // # // Parse conditions #// if '--conditions' in array: conditions_arg = array[array.index('--conditions')+1] if conditions_arg.startswith('auto:'): conditions_key = conditions_arg.split('auto:')[1] conditions_value = autoCond[conditions_key] else: conditions_value = conditions_arg conditions = [ x.strip() \ for x in conditions_value.split(',') \ if x.find("::") != -1 ][0].split('::')[0].strip() else: conditions = 'SpecialConditions' # // # // Cfg file's output name #// output_name = "_".join([step_name, conditions]) + ".py" # // # // Add command options #// if command.find('no_exec') < 0: array.append('--no_exec') if command.find('python_filename') < 0: array.append('--python_filename') array.append(output_name) # Recomposing cmsDriver command command = " ".join(array) # // # // Second trigger table? This may be changed, right now I am #// assuming that all 4 steps workflows are like this. #\\ stage_previous = True if step_number == 2: if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].find('RECO') < 0: stage_previous = False if step_number > max_step: max_step = step_number # // # // HARVESTING cmsDriver commands should be ignored. RelVals #// should not run any HARVESTING configuration. Harvestings #\\ run independently after the datasets are produced. 
# \\ skip_step = False if '-s' in array: index = array.index('-s') else: index = array.index('--step') if array[index+1].count('HARVESTING') > 0: skip_step = True # // # // Composing a dictionary per step #// dict = {} dict['stepNumber'] = step_number dict['command'] = command dict['outputName'] = output_name dict['conditions'] = conditions dict['stagePrevious'] = stage_previous dict['DQMData'] = {'Scenario': getDQMScenario(command)} dict['skipStep'] = skip_step # // # // Step name should be unique #// if step_name not in steps: steps[step_name] = dict else: print "Label %s is repeated!!!" % step_name sys.exit(1) if debug: print 'Parsing' print 'Step name:', step_name print 'Step number:', step_number print 'Command:', command print 'Conditions:', conditions print 'Stage previous:', stage_previous print 'DQM Data:', dict['DQMData'] print '' parse_time = time.time() - start_parse_time file.close() if debug: print "Collected information step 1" for sample in samples: print 'Sample name:', sample['sampleName'] print 'Command', sample['command'] print 'Real data:', sample['isRealData'] print 'Input data:', sample['inputData'] print 'Input blocks', sample['inputBlocks'] print 'Conditions:', sample['conditions'] print 'Total events:', sample['totalEvents'] print 'Events per job:', sample['eventsPerJob'] print 'Output name:', sample['outputName'] print 'Steps:', sample['steps'] print 'PileUp:', sample['pileUp'] print 'Special tag:', sample['specialTag'] print 'Acq. Era:', sample['AcqEra'] print 'DQM data:', sample['DQMData'] print '' for i in range(2, max_step+1): print 'Collected information step %s' % i for step in steps: if steps[step]['stepNumber'] == i: print 'Step name:', step print 'Command:', steps[step]['command'] print 'Conditions:', steps[step]['conditions'] print 'Stage previous:', steps[step]['stagePrevious'] print 'DQM Data:', steps[step]['DQMData'] print '' # // # // Execute cmsDriver command #// print '' print 'Executing cmsDriver commands for step 1 configurations' print '' start_cmsDriver_time = time.time() for sample in samples: if not sample['isRealData']: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), sample['outputName']])) and skip_config: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'was already issued, skipping.' continue exitCode, output, error = executeCommand(sample['command']) if exitCode == 0: print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'exited with ExitCode:', exitCode else : print 'cmsDriver command for step 1 to produce:', \ sample['outputName'],'failed with ExitCode:', exitCode sys.exit(1) else : msg = 'Real Data:\n' msg += 'Input dataset: %s\n' % (sample['inputData']['REALDATA']) msg += 'Run: %s\n' % (sample['inputData'].get('RUN', 'All')) msg += 'Input blocks: %s' % (sample['inputBlocks']) print msg for i in range(2, max_step+1): print '' print 'Executing cmsDriver commands for step %s configurations' % i print '' for step in steps: if steps[step]['stepNumber'] == i: # // # // if the cfg. file was already created, we'll skip cmsDriver #// command execution. #\\ if os.path.exists("/".join([os.getcwd(), steps[step]['outputName']])) and skip_config: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'],'was already issued, skipping.' continue # // # // Skip HARVESTING cmsDriver commands #// if steps[step]['skipStep']: print 'This is a HARVESTING cmsDriver command, skipping. 
' continue exitCode, output, error = executeCommand(steps[step]['command']) if exitCode == 0: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'exited with ExitCode:', exitCode else: print 'cmsDriver command for step %s to produce:' % i, \ steps[step]['outputName'], \ 'failed with ExitCode:', exitCode sys.exit(1) cmsDriver_time = time.time() - start_cmsDriver_time print '' print 'Workflow creation' print '' start_workflow_time = time.time() datasets = [] unmergedDatasets = [] mergedDatasets = [] workflows = {} # // # // Create workflows #// for sample in samples: command = 'python ' + scriptsDir conditions = '' # Conditions -> processingString # // # // In case we are processing data #// if sample['isRealData']: command += '/createProcessingWorkflow.py \\\n' # Not changing the primary dataset name for real data. command += '--override-channel=' + sample['primary'] + ' \\\n' command += '--dataset=' + sample['inputData']['REALDATA'] + ' \\\n' command += '--only-blocks=' + sample['inputBlocks'] + ' \\\n' command += '--dbs-url=' + readDBS + ' \\\n' conditions = steps[sample['steps'][0]]['conditions'] command += '--split-type=file \\\n' command += '--split-size=1 \\\n' # // # // MC workflows #// else: command += '/createProductionWorkflow.py \\\n' command += '--channel=' + sample['primary'] + ' \\\n' conditions = sample['conditions'] command += '--starting-run=' + initial_run + ' \\\n' command += '--starting-event=' + initial_event + ' \\\n' command += '--totalevents=' + sample['totalEvents'] + ' \\\n' command += '--eventsperjob=' + sample['eventsPerJob'] + ' \\\n' if sample['pileUp']: command += '--pileup-dataset=' + pileup_dataset + ' \\\n' if storeFail: command += '--store-fail=True \\\n' # // # // First step #// command += '--version=' + version + ' \\\n' command += '--py-cfg=' + sample['outputName'] + ' \\\n' # // # // Input configurations (Second step and further) #// if sample['steps'][0].lower().strip() != 'none': i = 0 for step in sample['steps']: # Is this a HARVESTING step? If so, skip it! if steps[step]['skipStep']: continue # Not a HARVESTING step, continue normally. command += '--version=' + version + ' \\\n' command += '--py-cfg=' + steps[step]['outputName'] + ' \\\n' if i != 0 or not sample['isRealData']: command += '--stageout-intermediates=%s \\\n' % ( steps[step]['stagePrevious']) command += '--chained-input=output \\\n' else: dqmScenario = steps[step]['DQMData']['Scenario'] # // # // If a two-hlt tables workflow, will take conditions from #// the second step information #\\ if not steps[step]['stagePrevious'] and \ i == 0: conditions = steps[step]['conditions'] i += 1 # // # // Common options #// command += '--group=RelVal \\\n' command += '--category=relval \\\n' command += '--activity=RelVal \\\n' command += '--acquisition_era=' + sample['AcqEra'] + ' \\\n' command += '--only-sites=' + onlySites + ' \\\n' command += '--processing_version=' + processing_version + ' \\\n' # Workflow label if workflow_label: command += '--workflow_tag=' + workflow_label + ' \\\n' # // # // processingString="CMSSWVersion"_"Conditions"_"specialTag"_"extra-label" #// CMSSWVersion is appended only when the input dataset does not have it. 
#\\ processing_string_parts = [] if sample['AcqEra'] != version: processing_string_parts.append(version) processing_string_parts.append(conditions) if sample['specialTag']: processing_string_parts.append(sample['specialTag']) if extra_label: processing_string_parts.append(extra_label) command += '--processing_string=' + "_".join(processing_string_parts) if debug: print command print '' start_command_time = time.time() exitCode, output, error = executeCommand(command) command_time = time.time() - start_command_time if debug: print output print '' output = [x for x in output.split('\n') if x] if exitCode == 0: #parse output tmp = [] index = FindIndex(output,'Output Datasets') for dataset in output[index+1:]: tmp.append(dataset.strip()) # DQM Data dqmInfo = {} dqmInfo['Runs'] = sample['DQMData']['Runs'] if sample['isRealData']: dqmInfo['Scenario'] = dqmScenario else: dqmInfo['Scenario'] = sample['DQMData']['Scenario'] datasets.append({'unmerged': tmp, 'totalEvents': sample['totalEvents'], 'merged': [x.replace('-unmerged','') for x in tmp], 'DQMData': dqmInfo }) unmergedDatasets.append(tmp) index = FindIndex(output,'Created') if index == -1: print "No workflow was created by create*workflow.py" sys.exit(1) workflow = output[index].split()[1].strip() workflows.setdefault(workflow, {})['isRealData'] = sample['isRealData'] workflows[workflow]['time'] = command_time print 'workflow creation command for workflow:', workflow, \ 'exited with ExitCode:', exitCode else : print 'workflow creation command:' print command print 'failed: %s' % error sys.exit(1) if debug: print 'Created workflows:' print workflows.keys() print '' print "Unmerged datasets:" print unmergedDatasets # extract merged datasets for sample in unmergedDatasets: tmp = [] for dataset in sample: tmp.append(dataset.replace('-unmerged','')) mergedDatasets.append(tmp) workflow_time = time.time() - start_workflow_time print '' print 'Write helper scripts' print '' # WorkflowInjector:Input script inputScript = open('input.sh','w') inputScript.write('#!/bin/bash\n') feeder = 'None' for workflow in workflows.keys(): if workflows[workflow]['isRealData']: if feeder.find('ReReco') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin BlockFeeder\n') feeder = 'ReReco' else : if feeder.find('Request') < 0: inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:SetPlugin RequestFeeder\n') feeder = 'Request' inputScript.write('python $PRODAGENT_ROOT/util/publish.py WorkflowInjector:Input ' + os.path.join(os.getcwd(), workflow) + '\n') inputScript.close() os.chmod('input.sh',0755) print 'Wrote WorkflowInjector:Input script to:',os.path.join(os.getcwd(),'input.sh') # ForceMerge forceMergeScript = open('forceMerge.sh','w') forceMergeScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : forceMergeScript.write('python $PRODAGENT_ROOT/util/publish.py ForceMerge ' + dataset + '\n') forceMergeScript.close() os.chmod('forceMerge.sh',0755) print 'Wrote ForceMerge script to:',os.path.join(os.getcwd(),'forceMerge.sh') # MigrateDatasetToGlobal migrateScript = open('migrateToGlobal.sh','w') migrateScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : migrateScript.write('python $PRODAGENT_ROOT/util/publish.py DBSInterface:MigrateDatasetToGlobal ' + dataset + '\n') migrateScript.close() os.chmod('migrateToGlobal.sh',0755) print 'Wrote DBSInterface:MigrateDatasetToGlobal script to:',os.path.join(os.getcwd(),'migrateToGlobal.sh') # 
PhEDExInjectDataset phedexScript = open('injectIntoPhEDEx.sh','w') phedexScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : phedexScript.write('python $PRODAGENT_ROOT/util/publish.py PhEDExInjectDataset ' + dataset + '\n') phedexScript.close() os.chmod('injectIntoPhEDEx.sh',0755) print 'Wrote PhEDExInjectDataset script to:',os.path.join(os.getcwd(),'injectIntoPhEDEx.sh') # DBS: query unmerged datasets queryUnmergedScript = open('queryUnmerged.sh','w') queryUnmergedScript.write('#!/bin/bash\n') for sample in unmergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryUnmergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryUnmergedScript.close() os.chmod('queryUnmerged.sh',0755) print 'Wrote DBS query script for unmerged datasets to:',os.path.join(os.getcwd(),'queryUnmerged.sh') # DBS: query merged datasets queryMergedScript = open('queryMerged.sh','w') queryMergedScript.write('#!/bin/bash\n') for sample in mergedDatasets : for dataset in sample : #if dataset.find('-RECO') == -1 or len(sample) == 1 : queryMergedScript.write('python $PRODAGENT_ROOT/util/InspectDBS2.py --DBSURL=' + DBSURL + ' --datasetPath=' + dataset + ' | grep total\n') queryMergedScript.close() os.chmod('queryMerged.sh',0755) print 'Wrote DBS query script for merged datasets to:',os.path.join(os.getcwd(),'queryMerged.sh') # DQMHarvesting DQMinputScript = open('DQMinput.sh','w') DQMinputScript.write("#!/bin/bash\n") reHarvest = re.compile(r'/.*/.*/(RECO|.*-RECO)') # Only RECO datasets for now. for sample in datasets: for dataset in sample['merged']: if reHarvest.match(dataset): for run in sample['DQMData']['Runs'].split(","): DQMinputScript.write('python $PRODAGENT_ROOT/util/harvestDQM.py --run=%s --path=%s --scenario=%s\n' % ( run, dataset, sample['DQMData']['Scenario'])) os.chmod('DQMinput.sh',0755) print 'Wrote DQMHarvesting script for merged datasets to:', os.path.join(os.getcwd(),'DQMinput.sh') # Output datasets list outputList = open('outputDatasets.txt','w') for sample in mergedDatasets : for dataset in sample : outputList.write(dataset + "\n") print 'Wrote output datasets list to:', os.path.join(os.getcwd(),'outputDatasets.txt') # File with expected number of events numberOfEvents = open('eventsExpected.txt','w') for sample in datasets: for dataset in sample['merged']: numberOfEvents.write("%s %s\n" % (sample['totalEvents'],dataset)) numberOfEvents.close() print 'Wrote events per dataset to:', os.path.join(os.getcwd(),'eventsExpected.txt') total_time = time.time() - start_total_time # File with timing report (Parsing, cmsDriver comands, workflow creation) timingInfo = open('timingInfo.txt', 'w') timingInfo.write('Total time: %s s\n' % total_time) timingInfo.write('Cofigs. creation time: %s s\n' % cmsDriver_time) timingInfo.write('Workflows creation time: %s s\n' % workflow_time) output_text = [] sum = 0 for workflow in workflows: if sum == 0: min = [workflow, workflows[workflow]['time']] max = [workflow, workflows[workflow]['time']] sum += workflows[workflow]['time'] output_text.append("%s: %s s" % (workflow, workflows[workflow]['time'])) if max[1] < workflows[workflow]['time']: max = [workflow, workflows[workflow]['time']] if min[1] > workflows[workflow]['time']: min = [workflow, workflows[workflow]['time']] timingInfo.write('Average time per workflow: %s s\n' % (int(sum) / int(len(workflows)))) timingInfo.write('Max. 
time on %s: %s s\n' % tuple(max)) timingInfo.write('Min. time on %s: %s s\n' % tuple(min)) timingInfo.write('=' * 10) timingInfo.write('Details of time per workflow:\n%s\n' % "\n".join(output_text)) |
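The change at the head of this row wraps each run number in str() before joining, because str.join accepts only string items. A minimal sketch of the failure and the fix, assuming the runs arrive as integers (the cast is harmless if they are already strings):

    runs_to_process = set([146644, 146807])                    # hypothetical run numbers
    # ",".join(list(runs_to_process))                          # TypeError: sequence item 0: expected string, int found
    dqm_runs = ",".join([str(x) for x in runs_to_process])     # e.g. '146644,146807' (order follows set iteration)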
msg += ex | msg += str(ex) | def publishStatusToDashboard(self, jobSpecId, data): """ _publishStatusToDashboard_ |
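This row is a Python 2 type fix: appending an exception instance to a string with += raises TypeError, while str(ex) appends the exception's message text. A short illustration, using a hypothetical IOError only for the sketch:

    try:
        open('/no/such/file')
    except IOError, ex:           # Python 2 except syntax, as used throughout this dataset
        msg = 'Error: Exception while invoking command:\n'
        # msg += ex               # TypeError: cannot concatenate 'str' and exception objects
        msg += str(ex)            # appends the exception text instead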
targetDirCheck = "rfstat %s 2> /dev/null | grep Protection" % targetDir print "Check dir existence : %s" % targetDirCheck try: targetDirCheckExitCode, targetDirCheckOutput = runCommandWithOutput(targetDirCheck) except Exception, ex: msg = "Error: Exception while invoking command:\n" msg += "%s\n" % targetDirCheck msg += "Exception: %s\n" % str(ex) msg += "Fatal error, abort stageout..." raise StageOutError(msg) if targetDirCheckExitCode: | if not self.checkDirExists(targetDir): | def createOutputDirectory(self, targetPFN): """ _createOutputDirectory_ |
fileclassDirCheck = "rfstat %s 2> /dev/null | grep Protection" % fileclassDir print "Check dir existence : %s" % fileclassDirCheck try: fileclassDirCheckExitCode, fileclassDirCheckOutput = runCommandWithOutput(fileclassDirCheck) except Exception, ex: msg = "Error: Exception while invoking command:\n" msg += "%s\n" % rfstatCmd msg += "Exception: %s\n" % str(ex) msg += "Fatal error, abort stageout..." raise StageOutError(msg) if fileclassDirCheckExitCode: | if not self.checkDirExists(fileclassDir): | def createOutputDirectory(self, targetPFN): """ _createOutputDirectory_ |
self.createDir(fileclassDir, self.permissions) | self.createDir(fileclassDir) | def createOutputDirectory(self, targetPFN): """ _createOutputDirectory_ |
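The three rows above replace inline rfstat existence checks with a checkDirExists helper and drop the permissions argument from createDir. The helper's body is not part of this excerpt; a plausible reconstruction, assuming it wraps the same rfstat command, runCommandWithOutput call, and StageOutError raise shown in the removed code:

    def checkDirExists(self, directory):
        # Hypothetical sketch -- the real implementation is not shown in this excerpt.
        checkCmd = "rfstat %s 2> /dev/null | grep Protection" % directory
        print "Check dir existence : %s" % checkCmd
        try:
            exitCode, output = runCommandWithOutput(checkCmd)
        except Exception, ex:
            msg = "Error: Exception while invoking command:\n"
            msg += "%s\n" % checkCmd
            msg += "Exception: %s\n" % str(ex)
            msg += "Fatal error, abort stageout..."
            raise StageOutError(msg)
        return exitCode == 0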