rem
stringlengths 0
322k
| add
stringlengths 0
2.05M
| context
stringlengths 8
228k
|
---|---|---|
dst_filename = change_prefix(filename, src_prefix, dst_prefix)
|
dst_filename = change_prefix(filename, dst_prefix)
|
def copy_required_modules(src_prefix, dst_prefix): for modname in REQUIRED_MODULES: if modname in sys.builtin_module_names: logger.notify("Ignoring built-in bootstrap module: %s" % mod) continue try: mod = __import__(modname) except ImportError: logger.notify("Cannot import bootstrap module: %s" % modname) else: filename = mod.__file__ if getattr(mod, '__package__', None) is not None: assert filename.endswith('__init__.py') or \ filename.endswith('__init__.pyc') filename = os.path.dirname(filename) dst_filename = change_prefix(filename, src_prefix, dst_prefix) copyfile(filename, dst_filename) if filename.endswith('.pyc'): pyfile = filename[:-1] if os.path.exists(pyfile): copyfile(pyfile, dst_filename[:-1])
|
copy_required_modules(prefix, home_dir)
|
copy_required_modules(home_dir)
|
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear): """Install just the base environment, no distutils patches etc""" if sys.executable.startswith(bin_dir): print 'Please use the *system* python to run this script' return if clear: rmtree(lib_dir) ## FIXME: why not delete it? ## Maybe it should delete everything with #!/path/to/venv/python in it logger.notify('Not deleting %s', bin_dir) if hasattr(sys, 'real_prefix'): logger.notify('Using real prefix %r' % sys.real_prefix) prefix = sys.real_prefix else: prefix = sys.prefix mkdir(lib_dir) fix_lib64(lib_dir) stdlib_dirs = [os.path.dirname(os.__file__)] if sys.platform == 'win32': stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs')) elif sys.platform == 'darwin': stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages')) if hasattr(os, 'symlink'): logger.info('Symlinking Python bootstrap modules') else: logger.info('Copying Python bootstrap modules') logger.indent += 2 try: # copy required files... for stdlib_dir in stdlib_dirs: if not os.path.isdir(stdlib_dir): continue for fn in os.listdir(stdlib_dir): if fn != 'site-packages' and os.path.splitext(fn)[0] in REQUIRED_FILES: copyfile(join(stdlib_dir, fn), join(lib_dir, fn)) # ...and modules copy_required_modules(prefix, home_dir) finally: logger.indent -= 2 mkdir(join(lib_dir, 'site-packages')) import site site_filename = site.__file__ if site_filename.endswith('.pyc'): site_filename = site_filename[:-1] site_filename_dst = change_prefix(site_filename, prefix, home_dir) site_dir = os.path.dirname(site_filename_dst) writefile(site_filename_dst, SITE_PY) writefile(join(site_dir, 'orig-prefix.txt'), prefix) site_packages_filename = join(site_dir, 'no-global-site-packages.txt') if not site_packages: writefile(site_packages_filename, '') else: if os.path.exists(site_packages_filename): logger.info('Deleting %s' % site_packages_filename) os.unlink(site_packages_filename) stdinc_dir = join(prefix, 'include', py_version) if 
os.path.exists(stdinc_dir): copyfile(stdinc_dir, inc_dir) else: logger.debug('No include dir %s' % stdinc_dir) if sys.exec_prefix != prefix: if sys.platform == 'win32': exec_dir = join(sys.exec_prefix, 'lib') elif is_jython: exec_dir = join(sys.exec_prefix, 'Lib') else: exec_dir = join(sys.exec_prefix, 'lib', py_version) for fn in os.listdir(exec_dir): copyfile(join(exec_dir, fn), join(lib_dir, fn)) if is_jython: # Jython has either jython-dev.jar and javalib/ dir, or just # jython.jar for name in 'jython-dev.jar', 'javalib', 'jython.jar': src = join(prefix, name) if os.path.exists(src): copyfile(src, join(home_dir, name)) # XXX: registry should always exist after Jython 2.5rc1 src = join(prefix, 'registry') if os.path.exists(src): copyfile(src, join(home_dir, 'registry'), symlink=False) copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'), symlink=False) mkdir(bin_dir) py_executable = join(bin_dir, os.path.basename(sys.executable)) if 'Python.framework' in prefix: if re.search(r'/Python(?:-32|-64)*$', py_executable): # The name of the python executable is not quite what # we want, rename it. py_executable = os.path.join( os.path.dirname(py_executable), 'python') logger.notify('New %s executable in %s', expected_exe, py_executable) if sys.executable != py_executable: ## FIXME: could I just hard link? 
executable = sys.executable if sys.platform == 'cygwin' and os.path.exists(executable + '.exe'): # Cygwin misreports sys.executable sometimes executable += '.exe' py_executable += '.exe' logger.info('Executable actually exists in %s' % executable) shutil.copyfile(executable, py_executable) make_exe(py_executable) if sys.platform == 'win32' or sys.platform == 'cygwin': pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe') if os.path.exists(pythonw): logger.info('Also created pythonw.exe') shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe')) if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe: secondary_exe = os.path.join(os.path.dirname(py_executable), expected_exe) py_executable_ext = os.path.splitext(py_executable)[1] if py_executable_ext == '.exe': # python2.4 gives an extension of '.4' :P secondary_exe += py_executable_ext if os.path.exists(secondary_exe): logger.warn('Not overwriting existing %s script %s (you must use %s)' % (expected_exe, secondary_exe, py_executable)) else: logger.notify('Also creating executable in %s' % secondary_exe) shutil.copyfile(sys.executable, secondary_exe) make_exe(secondary_exe) if 'Python.framework' in prefix: logger.debug('MacOSX Python framework detected') # Make sure we use the the embedded interpreter inside # the framework, even if sys.executable points to # the stub executable in ${sys.prefix}/bin # See http://groups.google.com/group/python-virtualenv/ # browse_thread/thread/17cab2f85da75951 shutil.copy( os.path.join( prefix, 'Resources/Python.app/Contents/MacOS/%s' % os.path.basename(sys.executable)), py_executable) # Copy the framework's dylib into the virtual # environment virtual_lib = os.path.join(home_dir, '.Python') if os.path.exists(virtual_lib): os.unlink(virtual_lib) copyfile( os.path.join(prefix, 'Python'), virtual_lib) # And then change the install_name of the copied python executable try: call_subprocess( ["install_name_tool", "-change", 
os.path.join(prefix, 'Python'), '@executable_path/../.Python', py_executable]) except: logger.fatal( "Could not call install_name_tool -- you must have Apple's development tools installed") raise # Some tools depend on pythonX.Y being present py_executable_version = '%s.%s' % ( sys.version_info[0], sys.version_info[1]) if not py_executable.endswith(py_executable_version): # symlinking pythonX.Y > python pth = py_executable + '%s.%s' % ( sys.version_info[0], sys.version_info[1]) if os.path.exists(pth): os.unlink(pth) os.symlink('python', pth) else: # reverse symlinking python -> pythonX.Y (with --python) pth = join(bin_dir, 'python') if os.path.exists(pth): os.unlink(pth) os.symlink(os.path.basename(py_executable), pth) if sys.platform == 'win32' and ' ' in py_executable: # There's a bug with subprocess on Windows when using a first # argument that has a space in it. Instead we have to quote # the value: py_executable = '"%s"' % py_executable cmd = [py_executable, '-c', 'import sys; print sys.prefix'] logger.info('Testing executable with %s %s "%s"' % tuple(cmd)) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) proc_stdout, proc_stderr = proc.communicate() proc_stdout = os.path.normcase(os.path.abspath(proc_stdout.strip())) if proc_stdout != os.path.normcase(os.path.abspath(home_dir)): logger.fatal( 'ERROR: The executable %s is not functioning' % py_executable) logger.fatal( 'ERROR: It thinks sys.prefix is %r (should be %r)' % (proc_stdout, os.path.normcase(os.path.abspath(home_dir)))) logger.fatal( 'ERROR: virtualenv is not compatible with this system or executable') if sys.platform == 'win32': logger.fatal( 'Note: some Windows users have reported this error when they installed Python for "Only this user". The problem may be resolvable if you install Python "For all users". 
(See https://bugs.launchpad.net/virtualenv/+bug/352844)') sys.exit(100) else: logger.info('Got sys.prefix result: %r' % proc_stdout) pydistutils = os.path.expanduser('~/.pydistutils.cfg') if os.path.exists(pydistutils): logger.notify('Please make sure you remove any previous custom paths from ' 'your %s file.' % pydistutils) ## FIXME: really this should be calculated earlier return py_executable
|
site_filename_dst = change_prefix(site_filename, prefix, home_dir)
|
site_filename_dst = change_prefix(site_filename, home_dir)
|
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear): """Install just the base environment, no distutils patches etc""" if sys.executable.startswith(bin_dir): print 'Please use the *system* python to run this script' return if clear: rmtree(lib_dir) ## FIXME: why not delete it? ## Maybe it should delete everything with #!/path/to/venv/python in it logger.notify('Not deleting %s', bin_dir) if hasattr(sys, 'real_prefix'): logger.notify('Using real prefix %r' % sys.real_prefix) prefix = sys.real_prefix else: prefix = sys.prefix mkdir(lib_dir) fix_lib64(lib_dir) stdlib_dirs = [os.path.dirname(os.__file__)] if sys.platform == 'win32': stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs')) elif sys.platform == 'darwin': stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages')) if hasattr(os, 'symlink'): logger.info('Symlinking Python bootstrap modules') else: logger.info('Copying Python bootstrap modules') logger.indent += 2 try: # copy required files... for stdlib_dir in stdlib_dirs: if not os.path.isdir(stdlib_dir): continue for fn in os.listdir(stdlib_dir): if fn != 'site-packages' and os.path.splitext(fn)[0] in REQUIRED_FILES: copyfile(join(stdlib_dir, fn), join(lib_dir, fn)) # ...and modules copy_required_modules(prefix, home_dir) finally: logger.indent -= 2 mkdir(join(lib_dir, 'site-packages')) import site site_filename = site.__file__ if site_filename.endswith('.pyc'): site_filename = site_filename[:-1] site_filename_dst = change_prefix(site_filename, prefix, home_dir) site_dir = os.path.dirname(site_filename_dst) writefile(site_filename_dst, SITE_PY) writefile(join(site_dir, 'orig-prefix.txt'), prefix) site_packages_filename = join(site_dir, 'no-global-site-packages.txt') if not site_packages: writefile(site_packages_filename, '') else: if os.path.exists(site_packages_filename): logger.info('Deleting %s' % site_packages_filename) os.unlink(site_packages_filename) stdinc_dir = join(prefix, 'include', py_version) if 
os.path.exists(stdinc_dir): copyfile(stdinc_dir, inc_dir) else: logger.debug('No include dir %s' % stdinc_dir) if sys.exec_prefix != prefix: if sys.platform == 'win32': exec_dir = join(sys.exec_prefix, 'lib') elif is_jython: exec_dir = join(sys.exec_prefix, 'Lib') else: exec_dir = join(sys.exec_prefix, 'lib', py_version) for fn in os.listdir(exec_dir): copyfile(join(exec_dir, fn), join(lib_dir, fn)) if is_jython: # Jython has either jython-dev.jar and javalib/ dir, or just # jython.jar for name in 'jython-dev.jar', 'javalib', 'jython.jar': src = join(prefix, name) if os.path.exists(src): copyfile(src, join(home_dir, name)) # XXX: registry should always exist after Jython 2.5rc1 src = join(prefix, 'registry') if os.path.exists(src): copyfile(src, join(home_dir, 'registry'), symlink=False) copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'), symlink=False) mkdir(bin_dir) py_executable = join(bin_dir, os.path.basename(sys.executable)) if 'Python.framework' in prefix: if re.search(r'/Python(?:-32|-64)*$', py_executable): # The name of the python executable is not quite what # we want, rename it. py_executable = os.path.join( os.path.dirname(py_executable), 'python') logger.notify('New %s executable in %s', expected_exe, py_executable) if sys.executable != py_executable: ## FIXME: could I just hard link? 
executable = sys.executable if sys.platform == 'cygwin' and os.path.exists(executable + '.exe'): # Cygwin misreports sys.executable sometimes executable += '.exe' py_executable += '.exe' logger.info('Executable actually exists in %s' % executable) shutil.copyfile(executable, py_executable) make_exe(py_executable) if sys.platform == 'win32' or sys.platform == 'cygwin': pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe') if os.path.exists(pythonw): logger.info('Also created pythonw.exe') shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe')) if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe: secondary_exe = os.path.join(os.path.dirname(py_executable), expected_exe) py_executable_ext = os.path.splitext(py_executable)[1] if py_executable_ext == '.exe': # python2.4 gives an extension of '.4' :P secondary_exe += py_executable_ext if os.path.exists(secondary_exe): logger.warn('Not overwriting existing %s script %s (you must use %s)' % (expected_exe, secondary_exe, py_executable)) else: logger.notify('Also creating executable in %s' % secondary_exe) shutil.copyfile(sys.executable, secondary_exe) make_exe(secondary_exe) if 'Python.framework' in prefix: logger.debug('MacOSX Python framework detected') # Make sure we use the the embedded interpreter inside # the framework, even if sys.executable points to # the stub executable in ${sys.prefix}/bin # See http://groups.google.com/group/python-virtualenv/ # browse_thread/thread/17cab2f85da75951 shutil.copy( os.path.join( prefix, 'Resources/Python.app/Contents/MacOS/%s' % os.path.basename(sys.executable)), py_executable) # Copy the framework's dylib into the virtual # environment virtual_lib = os.path.join(home_dir, '.Python') if os.path.exists(virtual_lib): os.unlink(virtual_lib) copyfile( os.path.join(prefix, 'Python'), virtual_lib) # And then change the install_name of the copied python executable try: call_subprocess( ["install_name_tool", "-change", 
os.path.join(prefix, 'Python'), '@executable_path/../.Python', py_executable]) except: logger.fatal( "Could not call install_name_tool -- you must have Apple's development tools installed") raise # Some tools depend on pythonX.Y being present py_executable_version = '%s.%s' % ( sys.version_info[0], sys.version_info[1]) if not py_executable.endswith(py_executable_version): # symlinking pythonX.Y > python pth = py_executable + '%s.%s' % ( sys.version_info[0], sys.version_info[1]) if os.path.exists(pth): os.unlink(pth) os.symlink('python', pth) else: # reverse symlinking python -> pythonX.Y (with --python) pth = join(bin_dir, 'python') if os.path.exists(pth): os.unlink(pth) os.symlink(os.path.basename(py_executable), pth) if sys.platform == 'win32' and ' ' in py_executable: # There's a bug with subprocess on Windows when using a first # argument that has a space in it. Instead we have to quote # the value: py_executable = '"%s"' % py_executable cmd = [py_executable, '-c', 'import sys; print sys.prefix'] logger.info('Testing executable with %s %s "%s"' % tuple(cmd)) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) proc_stdout, proc_stderr = proc.communicate() proc_stdout = os.path.normcase(os.path.abspath(proc_stdout.strip())) if proc_stdout != os.path.normcase(os.path.abspath(home_dir)): logger.fatal( 'ERROR: The executable %s is not functioning' % py_executable) logger.fatal( 'ERROR: It thinks sys.prefix is %r (should be %r)' % (proc_stdout, os.path.normcase(os.path.abspath(home_dir)))) logger.fatal( 'ERROR: virtualenv is not compatible with this system or executable') if sys.platform == 'win32': logger.fatal( 'Note: some Windows users have reported this error when they installed Python for "Only this user". The problem may be resolvable if you install Python "For all users". 
(See https://bugs.launchpad.net/virtualenv/+bug/352844)') sys.exit(100) else: logger.info('Got sys.prefix result: %r' % proc_stdout) pydistutils = os.path.expanduser('~/.pydistutils.cfg') if os.path.exists(pydistutils): logger.notify('Please make sure you remove any previous custom paths from ' 'your %s file.' % pydistutils) ## FIXME: really this should be calculated earlier return py_executable
|
if sys.platform == 'win32': name = '%PYTHONHOME%' else: name = '$PYTHONHOME' logger.warn('%s is set; this can cause problems creating environments' % name)
|
del os.environ['PYTHONHOME']
|
def main(): parser = optparse.OptionParser( version=virtualenv_version, usage="%prog [OPTIONS] DEST_DIR") parser.add_option( '-v', '--verbose', action='count', dest='verbose', default=0, help="Increase verbosity") parser.add_option( '-q', '--quiet', action='count', dest='quiet', default=0, help='Decrease verbosity') parser.add_option( '-p', '--python', dest='python', metavar='PYTHON_EXE', help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 ' 'interpreter to create the new environment. The default is the interpreter that ' 'virtualenv was installed with (%s)' % sys.executable) parser.add_option( '--clear', dest='clear', action='store_true', help="Clear out the non-root install and start from scratch") parser.add_option( '--no-site-packages', dest='no_site_packages', action='store_true', help="Don't give access to the global site-packages dir to the " "virtual environment") parser.add_option( '--unzip-setuptools', dest='unzip_setuptools', action='store_true', help="Unzip Setuptools or Distribute when installing it") parser.add_option( '--relocatable', dest='relocatable', action='store_true', help='Make an EXISTING virtualenv environment relocatable. ' 'This fixes up scripts and makes all .pth files relative') parser.add_option( '--distribute', dest='use_distribute', action='store_true', help='Use Distribute instead of Setuptools. 
Set environ variable' 'VIRTUALENV_USE_DISTRIBUTE to make it the default ') if 'extend_parser' in globals(): extend_parser(parser) options, args = parser.parse_args() global logger if 'adjust_options' in globals(): adjust_options(options, args) verbosity = options.verbose - options.quiet logger = Logger([(Logger.level_for_integer(2-verbosity), sys.stdout)]) if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'): env = os.environ.copy() interpreter = resolve_interpreter(options.python) if interpreter == sys.executable: logger.warn('Already using interpreter %s' % interpreter) else: logger.notify('Running virtualenv with interpreter %s' % interpreter) env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true' file = __file__ if file.endswith('.pyc'): file = file[:-1] os.execvpe(interpreter, [interpreter, file] + sys.argv[1:], env) if not args: print 'You must provide a DEST_DIR' parser.print_help() sys.exit(2) if len(args) > 1: print 'There must be only one argument: DEST_DIR (you gave %s)' % ( ' '.join(args)) parser.print_help() sys.exit(2) home_dir = args[0] if os.environ.get('WORKING_ENV'): logger.fatal('ERROR: you cannot run virtualenv while in a workingenv') logger.fatal('Please deactivate your workingenv, then re-run this script') sys.exit(3) if os.environ.get('PYTHONHOME'): if sys.platform == 'win32': name = '%PYTHONHOME%' else: name = '$PYTHONHOME' logger.warn('%s is set; this can cause problems creating environments' % name) if options.relocatable: make_environment_relocatable(home_dir) return create_environment(home_dir, site_packages=not options.no_site_packages, clear=options.clear, unzip_setuptools=options.unzip_setuptools, use_distribute=options.use_distribute) if 'after_install' in globals(): after_install(options, home_dir)
|
shutil.copy( os.path.join( prefix, 'Resources/Python.app/Contents/MacOS/%s' % os.path.basename(sys.executable)), py_executable)
|
original_python = os.path.join( prefix, 'Resources/Python.app/Contents/MacOS/Python') shutil.copy(original_python, py_executable)
|
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear): """Install just the base environment, no distutils patches etc""" if sys.executable.startswith(bin_dir): print 'Please use the *system* python to run this script' return if clear: rmtree(lib_dir) ## FIXME: why not delete it? ## Maybe it should delete everything with #!/path/to/venv/python in it logger.notify('Not deleting %s', bin_dir) if hasattr(sys, 'real_prefix'): logger.notify('Using real prefix %r' % sys.real_prefix) prefix = sys.real_prefix else: prefix = sys.prefix mkdir(lib_dir) fix_lib64(lib_dir) stdlib_dirs = [os.path.dirname(os.__file__)] if sys.platform == 'win32': stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs')) elif sys.platform == 'darwin': stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages')) for stdlib_dir in stdlib_dirs: if not os.path.isdir(stdlib_dir): continue if hasattr(os, 'symlink'): logger.info('Symlinking Python bootstrap modules') else: logger.info('Copying Python bootstrap modules') logger.indent += 2 try: for fn in os.listdir(stdlib_dir): if fn != 'site-packages' and os.path.splitext(fn)[0] in REQUIRED_MODULES: copyfile(join(stdlib_dir, fn), join(lib_dir, fn)) finally: logger.indent -= 2 mkdir(join(lib_dir, 'site-packages')) writefile(join(lib_dir, 'site.py'), SITE_PY) writefile(join(lib_dir, 'orig-prefix.txt'), prefix) site_packages_filename = join(lib_dir, 'no-global-site-packages.txt') if not site_packages: writefile(site_packages_filename, '') else: if os.path.exists(site_packages_filename): logger.info('Deleting %s' % site_packages_filename) os.unlink(site_packages_filename) stdinc_dir = join(prefix, 'include', py_version) if os.path.exists(stdinc_dir): copyfile(stdinc_dir, inc_dir) else: logger.debug('No include dir %s' % stdinc_dir) if sys.exec_prefix != prefix: if sys.platform == 'win32': exec_dir = join(sys.exec_prefix, 'lib') elif is_jython: exec_dir = join(sys.exec_prefix, 'Lib') else: exec_dir = join(sys.exec_prefix, 'lib', 
py_version) for fn in os.listdir(exec_dir): copyfile(join(exec_dir, fn), join(lib_dir, fn)) if is_jython: # Jython has either jython-dev.jar and javalib/ dir, or just # jython.jar for name in 'jython-dev.jar', 'javalib', 'jython.jar': src = join(prefix, name) if os.path.exists(src): copyfile(src, join(home_dir, name)) # XXX: registry should always exist after Jython 2.5rc1 src = join(prefix, 'registry') if os.path.exists(src): copyfile(src, join(home_dir, 'registry'), symlink=False) copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'), symlink=False) mkdir(bin_dir) py_executable = join(bin_dir, os.path.basename(sys.executable)) if 'Python.framework' in prefix: if re.search(r'/Python(?:-32|-64)*$', py_executable): # The name of the python executable is not quite what # we want, rename it. py_executable = os.path.join( os.path.dirname(py_executable), 'python') logger.notify('New %s executable in %s', expected_exe, py_executable) if sys.executable != py_executable: ## FIXME: could I just hard link? 
executable = sys.executable if sys.platform == 'cygwin' and os.path.exists(executable + '.exe'): # Cygwin misreports sys.executable sometimes executable += '.exe' py_executable += '.exe' logger.info('Executable actually exists in %s' % executable) shutil.copyfile(executable, py_executable) make_exe(py_executable) if sys.platform == 'win32' or sys.platform == 'cygwin': pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe') if os.path.exists(pythonw): logger.info('Also created pythonw.exe') shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe')) if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe: secondary_exe = os.path.join(os.path.dirname(py_executable), expected_exe) py_executable_ext = os.path.splitext(py_executable)[1] if py_executable_ext == '.exe': # python2.4 gives an extension of '.4' :P secondary_exe += py_executable_ext if os.path.exists(secondary_exe): logger.warn('Not overwriting existing %s script %s (you must use %s)' % (expected_exe, secondary_exe, py_executable)) else: logger.notify('Also creating executable in %s' % secondary_exe) shutil.copyfile(sys.executable, secondary_exe) make_exe(secondary_exe) if 'Python.framework' in prefix: logger.debug('MacOSX Python framework detected') # Make sure we use the the embedded interpreter inside # the framework, even if sys.executable points to # the stub executable in ${sys.prefix}/bin # See http://groups.google.com/group/python-virtualenv/ # browse_thread/thread/17cab2f85da75951 shutil.copy( os.path.join( prefix, 'Resources/Python.app/Contents/MacOS/%s' % os.path.basename(sys.executable)), py_executable) # Copy the framework's dylib into the virtual # environment virtual_lib = os.path.join(home_dir, '.Python') if os.path.exists(virtual_lib): os.unlink(virtual_lib) copyfile( os.path.join(prefix, 'Python'), virtual_lib) # And then change the install_name of the copied python executable try: call_subprocess( ["install_name_tool", "-change", 
os.path.join(prefix, 'Python'), '@executable_path/../.Python', py_executable]) except: logger.fatal( "Could not call install_name_tool -- you must have Apple's development tools installed") raise # Some tools depend on pythonX.Y being present py_executable_version = '%s.%s' % ( sys.version_info[0], sys.version_info[1]) if not py_executable.endswith(py_executable_version): # symlinking pythonX.Y > python pth = py_executable + '%s.%s' % ( sys.version_info[0], sys.version_info[1]) if os.path.exists(pth): os.unlink(pth) os.symlink('python', pth) else: # reverse symlinking python -> pythonX.Y (with --python) pth = join(bin_dir, 'python') if os.path.exists(pth): os.unlink(pth) os.symlink(os.path.basename(py_executable), pth) if sys.platform == 'win32' and ' ' in py_executable: # There's a bug with subprocess on Windows when using a first # argument that has a space in it. Instead we have to quote # the value: py_executable = '"%s"' % py_executable cmd = [py_executable, '-c', 'import sys; print sys.prefix'] logger.info('Testing executable with %s %s "%s"' % tuple(cmd)) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) proc_stdout, proc_stderr = proc.communicate() proc_stdout = os.path.normcase(os.path.abspath(proc_stdout.strip())) if proc_stdout != os.path.normcase(os.path.abspath(home_dir)): logger.fatal( 'ERROR: The executable %s is not functioning' % py_executable) logger.fatal( 'ERROR: It thinks sys.prefix is %r (should be %r)' % (proc_stdout, os.path.normcase(os.path.abspath(home_dir)))) logger.fatal( 'ERROR: virtualenv is not compatible with this system or executable') if sys.platform == 'win32': logger.fatal( 'Note: some Windows users have reported this error when they installed Python for "Only this user". The problem may be resolvable if you install Python "For all users". 
(See https://bugs.launchpad.net/virtualenv/+bug/352844)') sys.exit(100) else: logger.info('Got sys.prefix result: %r' % proc_stdout) pydistutils = os.path.expanduser('~/.pydistutils.cfg') if os.path.exists(pydistutils): logger.notify('Please make sure you remove any previous custom paths from ' 'your %s file.' % pydistutils) ## FIXME: really this should be calculated earlier return py_executable
|
return encoder.encode({'success': 'true', 'message': ''})
|
return encoder.encode({'success': 'true', 'message': '', 'file': name})
|
def __call__(self): request = self.request container = self.context.context image = request.form.get('image', '')
|
{'success': 'true', 'message': '', 'file': image.filename})
|
{'success': 'true', 'message': '', 'file': name})
|
def __call__(self): request = self.request container = self.context.context image = request.form.get('image', '')
|
return encoder.encode({'success': 'true', 'message': ''})
|
return encoder.encode({'success': 'true', 'message': '', 'file': name})
|
def __call__(self): request = self.request container = self.context.context media = request.form.get('image', '') description = request.form.get('description', '') if not media: return encoder.encode({'success': 'false', 'message': ''})
|
{'success': 'true', 'message': '', 'file': media.filename})
|
{'success': 'true', 'message': '', 'file': name})
|
def __call__(self): request = self.request container = self.context.context media = request.form.get('image', '') description = request.form.get('description', '') if not media: return encoder.encode({'success': 'false', 'message': ''})
|
daemon_pid_file=pid_file
|
daemon_pid_file=pid_file,
|
def main(): parser = OptionParser() parser.add_option("-c", "--config", help="Full path to config file to use") parser.add_option("-f", "--foreground", default=False, action="store_true", help="run in foreground (do not spawn a daemon)") parser.add_option("-p", "--pid-file", help="specify a pid file") (opts, args) = parser.parse_args() conf = kobo.conf.PyConfigParser() config = opts.config if config is None: config = "/etc/beaker/proxy.conf" conf.load_from_file(config) pid_file = opts.pid_file if pid_file is None: pid_file = conf.get("WPID_FILE", "/var/run/beaker-lab-controller/beaker-watchdog.pid") if opts.foreground: main_loop(conf=conf, foreground=True) else: daemonize(main_loop, daemon_pid_file=pid_file conf=conf, foreground=False)
|
<autopick random="False"/>
|
<autopick random="false"/>
|
def test_recipe_elements_in_different_order(self): self.assert_valid(''' <job> <recipeSet retention_tag="scratch"> <recipe kernel_options="" kernel_options_post="" ks_meta="" role="None" whiteboard=""> <partitions/> <task name="/distribution/install" role="STANDALONE"/> <repos/> <ks_appends/> <hostRequires> <system_type value="Machine"/> </hostRequires> <task name="/distribution/reservesys" role="STANDALONE"/> <packages/> <autopick random="False"/> <distroRequires> <distro_name op="=" value="BlueShoeLinux5-5"/> </distroRequires> <watchdog panic="None"/> </recipe> </recipeSet> </job> ''')
|
<autopick random="True"/> <autopick random="False"/>
|
<autopick random="true"/> <autopick random="false"/>
|
def test_duplicate_elements(self): self.assert_not_valid(''' <job> <recipeSet retention_tag="scratch"> <recipe kernel_options="" kernel_options_post="" ks_meta="" role="None" whiteboard=""> <autopick random="True"/> <autopick random="False"/> <watchdog panic="None"/> <watchdog panic="always"/> <packages/> <packages/> <ks_appends/> <ks_appends/> <repos/> <repos/> <distroRequires/> <hostRequires/> <task name="/distribution/install" role="STANDALONE"/> </recipe> </recipeSet> </job> ''', ['Extra element watchdog in interleave', 'Invalid sequence in interleave', 'Element recipe failed to validate content', 'Element recipeSet failed to validate content'])
|
['Extra element watchdog in interleave',
|
['Extra element autopick in interleave',
|
def test_duplicate_elements(self): self.assert_not_valid(''' <job> <recipeSet retention_tag="scratch"> <recipe kernel_options="" kernel_options_post="" ks_meta="" role="None" whiteboard=""> <autopick random="True"/> <autopick random="False"/> <watchdog panic="None"/> <watchdog panic="always"/> <packages/> <packages/> <ks_appends/> <ks_appends/> <repos/> <repos/> <distroRequires/> <hostRequires/> <task name="/distribution/install" role="STANDALONE"/> </recipe> </recipeSet> </job> ''', ['Extra element watchdog in interleave', 'Invalid sequence in interleave', 'Element recipe failed to validate content', 'Element recipeSet failed to validate content'])
|
watchdog.logger.debug(80 * '-')
|
def main_loop(conf=None, foreground=False): """infinite daemon loop""" # define custom signal handlers signal.signal(signal.SIGTERM, daemon_shutdown) config = kobo.conf.PyConfigParser() # load default config default_config = os.path.abspath(os.path.join(os.path.dirname(__file__), "default.conf")) config.load_from_file(default_config) logger = logging.getLogger("Watchdog") logger.setLevel(logging.DEBUG) log_level = logging._levelNames.get(config["LOG_LEVEL"].upper()) log_file = config["WATCHDOG_LOG_FILE"] add_rotating_file_logger(logger, log_file, log_level=log_level, format=VERBOSE_LOG_FORMAT) try: watchdog = Watchdog(conf=conf, logger=logger) except Exception, ex: sys.stderr.write("Error initializing Watchdog: %s\n" % ex) sys.exit(1) if foreground: add_stderr_logger(watchdog.logger) while True: try: # Poll the scheduler for watchdogs watchdog.hub._login() watchdog.expire_watchdogs() if not watchdog.active_watchdogs(): watchdog.sleep() watchdog.logger.debug(80 * '-') # FIXME: Check for recipes that match systems under # this lab controller, if so take recipe and provision # system. # write to stdout / stderr sys.stdout.flush() sys.stderr.flush() except (ShutdownException, KeyboardInterrupt): # ignore keyboard interrupts and sigterm signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) watchdog.logger.info('Exiting...') break except: # this is a little extreme: log the exception and continue traceback = Traceback() watchdog.logger.error(traceback.get_traceback()) watchdog.sleep()
|
|
def test_can_sort_by_id(self): self.check_column_sort(1)
|
def test_can_sort_by_id(self): self.check_column_sort(1)
|
|
if TaskExclude(task=task,osmajor=osmajor.by_name(family.lstrip('-'))) not in task.excluded: task.excluded.append(TaskExclude(osmajor=osmajor.by_name(family.lstrip('-'))))
|
if family.lstrip('-') not in task.excluded_osmajor: task.excluded_osmajor.append(TaskExcludeOSMajor(osmajor=OSMajor.by_name_alias(family.lstrip('-'))))
|
def process_taskinfo(self, raw_taskinfo): tinfo = testinfo.parse_string(raw_taskinfo['desc'])
|
task.excluded.append(TaskExclude(arch=Arch.by_name(arch)))
|
if arch not in task.excluded_arch: task.excluded_arch.append(TaskExcludeArch(arch=Arch.by_name(arch)))
|
def process_taskinfo(self, raw_taskinfo): tinfo = testinfo.parse_string(raw_taskinfo['desc'])
|
search = su.System.search.search_on(table_field
|
search = su.System.search.search_on(table_field)
|
def get_search_options(self,table_field,**kw): return_dict = {} search = su.System.search.search_on(table_field #Determine what field type we are dealing with. If it is Boolean, convert our values to 0 for False # and 1 for True col_type = su.System.search.field_type(table_field) if col_type.lower() == 'boolean': search['values'] = { 0:'False', 1:'True'} #Determine if we have search values. If we do, then we should only have the operators # 'is' and 'is not'. if search['values']: search['operators'] = filter(lambda x: x == 'is' or x == 'is not', search['operators'])
|
def arch_stat_getter(cls,x): s = 'Pass:%s Fail:%s Warn:%s' % (x.Pass,x.Fail,x.Warn) return s def _job_grid_fields(self,**kw): fields = [DataGrid.Column(name='task', getter=lambda x: x.task_name, title='Task'), DataGrid.Column(name='i386', getter=lambda x: JobMatrix.arch_stat_getter(x), title='i386') ]
|
def arch_stat_getter(cls,this_arch): def f(x): try: if x.arch == this_arch: return 'Pass:%s Fail:%s Warn:%s' % (x.Pass,x.Fail,x.Warn) except Exception,(e): return 'opps exception %s' % e return f @classmethod def _job_grid_fields(self,arches_used,**kw): fields = [] fields.append(DataGrid.Column(name='task', getter=lambda x: x.task_name, title='Task')) for arch in arches_used: fields.append(DataGrid.Column(name=arch, getter=JobMatrix.arch_stat_getter(arch), title=arch))
|
def arch_stat_getter(cls,x): s = 'Pass:%s Fail:%s Warn:%s' % (x.Pass,x.Fail,x.Warn) return s
|
grid = DataGrid(fields = self._job_grid_fields())
|
arches_used = {} for grid in grid_data: arches_used[grid.arch] = 1 grid = DataGrid(fields = self._job_grid_fields(arches_used.keys()))
|
def generate(self,**kw): grid_data = self.generate_data(**kw) grid = DataGrid(fields = self._job_grid_fields()) return {'grid' : grid, 'data' : grid_data }
|
recipe_table_alias.c.whiteboard.label('recipe_whiteboard'), arch_alias.c.arch.label('arch_arch'),
|
recipe_table_alias.c.whiteboard, arch_alias.c.arch,
|
def generate_data(self,**kw): jobs = [] whiteboard_data = {} if 'job_ids' in kw: jobs = kw['job_ids'].split() elif 'whiteboard' in kw: job_query = model.Job.query().filter(model.Job.whiteboard == kw['whiteboard']) for job in job_query: jobs.append(job.id) else: pass #raise AssertionError('Incorrect or no filter passed to job matrix report generator') recipes = model.MachineRecipe.query().join(['distro','arch']).join(['recipeset','job']).add_column(model.Arch.arch) #recipes = model.MachineRecipe.query().join(['distro','arch']).join(['recipeset','job']).filter(model.RecipeSet.job_id.in_(jobs)).add_column(model.Arch.arch) #log.debug(recipes) for recipe,arch in recipes: whiteboard_data[arch] = recipe.whiteboard
|
and_(model.recipe_set_table.c.job_id.in_(jobs), model.task_result_table.c.id == recipe_table_alias.c.result_id, recipe_table_alias.c.id == model.recipe_task_table.c.recipe_id, recipe_table_alias.c.distro_id == model.distro_table.c.id), from_obj=[model.task_result_table, model.distro_table.join(arch_alias), model.recipe_set_table.join(recipe_table_alias), model.task_table.join(model.recipe_task_table)]).alias('foo')
|
and_( model.recipe_set_table.c.job_id.in_(jobs), arch_alias.c.arch == bindparam('arch'), recipe_table_alias.c.whiteboard == bindparam('recipe_whiteboard')), from_obj = [model.recipe_set_table.join(recipe_table_alias). join(model.task_result_table,model.task_result_table.c.id == recipe_table_alias.c.result_id). join(model.distro_table, model.distro_table.c.id == recipe_table_alias.c.distro_id). join(arch_alias, arch_alias.c.id == model.distro_table.c.arch_id). join(model.recipe_task_table, model.recipe_task_table.c.recipe_id == recipe_table_alias.c.id). join(model.task_table, model.task_table.c.id == model.recipe_task_table.c.task_id)]).alias('foo')
|
def generate_data(self,**kw): jobs = [] whiteboard_data = {} if 'job_ids' in kw: jobs = kw['job_ids'].split() elif 'whiteboard' in kw: job_query = model.Job.query().filter(model.Job.whiteboard == kw['whiteboard']) for job in job_query: jobs.append(job.id) else: pass #raise AssertionError('Incorrect or no filter passed to job matrix report generator') recipes = model.MachineRecipe.query().join(['distro','arch']).join(['recipeset','job']).add_column(model.Arch.arch) #recipes = model.MachineRecipe.query().join(['distro','arch']).join(['recipeset','job']).filter(model.RecipeSet.job_id.in_(jobs)).add_column(model.Arch.arch) #log.debug(recipes) for recipe,arch in recipes: whiteboard_data[arch] = recipe.whiteboard
|
class OuterDynamo(object): pass result_data = [] my_hash = {}
|
def generate_data(self,**kw): jobs = [] whiteboard_data = {} if 'job_ids' in kw: jobs = kw['job_ids'].split() elif 'whiteboard' in kw: job_query = model.Job.query().filter(model.Job.whiteboard == kw['whiteboard']) for job in job_query: jobs.append(job.id) else: pass #raise AssertionError('Incorrect or no filter passed to job matrix report generator') recipes = model.MachineRecipe.query().join(['distro','arch']).join(['recipeset','job']).add_column(model.Arch.arch) #recipes = model.MachineRecipe.query().join(['distro','arch']).join(['recipeset','job']).filter(model.RecipeSet.job_id.in_(jobs)).add_column(model.Arch.arch) #log.debug(recipes) for recipe,arch in recipes: whiteboard_data[arch] = recipe.whiteboard
|
|
s1 = select([func.count(s2.c.result), func.sum(s2.c.rc0).label('New'), func.sum(s2.c.rc1).label('Pass'), func.sum(s2.c.rc2).label('Warn'), func.sum(s2.c.rc3).label('Fail'), func.sum(s2.c.rc4).label('Panic'), model.task_table.c.name.label('task_name'), s2.c.arch_arch, s2.c.recipe_whiteboard, s2.c.task_id.label('task_id_pk')], s2.c.task_id == model.task_table.c.id, from_obj=[model.task_table,s2]).group_by(model.task_table.c.name).order_by(model.task_table.c.name) class InnerDynamo(object): def __init__(self, new=None, pass_=None, warn=None, fail=None, panic=None, task_name=None, arch_arch=None, recipe_whiteboard=None, task_id_pk=None): self.New = new self.Pass = pass_ self.Warn = warn self.Fail = fail self.Panic = panic self.task_name = task_name self.arch_arch = arch_arch self.recipe_whiteboard = recipe_whiteboard self.task_id_pk = task_id_pk mapper(InnerDynamo,s1) result_data = [] for arch_val,whiteboard_val in whiteboard_data.iteritems(): log.debug('Arch is %s and whiteboard is %s' % (arch_val,whiteboard_val)) dyn = InnerDynamo.query().filter_by(recipe_whiteboard=whiteboard_val) dyn = dyn.filter_by(arch_arch=arch_val) for d in dyn: log.debug('d is %s %s %s %s %s' % (d.arch_arch,d.New, d.Pass, d.Fail, d.task_name) ) result_data.append(d) return result_data
|
def generate_data(self,**kw): jobs = [] whiteboard_data = {} if 'job_ids' in kw: jobs = kw['job_ids'].split() elif 'whiteboard' in kw: job_query = model.Job.query().filter(model.Job.whiteboard == kw['whiteboard']) for job in job_query: jobs.append(job.id) else: pass #raise AssertionError('Incorrect or no filter passed to job matrix report generator') recipes = model.MachineRecipe.query().join(['distro','arch']).join(['recipeset','job']).add_column(model.Arch.arch) #recipes = model.MachineRecipe.query().join(['distro','arch']).join(['recipeset','job']).filter(model.RecipeSet.job_id.in_(jobs)).add_column(model.Arch.arch) #log.debug(recipes) for recipe,arch in recipes: whiteboard_data[arch] = recipe.whiteboard
|
|
outer_recipe = recipe_table.alias()
|
def suspicious_abort(self): if self.type != SystemType.by_name(u'Machine'): return # Since its last status change, has this system had an # uninterrupted run of aborted recipes leading up to this one, with # at least two different STABLE distros? outer_recipe = recipe_table.alias() status_change_subquery = select([func.max(activity_table.c.created)], from_obj=activity_table.join(system_activity_table))\ .where(and_( system_activity_table.c.system_id == outer_recipe.c.system_id, activity_table.c.field_name == u'Status', activity_table.c.action == u'Changed')) nonaborted_recipe_subquery = select([func.max(recipe_table.c.finish_time)], from_obj=recipe_table.join(system_table))\ .where(and_( recipe_table.c.status_id != TaskStatus.by_name(u'Aborted').id, recipe_table.c.system_id == outer_recipe.c.system_id)) query = select([func.count(outer_recipe.c.distro_id.distinct())], from_obj=outer_recipe.join(distro_table).join(distro_tag_map))\ .where(and_( distro_tag_map.c.distro_tag_id == DistroTag.by_tag(u'STABLE').id, outer_recipe.c.start_time > status_change_subquery, outer_recipe.c.finish_time > nonaborted_recipe_subquery)) if session.execute(query).scalar() >= 2: # Broken! reason = unicode(_(u'System has a run of aborted recipes ' 'with STABLE distros')) log.warn(reason) old_status = self.status self.mark_broken(reason=reason) self.activity.append( SystemActivity(service=u'Scheduler', action=u'Changed', field_name=u'Status', old_value=old_status, new_value=self.status))
|
|
system_activity_table.c.system_id == outer_recipe.c.system_id,
|
system_activity_table.c.system_id == self.id,
|
def suspicious_abort(self): if self.type != SystemType.by_name(u'Machine'): return # Since its last status change, has this system had an # uninterrupted run of aborted recipes leading up to this one, with # at least two different STABLE distros? outer_recipe = recipe_table.alias() status_change_subquery = select([func.max(activity_table.c.created)], from_obj=activity_table.join(system_activity_table))\ .where(and_( system_activity_table.c.system_id == outer_recipe.c.system_id, activity_table.c.field_name == u'Status', activity_table.c.action == u'Changed')) nonaborted_recipe_subquery = select([func.max(recipe_table.c.finish_time)], from_obj=recipe_table.join(system_table))\ .where(and_( recipe_table.c.status_id != TaskStatus.by_name(u'Aborted').id, recipe_table.c.system_id == outer_recipe.c.system_id)) query = select([func.count(outer_recipe.c.distro_id.distinct())], from_obj=outer_recipe.join(distro_table).join(distro_tag_map))\ .where(and_( distro_tag_map.c.distro_tag_id == DistroTag.by_tag(u'STABLE').id, outer_recipe.c.start_time > status_change_subquery, outer_recipe.c.finish_time > nonaborted_recipe_subquery)) if session.execute(query).scalar() >= 2: # Broken! reason = unicode(_(u'System has a run of aborted recipes ' 'with STABLE distros')) log.warn(reason) old_status = self.status self.mark_broken(reason=reason) self.activity.append( SystemActivity(service=u'Scheduler', action=u'Changed', field_name=u'Status', old_value=old_status, new_value=self.status))
|
recipe_table.c.system_id == outer_recipe.c.system_id)) query = select([func.count(outer_recipe.c.distro_id.distinct())], from_obj=outer_recipe.join(distro_table).join(distro_tag_map))\
|
recipe_table.c.system_id == self.id)) query = select([func.count(recipe_table.c.distro_id.distinct())], from_obj=recipe_table.join(distro_table).join(distro_tag_map) .join(system_table, onclause=recipe_table.c.system_id == system_table.c.id))\
|
def suspicious_abort(self): if self.type != SystemType.by_name(u'Machine'): return # Since its last status change, has this system had an # uninterrupted run of aborted recipes leading up to this one, with # at least two different STABLE distros? outer_recipe = recipe_table.alias() status_change_subquery = select([func.max(activity_table.c.created)], from_obj=activity_table.join(system_activity_table))\ .where(and_( system_activity_table.c.system_id == outer_recipe.c.system_id, activity_table.c.field_name == u'Status', activity_table.c.action == u'Changed')) nonaborted_recipe_subquery = select([func.max(recipe_table.c.finish_time)], from_obj=recipe_table.join(system_table))\ .where(and_( recipe_table.c.status_id != TaskStatus.by_name(u'Aborted').id, recipe_table.c.system_id == outer_recipe.c.system_id)) query = select([func.count(outer_recipe.c.distro_id.distinct())], from_obj=outer_recipe.join(distro_table).join(distro_tag_map))\ .where(and_( distro_tag_map.c.distro_tag_id == DistroTag.by_tag(u'STABLE').id, outer_recipe.c.start_time > status_change_subquery, outer_recipe.c.finish_time > nonaborted_recipe_subquery)) if session.execute(query).scalar() >= 2: # Broken! reason = unicode(_(u'System has a run of aborted recipes ' 'with STABLE distros')) log.warn(reason) old_status = self.status self.mark_broken(reason=reason) self.activity.append( SystemActivity(service=u'Scheduler', action=u'Changed', field_name=u'Status', old_value=old_status, new_value=self.status))
|
outer_recipe.c.start_time > status_change_subquery, outer_recipe.c.finish_time > nonaborted_recipe_subquery))
|
recipe_table.c.start_time > status_change_subquery, recipe_table.c.finish_time > nonaborted_recipe_subquery))
|
def suspicious_abort(self): if self.type != SystemType.by_name(u'Machine'): return # Since its last status change, has this system had an # uninterrupted run of aborted recipes leading up to this one, with # at least two different STABLE distros? outer_recipe = recipe_table.alias() status_change_subquery = select([func.max(activity_table.c.created)], from_obj=activity_table.join(system_activity_table))\ .where(and_( system_activity_table.c.system_id == outer_recipe.c.system_id, activity_table.c.field_name == u'Status', activity_table.c.action == u'Changed')) nonaborted_recipe_subquery = select([func.max(recipe_table.c.finish_time)], from_obj=recipe_table.join(system_table))\ .where(and_( recipe_table.c.status_id != TaskStatus.by_name(u'Aborted').id, recipe_table.c.system_id == outer_recipe.c.system_id)) query = select([func.count(outer_recipe.c.distro_id.distinct())], from_obj=outer_recipe.join(distro_table).join(distro_tag_map))\ .where(and_( distro_tag_map.c.distro_tag_id == DistroTag.by_tag(u'STABLE').id, outer_recipe.c.start_time > status_change_subquery, outer_recipe.c.finish_time > nonaborted_recipe_subquery)) if session.execute(query).scalar() >= 2: # Broken! reason = unicode(_(u'System has a run of aborted recipes ' 'with STABLE distros')) log.warn(reason) old_status = self.status self.mark_broken(reason=reason) self.activity.append( SystemActivity(service=u'Scheduler', action=u'Changed', field_name=u'Status', old_value=old_status, new_value=self.status))
|
return self.recipes(recipes=session.query(MachineRecipe),*args,**kw)
|
return self.recipes(recipes=session.query(MachineRecipe) .join('status').join('result').join('system') .join('distro').join(['distro', 'arch']), *args, **kw)
|
def index(self,*args,**kw): return self.recipes(recipes=session.query(MachineRecipe),*args,**kw)
|
return self.recipes(recipes=MachineRecipe.mine(identity.current.user),action='./mine',*args,**kw)
|
return self.recipes(recipes=MachineRecipe.mine(identity.current.user) .join('status').join('result').join('system') .join('distro').join(['distro', 'arch']), action='./mine', *args, **kw)
|
def mine(self,*args,**kw): return self.recipes(recipes=MachineRecipe.mine(identity.current.user),action='./mine',*args,**kw)
|
widgets.PaginateDataGrid.Column(name='arch', getter=lambda x:x.arch, title='Arch', options=dict(sortable=True)), widgets.PaginateDataGrid.Column(name='system', getter=lambda x: x.system and x.system.link, title='System', options=dict(sortable=True)), widgets.PaginateDataGrid.Column(name='distro', getter=lambda x: x.distro and x.distro.link, title='Distro', options=dict(sortable=True)),
|
widgets.PaginateDataGrid.Column(name='distro.arch.arch', getter=lambda x:x.arch, title='Arch', options=dict(sortable=True)), widgets.PaginateDataGrid.Column(name='system.fqdn', getter=lambda x: x.system and x.system.link, title='System', options=dict(sortable=True)), widgets.PaginateDataGrid.Column(name='distro.install_name', getter=lambda x: x.distro and x.distro.link, title='Distro', options=dict(sortable=True)),
|
def recipes(self,recipes,action='.',*args, **kw): recipes_return = self._recipes(recipes,**kw) searchvalue = None search_options = {} if recipes_return: if 'recipes_found' in recipes_return: recipes = recipes_return['recipes_found'] if 'searchvalue' in recipes_return: searchvalue = recipes_return['searchvalue'] if 'simplesearch' in recipes_return: search_options['simplesearch'] = recipes_return['simplesearch']
|
for recipe_id in self.hub.recipes.by_log_server(self.conf.get("ARCHIVE_SERVER")): self.transfer_recipe_log(recipe_id)
|
for recipe_id in self.hub.recipes.by_log_server(self.server): self.transfer_recipe_logs(recipe_id)
|
def transfer_logs(self): self.logger.info("Entering transfer_logs") for recipe_id in self.hub.recipes.by_log_server(self.conf.get("ARCHIVE_SERVER")): self.transfer_recipe_log(recipe_id)
|
myPaginateDataGrid.Column(name='osmajor', getter=lambda x: make_link(url = './edit?id=%s' % x.id, text = x), title='OS Version', options=dict(sortable=True)),
|
myPaginateDataGrid.Column(name='osmajor', getter=lambda x: make_link(url = './edit_osmajor?id=%s' % x.osmajor.id, text = x.osmajor), title='OS Major', options=dict(sortable=True)), myPaginateDataGrid.Column(name='alias', getter=lambda x: x.osmajor.alias, title='Alias', options=dict(sortable=True)), myPaginateDataGrid.Column(name='osversion', getter=lambda x: make_link(url = './edit?id=%s' % x.id, text = x.osminor), title='OS Minor', options=dict(sortable=True)),
|
def index(self,*args,**kw): osversions = session.query(OSVersion) list_by_letters = [] for elem in osversions: osmajor_name = elem.osmajor.osmajor if osmajor_name: list_by_letters.append(osmajor_name[0].capitalize()) list_by_letters = set(list_by_letters) results = self.process_search(**kw) if results: osversions = results
|
log.debug('arch:%s distro_family:%s method:%s tag:%s ' % (arch,distro_family,method,tag))
|
def get_distro_options(self,arch,distro_family,method,tag): """ get_distro_options() will return all the distros for a given arch, distro_family,method and tag """
|
|
options = [elem.install_name for elem in distro] log.debug(distro)
|
options = [elem.install_name for elem in distro]
|
def get_distro_options(self,arch,distro_family,method,tag): """ get_distro_options() will return all the distros for a given arch, distro_family,method and tag """
|
log.debug("recipe: %s labController: %s Removing system %s" % (recipe.id, l)controller, system))
|
log.debug("recipe: %s labController: %s Removing system %s" % (recipe.id, l_controller, system))
|
def processed_recipesets(*args): recipesets = RecipeSet.query()\ .join(['recipes','status'])\ .filter(Recipe.status==TaskStatus.by_name(u'Processed')) if not recipesets.count(): return False log.debug("Entering processed_recipes routine") for recipeset in recipesets: session.begin() try: bad_l_controllers = set() # We only need to do this processing on multi-host recipes if len(recipeset.recipes) == 1: log.info("recipe ID %s moved from Processed to Queued" % recipeset.recipes[0].id) recipeset.recipes[0].queue() else: # Find all the lab controllers that this recipeset may run. rsl_controllers = set(LabController.query()\ .join(['systems', 'queued_recipes', 'recipeset'])\ .filter(RecipeSet.id==recipeset.id).all()) # Any lab controllers that are not associated to all recipes in the # recipe set must have those systems on that lab controller removed # from any recipes. For multi-host all recipes must be schedulable # on one lab controller for recipe in recipeset.recipes: rl_controllers = set(LabController.query()\ .join(['systems', 'queued_recipes'])\ .filter(Recipe.id==recipe.id).all()) bad_l_controllers = bad_l_controllers.union(rl_controllers.difference(rsl_controllers)) for l_controller in rsl_controllers: enough_systems = False for recipe in recipeset.recipes: systems = recipe.dyn_systems.filter( System.lab_controller==l_controller ).all() if len(systems) < len(recipeset.recipes): break else: # There are enough choices We don't need to worry about dead # locks enough_systems = True if not enough_systems: log.debug("recipe: %s labController:%s entering not enough systems logic" % (recipe.id, l_controller)) # Eliminate bad choices. 
for recipe in recipeset.recipes_orderby(l_controller)[:]: for tmprecipe in recipeset.recipes: systemsa = set(recipe.dyn_systems.filter( System.lab_controller==l_controller ).all()) systemsb = set(tmprecipe.dyn_systems.filter( System.lab_controller==l_controller ).all()) if systemsa.difference(systemsb): for rem_system in systemsa.intersection(systemsb): log.debug("recipe: %s labController:%s Removing system %s" % (recipe.id, l_controller, rem_system)) recipe.systems.remove(rem_system) for recipe in recipeset.recipes: count = 0 systems = recipe.dyn_systems.filter( System.lab_controller==l_controller ).all() for tmprecipe in recipeset.recipes: tmpsystems = tmprecipe.dyn_systems.filter( System.lab_controller==l_controller ).all() if recipe != tmprecipe and \ systems == tmpsystems: count += 1 if len(systems) <= count: # Remove all systems from this lc on this rs. log.debug("recipe: %s labController:%s %s <= %s Removing lab" % (recipe.id, l_controller, len(systems), count)) bad_l_controllers = bad_l_controllers.union([l_controller]) # Remove systems that are on bad lab controllers # This means one of the recipes can be fullfilled on a lab controller # but not the rest of the recipes in the recipeSet. # This could very well remove ALL systems from all recipes in this # recipeSet. If that happens then the recipeSet cannot be scheduled # and will be aborted by the abort process. 
for recipe in recipeset.recipes: for l_controller in bad_l_controllers: systems = (recipe.dyn_systems.filter( System.lab_controller==l_controller ).all() ) log.debug("recipe: %s labController: %s Removing lab" % (recipe.id, l_controller)) for system in systems: log.debug("recipe: %s labController: %s Removing system %s" % (recipe.id, l)controller, system)) recipe.systems.remove(system) if recipe.systems: # Set status to Queued log.info("recipe: %s moved from Processed to Queued" % recipe.id) recipe.queue() else: # Set status to Aborted log.info("recipe ID %s moved from Processed to Aborted" % recipe.id) recipe.recipeset.abort('Recipe ID %s does not match any systems' % recipe.id) session.commit() except exceptions.Exception, e: session.rollback() log.error("Failed to commit due to :%s" % e) session.close() log.debug("Exiting processed_recipes routine") return True
|
scheduled = scheduled_recipes():
|
scheduled = scheduled_recipes()
|
def schedule(): bkr.server.scheduler._start_scheduler() log.debug("starting new recipes Thread") # Create new_recipes Thread add_onetime_task(action=new_recipes_loop, args=[lambda:datetime.now()]) log.debug("starting processed recipes Thread") # Create processed_recipes Thread add_onetime_task(action=processed_recipesets_loop, args=[lambda:datetime.now()], initialdelay=5) #log.debug("starting queued recipes Thread") # Create queued_recipes Thread #add_onetime_task(action=queued_recipes_loop, # args=[lambda:datetime.now()], # initialdelay=10) log.debug("starting scheduled recipes Thread") # Run scheduled_recipes in this process while True: queued = queued_recipes() scheduled = scheduled_recipes(): if not queued and not scheduled: time.sleep(20)
|
new = PowerType(name=kw['name'])
|
flash(_(u"Invalid Power Type entry")) redirect(".")
|
def save(self, **kw): if kw['id']: edit = PowerType.by_id(kw['id']) edit.name = kw['name'] else: new = PowerType(name=kw['name']) flash( _(u"OK") ) redirect(".")
|
list_by_letters = set([elem.name[0].capitalize() for elem in powertypes])
|
list_by_letters = set([elem.name[0].capitalize() for elem in powertypes if elem.name])
|
def index(self,*args,**kw): powertypes = session.query(PowerType) list_by_letters = set([elem.name[0].capitalize() for elem in powertypes]) results = self.process_search(**kw) if results: powertypes = results
|
seconds=self.task.avg_time)
|
seconds=self.task.avg_time + 1800)
|
def start(self, watchdog_override=None): """ Record the start of this task If watchdog_override is defined we will use that time instead of what the tasks default time is. This should be defined in number of seconds """ if not self.recipe.watchdog: raise BX(_('No watchdog exists for recipe %s' % self.recipe.id)) if not self.start_time: self.start_time = datetime.utcnow() self.status = TaskStatus.by_name(u'Running')
|
line = self.strip_ansi.sub('',line.decode('ascii', 'ignore')) size = len(line)
|
line = self.strip_ansi.sub(' ',line.decode('ascii', 'replace').encode('ascii', 'replace'))
|
def update(self): """ If the log exists and the file has grown then upload the new piece """ if os.path.exists(self.log): file = open(self.log, "r") where = self.where file.seek(where) line = file.read(self.blocksize) if self.strip_ansi: line = self.strip_ansi.sub('',line.decode('ascii', 'ignore')) size = len(line) now = file.tell() file.close() if self.panic: # Search the line for panics # The regex is stored in /etc/beaker/proxy.conf panic = self.panic.search(line) if panic: self.proxy.logger.info("Panic detected for system: %s" % self.watchdog['system']) recipeset = xmltramp.parse(self.proxy.get_recipe(self.watchdog['system'])).recipeSet try: watchdog = recipeset.recipe.watchdog() except AttributeError: watchdog = recipeset.guestrecipe.watchdog() if 'panic' in watchdog and watchdog['panic'] == 'ignore': # Don't Report the panic self.proxy.logger.info("Not reporting panic, recipe set to ignore") else: # Report the panic self.proxy.task_result(self.watchdog['task_id'], 'panic', '/', 0, panic.group()) # set the watchdog timeout to 10 seconds, gives some time for all data to # print out on the serial console # this may abort the recipe depending on what the recipeSets # watchdog behaviour is set to. self.proxy.extend_watchdog(self.watchdog['task_id'], 10) if not line: return False # If we didn't read our full blocksize and we are still growing # then don't send anything yet. elif size < self.blocksize and where == now: return False else: self.where = now data = base64.encodestring(line) md5sum = md5_constructor(line).hexdigest() self.proxy.recipe_upload_file(self.watchdog['recipe_id'], "/", self.filename, size, md5sum, where, data) return True
|
hwdict['model_ver'] = get_entry(tmpdict, 'revision')
|
revision = get_entry(tmpdict, 'revision') try: hwdict['model_number'], hwdict['model_ver'] = revision.split()[0].split('.')[:2] except (ValueError, IndexError): pass
|
def get_entry(a, entry): e = entry.lower() if not a.has_key(e): return "" return a[e]
|
self.strip_ansi = re.compile("[^\t\n\x20-\x7E]+|\[[0-9]+\;[0-9]+H")
|
self.control_chars = ''.join(map(unichr, range(0,9) + range(11,32) + range(127,160))) self.strip_ansi = re.compile('[%s]' % re.escape(self.control_chars))
|
def __init__(self, log, watchdog, proxy, panic, blocksize=65536): self.log = log self.watchdog = watchdog self.proxy = proxy self.blocksize = blocksize self.filename = os.path.basename(self.log) # If filename is the hostname then rename it to console.log if self.filename == self.watchdog['system']: self.filename="console.log" self.strip_ansi = re.compile("[^\t\n\x20-\x7E]+|\[[0-9]+\;[0-9]+H") self.panic = re.compile(r'%s' % panic) else: self.strip_ansi = None self.panic = None self.where = 0
|
line = self.strip_ansi.sub("\n", line)
|
line = self.strip_ansi.sub('',line.decode('ascii', 'ignore'))
|
def update(self): """ If the log exists and the file has grown then upload the new piece """ if os.path.exists(self.log): file = open(self.log, "r") where = self.where file.seek(where) line = file.read(self.blocksize) if self.strip_ansi: line = self.strip_ansi.sub("\n", line) size = len(line) now = file.tell() file.close() if self.panic: # Search the line for panics # The regex is stored in /etc/beaker/proxy.conf panic = self.panic.search(line) if panic: self.proxy.logger.info("Panic detected for system: %s" % self.watchdog['system']) recipeset = xmltramp.parse(self.proxy.get_recipe(self.watchdog['system'])).recipeSet try: recipe = recipeset.recipe() except AttributeError: recipe = recipeset.guestrecipe() if 'panic' in recipe and recipe['panic'] == 'ignore': # Don't Report the panic self.proxy.logger.info("Not reporting panic, recipe set to ignore") else: # Report the panic self.proxy.task_result(self.watchdog['task_id'], 'panic', '/', 0, panic.group()) # set the watchdog timeout to 10 seconds, gives some time for all data to # print out on the serial console # this may abort the recipe depending on what the recipeSets # watchdog behaviour is set to. self.proxy.extend_watchdog(self.watchdog['task_id'], 10) if not line: return False # If we didn't read our full blocksize and we are still growing # then don't send anything yet. elif size < self.blocksize and where == now: return False else: self.where = now data = base64.encodestring(line) md5sum = md5_constructor(line).hexdigest() self.proxy.recipe_upload_file(self.watchdog['recipe_id'], "/", self.filename, size, md5sum, where, data) return True
|
def get_beaker_version(): return beaker. __version__ turbogears.view.variable_providers.append({"beaker_version" : get_beaker_version()})
|
#def search():
|
|
@classmethod def value_less_than_filter(cls,col,val,key_name): result = model.Key.by_name(key_name) int_table = result.numeric key_id = result.id return and_(model.Key_Value_Int.key_value < val, model.Key_Value_Int.key_id == key_id)
|
def value_contains_pre(cls,value,**kw): return cls.value_pre(value,**kw)
|
|
continue
|
break
|
def scheduled_recipes(*args):
    """
    if All recipes in a recipeSet are in Scheduled state then move them to
     Running.

    Returns False when no fully-Scheduled recipe sets exist, True otherwise.
    Each recipe set is processed in its own transaction; failures roll back
    that set only.
    """
    # Select recipe sets whose *minimum* recipe status is Scheduled, i.e.
    # every recipe in the set has reached Scheduled.
    recipesets = RecipeSet.query().from_statement(
            select([recipe_set_table.c.id,
                    func.min(recipe_table.c.status_id)],
                   from_obj=[recipe_set_table.join(recipe_table)])\
            .group_by(RecipeSet.id)\
            .having(func.min(recipe_table.c.status_id) == TaskStatus.by_name(u'Scheduled').id)).all()
    if not recipesets:
        return False
    log.debug("Entering scheduled_recipes routine")
    for recipeset in recipesets:
        session.begin()
        try:
            # Go through each recipe in the recipeSet
            for recipe in recipeset.recipes:
                # If one of the recipes gets aborted then don't try and run
                if recipe.status != TaskStatus.by_name(u'Scheduled'):
                    continue
                recipe.waiting()
                # Go Through each recipe and find out everyone's role.
                for peer in recipe.recipeset.recipes:
                    recipe.roles[peer.role].append(peer.system)
                # Go Through each task and find out the roles of everyone else
                for i, task in enumerate(recipe.tasks):
                    for peer in recipe.recipeset.recipes:
                        # Roles are only shared amongst like recipe types
                        if type(recipe) == type(peer):
                            try:
                                task.roles[peer.tasks[i].role].append(peer.system)
                            except IndexError:
                                # We have uneven tasks
                                pass
                # Start the first task in the recipe
                try:
                    recipe.tasks[0].start()
                except exceptions.Exception, e:
                    log.error("Failed to Start recipe %s, due to %s" % (recipe.id,e))
                    recipe.recipeset.abort(u"Failed to provision recipeid %s, %s" % (
                            recipe.id, e))
                # Build the kickstart metadata: requested packages plus the
                # harness and custom repos, '|'-separated name,url pairs.
                ks_meta = "packages=%s" % ":".join([p.package for p in recipe.packages])
                harnessrepos = "|".join(["%s,%s" % (r["name"], r["url"]) for r in recipe.harness_repos()])
                customrepos = "|".join(["%s,%s" % (r.name, r.url) for r in recipe.repos])
                ks_meta = "%s customrepos=%s harnessrepos=%s" % (ks_meta, customrepos, harnessrepos)
                # If ks_meta is defined from recipe pass it along.
                # add it last to allow for overriding previous settings.
                if recipe.ks_meta:
                    ks_meta = "%s %s" % (ks_meta, recipe.ks_meta)
                try:
                    recipe.system.action_auto_provision(recipe.distro,
                                                        ks_meta,
                                                        recipe.kernel_options,
                                                        recipe.kernel_options_post,
                                                        recipe.kickstart)
                    recipe.system.activity.append(
                            SystemActivity(recipe.recipeset.job.owner,
                                           'Scheduler',
                                           'Provision',
                                           'Distro',
                                           '',
                                           '%s' % recipe.distro))
                except exceptions.Exception, e:
                    log.error(u"Failed to provision recipeid %s, %s" % (
                            recipe.id, e))
                    recipe.recipeset.abort(u"Failed to provision recipeid %s, %s" % (
                            recipe.id, e))
            session.commit()
        except exceptions.Exception, e:
            session.rollback()
            log.error("Failed to commit due to :%s" % e)
        session.close()
    log.debug("Exiting scheduled_recipes routine")
    return True
|
device = Device.query().filter_by(vendor_id = device['vendorID'],
|
mydevice = Device.query().filter_by(vendor_id = device['vendorID'],
|
def updateDevices(self, deviceinfo):
    """Attach a Device row for each reported device, creating rows for
    devices not yet in the database.

    deviceinfo is an iterable of dicts with keys vendorID, deviceID,
    subsysVendorID, subsysDeviceID, bus, driver, type, description.
    NOTE(review): devices no longer reported are not removed here.
    """
    for device in deviceinfo:
        try:
            # Re-use an existing Device row on an exact attribute match.
            device = Device.query().filter_by(vendor_id = device['vendorID'],
                               device_id = device['deviceID'],
                               subsys_vendor_id = device['subsysVendorID'],
                               subsys_device_id = device['subsysDeviceID'],
                               bus = device['bus'],
                               driver = device['driver'],
                               description = device['description']).one()
            self.devices.append(device)
        except InvalidRequestError:
            # .one() found no matching row -- create the Device.
            new_device = Device(vendor_id = device['vendorID'],
                                device_id = device['deviceID'],
                                subsys_vendor_id = device['subsysVendorID'],
                                subsys_device_id = device['subsysDeviceID'],
                                bus = device['bus'],
                                driver = device['driver'],
                                device_class = device['type'],
                                description = device['description'])
            session.save(new_device)
            session.flush([new_device])
            self.devices.append(new_device)
|
self.devices.append(device)
|
def updateDevices(self, deviceinfo):
    """Attach Device rows matching each reported device, creating rows
    for devices the database does not know yet."""
    for info in deviceinfo:
        ident = dict(vendor_id=info['vendorID'],
                     device_id=info['deviceID'],
                     subsys_vendor_id=info['subsysVendorID'],
                     subsys_device_id=info['subsysDeviceID'],
                     bus=info['bus'],
                     driver=info['driver'],
                     description=info['description'])
        try:
            self.devices.append(Device.query().filter_by(**ident).one())
        except InvalidRequestError:
            fresh = Device(device_class=info['type'], **ident)
            session.save(fresh)
            session.flush([fresh])
            self.devices.append(fresh)
|
|
new_device = Device(vendor_id = device['vendorID'],
|
mydevice = Device(vendor_id = device['vendorID'],
|
def updateDevices(self, deviceinfo):
    """Attach a Device row for each reported device, creating rows for
    devices not yet in the database.

    deviceinfo is an iterable of dicts with keys vendorID, deviceID,
    subsysVendorID, subsysDeviceID, bus, driver, type, description.
    NOTE(review): devices no longer reported are not removed here.
    """
    for device in deviceinfo:
        try:
            # Re-use an existing Device row on an exact attribute match.
            device = Device.query().filter_by(vendor_id = device['vendorID'],
                               device_id = device['deviceID'],
                               subsys_vendor_id = device['subsysVendorID'],
                               subsys_device_id = device['subsysDeviceID'],
                               bus = device['bus'],
                               driver = device['driver'],
                               description = device['description']).one()
            self.devices.append(device)
        except InvalidRequestError:
            # .one() found no matching row -- create the Device.
            new_device = Device(vendor_id = device['vendorID'],
                                device_id = device['deviceID'],
                                subsys_vendor_id = device['subsysVendorID'],
                                subsys_device_id = device['subsysDeviceID'],
                                bus = device['bus'],
                                driver = device['driver'],
                                device_class = device['type'],
                                description = device['description'])
            session.save(new_device)
            session.flush([new_device])
            self.devices.append(new_device)
|
session.save(new_device) session.flush([new_device]) self.devices.append(new_device)
|
session.save(mydevice) session.flush([mydevice]) self.devices.append(mydevice) currentDevices.append(mydevice) for device in self.devices[:]: if device not in currentDevices: self.devices.remove(device)
|
def updateDevices(self, deviceinfo):
    """Record the reported hardware devices against this system,
    inserting previously unseen devices into the Device table."""
    for info in deviceinfo:
        lookup = dict(vendor_id=info['vendorID'],
                      device_id=info['deviceID'],
                      subsys_vendor_id=info['subsysVendorID'],
                      subsys_device_id=info['subsysDeviceID'],
                      bus=info['bus'],
                      driver=info['driver'],
                      description=info['description'])
        try:
            existing = Device.query().filter_by(**lookup).one()
        except InvalidRequestError:
            created = Device(device_class=info['type'], **lookup)
            session.save(created)
            session.flush([created])
            self.devices.append(created)
        else:
            self.devices.append(existing)
|
'url --url=$tree\n' + kickstart)
|
'url --url=$tree\n
|
def test_provision(self):
    """Provisioning a reserved system pushes power/kickstart settings to
    cobbler and triggers a reboot action."""
    kickstart = '''
%%pre
kickstart lol!
do some stuff etc
'''
    system = data_setup.create_system(
            owner=User.by_user_name(data_setup.ADMIN_USER),
            status=u'Manual', shared=True)
    data_setup.configure_system_power(system, power_type=u'drac',
            address=u'nowhere.example.com', user=u'teh_powz0r',
            password=u'onoffonoff', power_id=u'asdf')
    system.lab_controller = self.lab_controller
    user = data_setup.create_user(password=u'password')
    system.user = user
    session.flush()
    self.server.auth.login_password(user.user_name, 'password')
    self.server.systems.provision(system.fqdn, self.distro.install_name,
            {'method': 'nfs'}, 'noapic', 'noapic runlevel=3', kickstart)
    # The stub cobbler records everything pushed to it; verify the system
    # record, the generated profile, the kickstart body, and the reboot.
    self.assertEqual(self.stub_cobbler_thread.cobbler.systems[system.fqdn],
            {'power_type': 'drac',
             'power_address': 'nowhere.example.com',
             'power_user': 'teh_powz0r',
             'power_pass': 'onoffonoff',
             'power_id': 'asdf',
             'ksmeta': {'method': 'nfs'},
             'kopts': 'noapic',
             'kopts_post': 'noapic runlevel=3',
             'profile': system.fqdn,
             'netboot-enabled': True})
    kickstart_filename = '/var/lib/cobbler/kickstarts/%s.ks' % system.fqdn
    self.assertEqual(self.stub_cobbler_thread.cobbler.profiles[system.fqdn],
            {'kickstart': kickstart_filename,
             'parent': self.distro.install_name})
    # The supplied kickstart is prefixed with the url directive.
    self.assertEqual(
            self.stub_cobbler_thread.cobbler.kickstarts[kickstart_filename],
            'url --url=$tree\n' + kickstart)
    self.assertEqual(
            self.stub_cobbler_thread.cobbler.system_actions[system.fqdn],
            'reboot')
|
recipeSet.priority = TaskPriority.query().filter_by(priority = TaskPriority.default_priority).one()
|
recipeSet.priority = TaskPriority.query().filter_by(TaskPriority.priority == TaskPriority.default_priority).one()
|
def process_xmljob(self, xmljob, user): job = Job(whiteboard='%s' % xmljob.whiteboard, ttasks=0, owner=user) for xmlrecipeSet in xmljob.iter_recipeSets(): recipeSet = RecipeSet(ttasks=0) recipeset_priority = xmlrecipeSet.get_xml_attr('priority',str,None) if recipeset_priority is not None: try: my_priority = TaskPriority.query().filter_by(priority = recipeset_priority).one() except InvalidRequestError, (e): raise BX(_('You have specified an invalid recipeSet priority:%s' % recipeset_priority)) allowed_priorities = RecipeSet.allowed_priorities_initial(identity.current.user) allowed = [elem for elem in allowed_priorities if elem.priority == recipeset_priority] if allowed: recipeSet.priority = allowed[0] else: recipeSet.priority = TaskPriority.query().filter_by(priority = TaskPriority.default_priority).one() else: recipeSet.priority = TaskPriority.query().filter(priority = TaskPriority.default_priority).one()
|
recipeSet.priority = TaskPriority.query().filter(priority = TaskPriority.default_priority).one()
|
recipeSet.priority = TaskPriority.query().filter(TaskPriority.priority == TaskPriority.default_priority).one()
|
def process_xmljob(self, xmljob, user): job = Job(whiteboard='%s' % xmljob.whiteboard, ttasks=0, owner=user) for xmlrecipeSet in xmljob.iter_recipeSets(): recipeSet = RecipeSet(ttasks=0) recipeset_priority = xmlrecipeSet.get_xml_attr('priority',str,None) if recipeset_priority is not None: try: my_priority = TaskPriority.query().filter_by(priority = recipeset_priority).one() except InvalidRequestError, (e): raise BX(_('You have specified an invalid recipeSet priority:%s' % recipeset_priority)) allowed_priorities = RecipeSet.allowed_priorities_initial(identity.current.user) allowed = [elem for elem in allowed_priorities if elem.priority == recipeset_priority] if allowed: recipeSet.priority = allowed[0] else: recipeSet.priority = TaskPriority.query().filter_by(priority = TaskPriority.default_priority).one() else: recipeSet.priority = TaskPriority.query().filter(priority = TaskPriority.default_priority).one()
|
afterpackages = kickstart[packages_slot:]
|
if end: afterpackages = "%%end\n%s" % afterpackages
|
def provision(self, distro=None, kickstart=None, ks_meta=None,
              kernel_options=None, kernel_options_post=None, ks_appends=None):
    """ Provision the System
        make xmlrpc call to lab controller
    """
    # A distro is mandatory; return False (rather than raising) so the
    # caller can treat the result as a simple success/failure flag.
    if not distro:
        return False
|
%(end)s
|
def provision(self, distro=None, kickstart=None, ks_meta=None,
              kernel_options=None, kernel_options_post=None, ks_appends=None):
    """ Provision the System
        make xmlrpc call to lab controller
    """
    # A distro is mandatory; return False (rather than raising) so the
    # caller can treat the result as a simple success/failure flag.
    if not distro:
        return False
|
|
key = peer.tasks[i].role task.roles[key].append(peer.system)
|
try: task.roles[peer.tasks[i].role].append(peer.system) except IndexError: pass
|
def scheduled_recipes(*args): """ if All recipes in a recipeSet are in Scheduled state then move them to Running. """ recipesets = RecipeSet.query().from_statement( select([recipe_set_table.c.id, func.min(recipe_table.c.status_id)], from_obj=[recipe_set_table.join(recipe_table)])\ .group_by(RecipeSet.id)\ .having(func.min(recipe_table.c.status_id) == TaskStatus.by_name(u'Scheduled').id)).all() if not recipesets: return False log.debug("Entering scheduled_recipes routine") for recipeset in recipesets: session.begin() try: # Go through each recipe in the recipeSet for recipe in recipeset.recipes: # If one of the recipes gets aborted then don't try and run if recipe.status != TaskStatus.by_name(u'Scheduled'): continue recipe.waiting() # Go Through each recipe and find out everyone's role. for peer in recipe.recipeset.recipes: recipe.roles[peer.role].append(peer.system) # Go Through each task and find out the roles of everyone else for i, task in enumerate(recipe.tasks): for peer in recipe.recipeset.recipes: # Roles are only shared amongst like recipe types if type(recipe) == type(peer): key = peer.tasks[i].role task.roles[key].append(peer.system) # Start the first task in the recipe try: recipe.tasks[0].start() except exceptions.Exception, e: log.error("Failed to Start recipe %s, due to %s" % (recipe.id,e)) recipe.recipeset.abort(u"Failed to provision recipeid %s, %s" % ( recipe.id, e)) ks_meta = "packages=%s" % ":".join([p.package for p in recipe.packages]) try: recipe.system.action_auto_provision(recipe.distro, ks_meta, recipe.kernel_options, recipe.kernel_options_post, recipe.kickstart) recipe.system.activity.append( SystemActivity(recipe.recipeset.job.owner, 'Scheduler', 'Provision', 'Distro', '', '%s' % recipe.distro)) except exceptions.Exception, e: log.error(u"Failed to provision recipeid %s, %s" % ( recipe.id, e)) recipe.recipeset.abort(u"Failed to provision recipeid %s, %s" % ( recipe.id, e)) session.commit() except exceptions.Exception, e: session.rollback() 
log.error("Failed to commit due to :%s" % e) session.close() log.debug("Exiting scheduled_recipes routine") return True
|
mp = False
|
def kernel_inventory():
    """Probe kernel-level hardware facts (IOMMU, disk controller driver,
    multipath) and return them as a dict keyed the way RHTS expects."""
    # get the data from SMOLT but modify it for how RHTS expects to see it
    # Eventually we'll switch over to SMOLT properly.
    data = {}
    data['VIRT_IOMMU'] = False
    ##########################################
    # check for virtual iommu/vt-d capability
    # if this passes, assume we pick up sr-iov for free
    if check_for_virt_iommu():
        data['VIRT_IOMMU'] = True
    ##########################################
    # determine which stroage controller has a disk behind it
    path = "/sys/block"
    virt_pat = re.compile('virtual')
    floppy_pat = re.compile('fd[0-9]')
    sr_pat = re.compile('sr[0-9]')
    for block in glob.glob(os.path.join(path, '*')):
        #skip read only/floppy devices
        if sr_pat.search(block) or floppy_pat.search(block):
            continue
        #skip block devices that don't point to a device
        if not os.path.islink(block + "/device"):
            continue
        sysfs_link = os.readlink(block + "/device")
        #skip virtual devices
        if virt_pat.search(sysfs_link):
            continue
        #cheap way to create an absolute path, there is probably a better way
        sysfs_path = sysfs_link.replace('../..','/sys')
        #start abusing hal to give us the info we want
        cmd = 'hal-find-by-property --key linux.sysfs_path --string %s' % sysfs_path
        status,udi = commands.getstatusoutput(cmd)
        if status:
            print "DISK_CONTROLLER: hal-find-by-property failed: %d" % status
            continue
        # Walk up the hal device tree until a real driver is found.
        while udi:
            cmd = 'hal-get-property --udi %s --key info.linux.driver 2>/dev/null' % udi
            status, driver = commands.getstatusoutput(cmd)
            if status == 0 and driver != "sd" and driver != "sr":
                #success
                data['DISK_CONTROLLER'] = driver
                break
            #get the parent and try again
            cmd = 'hal-get-property --udi %s --key info.parent' % udi
            status,udi = commands.getstatusoutput(cmd)
            if status:
                print "DISK_CONTROLLER: hal-get-property failed: %d" % status
                break
        if not udi:
            print "DISK_CONTROLLER: can not determine driver for %s" %block
    ##########################################
    # determine if machine is using multipath or not
    #ok, I am really lazy
    #remove the default blacklist in /etc/multipath.conf
    os.system("sed -i '/^blacklist/,/^}$/d' /etc/multipath.conf")
    #restart multipathd to see what it detects
    #this spits out errors if the root device is on a
    #multipath device, I guess ignore for now and hope the code
    #correctly figures things out
    os.system("service multipathd restart")
    #the multipath commands will display the topology if it
    #exists otherwise nothing
    #filter out vbds and single device paths
    status, mpaths = commands.getstatusoutput("multipath -ll")
    if status:
        print "MULTIPATH: multipath -ll failed with %d" % status
    else:
        count = 0
        mp = False
        mpath_pat = re.compile(" dm-[0-9]* ")
        sd_pat = re.compile(" sd[a-z]")
        for line in mpaths.split('\n'):
            #reset when a new section starts
            if mpath_pat.search(line):
                # found at least one mp instance, declare success
                if count > 1:
                    mp = True
                    break
                count = 0
            #a hit! increment to indicate this
            if sd_pat.search(line):
                count = count + 1
        # NOTE(review): DISK_MULTIPATH is only set when `multipath -ll`
        # succeeds -- confirm consumers tolerate a missing key.
        if mp == True:
            data['DISK_MULTIPATH'] = True
        else:
            data['DISK_MULTIPATH'] = False
    return data
|
|
order_by=[model.job_table.c.id],distinct=True,limit=50) log.debug(s1)
|
order_by=[model.job_table.c.id],distinct=True,limit=50)
|
def get_whiteboard_options(self, filter):
    """
    get_whiteboard_options() returns all whiteboards from the job_table
    if value is passed in for 'filter' it will perform an SQL 'like'
    operation against whiteboard
    """
    whereclause = None
    if filter:
        whereclause = model.job_table.c.whiteboard.like('%%%s%%' % filter)
    query = select([model.job_table.c.whiteboard],
                   whereclause=whereclause,
                   group_by=[model.job_table.c.whiteboard, model.job_table.c.id],
                   order_by=[model.job_table.c.id],
                   distinct=True, limit=50)
    log.debug(query)
    return [row[0] for row in query.execute()]
|
systems = recipe.dyn_systems.filter(and_(System.user==None, System.status==automated))
|
systems = recipe.dyn_systems.join(['lab_controller','_distros','distro']).\ filter(and_(System.user==None, Distro.id==recipe.distro_id, System.status==automated))
|
def queued_recipes(*args):
    """Move Queued recipes onto free Automated systems.

    For each queued recipe with a candidate system, atomically flip the
    recipe to Scheduled and reserve the system; returns False when there
    is nothing queued, True otherwise.
    """
    automated = SystemStatus.by_name(u'Automated')
    # Candidate recipes: Queued, with a free Automated system that can
    # install the recipe's distro, either before the recipe set has been
    # pinned to a lab controller or on that same lab controller.
    recipes = Recipe.query()\
            .join('status')\
            .join(['systems','lab_controller','_distros','distro'])\
            .join(['recipeset','priority'])\
            .join(['distro','lab_controller_assocs','lab_controller'])\
            .filter(
                or_(
                and_(Recipe.status==TaskStatus.by_name(u'Queued'),
                     System.user==None,
                     System.status==automated,
                     RecipeSet.lab_controller==None,
                     Recipe.distro_id==Distro.id,
                    ),
                and_(Recipe.status==TaskStatus.by_name(u'Queued'),
                     System.user==None,
                     System.status==automated,
                     Recipe.distro_id==Distro.id,
                     RecipeSet.lab_controller_id==System.lab_controller_id,
                    )
                   )
                  )
    # Order recipes by priority.
    # FIXME Add secondary order by number of matched systems.
    if True:
        recipes = recipes.order_by(TaskPriority.id.desc())
    if not recipes.count():
        return False
    log.debug("Entering queued_recipes routine")
    for _recipe in recipes:
        session.begin()
        try:
            recipe = Recipe.by_id(_recipe.id)
            systems = recipe.dyn_systems.filter(and_(System.user==None,
                                                     System.status==automated))
            # Order systems by owner, then Group, finally shared for everyone.
            # FIXME Make this configurable, so that a user can specify their scheduling
            # Implemented order, still need to do pool
            # preference from the job.
            # <recipe>
            #  <autopick order='sequence|random'>
            #   <pool>owner</pool>
            #   <pool>groups</pool>
            #   <pool>public</pool>
            #  </autopick>
            # </recipe>
            user = recipe.recipeset.job.owner
            if True: #FIXME if pools are defined add them here in the order requested.
                systems = systems.order_by(case([(System.owner==user, 1),
                                                 (System.owner!=user and Group.systems==None, 2)],
                                                else_=3))
            if recipe.recipeset.lab_controller:
                # First recipe of a recipeSet determines the lab_controller
                systems = systems.filter(
                            System.lab_controller==recipe.recipeset.lab_controller)
            if recipe.autopick_random:
                try:
                    system = systems[random.randrange(0,systems.count() - 1)]
                except IndexError:
                    system = None
            else:
                system = systems.first()
            if system:
                log.debug("System : %s is available for Recipe %s" % (system, recipe.id))
                # Check to see if user still has proper permissions to use system
                # Remember the mapping of available systems could have happend hours or even
                # days ago and groups or loans could have been put in place since.
                if not System.free(user).filter(System.fqdn == system).first():
                    log.debug("System : %s recipe: %s no longer has access. removing" % (system, recipe.id))
                    recipe.systems.remove(system)
                # Atomic operation to put recipe in Scheduled state
                elif session.connection(Recipe).execute(recipe_table.update(
                        and_(recipe_table.c.id==recipe.id,
                             recipe_table.c.status_id==TaskStatus.by_name(u'Queued').id)),
                        status_id=TaskStatus.by_name(u'Scheduled').id).rowcount == 1:
                    recipe.createRepo()
                    # Even though the above put the recipe in the "Scheduled" state
                    # it did not execute the update_status method.
                    recipe.schedule()
                    # Atomic operation to reserve the system
                    if session.connection(Recipe).execute(system_table.update(
                            and_(system_table.c.id==system.id,
                                 system_table.c.user_id==None)),
                            user_id=recipe.recipeset.job.owner.user_id).rowcount == 1:
                        recipe.system = system
                        recipe.recipeset.lab_controller = system.lab_controller
                        recipe.systems = []
                        # Create the watchdog without an Expire time.
                        log.debug("Created watchdog for recipe id: %s and system: %s" % (recipe.id, system))
                        recipe.watchdog = Watchdog(system=recipe.system)
                        activity = SystemActivity(recipe.recipeset.job.owner,
                                                  "Scheduler",
                                                  "Reserved",
                                                  "User",
                                                  "",
                                                  "%s" % recipe.recipeset.job.owner)
                        system.activity.append(activity)
                        log.info("recipe ID %s moved from Queued to Scheduled" % recipe.id)
                    else:
                        # The system was taken from underneath us. Put recipe
                        # back into queued state and try again.
                        raise BX(_('System %s was stolen from underneath us. will try again.' % system))
                else:
                    #Some other thread beat us. Skip this recipe now.
                    # Depending on scheduler load it should be safe to run multiple
                    # Queued processes.. Also, systems that we don't directly
                    # control, for example, systems at a remote location that can
                    # pull jobs but not have any pushed onto them.  These systems
                    # could take a recipe and put it in running state. Not sure how
                    # to deal with multi-host jobs at remote locations.  May need to
                    # enforce single recipes for remote execution.
                    pass
            session.commit()
        except exceptions.Exception, e:
            session.rollback()
            log.error("Failed to commit due to :%s" % e)
        session.close()
    log.debug("Exiting queued_recipes routine")
    return True
|
properties = {'recipetask':relation(RecipeTask, uselist=False, backref='watchdog'),
|
properties = {'recipetask':relation(RecipeTask, uselist=False),
|
def __repr__(self):
    # The repr is simply the bare package name.
    # NOTE(review): assumes self.package is already a string -- confirm.
    return self.package
|
install_name = args[0]
|
if args: install_name = args[0] else: self.parser.print_help() sys.exit(1)
|
def run(self, *args, **kwargs):
    # Pull authentication and search-filter settings out of the CLI
    # keyword arguments; anything left in kwargs is handled later.
    filter = dict()
    username = kwargs.pop("username", None)
    password = kwargs.pop("password", None)
    filter['types'] = kwargs.pop("type", None)
    filter['packages'] = kwargs.pop("package", None)
    params = kwargs.pop("params", [])
    # "xml" is mandatory: pop without a default raises KeyError if absent.
    xml = kwargs.pop("xml")
|
for vendor in (u'Acer', u'Dell', u'HP'): for model in (u'slow model', u'fast model', u'big model'): for status in (u'Automated', u'Manual', u'Removed'): for type in (u'Machine', u'Virtual', u'Prototype'): for cores in (1, 4): system = data_setup.create_system( vendor=vendor, model=model, status=status, type=type) system.user = data_setup.create_user() system.cpu = Cpu(cores=cores)
|
for cores in [1, 2, 3]: for vendor, model, status, type, user in zip( [u'Acer', u'Dell', u'HP'], [u'slow model', u'fast model', u'big model'], [u'Automated', u'Manual', u'Removed'], [u'Machine', u'Virtual', u'Prototype'], [data_setup.create_user() for _ in range(3)]): system = data_setup.create_system(vendor=vendor, model=model, status=status, type=type) system.user = data_setup.create_user() system.cpu = Cpu(cores=cores)
|
def setUpClass(cls): try: session.begin() # ensure we have lots of systems for vendor in (u'Acer', u'Dell', u'HP'): for model in (u'slow model', u'fast model', u'big model'): for status in (u'Automated', u'Manual', u'Removed'): for type in (u'Machine', u'Virtual', u'Prototype'): for cores in (1, 4): system = data_setup.create_system( vendor=vendor, model=model, status=status, type=type) system.user = data_setup.create_user() system.cpu = Cpu(cores=cores) session.commit() finally: session.close() cls.selenium = sel = cls.get_selenium() sel.start()
|
sel.click('link=Show all')
|
def check_column_sort(self, column):
    """Click the header link of *column* (1-indexed) on the 'Show all'
    listing and verify the table sorts case-insensitively by it."""
    sel = self.selenium
    sel.click('link=Show all')
    sel.wait_for_page_to_load('30000')
    sel.click('//table[@id="widget"]/thead/th[%d]//a[@href]' % column)
    sel.wait_for_page_to_load('30000')
    row_count = int(sel.get_xpath_count(
            '//table[@id="widget"]/tbody/tr/td[%d]' % column))
    cell_values = [sel.get_table('widget.%d.%d' % (row, column - 1)) # zero-indexed
                   for row in range(0, row_count)]
    self.assert_(len(set(cell_values)) > 1) # make sure we're checking something
    assert_sorted(cell_values, key=lambda x: x.lower())
|
|
self.removedirs(mylog['basepath'])
|
self.removedirs('%s/%s' % (mylog['basepath'], mylog['path']))
|
def finish(self):
    """ If Cache is turned on then move the recipes logs to there final place
    """
    # Hard-link the recipe's logs into a temporary archive layout, rsync
    # them to the archive server, then re-point the server at the new
    # location and remove the originals.
    if self.conf.get("CACHE",False):
        tmpdir = tempfile.mkdtemp(dir=self.basepath)
        # Move logs to tmp directory layout
        mylogs = self.hub.recipes.files(self.watchdog['recipe_id'])
        trlogs = []
        for mylog in mylogs:
            server = '%s/%s' % (self.conf.get("ARCHIVE_SERVER"), mylog['filepath'])
            basepath = '%s/%s' % (self.conf.get("ARCHIVE_BASEPATH"), mylog['filepath'])
            mysrc = '%s/%s/%s' % (mylog['basepath'], mylog['path'], mylog['filename'])
            mydst = '%s/%s/%s/%s' % (tmpdir, mylog['filepath'], mylog['path'],
                                     mylog['filename'])
            if not os.path.exists(os.path.dirname(mydst)):
                os.makedirs(os.path.dirname(mydst))
            # Hard links avoid copying; a failed link just skips that log.
            try:
                os.link(mysrc,mydst)
                trlogs.append(mylog)
            except OSError:
                pass
        # rsync the logs to there new home
        rc = self.rsync('%s/' % tmpdir, '%s' % self.conf.get("ARCHIVE_RSYNC"))
        if rc == 0:
            # if the logs have been transfered then tell the server the new location
            for mylog in trlogs:
                server = '%s/%s' % (self.conf.get("ARCHIVE_SERVER"), mylog['filepath'])
                basepath = '%s/%s' % (self.conf.get("ARCHIVE_BASEPATH"), mylog['filepath'])
                mysrc = '%s/%s/%s' % (mylog['basepath'], mylog['path'], mylog['filename'])
                self.hub.recipes.change_file(mylog['tid'], server, basepath)
                self.rm(mysrc)
                try:
                    self.removedirs(mylog['basepath'])
                except OSError:
                    # Its ok if it fails, dir may not be empty yet
                    pass
        # get rid of our tmpdir.
        shutil.rmtree(tmpdir)
|
except exceptions.InvalidRequestError:
|
except InvalidRequestError:
|
def process_taskinfo(self, raw_taskinfo):
    # Parse the task's testinfo description payload into a structured
    # testinfo object.
    tinfo = testinfo.parse_string(raw_taskinfo['desc'])
|
output = open('/tmp/beakerlib_journal.%s' % id, 'wb') output.write(newdoc.toxml().encode('utf-8')) output.close()
|
journal = '/tmp/beakerlib-%s/journal.xml' % id try: output = open(journal, 'wb') output.write(newdoc.toxml().encode('utf-8')) output.close() except IOError: printLog('Failed to save journal to %s' % journal, 'BEAKERLIB_WARNING') sys.exit(1)
|
def saveJournal(newdoc, id):
    """Serialise *newdoc* (an xml.dom document) as UTF-8 XML to
    /tmp/beakerlib_journal.<id>.

    Uses a context manager so the file handle is always closed, even if
    toxml()/write() raises partway through.
    """
    with open('/tmp/beakerlib_journal.%s' % id, 'wb') as output:
        output.write(newdoc.toxml().encode('utf-8'))
|
jrnl = xml.dom.minidom.parse("/tmp/beakerlib_journal.%s" % id )
|
jrnl = xml.dom.minidom.parse("/tmp/beakerlib-%s/journal.xml" % id )
|
def _openJournal(id): jrnl = xml.dom.minidom.parse("/tmp/beakerlib_journal.%s" % id ) return jrnl
|
print >> sys.stderr, score
|
def need(args):
    """Exit with status 1 when any required value in *args* is None.

    Used as a guard for mandatory command-line/environment inputs; no-op
    when everything is present.
    """
    if None in args:
        # Parenthesised print is valid in both Python 2 and 3 for a
        # single argument, removing the py2-only statement syntax.
        print("need Blargh!")
        sys.exit(1)
|
|
print xml.dom.minidom.parseString(myxml).toprettyxml()
|
print parseString(myxml).toprettyxml()
|
def run(self, *args, **kwargs):
    # Extract credentials and the output-format flag from the CLI
    # keyword arguments.
    username = kwargs.pop("username", None)
    password = kwargs.pop("password", None)
    prettyxml = kwargs.pop("prettyxml", None)
|
@paginate('tasks',default_order='-id', limit=30)
|
@paginate('tasks',default_order='-id', limit=30, allow_limit_override=True)
|
def executed(self, hidden={}, **kw):
    """Render the executed-tasks page: run the shared search and attach
    the form widgets the template expects."""
    result = self._do_search(hidden, **kw)
    result.update(form=self.task_form,
                  action='./do_search',
                  value=None,
                  options=dict())
    return result
|
logs = 1,
|
def _do_search(self, hidden={}, **kw):
    # When a recipe_id is supplied, restrict the task listing to that
    # recipe's tasks.
    if 'recipe_id' in kw:
        #most likely we are coming here from a LinkRemoteFunction in recipe_widgets
        tasks = RecipeTask.query().join(['recipe']).filter(Recipe.id == kw['recipe_id'])
|
|
logs = 1)
|
)
|
def _do_search(self, hidden={}, **kw):
    # When a recipe_id is supplied, restrict the task listing to that
    # recipe's tasks.
    if 'recipe_id' in kw:
        #most likely we are coming here from a LinkRemoteFunction in recipe_widgets
        tasks = RecipeTask.query().join(['recipe']).filter(Recipe.id == kw['recipe_id'])
|
Group.systems==None,
|
System.groups==None,
|
@classmethod
def available(cls, user, systems=None):
    """
    Builds on all.  Only systems which this user has permission to reserve.
    If a system is loaned then its only available for that person.
    """
    if systems:
        query = systems
    else:
        query = System.all(user)
    return query.filter(and_(
                System.status==SystemStatus.by_name(u'Working'),
                or_(and_(System.owner==user,
                         System.loaned==None),
                    System.loaned==user,
                    and_(System.shared==True,
                         # BUG FIX: was Group.systems==None, which tests the
                         # wrong side of the relationship; a shared system is
                         # publicly available only when the *system* belongs
                         # to no group.
                         System.groups==None,
                         System.loaned==None
                        ),
                    and_(System.shared==True,
                         System.loaned==None,
                         User.user_id==user.user_id
                        )
                   )
                ))
|
@paginate('list')
|
@paginate('list',default_order='fqdn',limit=10,allow_limit_override=True)
|
def by_name(self, name):
    """Return a dict of arch names matching *name* (case-insensitive)."""
    matches = Arch.list_by_name(name.lower())
    return dict(arches=[match.arch for match in matches])
|
systems = System.all(identity.current.user).join('devices').filter_by(id=id)
|
systems = System.all(identity.current.user).join('devices').filter_by(id=id).distinct()
|
def view(self, id):
    """Show every system (visible to the current user) that carries the
    device with primary key *id*."""
    device = session.query(Device).get(id)
    # BUG FIX: distinct() prevents duplicate System rows introduced by the
    # join when a system has the same device attached more than once,
    # which also inflated object_count.
    systems = System.all(identity.current.user).join('devices').filter_by(id=id).distinct()
    device_grid = myPaginateDataGrid(fields=[
                    ('System', lambda x: make_link("/view/%s" % x.fqdn, x.fqdn)),
                    ('Description', lambda x: device.description),
                  ])
    return dict(title="", grid=device_grid, search_bar=None,
                object_count=systems.count(), list=systems)
|
whiteboard_options = self.get_whiteboard_options(filter) whiteboard_options = [(w[0],w[0]) for w in whiteboard_options]
|
def index(self, **kw):
    # Reset per-request rendering state for the matrix view.
    self.col_call = 0
    self.max_cols = 0
    self.job_ids = []
    matrix_options = {}
    # Optional whiteboard substring filter from the query string.
    if 'whiteboard_filter' in kw:
        filter = kw['whiteboard_filter']
    else:
        filter = None
|
|
require = self.doc.createElement('distro_%s' % key.lower())
|
if key in ['ARCH', 'FAMILY', 'NAME', 'VARIANT', 'METHOD']: require = self.doc.createElement('distro_%s' % key.lower()) require.setAttribute('value', '%s' % value) else: require = self.doc.createElement('distro_tag') require.setAttribute('value', '%s' % key)
|
def handle_distroRequires(self, requires):
    """Turn a legacy 'KEY OP VALUE' requires string into a
    <distro_<key> op="..." value="..."/> DOM element.

    Returns None when the string is not a three-token expression.
    """
    requires_search = re.compile(r'([^\s]+)\s+([^\s]+)\s+([^\s]+)')
    if not requires_search.match(requires):
        return None
    _, key, op, value, _ = requires_search.split(requires)
    element = self.doc.createElement('distro_%s' % key.lower())
    element.setAttribute('op', '%s' % op)
    element.setAttribute('value', '%s' % value)
    return element
|
require.setAttribute('value', '%s' % value)
|
def handle_distroRequires(self, requires):
    """Convert a legacy 'KEY OP VALUE' requires string into a
    <distro_<key> op="..." value="..."/> DOM element; returns None when
    the string does not contain three whitespace-separated tokens."""
    require = None
    requires_search = re.compile(r'([^\s]+)\s+([^\s]+)\s+([^\s]+)')
    if requires_search.match(requires):
        # split() on the matched pattern yields ['', key, op, value, ''].
        (dummy, key, op, value, dummy) = requires_search.split(requires)
        require = self.doc.createElement('distro_%s' % key.lower())
        require.setAttribute('op', '%s' % op)
        require.setAttribute('value', '%s' % value)
    return require
|
|
def to_xml(self, clone=False):
|
def to_xml(self, clone=False, from_recipeset=False):
|
def to_xml(self, clone=False):
    """Serialise this guest recipe; guest-specific attributes are set
    here and the shared recipe serialisation is delegated to Recipe."""
    node = self.doc.createElement("guestrecipe")
    node.setAttribute("guestname", "%s" % self.guestname)
    node.setAttribute("guestargs", "%s" % self.guestargs)
    return Recipe.to_xml(self, node, clone)
|
return Recipe.to_xml(self,recipe,clone)
|
return Recipe.to_xml(self, recipe, clone, from_recipeset)
|
def to_xml(self, clone=False):
    # Build the <guestrecipe> element with its guest-specific attributes,
    # then delegate the common recipe serialisation to the base class.
    recipe = self.doc.createElement("guestrecipe")
    recipe.setAttribute("guestname", "%s" % self.guestname)
    recipe.setAttribute("guestargs", "%s" % self.guestargs)
    return Recipe.to_xml(self,recipe,clone)
|
def to_xml(self, clone=False):
|
def to_xml(self, clone=False, from_recipeset=False):
|
def to_xml(self, clone=False):
    """Serialise this recipe, embedding each guest recipe as a child
    element before delegating to the shared Recipe serialisation."""
    node = self.doc.createElement("recipe")
    for guest in self.guests:
        node.appendChild(guest.to_xml(clone))
    return Recipe.to_xml(self, node, clone)
|
return Recipe.to_xml(self,recipe,clone)
|
return Recipe.to_xml(self, recipe, clone, from_recipeset)
|
def to_xml(self, clone=False):
    # Build the <recipe> element, embed each guest recipe as a child,
    # then delegate the common recipe serialisation to the base class.
    recipe = self.doc.createElement("recipe")
    for guest in self.guests:
        recipe.appendChild(guest.to_xml(clone))
    return Recipe.to_xml(self,recipe,clone)
|
def __init__(self, is_default, tag, *args, **kw):
|
def __init__(self, tag, is_default=False, *args, **kw):
|
def __init__(self, is_default, tag, *args, **kw):
    # Record whether this tag is the default before constructing the base
    # object.  NOTE(review): flushing the session inside __init__ is
    # unusual -- confirm callers rely on the immediate flush.
    self.set_default_val(is_default)
    super(RetentionTag, self).__init__(tag, *args, **kw)
    session.flush()
|
super(RetentionTag, self).__init__(tag, *args, **kw)
|
super(RetentionTag, self).__init__(tag, **kw)
|
def __init__(self, is_default, tag, *args, **kw):
    # Record whether this tag is the default before constructing the base
    # object.  NOTE(review): flushing the session inside __init__ is
    # unusual -- confirm callers rely on the immediate flush.
    self.set_default_val(is_default)
    super(RetentionTag, self).__init__(tag, *args, **kw)
    session.flush()
|
start_time = report_time finish_time = report_time
|
def time(self): return self.report_time start_time = property(time) finish_time = property(time)
|
def no_value(self): return None
|
if 'install_name' in filter:
|
if 'install_name' in filter and filter['install_name']:
|
def filter(self, filter=None): """ XMLRPC method to query all tasks that apply to this distro """ if 'install_name' in filter: try: distro = Distro.by_install_name(filter['install_name']) except InvalidRequestError, err: raise BX(_('Invalid Distro: %s ' % filter['install_name'])) tasks = distro.tasks() else: tasks = Task.query()
|
m = re.match('^(\d+)', value)
|
m = re.match('^(\d+)(.*)$', value)
|
def handle_testtime(self, key, value): if self.info.avg_test_time: self.handle_error("%s field already defined"%key) return
|
if re.match('.*m$', value):
|
suffix = m.group(2) if suffix == '': pass elif suffix == 'm':
|
def handle_testtime(self, key, value): if self.info.avg_test_time: self.handle_error("%s field already defined"%key) return
|
elif re.match('.*h$', value):
|
elif suffix == 'h':
|
def handle_testtime(self, key, value): if self.info.avg_test_time: self.handle_error("%s field already defined"%key) return
|
warn = 'No Systems compatible with distro %s' % distro_install_name
|
warn = 'No Systems compatible with distro %s' % distro.install_name
|
def reserve_link(x,distro): if x.is_free(): return make_link("/reserveworkflow/reserve?system_id=%s&distro_id=%s" % (Utility.get_correct_system_column(x).id,distro), 'Reserve Now') else: return make_link("/reserveworkflow/reserve?system_id=%s&distro_id=%s" % (Utility.get_correct_system_column(x).id,distro), 'Queue Reservation')
|
distros = Distro.query()
|
distros = Distro.query().join('lab_controller_assocs')
|
def by_filter(cls, filter): """ <distro> <And> <Require name='ARCH' operator='=' value='i386'/> <Require name='FAMILY' operator='=' value='rhelserver5'/> <Require name='TAG' operator='=' value='released'/> </And> </distro> """ from needpropertyxml import ElementWrapper import xmltramp #FIXME Should validate XML before proceeding. queries = [] joins = [] for child in ElementWrapper(xmltramp.parse(filter)): if callable(getattr(child, 'filter', None)): (join, query) = child.filter() queries.append(query) joins.extend(join) distros = Distro.query() if joins: distros = distros.filter(and_(*joins)) if queries: distros = distros.filter(and_(*queries)) return distros.order_by('-date_created')
|
if not queued_recipes() and not scheduled_recipes():
|
queued = queued_recipes() scheduled = scheduled_recipes(): if not queued and not scheduled:
|
def schedule(): bkr.server.scheduler._start_scheduler() log.debug("starting new recipes Thread") # Create new_recipes Thread add_onetime_task(action=new_recipes_loop, args=[lambda:datetime.now()]) log.debug("starting processed recipes Thread") # Create processed_recipes Thread add_onetime_task(action=processed_recipesets_loop, args=[lambda:datetime.now()], initialdelay=5) #log.debug("starting queued recipes Thread") # Create queued_recipes Thread #add_onetime_task(action=queued_recipes_loop, # args=[lambda:datetime.now()], # initialdelay=10) log.debug("starting scheduled recipes Thread") # Run scheduled_recipes in this process while True: if not queued_recipes() and not scheduled_recipes(): time.sleep(20)
|
def install_start(self):
|
def install_start(self, system_name=None):
|
def install_start(self): """ Called from %pre of the test machine. We record a start result on the scheduler and extend the watchdog This is a little ugly.. but better than putting this logic in kickstart """ self.logger.info("install_start") # extend watchdog by 3 hours 60 * 60 * 3 kill_time = 10800 # look up system recipe based on hostname... # get first task task = xmltramp.parse(self.get_recipe()).recipeSet.recipe.task() # Only do this if first task is Running if task['status'] == 'Running': self.logger.info("Extending watchdog for task %s" % task['id']) self.hub.recipes.tasks.extend(task['id'], kill_time) self.logger.info("Recording /start for task %s" % task['id']) self.hub.recipes.tasks.result(task['id'], 'pass_', '/start', 0, 'Install Started') return True return False
|
task = xmltramp.parse(self.get_recipe()).recipeSet.recipe.task()
|
task = xmltramp.parse(self.get_recipe(system_name)).recipeSet.recipe.task()
|
def install_start(self): """ Called from %pre of the test machine. We record a start result on the scheduler and extend the watchdog This is a little ugly.. but better than putting this logic in kickstart """ self.logger.info("install_start") # extend watchdog by 3 hours 60 * 60 * 3 kill_time = 10800 # look up system recipe based on hostname... # get first task task = xmltramp.parse(self.get_recipe()).recipeSet.recipe.task() # Only do this if first task is Running if task['status'] == 'Running': self.logger.info("Extending watchdog for task %s" % task['id']) self.hub.recipes.tasks.extend(task['id'], kill_time) self.logger.info("Recording /start for task %s" % task['id']) self.hub.recipes.tasks.result(task['id'], 'pass_', '/start', 0, 'Install Started') return True return False
|
parser.add_option("-t","--threshold", default=3,
|
parser.add_option("-t","--threshold", default=3, type=int,
|
def get_parser(): usage = "usage: %prog [options]" parser = OptionParser(usage, description=__description__,version=__version__) parser.add_option("-s","--service", default='WEBUI', help="Report on this service (WEBUI,SCHEDULER), Default is WEBUI") parser.add_option("-t","--threshold", default=3, help="This is the number of days after a reservation of a machine takes place, that the nag emails will commence") parser.add_option("-c","--config-file",dest="configfile",default=None) return parser
|
if date_reserved + threshold_delta > date_now:
|
if date_reserved + threshold_delta < date_now:
|
def identify_nags(threshold, service): sys_activities = System.reserved_via(service) for activity in sys_activities: date_reserved = activity.created date_now = datetime.fromtimestamp(time.time()) threshold_delta = timedelta(days=threshold) if date_reserved + threshold_delta > date_now: #Let's send them a reminder system = System.query().filter_by(id=activity.system_id).one() recipient = system.user.email_address subject = "[Beaker Reminder]: System %s" % system.fqdn body = "You have had this System since %s, please return it if you are no longer using it" % activity.created sender = config.get('beaker_email') mail.send_mail(sender=sender,to=recipient,subject=subject,body=body)
|
task = xmltramp.parse(self.get_recipe(system_name)).recipeSet.recipe.task()
|
recipeset = xmltramp.parse(self.get_recipe(system_name)).recipeSet try: task = recipeset.recipe.task() except AttributeError: task = recipeset.guestrecipe.task()
|
def install_start(self, system_name=None): """ Called from %pre of the test machine. We record a start result on the scheduler and extend the watchdog This is a little ugly.. but better than putting this logic in kickstart """ self.logger.info("install_start") # extend watchdog by 3 hours 60 * 60 * 3 kill_time = 10800 # look up system recipe based on hostname... # get first task task = xmltramp.parse(self.get_recipe(system_name)).recipeSet.recipe.task() # Only do this if first task is Running if task['status'] == 'Running': self.logger.info("Extending watchdog for task %s" % task['id']) self.hub.recipes.tasks.extend(task['id'], kill_time) self.logger.info("Recording /start for task %s" % task['id']) self.hub.recipes.tasks.result(task['id'], 'pass_', '/start', 0, 'Install Started') return True return False
|
job.appendChild(self.node("whiteboard", self.recipeset.job.whiteboard))
|
job.appendChild(self.node("whiteboard", self.recipeset.job.whiteboard or ''))
|
def to_xml(self, recipe, clone=False, from_recipeset=False, from_machine=False): if not clone: recipe.setAttribute("id", "%s" % self.id) recipe.setAttribute("job_id", "%s" % self.recipeset.job_id) recipe.setAttribute("recipe_set_id", "%s" % self.recipe_set_id) recipe.setAttribute("whiteboard", "%s" % self.whiteboard and self.whiteboard or '') recipe.setAttribute("role", "%s" % self.role and self.role or 'RECIPE_MEMBERS') if self.kickstart: kickstart = self.doc.createElement("kickstart") text = self.doc.createCDATASection('%s' % self.kickstart) kickstart.appendChild(text) recipe.appendChild(kickstart) recipe.setAttribute("ks_meta", "%s" % self.ks_meta and self.ks_meta or '') recipe.setAttribute("kernel_options", "%s" % self.kernel_options and self.kernel_options or '') recipe.setAttribute("kernel_options_post", "%s" % self.kernel_options_post and self.kernel_options_post or '') if self.duration and not clone: recipe.setAttribute("duration", "%s" % self.duration) if self.result and not clone: recipe.setAttribute("result", "%s" % self.result) if self.status and not clone: recipe.setAttribute("status", "%s" % self.status) if self.distro and not clone: recipe.setAttribute("distro", "%s" % self.distro.name) recipe.setAttribute("install_name", "%s" % self.distro.install_name) recipe.setAttribute("arch", "%s" % self.distro.arch) recipe.setAttribute("family", "%s" % self.distro.osversion.osmajor) recipe.setAttribute("variant", "%s" % self.distro.variant) if self.system and not clone: recipe.setAttribute("system", "%s" % self.system) packages = self.doc.createElement("packages") if self.custom_packages: for package in self.custom_packages: packages.appendChild(package.to_xml()) recipe.appendChild(packages) if self.roles and not clone: roles = self.doc.createElement("roles") for role in self.roles.to_xml(): roles.appendChild(role) recipe.appendChild(roles) repos = self.doc.createElement("repos") for repo in self.repos: repos.appendChild(repo.to_xml()) 
recipe.appendChild(repos) drs = xml.dom.minidom.parseString(self.distro_requires) hrs = xml.dom.minidom.parseString(self.host_requires) for dr in drs.getElementsByTagName("distroRequires"): recipe.appendChild(dr) hostRequires = self.doc.createElement("hostRequires") for hr in hrs.getElementsByTagName("hostRequires"): for child in hr.childNodes: hostRequires.appendChild(child) recipe.appendChild(hostRequires) for t in self.tasks: recipe.appendChild(t.to_xml(clone)) if not from_recipeset and not from_machine: recipeSet = self.doc.createElement("recipeSet") recipeSet.appendChild(recipe) job = self.doc.createElement("job") if not clone: job.setAttribute("owner", "%s" % self.recipeset.job.owner.email_address) job.appendChild(self.node("whiteboard", self.recipeset.job.whiteboard)) job.appendChild(recipeSet) return job return recipe
|
.join(['recipes','status'])\ .filter(Recipe.status==TaskStatus.by_name(u'Processed'))
|
.join(['status'])\ .filter(RecipeSet.status==TaskStatus.by_name(u'Processed'))
|
def processed_recipesets(*args): recipesets = RecipeSet.query()\ .join(['recipes','status'])\ .filter(Recipe.status==TaskStatus.by_name(u'Processed')) if not recipesets.count(): return False log.debug("Entering processed_recipes routine") for recipeset in recipesets: session.begin() try: bad_l_controllers = set() # We only need to do this processing on multi-host recipes if len(recipeset.recipes) == 1: log.info("recipe ID %s moved from Processed to Queued" % recipeset.recipes[0].id) recipeset.recipes[0].queue() else: # Find all the lab controllers that this recipeset may run. rsl_controllers = set(LabController.query()\ .join(['systems', 'queued_recipes', 'recipeset'])\ .filter(RecipeSet.id==recipeset.id).all()) # Any lab controllers that are not associated to all recipes in the # recipe set must have those systems on that lab controller removed # from any recipes. For multi-host all recipes must be schedulable # on one lab controller for recipe in recipeset.recipes: rl_controllers = set(LabController.query()\ .join(['systems', 'queued_recipes'])\ .filter(Recipe.id==recipe.id).all()) bad_l_controllers = bad_l_controllers.union(rl_controllers.difference(rsl_controllers)) for l_controller in rsl_controllers: enough_systems = False for recipe in recipeset.recipes: systems = recipe.dyn_systems.filter( System.lab_controller==l_controller ).all() if len(systems) < len(recipeset.recipes): break else: # There are enough choices We don't need to worry about dead # locks enough_systems = True if not enough_systems: log.debug("recipe: %s labController:%s entering not enough systems logic" % (recipe.id, l_controller)) # Eliminate bad choices. 
for recipe in recipeset.recipes_orderby(l_controller)[:]: for tmprecipe in recipeset.recipes: systemsa = set(recipe.dyn_systems.filter( System.lab_controller==l_controller ).all()) systemsb = set(tmprecipe.dyn_systems.filter( System.lab_controller==l_controller ).all()) if systemsa.difference(systemsb): for rem_system in systemsa.intersection(systemsb): log.debug("recipe: %s labController:%s Removing system %s" % (recipe.id, l_controller, rem_system)) recipe.systems.remove(rem_system) for recipe in recipeset.recipes: count = 0 systems = recipe.dyn_systems.filter( System.lab_controller==l_controller ).all() for tmprecipe in recipeset.recipes: tmpsystems = tmprecipe.dyn_systems.filter( System.lab_controller==l_controller ).all() if recipe != tmprecipe and \ systems == tmpsystems: count += 1 if len(systems) <= count: # Remove all systems from this lc on this rs. log.debug("recipe: %s labController:%s %s <= %s Removing lab" % (recipe.id, l_controller, len(systems), count)) bad_l_controllers = bad_l_controllers.union([l_controller]) # Remove systems that are on bad lab controllers # This means one of the recipes can be fullfilled on a lab controller # but not the rest of the recipes in the recipeSet. # This could very well remove ALL systems from all recipes in this # recipeSet. If that happens then the recipeSet cannot be scheduled # and will be aborted by the abort process. 
for recipe in recipeset.recipes: for l_controller in bad_l_controllers: systems = (recipe.dyn_systems.filter( System.lab_controller==l_controller ).all() ) log.debug("recipe: %s labController: %s Removing lab" % (recipe.id, l_controller)) for system in systems: log.debug("recipe: %s labController: %s Removing system %s" % (recipe.id, l_controller, system)) recipe.systems.remove(system) if recipe.systems: # Set status to Queued log.info("recipe: %s moved from Processed to Queued" % recipe.id) recipe.queue() else: # Set status to Aborted log.info("recipe ID %s moved from Processed to Aborted" % recipe.id) recipe.recipeset.abort('Recipe ID %s does not match any systems' % recipe.id) session.commit() except exceptions.Exception, e: session.rollback() log.error("Failed to commit due to :%s" % e) session.close() log.debug("Exiting processed_recipes routine") return True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.