def get_package_list(self):
    """
    Returns a list of all required packages.
    """
    os_version = self.os_version # OS(type=LINUX, distro=UBUNTU, release='14.04')
    self.vprint('os_version:', os_version)

    # Lookup legacy package list.
    # OS: [package1, package2, ...],
    req_packages1 = self.required_system_packages
    if req_packages1:
        deprecation('The required_system_packages attribute is deprecated, '
            'use the packager_system_packages property instead.')

    # Lookup new package list.
    # OS: [package1, package2, ...],
    req_packages2 = self.packager_system_packages

    patterns = [
        (os_version.type, os_version.distro, os_version.release),
        (os_version.distro, os_version.release),
        (os_version.type, os_version.distro),
        (os_version.distro,),
        os_version.distro,
    ]
    self.vprint('req_packages1:', req_packages1)
    self.vprint('req_packages2:', req_packages2)
    package_list = None
    found = False
    for pattern in patterns:
        self.vprint('pattern:', pattern)
        for req_packages in (req_packages1, req_packages2):
            if pattern in req_packages:
                package_list = req_packages[pattern]
                found = True
                break
        if found:
            # Stop at the first, most specific, match so less specific
            # patterns later in the list don't overwrite it.
            break
    if not found:
        print('Warning: No operating system pattern found for %s' % (os_version,))
    self.vprint('package_list:', package_list)
    return package_list

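# A minimal sketch (not burlap's actual data) of the packager_system_packages
# mapping that get_package_list() consults, assuming the OS constants are
# simple strings. Patterns are tried most-specific-first, so the 14.04 entry
# wins here:
LINUX, UBUNTU = 'linux', 'ubuntu' # hypothetical stand-ins for burlap's constants

packager_system_packages = {
    (LINUX, UBUNTU, '14.04'): ['libpq-dev', 'postgresql-9.3'],
    (UBUNTU, '14.04'): ['libpq-dev', 'postgresql'],
    UBUNTU: ['libpq-dev'],
}

for pattern in [(LINUX, UBUNTU, '14.04'), (UBUNTU, '14.04'), (LINUX, UBUNTU), (UBUNTU,), UBUNTU]:
    if pattern in packager_system_packages:
        print(packager_system_packages[pattern]) # ['libpq-dev', 'postgresql-9.3']
        break
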
def install_packages(self):
    """
    Installs all required packages listed for this satchel.

    Normally called indirectly by running packager.configure().
    """
    os_version = self.os_version
    package_list = self.get_package_list()
    if package_list:
        package_list_str = ' '.join(package_list)
        if os_version.distro in (UBUNTU, DEBIAN):
            self.sudo('apt-get update --fix-missing; '
                'DEBIAN_FRONTEND=noninteractive apt-get install --yes %s' % package_list_str)
        elif os_version.distro == FEDORA:
            self.sudo('yum install --assumeyes %s' % package_list_str)
        else:
            raise NotImplementedError('Unknown distro: %s' % os_version.distro)

def run_on_all_sites(self, cmd, *args, **kwargs):
    """
    Like run(), but re-runs the command for each site in the current role.
    """
    r = self.local_renderer
    for _site, _data in self.iter_sites():
        r.env.SITE = _site
        with self.settings(warn_only=True):
            r.run('export SITE={SITE}; export ROLE={ROLE}; ' + cmd)

def file_contains(self, *args, **kwargs):
    """
    Returns True if the given remote file contains the given text.

    Wraps fabric.contrib.files.contains:
    http://docs.fabfile.org/en/1.13/api/contrib/files.html#fabric.contrib.files.contains
    """
    from fabric.contrib.files import contains
    return contains(*args, **kwargs)

def record_manifest(self):
    """
    Returns a dictionary representing a serialized state of the service.
    """
    manifest = get_component_settings(prefixes=[self.name])

    # Record a signature of each template so we know to redeploy when they change.
    for template in self.get_templates():
        # Dereference brace notation. e.g. convert '{var}' to `env[var]`.
        if template and template.startswith('{') and template.endswith('}'):
            template = self.env[template[1:-1]]
        if not template:
            continue
        if template.startswith('%s/' % self.name):
            fqfn = self.find_template(template)
        else:
            fqfn = self.find_template('%s/%s' % (self.name, template))
        assert fqfn, 'Unable to find template: %s/%s' % (self.name, template)
        manifest['_%s' % template] = get_file_hash(fqfn)

    for tracker in self.get_trackers():
        manifest['_tracker_%s' % tracker.get_natural_key_hash()] = tracker.get_thumbprint()

    if self.verbose:
        pprint(manifest, indent=4)

    return manifest

def has_changes(self):
    """
    Returns true if at least one tracker detects a change.
    """
    lm = self.last_manifest
    for tracker in self.get_trackers():
        last_thumbprint = lm['_tracker_%s' % tracker.get_natural_key_hash()]
        if tracker.is_changed(last_thumbprint):
            return True
    return False

def configure(self):
    """
    The standard method called to apply functionality when the manifest changes.
    """
    lm = self.last_manifest
    for tracker in self.get_trackers():
        self.vprint('Checking tracker:', tracker)
        last_thumbprint = lm['_tracker_%s' % tracker.get_natural_key_hash()]
        self.vprint('last thumbprint:', last_thumbprint)
        has_changed = tracker.is_changed(last_thumbprint)
        self.vprint('Tracker changed:', has_changed)
        if has_changed:
            self.vprint('Change detected!')
            tracker.act()

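# A minimal sketch of the tracker interface assumed by record_manifest(),
# has_changes(), and configure() above. The method names come from those call
# sites; the file-mtime thumbprint below is purely illustrative, not burlap's
# actual tracker implementation.
import hashlib
import os

class FileTracker(object):

    def __init__(self, path, action):
        self.path = path
        self.action = action

    def get_natural_key_hash(self):
        # Stable identifier used as the manifest key.
        return hashlib.md5(self.path.encode('utf-8')).hexdigest()

    def get_thumbprint(self):
        # Any value that changes when the tracked resource changes.
        return os.path.getmtime(self.path) if os.path.isfile(self.path) else None

    def is_changed(self, last_thumbprint):
        return self.get_thumbprint() != last_thumbprint

    def act(self):
        # Invoked by configure() when a change is detected.
        self.action()
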
def user_exists(name):
    """
    Check if a PostgreSQL user exists.
    """
    with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
        res = _run_as_pg('''psql -t -A -c "SELECT COUNT(*) FROM pg_user WHERE usename = '%(name)s';"''' % locals())
    return (res == "1")

def create_user(name, password, superuser=False, createdb=False,
                createrole=False, inherit=True, login=True,
                connection_limit=None, encrypted_password=False):
    """
    Create a PostgreSQL user.

    Example::

        import burlap

        # Create DB user if it does not exist
        if not burlap.postgres.user_exists('dbuser'):
            burlap.postgres.create_user('dbuser', password='somerandomstring')

        # Create DB user with custom options
        burlap.postgres.create_user('dbuser2', password='s3cr3t',
            createdb=True, createrole=True, connection_limit=20)

    """
    options = [
        'SUPERUSER' if superuser else 'NOSUPERUSER',
        'CREATEDB' if createdb else 'NOCREATEDB',
        'CREATEROLE' if createrole else 'NOCREATEROLE',
        'INHERIT' if inherit else 'NOINHERIT',
        'LOGIN' if login else 'NOLOGIN',
    ]
    if connection_limit is not None:
        options.append('CONNECTION LIMIT %d' % connection_limit)
    password_type = 'ENCRYPTED' if encrypted_password else 'UNENCRYPTED'
    options.append("%s PASSWORD '%s'" % (password_type, password))
    options = ' '.join(options)
    _run_as_pg('''psql -c "CREATE USER %(name)s %(options)s;"''' % locals())

def write_pgpass(self, name=None, site=None, use_sudo=0, root=0):
    """
    Write the file used to store login credentials for PostgreSQL.
    """
    r = self.database_renderer(name=name, site=site)
    root = int(root)
    use_sudo = int(use_sudo)

    r.run('touch {pgpass_path}')
    if '~' in r.env.pgpass_path:
        r.run('chmod {pgpass_chmod} {pgpass_path}')
    else:
        r.sudo('chmod {pgpass_chmod} {pgpass_path}')

    if root:
        r.env.shell_username = r.env.get('db_root_username', 'postgres')
        r.env.shell_password = r.env.get('db_root_password', 'password')
    else:
        r.env.shell_username = r.env.db_user
        r.env.shell_password = r.env.db_password

    r.append(
        '{db_host}:{port}:*:{shell_username}:{shell_password}',
        r.env.pgpass_path,
        use_sudo=use_sudo)

def dumpload(self, site=None, role=None):
    """
    Dumps and loads a database snapshot simultaneously.
    Requires that the destination server has direct database access
    to the source server.

    This is better than a serial dump+load when:

    1. The network connection is reliable.
    2. You don't need to save the dump file.

    The benefits of this over a dump+load are:

    1. Usually runs faster, since the load and dump happen in parallel.
    2. Usually takes up less disk space since no separate dump file is downloaded.
    """
    r = self.database_renderer(site=site, role=role)
    r.run('pg_dump -c --host={host_string} --username={db_user} '
        '--blobs --format=c {db_name} -n public | '
        'pg_restore -U {db_postgresql_postgres_user} --create '
        '--dbname={db_name}')

def exists(self, name='default', site=None, use_root=False):
    """
    Returns true if a database with the given name exists. False otherwise.
    """
    r = self.database_renderer(name=name, site=site)
    if int(use_root):
        kwargs = dict(
            db_user=r.env.get('db_root_username', 'postgres'),
            db_password=r.env.get('db_root_password', 'password'),
            db_host=r.env.db_host,
            db_name=r.env.db_name,
        )
        r.env.update(kwargs)

    # Set pgpass file.
    if r.env.db_password:
        self.write_pgpass(name=name, root=use_root)

#     cmd = ('psql --username={db_user} --no-password -l '
#         '--host={db_host} --dbname={db_name}'
#         '| grep {db_name} | wc -l').format(**env)

    ret = None
    with settings(warn_only=True):
        ret = r.run('psql --username={db_user} --host={db_host} -l '
            '| grep {db_name} | wc -l')
    if ret is not None:
        if 'password authentication failed' in ret:
            ret = False
        else:
            ret = int(ret) >= 1

    if ret is not None:
        print('%s database on site %s %s exist' % (name, self.genv.SITE, 'DOES' if ret else 'DOES NOT'))

    return ret

def load(self, dump_fn='', prep_only=0, force_upload=0, from_local=0, name=None,
         site=None, dest_dir=None, force_host=None):
    """
    Restores a database snapshot onto the target database server.

    If prep_only=1, commands for preparing the load will be generated,
    but not the command to finally load the snapshot.
    """
    r = self.database_renderer(name=name, site=site)

    # Render the snapshot filename.
    r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir)

    from_local = int(from_local)
    prep_only = int(prep_only)

    missing_local_dump_error = r.format("Database dump file {dump_fn} does not exist.")

    # Copy snapshot file to target.
    if self.is_local:
        r.env.remote_dump_fn = dump_fn
    else:
        r.env.remote_dump_fn = '/tmp/' + os.path.split(r.env.dump_fn)[-1]

    if not prep_only and not self.is_local:
        if not self.dryrun:
            assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
        #r.pc('Uploading PostgreSQL database snapshot...')
#         r.put(
#             local_path=r.env.dump_fn,
#             remote_path=r.env.remote_dump_fn)
        #r.local('rsync -rvz --progress --no-p --no-g '
            #'--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" '
            #'{dump_fn} {user}@{host_string}:{remote_dump_fn}')
        self.upload_snapshot(name=name, site=site, local_dump_fn=r.env.dump_fn, remote_dump_fn=r.env.remote_dump_fn)

    if self.is_local and not prep_only and not self.dryrun:
        assert os.path.isfile(r.env.dump_fn), missing_local_dump_error

    if force_host:
        r.env.db_host = force_host

    with settings(warn_only=True):
        r.sudo('dropdb --if-exists --no-password --user={db_root_username} --host={db_host} {db_name}',
            user=r.env.postgres_user)

    r.sudo('psql --no-password --user={db_root_username} --host={db_host} -c "CREATE DATABASE {db_name};"',
        user=r.env.postgres_user)

    with settings(warn_only=True):
        if r.env.engine == POSTGIS:
            r.sudo('psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} '
                '--command="CREATE EXTENSION postgis;"', user=r.env.postgres_user)
            r.sudo('psql --user={db_root_username} --no-password --dbname={db_name} --host={db_host} '
                '--command="CREATE EXTENSION postgis_topology;"', user=r.env.postgres_user)

    with settings(warn_only=True):
        r.sudo('psql --user={db_root_username} --host={db_host} -c "REASSIGN OWNED BY {db_user} TO {db_root_username};"',
            user=r.env.postgres_user)

    with settings(warn_only=True):
        r.sudo('psql --user={db_root_username} --host={db_host} -c "DROP OWNED BY {db_user} CASCADE;"',
            user=r.env.postgres_user)

    r.sudo('psql --user={db_root_username} --host={db_host} -c "DROP USER IF EXISTS {db_user}; '
        'CREATE USER {db_user} WITH PASSWORD \'{db_password}\'; '
        'GRANT ALL PRIVILEGES ON DATABASE {db_name} to {db_user};"', user=r.env.postgres_user)
    for createlang in r.env.createlangs:
        r.env.createlang = createlang
        r.sudo('createlang -U {db_root_username} --host={db_host} {createlang} {db_name} || true',
            user=r.env.postgres_user)

    if not prep_only:
        # Ignore errors needed to work around bug "ERROR: schema "public" already exists",
        # which is thrown in 9.6 even if we use --clean and --if-exists?
        with settings(warn_only=True):
            r.sudo(r.env.load_command, user=r.env.postgres_user)

def shell(self, name='default', site=None, **kwargs):
    """
    Opens a SQL shell to the given database, assuming the configured database
    and user supports this feature.
    """
    r = self.database_renderer(name=name, site=site)
    self.write_pgpass(name=name, site=site, root=True)
    db_name = kwargs.get('db_name')
    if db_name:
        r.env.db_name = db_name
        r.run('/bin/bash -i -c "psql --username={db_root_username} --host={db_host} --dbname={db_name}"')
    else:
        r.run('/bin/bash -i -c "psql --username={db_root_username} --host={db_host}"')

def drop_database(self, name): """ Delete a PostgreSQL database. Example:: import burlap # Remove DB if it exists if burlap.postgres.database_exists('myapp'): burlap.postgres.drop_database('myapp') """ with settings(warn_only=True): self.sudo('dropdb %s' % (name,), user='postgres')
def load_table(self, table_name, src, dst='localhost', name=None, site=None):
    """
    Directly transfers a table between two databases.
    """
    #TODO: incomplete
    r = self.database_renderer(name=name, site=site)
    r.env.table_name = table_name
    # Drop the stale copy on the destination, then pipe a dump of the source
    # table into the destination.
    r.run('psql --user={dst_db_user} --host={dst_db_host} --command="DROP TABLE IF EXISTS {table_name} CASCADE;"')
    r.run('pg_dump -t {table_name} --user={src_db_user} --host={src_db_host} '
        '| psql --user={dst_db_user} --host={dst_db_host}')

from contextlib import contextmanager

@contextmanager
def set_cwd(new_path):
    """
    Usage:

        with set_cwd('/some/dir'):
            walk_around_the_filesystem()

    """
    try:
        curdir = os.getcwd()
    except OSError:
        curdir = new_path
    try:
        os.chdir(new_path)
        yield
    finally:
        os.chdir(curdir)

def interfaces():
    """
    Get the list of network interfaces. Will return all datalinks on SmartOS.
    """
    with settings(hide('running', 'stdout')):
        if is_file('/usr/sbin/dladm'):
            res = run('/usr/sbin/dladm show-link')
        else:
            res = sudo('/sbin/ifconfig -s')
    return [line.split(' ')[0] for line in res.splitlines()[1:]]

def address(interface):
    """
    Get the IPv4 address assigned to an interface.

    Example::

        import burlap

        # Print all configured IP addresses
        for interface in burlap.network.interfaces():
            print(burlap.network.address(interface))

    """
    with settings(hide('running', 'stdout')):
        res = (sudo("/sbin/ifconfig %(interface)s | grep 'inet '" % locals()) or '').split('\n')[-1].strip()
    if 'addr' in res:
        return res.split()[1].split(':')[1]
    return res.split()[1]

def record_manifest(self):
    """
    Returns a dictionary representing a serialized state of the service.
    """
    data = {}
    data['required_packages'] = self.install_required(type=SYSTEM, verbose=False, list_only=True)
    data['required_packages'].sort()
    data['custom_packages'] = self.install_custom(list_only=True)
    data['custom_packages'].sort()
    data['repositories'] = self.get_repositories()
    return data

def update(self):
    """
    Prepares the packaging system for installations by refreshing package metadata.
    """
    packager = self.packager
    if packager == APT:
        self.sudo('DEBIAN_FRONTEND=noninteractive apt-get -yq update')
    elif packager == YUM:
        self.sudo('yum update')
    else:
        raise Exception('Unknown packager: %s' % (packager,))

def install_apt(self, fn=None, package_name=None, update=0, list_only=0):
    """
    Installs system packages listed in apt-requirements.txt.
    """
    r = self.local_renderer
    assert self.genv[ROLE]
    apt_req_fqfn = fn or (self.env.apt_requirments_fn and self.find_template(self.env.apt_requirments_fn))
    if not apt_req_fqfn:
        return []
    assert os.path.isfile(apt_req_fqfn)

    lines = list(self.env.apt_packages or [])
    for _ in open(apt_req_fqfn).readlines():
        if _.strip() and not _.strip().startswith('#') \
        and (not package_name or _.strip() == package_name):
            lines.extend(_pkg.strip() for _pkg in _.split(' ') if _pkg.strip())

    if list_only:
        return lines

    tmp_fn = r.write_temp_file('\n'.join(lines))
    apt_req_fqfn = tmp_fn
    if not self.genv.is_local:
        r.put(local_path=tmp_fn, remote_path=tmp_fn)
        apt_req_fqfn = self.genv.put_remote_path
    r.sudo('DEBIAN_FRONTEND=noninteractive apt-get -yq update --fix-missing')
    r.sudo('DEBIAN_FRONTEND=noninteractive apt-get -yq install `cat "%s" | tr "\\n" " "`' % apt_req_fqfn)

def install_yum(self, fn=None, package_name=None, update=0, list_only=0):
    """
    Installs system packages listed in yum-requirements.txt.
    """
    assert self.genv[ROLE]
    yum_req_fn = fn or self.find_template(self.genv.yum_requirments_fn)
    if not yum_req_fn:
        return []
    assert os.path.isfile(yum_req_fn)
    update = int(update)
    if list_only:
        return [
            _.strip() for _ in open(yum_req_fn).readlines()
            if _.strip() and not _.strip().startswith('#')
            and (not package_name or _.strip() == package_name)
        ]
    if update:
        self.sudo_or_dryrun('yum update --assumeyes')
    if package_name:
        self.sudo_or_dryrun('yum install --assumeyes %s' % package_name)
    else:
        # Only upload the requirements file when the target is remote.
        if not self.genv.is_local:
            self.put_or_dryrun(local_path=yum_req_fn)
            yum_req_fn = self.genv.put_remote_fn
        self.sudo_or_dryrun('yum install --assumeyes $(cat %s)' % yum_req_fn)

def install_custom(self, *args, **kwargs):
    """
    Installs all system packages listed in the appropriate <packager>-requirements.txt.
    """
    if not self.env.manage_custom:
        return
    packager = self.packager
    if packager == APT:
        return self.install_apt(*args, **kwargs)
    elif packager == YUM:
        return self.install_yum(*args, **kwargs)
    else:
        raise NotImplementedError('Unknown packager: %s' % (packager,))

def refresh(self, *args, **kwargs):
    """
    Refreshes the system package list metadata.
    """
    r = self.local_renderer
    packager = self.packager
    if packager == APT:
        r.sudo('DEBIAN_FRONTEND=noninteractive apt-get -yq update --fix-missing')
    elif packager == YUM:
        raise NotImplementedError
        #return upgrade_yum(*args, **kwargs)
    else:
        raise Exception('Unknown packager: %s' % (packager,))

def upgrade(self, full=0):
    """
    Updates/upgrades all system packages.
    """
    full = int(full)
    r = self.local_renderer
    packager = self.packager
    if packager == APT:
        r.sudo('DEBIAN_FRONTEND=noninteractive apt-get -yq upgrade')
        if full:
            r.sudo('DEBIAN_FRONTEND=noninteractive apt-get dist-upgrade -yq')
    elif packager == YUM:
        raise NotImplementedError
    else:
        raise Exception('Unknown packager: %s' % (packager,))

def list_required(self, type=None, service=None): # pylint: disable=redefined-builtin
    """
    Displays all packages required by the current role
    based on the documented services provided.
    """
    from burlap.common import (
        required_system_packages,
        required_python_packages,
        required_ruby_packages,
    )
    service = (service or '').strip().upper()
    type = (type or '').lower().strip()
    assert not type or type in PACKAGE_TYPES, 'Unknown package type: %s' % (type,)
    packages_set = set()
    packages = []
    version = self.os_version
    for _service, satchel in self.all_other_enabled_satchels.items():

        _service = _service.strip().upper()
        if service and service != _service:
            continue

        _new = []

        if not type or type == SYSTEM:
            #TODO:deprecated, remove
            _new.extend(required_system_packages.get(
                _service, {}).get((version.distro, version.release), []))
            try:
                _pkgs = satchel.packager_system_packages
                if self.verbose:
                    print('pkgs:')
                    pprint(_pkgs, indent=4)
                for _key in [(version.distro, version.release), version.distro]:
                    if self.verbose:
                        print('checking key:', _key)
                    if _key in _pkgs:
                        if self.verbose:
                            print('satchel %s requires:' % satchel, _pkgs[_key])
                        _new.extend(_pkgs[_key])
                        break
            except AttributeError:
                pass

        if not type or type == PYTHON:
            #TODO:deprecated, remove
            _new.extend(required_python_packages.get(
                _service, {}).get((version.distro, version.release), []))
            try:
                _pkgs = satchel.packager_python_packages
                for _key in [(version.distro, version.release), version.distro]:
                    if _key in _pkgs:
                        _new.extend(_pkgs[_key])
            except AttributeError:
                pass
            print('_new:', _new)

        if not type or type == RUBY:
            #TODO:deprecated, remove
            _new.extend(required_ruby_packages.get(
                _service, {}).get((version.distro, version.release), []))

        for _ in _new:
            if _ in packages_set:
                continue
            packages_set.add(_)
            packages.append(_)

    if self.verbose:
        for package in sorted(packages):
            print('package:', package)

    return packages

def install_required(self, type=None, service=None, list_only=0, **kwargs): # pylint: disable=redefined-builtin
    """
    Installs system packages listed as required by services this host uses.
    """
    r = self.local_renderer
    list_only = int(list_only)
    type = (type or '').lower().strip()
    assert not type or type in PACKAGE_TYPES, 'Unknown package type: %s' % (type,)
    lst = []
    if type:
        types = [type]
    else:
        types = PACKAGE_TYPES
    for _type in types:
        if _type == SYSTEM:
            content = '\n'.join(self.list_required(type=_type, service=service))
            if list_only:
                lst.extend(_ for _ in content.split('\n') if _.strip())
                if self.verbose:
                    print('content:', content)
                break
            fd, fn = tempfile.mkstemp()
            fout = open(fn, 'w')
            fout.write(content)
            fout.close()
            self.install_custom(fn=fn)
        else:
            raise NotImplementedError
    return lst

def uninstall_blacklisted(self):
    """
    Uninstalls all blacklisted packages.
    """
    from burlap.system import distrib_family
    blacklisted_packages = self.env.blacklisted_packages
    if not blacklisted_packages:
        print('No blacklisted packages.')
        return
    family = distrib_family()
    if family == DEBIAN:
        self.sudo('DEBIAN_FRONTEND=noninteractive apt-get -yq purge %s' % ' '.join(blacklisted_packages))
    else:
        raise NotImplementedError('Unknown family: %s' % family)

def deploy(self, site=None):
    """
    Writes the entire crontab to the host.
    """
    r = self.local_renderer

    self.deploy_logrotate()

    cron_crontabs = []
#     if self.verbose:
#         print('hostname: "%s"' % (hostname,), file=sys.stderr)
    for _site, site_data in self.iter_sites(site=site):
        r.env.cron_stdout_log = r.format(r.env.stdout_log_template)
        r.env.cron_stderr_log = r.format(r.env.stderr_log_template)
        r.sudo('touch {cron_stdout_log}')
        r.sudo('touch {cron_stderr_log}')
        r.sudo('chown {user}:{user} {cron_stdout_log}')
        r.sudo('chown {user}:{user} {cron_stderr_log}')
        if self.verbose:
            print('site:', site, file=sys.stderr)
            print('env.crontabs_selected:', self.env.crontabs_selected, file=sys.stderr)
        for selected_crontab in self.env.crontabs_selected:
            lines = self.env.crontabs_available.get(selected_crontab, [])
            if self.verbose:
                print('lines:', lines, file=sys.stderr)
            for line in lines:
                cron_crontabs.append(r.format(line))

    if not cron_crontabs:
        return

    cron_crontabs = self.env.crontab_headers + cron_crontabs
    cron_crontabs.append('\n')
    r.env.crontabs_rendered = '\n'.join(cron_crontabs)
    fn = self.write_to_file(content=r.env.crontabs_rendered)
    print('fn:', fn)
    r.env.put_remote_path = r.put(local_path=fn)
    if isinstance(r.env.put_remote_path, (tuple, list)):
        r.env.put_remote_path = r.env.put_remote_path[0]
    r.sudo('crontab -u {cron_user} {put_remote_path}')

def load(self, dump_fn='', prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None):
    """
    Restores a database snapshot onto the target database server.

    If prep_only=1, commands for preparing the load will be generated,
    but not the command to finally load the snapshot.
    """
    r = self.database_renderer(name=name, site=site)

    # Render the snapshot filename.
    r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir)

    from_local = int(from_local)
    prep_only = int(prep_only)

    missing_local_dump_error = r.format('Database dump file {dump_fn} does not exist.')

    # Copy snapshot file to target.
    if self.is_local:
        r.env.remote_dump_fn = dump_fn
    else:
        r.env.remote_dump_fn = '/tmp/' + os.path.split(r.env.dump_fn)[-1]

    if not prep_only and not self.is_local:
        if not self.dryrun:
            assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
        r.pc('Uploading MongoDB database snapshot...')
#         r.put(
#             local_path=r.env.dump_fn,
#             remote_path=r.env.remote_dump_fn)
        r.local('rsync -rvz --progress --no-p --no-g '
            '--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" '
            '{dump_fn} {user}@{host_string}:{remote_dump_fn}')

    if self.is_local and not prep_only and not self.dryrun:
        assert os.path.isfile(r.env.dump_fn), missing_local_dump_error

    r.run_or_local(r.env.load_command)

def shell(self, name='default', user=None, password=None, root=0, verbose=1, write_password=1, no_db=0, no_pw=0):
    """
    Opens a SQL shell to the given database, assuming the configured database
    and user supports this feature.
    """
    raise NotImplementedError

def update_settings(self, d, role, path='roles/{role}/settings.yaml'):
    """
    Writes a key/value pair to a settings file.
    """
    try:
        import ruamel.yaml
        load_func = ruamel.yaml.round_trip_load
        dump_func = ruamel.yaml.round_trip_dump
    except ImportError:
        print('Warning: ruamel.yaml not available, reverting to the yaml package; some loss of formatting may occur.')
        import yaml
        load_func = yaml.load
        dump_func = yaml.dump
    settings_fn = path.format(role=role)
    data = load_func(open(settings_fn))
    data.update(d)
    settings_str = dump_func(data)
    open(settings_fn, 'w').write(settings_str)

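# A small round-trip demonstration of why update_settings() prefers
# ruamel.yaml: its legacy round_trip_* API preserves comments and key order,
# which plain yaml.dump() discards. (The sample YAML below is hypothetical.)
import ruamel.yaml

yaml_text = (
    '# deployment settings\n'
    'db_host: 10.0.0.5 # primary\n'
)
data = ruamel.yaml.round_trip_load(yaml_text)
data['db_host'] = '10.0.0.6'
print(ruamel.yaml.round_trip_dump(data))
# The comments survive the update:
# # deployment settings
# db_host: 10.0.0.6 # primary
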
def configure_bleeding(self):
    """
    Enables the repository for the most current RabbitMQ version on Debian systems.

    https://www.rabbitmq.com/install-debian.html
    """
    lm = self.last_manifest
    r = self.local_renderer
    if self.env.bleeding and not lm.bleeding:
        # Install.
        r.append(
            text='deb http://www.rabbitmq.com/debian/ testing main',
            filename='/etc/apt/sources.list.d/rabbitmq.list',
            use_sudo=True)
        r.sudo('cd /tmp; wget -O- https://www.rabbitmq.com/rabbitmq-release-signing-key.asc | sudo apt-key add -')
        r.sudo('apt-get update')
    elif not self.env.bleeding and lm.bleeding:
        # Uninstall.
        r.sudo('rm -f /etc/apt/sources.list.d/rabbitmq.list')
        r.sudo('apt-get update')

def force_stop_and_purge(self):
    """
    Forcibly kills Rabbit and purges all its queues.

    For emergency use when the server becomes unresponsive, even to service stop calls.

    If this also fails to correct the performance issues, the server may have to be
    completely reinstalled.
    """
    r = self.local_renderer
    self.stop()
    with settings(warn_only=True):
        r.sudo('killall rabbitmq-server')
    with settings(warn_only=True):
        r.sudo('killall beam.smp')
    #TODO:explicitly delete all subfolders, star-delete doesn't work
    r.sudo('rm -Rf /var/lib/rabbitmq/mnesia/*')

def _configure_users(self, site=None, full=0, only_data=0):
    """
    Installs RabbitMQ and configures its users and vhosts.
    """
    site = site or ALL
    full = int(full)

    if full and not only_data:
        packager = self.get_satchel('packager')
        packager.install_required(type=SYSTEM, service=self.name)

    r = self.local_renderer

    params = self.get_user_vhosts(site=site) # [(user, password, vhost)]

    with settings(warn_only=True):
        self.add_admin_user()

    params = sorted(list(params))
    if not only_data:
        for user, password, vhost in params:
            r.env.broker_user = user
            r.env.broker_password = password
            r.env.broker_vhost = vhost
            with settings(warn_only=True):
                r.sudo('rabbitmqctl add_user {broker_user} {broker_password}')
                r.sudo('rabbitmqctl add_vhost {broker_vhost}')
                r.sudo('rabbitmqctl set_permissions -p {broker_vhost} {broker_user} ".*" ".*" ".*"')
                r.sudo('rabbitmqctl set_permissions -p {broker_vhost} {admin_username} ".*" ".*" ".*"')

    return params

def record_manifest(self):
    """
    Returns a dictionary representing a serialized state of the service.
    """
    data = super(RabbitMQSatchel, self).record_manifest()
    params = sorted(list(self.get_user_vhosts())) # [(user, password, vhost)]
    data['rabbitmq_all_site_vhosts'] = params
    data['sites'] = list(self.genv.sites or [])
    return data

def iter_dict_differences(a, b):
    """
    Returns a generator yielding all keys whose values differ between the two dictionaries.
    """
    all_keys = set(a).union(b)
    for k in all_keys:
        a_value = a.get(k)
        b_value = b.get(k)
        if a_value != b_value:
            yield k, (a_value, b_value)

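# For example, comparing two thumbprint dictionaries (sample data; key order
# of the result may vary since the keys come from a set):
a = {'nginx': {'port': 80}, 'postgres': {'version': '9.5'}}
b = {'nginx': {'port': 8080}, 'rabbitmq': {'vhost': '/'}}
print(dict(iter_dict_differences(a, b)))
# {'nginx': ({'port': 80}, {'port': 8080}),
#  'postgres': ({'version': '9.5'}, None),
#  'rabbitmq': (None, {'vhost': '/'})}
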
def get_component_order(component_names):
    """
    Given a list of components, re-orders them according to inter-component dependencies
    so the most depended upon are first.
    """
    assert isinstance(component_names, (tuple, list))
    component_dependences = {}
    for _name in component_names:
        deps = set(manifest_deployers_befores.get(_name, []))
        deps = deps.intersection(component_names)
        component_dependences[_name] = deps
    component_order = list(topological_sort(component_dependences.items()))
    return component_order

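# A self-contained sketch of the kind of topological sort get_component_order()
# relies on (Kahn's algorithm over (component, prerequisites) pairs); burlap's
# actual topological_sort may differ in signature and tie-breaking.
from collections import deque

def topological_sort_sketch(pairs):
    deps = {node: set(pre) for node, pre in pairs}
    order = []
    ready = deque(sorted(n for n in deps if not deps[n]))
    while ready:
        node = ready.popleft()
        order.append(node)
        for other in sorted(deps):
            deps[other].discard(node)
            if other not in order and other not in ready and not deps[other]:
                ready.append(other)
    assert len(order) == len(deps), 'Dependency cycle detected.'
    return order

# The most depended-upon component sorts first:
print(topological_sort_sketch([
    ('rabbitmq', {'packager'}),
    ('postgresql', {'packager'}),
    ('packager', set()),
])) # ['packager', 'postgresql', 'rabbitmq']
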
def get_deploy_funcs(components, current_thumbprint, previous_thumbprint, preview=False):
    """
    Returns a generator yielding the named functions needed for a deployment.
    """
    for component in components:
        funcs = manifest_deployers.get(component, [])
        for func_name in funcs:

            #TODO:remove this after burlap.* naming prefix bug fixed
            if func_name.startswith('burlap.'):
                print('skipping %s' % func_name)
                continue

            takes_diff = manifest_deployers_takes_diff.get(func_name, False)
            func = resolve_deployer(func_name)
            current = current_thumbprint.get(component)
            last = previous_thumbprint.get(component)
            if takes_diff:
                yield func_name, partial(func, last=last, current=current)
            else:
                yield func_name, partial(func)

@property
def manifest_filename(self):
    """
    Returns the path to the manifest file.
    """
    r = self.local_renderer
    tp_fn = r.format(r.env.data_dir + '/manifest.yaml')
    return tp_fn

def get_current_thumbprint(self, components=None):
    """
    Returns a dictionary representing the current configuration state.

    Thumbprint is of the form:

        {
            component_name1: {key: value},
            component_name2: {key: value},
            ...
        }

    """
    components = str_to_component_list(components)
    if self.verbose:
        print('deploy.get_current_thumbprint.components:', components)
    manifest_data = {} # {component:data}
    for component_name, func in sorted(manifest_recorder.items()):
        self.vprint('Checking thumbprint for component %s...' % component_name)
        manifest_key = assert_valid_satchel(component_name)
        service_name = clean_service_name(component_name)
        if service_name not in self.genv.services:
            self.vprint('Skipping unused component:', component_name)
            continue
        elif components and service_name not in components:
            self.vprint('Skipping non-matching component:', component_name)
            continue
        try:
            self.vprint('Retrieving manifest for %s...' % component_name)
            manifest_data[manifest_key] = func()
            if self.verbose:
                pprint(manifest_data[manifest_key], indent=4)
        except exceptions.AbortDeployment:
            raise
    return manifest_data

def get_previous_thumbprint(self, components=None):
    """
    Returns a dictionary representing the previous configuration state.

    Thumbprint is of the form:

        {
            component_name1: {key: value},
            component_name2: {key: value},
            ...
        }

    """
    components = str_to_component_list(components)
    tp_fn = self.manifest_filename
    tp_text = None
    if self.file_exists(tp_fn):
        fd = six.BytesIO()
        get(tp_fn, fd)
        tp_text = fd.getvalue()
        manifest_data = {}
        raw_data = yaml.load(tp_text)
        for k, v in raw_data.items():
            manifest_key = assert_valid_satchel(k)
            service_name = clean_service_name(k)
            if components and service_name not in components:
                continue
            manifest_data[manifest_key] = v
        return manifest_data

def lock(self):
    """
    Marks the remote server as currently being deployed to.
    """
    self.init()
    r = self.local_renderer
    if self.file_exists(r.env.lockfile_path):
        raise exceptions.AbortDeployment(
            'Lock file %s exists. Perhaps another deployment is currently underway?' % r.env.lockfile_path)
    else:
        self.vprint('Locking %s.' % r.env.lockfile_path)
        r.env.hostname = socket.gethostname()
        r.run_or_local('echo "{hostname}" > {lockfile_path}')

def unlock(self):
    """
    Unmarks the remote server as currently being deployed to.
    """
    self.init()
    r = self.local_renderer
    if self.file_exists(r.env.lockfile_path):
        self.vprint('Unlocking %s.' % r.env.lockfile_path)
        r.run_or_local('rm -f {lockfile_path}')

def fake(self, components=None):#, set_satchels=None):
    """
    Update the thumbprint on the remote server but execute no satchel configurators.

    components = A comma-delimited list of satchel names to limit the fake deployment to.
    set_satchels = A semi-colon delimited list of key-value pairs to set in satchels
        before recording a fake deployment.
    """
    self.init()

    # In cases where we only want to fake deployment of a specific satchel, then simply copy
    # the last thumbprint and overwrite with a subset of the current thumbprint filtered
    # by our target components.
    if components:
        current_tp = self.get_previous_thumbprint() or {}
        current_tp.update(self.get_current_thumbprint(components=components) or {})
    else:
        current_tp = self.get_current_thumbprint(components=components) or {}

    tp_text = yaml.dump(current_tp)
    r = self.local_renderer
    r.upload_content(content=tp_text, fn=self.manifest_filename)

    # Ensure all cached manifests are cleared, so they reflect the newly deployed changes.
    self.reset_all_satchels()

def get_component_funcs(self, components=None):
    """
    Calculates the component functions that need to be executed for a deployment.
    """
    current_tp = self.get_current_thumbprint(components=components) or {}
    previous_tp = self.get_previous_thumbprint(components=components) or {}

    if self.verbose:
        print('Current thumbprint:')
        pprint(current_tp, indent=4)
        print('Previous thumbprint:')
        pprint(previous_tp, indent=4)

    differences = list(iter_dict_differences(current_tp, previous_tp))
    if self.verbose:
        print('Differences:')
        pprint(differences, indent=4)

    component_order = get_component_order([k for k, (_, _) in differences])
    if self.verbose:
        print('component_order:')
        pprint(component_order, indent=4)

    plan_funcs = list(get_deploy_funcs(component_order, current_tp, previous_tp))

    return component_order, plan_funcs

def preview(self, components=None, ask=0):
    """
    Inspects differences between the last deployment and the current code state.
    """
    ask = int(ask)

    self.init()

    component_order, plan_funcs = self.get_component_funcs(components=components)

    print('\n%i changes found for host %s.\n' % (len(component_order), self.genv.host_string))
    if component_order and plan_funcs:
        if self.verbose:
            print('These components have changed:\n')
            for component in sorted(component_order):
                print((' '*4)+component)
        print('Deployment plan for host %s:\n' % self.genv.host_string)
        for func_name, _ in plan_funcs:
            print(success_str((' '*4)+func_name))
    if component_order:
        print()

    if ask and self.genv.host_string == self.genv.hosts[-1]:
        if component_order:
            if not raw_input('Begin deployment? [yn] ').strip().lower().startswith('y'):
                sys.exit(0)
        else:
            sys.exit(0)

def push(self, components=None, yes=0):
    """
    Executes all satchel configurators to apply pending changes to the server.
    """
    from burlap import notifier

    service = self.get_satchel('service')

    self.lock()
    try:
        yes = int(yes)
        if not yes:
            # If we want to confirm the deployment with the user, and we're at the first server,
            # then run the preview.
            if self.genv.host_string == self.genv.hosts[0]:
                execute(partial(self.preview, components=components, ask=1))

        notifier.notify_pre_deployment()
        component_order, plan_funcs = self.get_component_funcs(components=components)
        service.pre_deploy()
        for func_name, plan_func in plan_funcs:
            print('Executing %s...' % func_name)
            plan_func()
        self.fake(components=components)
        service.post_deploy()
        notifier.notify_post_deployment()
    finally:
        self.unlock()

def get_thumbprint(self):
    """
    Calculates the current thumbprint of the item being tracked.
    """
    d = {}
    settings = dj.get_settings()
    for name in self.names:
        d[name] = getattr(settings, name)
    return d

def get_settings(self, site=None, role=None):
    """
    Retrieves the Django settings dictionary.
    """
    r = self.local_renderer
    _stdout = sys.stdout
    _stderr = sys.stderr
    if not self.verbose:
        sys.stdout = StringIO()
        sys.stderr = StringIO()
    try:
        sys.path.insert(0, r.env.src_dir)

        # Temporarily override SITE.
        tmp_site = self.genv.SITE
        if site and site.endswith('_secure'):
            site = site[:-7]
        site = site or self.genv.SITE or self.genv.default_site
        self.set_site(site)

        # Temporarily override ROLE.
        tmp_role = self.genv.ROLE
        if role:
            self.set_role(role)

        try:
            # We need to explicitly delete sub-modules from sys.modules. Otherwise, reload() skips
            # them and they'll continue to contain obsolete settings.
            if r.env.delete_module_with_prefixes:
                for name in sorted(sys.modules):
                    for prefix in r.env.delete_module_with_prefixes:
                        if name.startswith(prefix):
                            if self.verbose:
                                print('Deleting module %s prior to re-import.' % name)
                            del sys.modules[name]
                            break

            for name in list(sys.modules):
                for s in r.env.delete_module_containing:
                    if s in name:
                        del sys.modules[name]
                        break

            if r.env.settings_module in sys.modules:
                del sys.modules[r.env.settings_module]

            #TODO:fix r.env.settings_module not loading from settings?
#             print('r.genv.django_settings_module:', r.genv.django_settings_module, file=_stdout)
#             print('r.genv.dj_settings_module:', r.genv.dj_settings_module, file=_stdout)
#             print('r.env.settings_module:', r.env.settings_module, file=_stdout)
            if 'django_settings_module' in r.genv:
                r.env.settings_module = r.genv.django_settings_module
            else:
                r.env.settings_module = r.env.settings_module or r.genv.dj_settings_module
            if self.verbose:
                print('r.env.settings_module:', r.env.settings_module, r.format(r.env.settings_module))
            module = import_module(r.format(r.env.settings_module))

            if site:
                assert site == module.SITE, \
                    'Unable to set SITE to "%s". Instead it is set to "%s".' % (site, module.SITE)

            # Works as long as settings.py doesn't also reload anything.
            import imp
            imp.reload(module)

        except ImportError as e:
            print('Warning: Could not import settings for site "%s": %s' % (site, e), file=_stdout)
            traceback.print_exc(file=_stdout)
            #raise # breaks *_secure pseudo sites
            return
        finally:
            if tmp_site:
                self.set_site(tmp_site)
            if tmp_role:
                self.set_role(tmp_role)
    finally:
        sys.stdout = _stdout
        sys.stderr = _stderr
        sys.path.remove(r.env.src_dir)
    return module

def install_sql(self, site=None, database='default', apps=None, stop_on_error=0, fn=None):
    """
    Installs all custom SQL.
    """
    #from burlap.db import load_db_set
    stop_on_error = int(stop_on_error)
    site = site or ALL
    name = database
    r = self.local_renderer
    paths = glob.glob(r.format(r.env.install_sql_path_template))
    apps = [_ for _ in (apps or '').split(',') if _.strip()]
    if self.verbose:
        print('install_sql.apps:', apps)

    def cmp_paths(d0, d1):
        if d0[1] and d0[1] in d1[2]:
            return -1
        if d1[1] and d1[1] in d0[2]:
            return +1
        return cmp(d0[0], d1[0])

    def get_paths(t):
        """
        Returns SQL file paths in an execution order that respects dependencies.
        """
        data = [] # [(path, view_name, content)]
        for path in paths:
            if fn and fn not in path:
                continue
            parts = path.split('.')
            if len(parts) == 3 and parts[1] != t:
                continue
            if not path.lower().endswith('.sql'):
                continue
            content = open(path, 'r').read()
            matches = re.findall(r'[\s\t]+VIEW[\s\t]+([a-zA-Z0-9_]{3,})', content, flags=re.IGNORECASE)
            view_name = ''
            if matches:
                view_name = matches[0]
                print('Found view %s.' % view_name)
            data.append((path, view_name, content))
        for d in sorted(data, cmp=cmp_paths):
            yield d[0]

    def run_paths(paths, cmd_template, max_retries=3):
        r = self.local_renderer
        paths = list(paths)
        error_counts = defaultdict(int) # {path:count}
        terminal = set()
        if self.verbose:
            print('Checking %i paths.' % len(paths))
        while paths:
            path = paths.pop(0)
            if self.verbose:
                print('path:', path)
            app_name = re.findall(r'/([^/]+)/sql/', path)[0]
            if apps and app_name not in apps:
                self.vprint('skipping because app_name %s not in apps' % app_name)
                continue
            with self.settings(warn_only=True):
                if self.is_local:
                    r.env.sql_path = path
                else:
                    r.env.sql_path = '/tmp/%s' % os.path.split(path)[-1]
                    r.put(local_path=path, remote_path=r.env.sql_path)
                ret = r.run_or_local(cmd_template)
                if ret and ret.return_code:
                    if stop_on_error:
                        raise Exception('Unable to execute file %s' % path)
                    error_counts[path] += 1
                    if error_counts[path] < max_retries:
                        paths.append(path)
                    else:
                        terminal.add(path)
        if terminal:
            print('%i files could not be loaded.' % len(terminal), file=sys.stderr)
            for path in sorted(list(terminal)):
                print(path, file=sys.stderr)
            print(file=sys.stderr)

    if self.verbose:
        print('install_sql.db_engine:', r.env.db_engine)

    for _site, site_data in self.iter_sites(site=site, no_secure=True):
        self.set_db(name=name, site=_site)
        if 'postgres' in r.env.db_engine or 'postgis' in r.env.db_engine:
            paths = list(get_paths('postgresql'))
            run_paths(
                paths=paths,
                cmd_template="psql --host={db_host} --user={db_user} --no-password -d {db_name} -f {sql_path}")
        elif 'mysql' in r.env.db_engine:
            paths = list(get_paths('mysql'))
            run_paths(
                paths=paths,
                cmd_template="mysql -v -h {db_host} -u {db_user} -p'{db_password}' {db_name} < {sql_path}")
        else:
            raise NotImplementedError

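# The dependency-ordering heuristic above hinges on this regex, which pulls
# the first view name out of each SQL file so that files defining views
# referenced by other files sort first. A standalone check (sample SQL):
import re

content = 'CREATE OR REPLACE VIEW my_report_view AS SELECT 1;'
print(re.findall(r'[\s\t]+VIEW[\s\t]+([a-zA-Z0-9_]{3,})', content, flags=re.IGNORECASE))
# ['my_report_view']
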
def createsuperuser(self, username='admin', email=None, password=None, site=None):
    """
    Runs the Django createsuperuser management command.
    """
    r = self.local_renderer
    site = site or self.genv.SITE
    self.set_site_specifics(site)
    options = ['--username=%s' % username]
    if email:
        options.append('--email=%s' % email)
    if password:
        options.append('--password=%s' % password)
    r.env.options_str = ' '.join(options)
    if self.is_local:
        r.env.project_dir = r.env.local_project_dir
    r.genv.SITE = r.genv.SITE or site
    r.run_or_local('export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; '
        '{manage_cmd} {createsuperuser_cmd} {options_str}')

def loaddata(self, path, site=None):
    """
    Runs the Django loaddata management command.

    By default, runs on only the current site.
    Pass site=all to run on all sites.
    """
    site = site or self.genv.SITE
    r = self.local_renderer
    r.env._loaddata_path = path
    for _site, site_data in self.iter_sites(site=site, no_secure=True):
        try:
            self.set_db(site=_site)
            r.env.SITE = _site
            r.sudo('export SITE={SITE}; export ROLE={ROLE}; '
                'cd {project_dir}; '
                '{manage_cmd} loaddata {_loaddata_path}')
        except KeyError:
            pass

def manage(self, cmd, *args, **kwargs):
    """
    A generic wrapper around Django's manage command.
    """
    r = self.local_renderer
    environs = kwargs.pop('environs', '').strip()
    if environs:
        environs = ' '.join('export %s=%s;' % tuple(_.split('=')) for _ in environs.split(','))
        environs = ' ' + environs + ' '
    r.env.cmd = cmd
    r.env.SITE = r.genv.SITE or r.genv.default_site
    r.env.args = ' '.join(map(str, args))
    r.env.kwargs = ' '.join(
        ('--%s' % _k if _v in (True, 'True') else '--%s=%s' % (_k, _v))
        for _k, _v in kwargs.items())
    r.env.environs = environs
    if self.is_local:
        r.env.project_dir = r.env.local_project_dir
    r.run_or_local('export SITE={SITE}; export ROLE={ROLE};{environs} cd {project_dir}; '
        '{manage_cmd} {cmd} {args} {kwargs}')

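# The kwargs-to-flags conversion used by manage(), in isolation: True values
# become bare flags, everything else becomes --key=value.
def to_flags(**kwargs):
    return ' '.join(
        ('--%s' % _k if _v in (True, 'True') else '--%s=%s' % (_k, _v))
        for _k, _v in kwargs.items()
    )

print(to_flags(noinput=True, database='default'))
# --noinput --database=default
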
def manage_all(self, *args, **kwargs):
    """
    Runs manage() across all unique site default databases.
    """
    for site, site_data in self.iter_unique_databases(site='all'):
        if self.verbose:
            print('-'*80, file=sys.stderr)
            print('site:', site, file=sys.stderr)
        if self.env.available_sites_by_host:
            hostname = self.current_hostname
            sites_on_host = self.env.available_sites_by_host.get(hostname, [])
            if sites_on_host and site not in sites_on_host:
                self.vprint('skipping site:', site, sites_on_host, file=sys.stderr)
                continue
        self.manage(*args, **kwargs)

def load_django_settings(self):
    """
    Loads Django settings for the current site and sets them so Django internals can be run.
    """
    r = self.local_renderer

    # Save environment variables so we can restore them later.
    _env = {}
    save_vars = ['ALLOW_CELERY', 'DJANGO_SETTINGS_MODULE']
    for var_name in save_vars:
        _env[var_name] = os.environ.get(var_name)

    try:

        # Allow us to import local app modules.
        if r.env.local_project_dir:
            sys.path.insert(0, r.env.local_project_dir)

        #TODO:remove this once bug in django-celery has been fixed
        os.environ['ALLOW_CELERY'] = '0'

#         print('settings_module:', r.format(r.env.settings_module))
        os.environ['DJANGO_SETTINGS_MODULE'] = r.format(r.env.settings_module)
#         os.environ['CELERY_LOADER'] = 'django'
#         os.environ['SITE'] = r.genv.SITE or r.genv.default_site
#         os.environ['ROLE'] = r.genv.ROLE or r.genv.default_role

        # In Django >= 1.7, fixes the error AppRegistryNotReady: Apps aren't loaded yet
        # Disabling, in Django >= 1.10, throws exception:
        # RuntimeError: Model class django.contrib.contenttypes.models.ContentType
        # doesn't declare an explicit app_label and isn't in an application in INSTALLED_APPS.
#         try:
#             from django.core.wsgi import get_wsgi_application
#             application = get_wsgi_application()
#         except (ImportError, RuntimeError):
#             raise
#             print('Unable to get wsgi application.')
#             traceback.print_exc()

        # In Django >= 1.7, fixes the error AppRegistryNotReady: Apps aren't loaded yet
        try:
            import django
            django.setup()
        except AttributeError:
            # This doesn't exist in Django < 1.7, so ignore it.
            pass

        # Load Django settings.
        settings = self.get_settings()
        try:
            from django.contrib import staticfiles
            from django.conf import settings as _settings

            # get_settings() doesn't raise ImportError but returns None instead
            if settings is not None:
                for k, v in settings.__dict__.items():
                    setattr(_settings, k, v)
            else:
                raise ImportError
        except (ImportError, RuntimeError):
            print('Unable to load settings.')
            traceback.print_exc()

    finally:
        # Restore environment variables.
        for var_name, var_value in _env.items():
            if var_value is None:
                del os.environ[var_name]
            else:
                os.environ[var_name] = var_value

    return settings

def shell(self):
    """
    Opens a Django-focused Python shell.
    Essentially the equivalent of running `manage.py shell`.
    """
    r = self.local_renderer
    if '@' in self.genv.host_string:
        r.env.shell_host_string = self.genv.host_string
    else:
        r.env.shell_host_string = '{user}@{host_string}'
    r.env.shell_default_dir = self.genv.shell_default_dir_template
    r.env.shell_interactive_djshell_str = self.genv.interactive_shell_template
    r.run_or_local('ssh -t -i {key_filename} {shell_host_string} "{shell_interactive_djshell_str}"')

def syncdb(self, site=None, all=0, database=None, ignore_errors=1): # pylint: disable=redefined-builtin
    """
    Runs the standard Django syncdb command for one or more sites.
    """
    r = self.local_renderer

    ignore_errors = int(ignore_errors)

    post_south = self.version_tuple >= (1, 7, 0)

    use_run_syncdb = self.version_tuple >= (1, 9, 0)

    # DEPRECATED: removed in Django>=1.7
    r.env.db_syncdb_all_flag = '--all' if int(all) else ''

    r.env.db_syncdb_database = ''
    if database:
        r.env.db_syncdb_database = ' --database=%s' % database

    if self.is_local:
        r.env.project_dir = r.env.local_project_dir

    site = site or self.genv.SITE
    for _site, site_data in r.iter_unique_databases(site=site):
        r.env.SITE = _site
        with self.settings(warn_only=ignore_errors):
            if post_south:
                if use_run_syncdb:
                    r.run_or_local(
                        'export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; '
                        '{manage_cmd} migrate --run-syncdb --noinput {db_syncdb_database}')
                else:
                    # Between Django>=1.7,<1.9 we can only do a regular migrate, no true syncdb.
                    r.run_or_local(
                        'export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; '
                        '{manage_cmd} migrate --noinput {db_syncdb_database}')
            else:
                r.run_or_local(
                    'export SITE={SITE}; export ROLE={ROLE}; cd {project_dir}; '
                    '{manage_cmd} syncdb --noinput {db_syncdb_all_flag} {db_syncdb_database}')

def migrate(self, app='', migration='', site=None, fake=0, ignore_errors=None,
            skip_databases=None, database=None, migrate_apps='', delete_ghosts=1):
    """
    Runs the standard South migrate command for one or more sites.

    Note, to pass a comma-delimited list in a fab command, escape the comma with a back slash.

    e.g.

        fab staging dj.migrate:migrate_apps=oneapp\,twoapp\,threeapp

    """
    r = self.local_renderer

    ignore_errors = int(r.env.ignore_migration_errors if ignore_errors is None else ignore_errors)

    delete_ghosts = int(delete_ghosts)

    post_south = self.version_tuple >= (1, 7, 0)

    if self.version_tuple >= (1, 9, 0):
        delete_ghosts = 0

    skip_databases = (skip_databases or '')
    if isinstance(skip_databases, six.string_types):
        skip_databases = [_.strip() for _ in skip_databases.split(',') if _.strip()]

    migrate_apps = migrate_apps or ''
    migrate_apps = [
        _.strip().split('.')[-1]
        for _ in migrate_apps.strip().split(',')
        if _.strip()
    ]
    if app:
        migrate_apps.append(app)

    r.env.migrate_migration = migration or ''
    r.env.migrate_fake_str = '--fake' if int(fake) else ''
    r.env.migrate_database = '--database=%s' % database if database else ''
    r.env.migrate_merge = '--merge' if not post_south else ''
    r.env.delete_ghosts = '--delete-ghost-migrations' if delete_ghosts and not post_south else ''
    self.vprint('project_dir0:', r.env.project_dir, r.genv.get('dj_project_dir'), r.genv.get('project_dir'))
    self.vprint('migrate_apps:', migrate_apps)

    if self.is_local:
        r.env.project_dir = r.env.local_project_dir

    # CS 2017-3-29 Don't bypass the iterator. That causes reversion to the global env
    # that could corrupt the generated commands.
    #databases = list(self.iter_unique_databases(site=site))#TODO:remove
    # CS 2017-4-24 Don't specify a single site as the default when none is supplied.
    # Otherwise all other sites will be ignored.
    #site = site or self.genv.SITE
    site = site or ALL
    databases = self.iter_unique_databases(site=site)
    for _site, site_data in databases:
        self.vprint('-'*80, file=sys.stderr)
        self.vprint('site:', _site, file=sys.stderr)

        if self.env.available_sites_by_host:
            hostname = self.current_hostname
            sites_on_host = self.env.available_sites_by_host.get(hostname, [])
            if sites_on_host and _site not in sites_on_host:
                self.vprint('skipping site:', _site, sites_on_host, file=sys.stderr)
                continue

        if not migrate_apps:
            migrate_apps.append(' ')

        for _app in migrate_apps:
            # In cases where we're migrating built-in apps or apps with dotted names
            # e.g. django.contrib.auth, extract the name used for the migrate command.
            r.env.migrate_app = _app.split('.')[-1]
            self.vprint('project_dir1:', r.env.project_dir, r.genv.get('dj_project_dir'), r.genv.get('project_dir'))
            r.env.SITE = _site
            with self.settings(warn_only=ignore_errors):
                r.run_or_local(
                    'export SITE={SITE}; export ROLE={ROLE}; {migrate_pre_command} cd {project_dir}; '
                    '{manage_cmd} migrate --noinput {migrate_merge} --traceback '
                    '{migrate_database} {delete_ghosts} {migrate_app} {migrate_migration} '
                    '{migrate_fake_str}')

def manage_async(self, command='', name='process', site=ALL, exclude_sites='', end_message='', recipients=''):
    """
    Starts a Django management command in a screen.

    Parameters:

        command :- all arguments passed to `./manage` as a single string
        site :- the site to run the command for (default is all)

    Designed to be run like:

        fab <role> dj.manage_async:"some_management_command --force"

    """
    exclude_sites = exclude_sites.split(':')
    r = self.local_renderer
    for _site, site_data in self.iter_sites(site=site, no_secure=True):
        if _site in exclude_sites:
            continue
        r.env.SITE = _site
        r.env.command = command
        r.env.recipients = recipients or ''
        r.env.end_email_command = ''
        if end_message:
            end_message = end_message + ' for ' + _site
            end_message = end_message.replace(' ', '_')
            r.env.end_message = end_message
            r.env.end_email_command = r.format('{manage_cmd} send_mail --subject={end_message} --recipients={recipients}')
        r.env.name = name.format(**r.genv)
        r.run(
            'screen -dmS {name} bash -c "export SITE={SITE}; '
            'export ROLE={ROLE}; cd {project_dir}; '
            '{manage_cmd} {command} --traceback; {end_email_command}"; sleep 3;')

def get_media_timestamp(self, last_timestamp=None):
    """
    Retrieves the most recent timestamp of the media in the static root.

    If last_timestamp is given, retrieves the first timestamp more recent than this value.
    """
    r = self.local_renderer
    _latest_timestamp = float('-inf')
    for path in self.iter_static_paths():
        path = r.env.static_root + '/' + path
        self.vprint('checking timestamp of path:', path)
        if not os.path.isfile(path):
            continue
        #print('path:', path)
        _latest_timestamp = max(_latest_timestamp, get_last_modified_timestamp(path) or _latest_timestamp)
        if last_timestamp is not None and _latest_timestamp > last_timestamp:
            break
    self.vprint('latest_timestamp:', _latest_timestamp)
    return _latest_timestamp

def set_root_login(self, r):
    """
    Looks up the root login for the given database on the given host and sets
    it to environment variables.

    Populates these standard variables:

        db_root_password
        db_root_username

    """
    # Check the legacy password location.
    try:
        r.env.db_root_username = r.env.root_username
    except AttributeError:
        pass
    try:
        r.env.db_root_password = r.env.root_password
    except AttributeError:
        pass

    # Check the new password location.
    key = r.env.get('db_host')
    if self.verbose:
        print('db.set_root_login.key:', key)
        print('db.set_root_logins:', r.env.root_logins)
    if key in r.env.root_logins:
        data = r.env.root_logins[key]
#         print('data:', data)
        if 'username' in data:
            r.env.db_root_username = data['username']
            r.genv.db_root_username = data['username']
        if 'password' in data:
            r.env.db_root_password = data['password']
            r.genv.db_root_password = data['password']
    else:
        msg = 'Warning: No root login entry found for host %s in role %s.' % (r.env.get('db_host'), self.genv.get('ROLE'))
        print(msg, file=sys.stderr)

def database_renderer(self, name=None, site=None, role=None):
    """
    Renders local settings for a specific database.
    """
    name = name or self.env.default_db_name
    site = site or self.genv.SITE
    role = role or self.genv.ROLE
    key = (name, site, role)
    self.vprint('checking key:', key)
    if key not in self._database_renderers:
        self.vprint('No cached db renderer, generating...')

        if self.verbose:
            print('db.name:', name)
            print('db.databases:', self.env.databases)
            print('db.databases[%s]:' % name, self.env.databases.get(name))

        d = type(self.genv)(self.lenv)
        d.update(self.get_database_defaults())
        d.update(self.env.databases.get(name, {}))
        d['db_name'] = name

        if self.verbose:
            print('db.d:')
            pprint(d, indent=4)
            print('db.connection_handler:', d.connection_handler)

        if d.connection_handler == CONNECTION_HANDLER_DJANGO:
            self.vprint('Using django handler...')
            dj = self.get_satchel('dj')
            if self.verbose:
                print('Loading Django DB settings for site {} and role {}.'.format(site, role), file=sys.stderr)
            dj.set_db(name=name, site=site, role=role)
            _d = dj.local_renderer.collect_genv(include_local=True, include_global=False)

            # Copy "dj_db_*" into "db_*".
            for k, v in _d.items():
                if k.startswith('dj_db_'):
                    _d[k[3:]] = v
                    del _d[k]

            if self.verbose:
                print('Loaded:')
                pprint(_d)
            d.update(_d)

        elif d.connection_handler and d.connection_handler.startswith(CONNECTION_HANDLER_CUSTOM+':'):
            _callable_str = d.connection_handler[len(CONNECTION_HANDLER_CUSTOM+':'):]
            self.vprint('Using custom handler %s...' % _callable_str)
            _d = str_to_callable(_callable_str)(role=self.genv.ROLE)
            if self.verbose:
                print('Loaded:')
                pprint(_d)
            d.update(_d)

        r = LocalRenderer(self, lenv=d)

        # Optionally set any root logins needed for administrative commands.
        self.set_root_login(r)

        self._database_renderers[key] = r
    else:
        self.vprint('Cached db renderer found.')
    return self._database_renderers[key]

def get_free_space(self):
    """
    Returns free space in bytes.
    """
    cmd = "df -k | grep -vE '^Filesystem|tmpfs|cdrom|none|udev|cgroup' | awk '{ print($1 \" \" $4) }'"
    lines = [_ for _ in self.run(cmd).strip().split('\n') if _.startswith('/')]
    assert len(lines) == 1, 'Ambiguous devices: %s' % str(lines)
    device, kb = lines[0].split(' ')
    free_space = int(kb) * 1024
    self.vprint('free_space (bytes):', free_space)
    return free_space

def load_db_set(self, name, r=None):
    """
    Loads database parameters from a specific named set.
    """
    r = r or self
    db_set = r.genv.db_sets.get(name, {})
    r.genv.update(db_set)

def loadable(self, src, dst):
    """
    Determines if there's enough space to load the target database.
    """
    from fabric import state
    from fabric.task_utils import crawl

    src_task = crawl(src, state.commands)
    assert src_task, 'Unknown source role: %s' % src

    dst_task = crawl(dst, state.commands)
    assert dst_task, 'Unknown destination role: %s' % dst

    # Get source database size.
    src_task()
    env.host_string = env.hosts[0]
    src_size_bytes = self.get_size()

    # Get target database size, if any.
    dst_task()
    env.host_string = env.hosts[0]
    try:
        dst_size_bytes = self.get_size()
    except (ValueError, TypeError):
        dst_size_bytes = 0

    # Get target host disk size.
    free_space_bytes = self.get_free_space()

    # Deduct the existing target database size, because we'll be deleting it.
    balance_bytes = free_space_bytes + dst_size_bytes - src_size_bytes
    balance_bytes_scaled, units = pretty_bytes(balance_bytes)

    viable = balance_bytes >= 0
    if self.verbose:
        print('src_db_size:', pretty_bytes(src_size_bytes))
        print('dst_db_size:', pretty_bytes(dst_size_bytes))
        print('dst_free_space:', pretty_bytes(free_space_bytes))
        print()
        if viable:
            print('Viable! There will be %.02f %s of disk space left.' % (balance_bytes_scaled, units))
        else:
            print('Not viable! We would be %.02f %s short.' % (balance_bytes_scaled, units))

    return viable

def dump(self, dest_dir=None, to_local=1, from_local=0, archive=0, dump_fn=None,
         name=None, site=None, use_sudo=0, cleanup=1):
    """
    Exports the target database to a single transportable file on the localhost,
    appropriate for loading using load().
    """
    r = self.local_renderer
    site = site or self.genv.SITE
    r = self.database_renderer(name=name, site=site)

    # Load optional site-specific command, if given.
    try:
        r.env.dump_command = self.genv.sites[site]['postgresql_dump_command']
    except KeyError:
        pass

    use_sudo = int(use_sudo)
    from_local = int(from_local)
    to_local = int(to_local)

    dump_fn = dump_fn or r.env.dump_fn_template

    # Render the snapshot filename.
    r.env.dump_fn = self.get_default_db_fn(
        fn_template=dump_fn,
        dest_dir=dest_dir,
        name=name,
        site=site,
    )

    # Dump the database to a snapshot file.
    #if not os.path.isfile(os.path.abspath(r.env.dump_fn)):
    r.pc('Dumping database snapshot.')
    if from_local:
        r.local(r.env.dump_command)
    elif use_sudo:
        r.sudo(r.env.dump_command)
    else:
        r.run(r.env.dump_command)

    # Download the database dump file on the remote host to localhost.
    if not from_local and to_local:
        r.pc('Downloading database snapshot to localhost.')
        r.local('rsync -rvz --progress --recursive --no-p --no-g '
            '--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" {user}@{host_string}:{dump_fn} {dump_fn}')

        # Delete the snapshot file on the remote system.
        if int(cleanup):
            r.pc('Deleting database snapshot on remote host.')
            r.sudo('rm {dump_fn}')

    # Move the database snapshot to an archive directory.
    if to_local and int(archive):
        r.pc('Archiving database snapshot.')
        db_fn = r.render_fn(r.env.dump_fn)
        r.env.archive_fn = '%s/%s' % (env.db_dump_archive_dir, os.path.split(db_fn)[-1])
        r.local('mv %s %s' % (db_fn, env.archive_fn))

    return r.env.dump_fn

def show(keyword=''):
    """
    Displays a list of all environment key/value pairs for the current role.
    """
    keyword = keyword.strip().lower()
    max_len = max(len(k) for k in env.iterkeys())
    keyword_found = False
    for k in sorted(env.keys()):
        if keyword and keyword not in k.lower():
            continue
        keyword_found = True
        #print('%s: %s' % (k, env[k]))
        print('%s: ' % (k.ljust(max_len),))
        pprint(env[k], indent=4)
    if keyword:
        if not keyword_found:
            print('Keyword "%s" not found.' % keyword)

def record_manifest():
    """
    Called after a deployment to record any data necessary to detect changes
    for a future deployment.
    """
    data = {}

    # Record settings.
    data['settings'] = dict(
        (k, v)
        for k, v in env.items()
        if not isinstance(v, types.GeneratorType)
        and k.strip()
        and not k.startswith('_')
        and not callable(v)
    )

    # Record tarball hash.

    # Record database migrations.

    # Record media hash.

    return data
def fix_eth0_rename(self, hardware_addr):
    """
    A bug as of 2016.10.10 causes eth0 to be renamed to enx*.
    This renames it back to eth0.

    http://raspberrypi.stackexchange.com/q/43560/29103
    """
    r = self.local_renderer
    r.env.hardware_addr = hardware_addr
    r.sudo('ln -s /dev/null /etc/udev/rules.d/80-net-name-slot.rules')
    r.append(
        text=r'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '
            r'ATTR\{address\}=="{hardware_addr}", '
            r'ATTR\{dev_id\}=="0x0", '
            r'ATTR\{type\}=="1", '
            r'KERNEL=="eth*", NAME="eth0"',
        filename='/etc/udev/rules.d/70-persistent-net.rules',
        use_sudo=True,
    )
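# Sketch of the udev rule produced by the renderer above for a sample MAC
# address (the b8:27:eb:... address is hypothetical). The \{...\} escapes in
# the method protect literal braces from {var} templating, so after rendering
# the rule reads:
rule = ('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", '
        'ATTR{address}=="b8:27:eb:aa:bb:cc", '
        'ATTR{dev_id}=="0x0", ATTR{type}=="1", '
        'KERNEL=="eth*", NAME="eth0"')
print(rule)  # appended to /etc/udev/rules.d/70-persistent-net.rules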
def assume_localhost(self):
    """
    Sets connection parameters to localhost, if not set already.
    """
    if not self.genv.host_string:
        self.genv.host_string = 'localhost'
        self.genv.hosts = ['localhost']
        self.genv.user = getpass.getuser()
def init_raspbian_disk(self, yes=0):
    """
    Downloads the latest Raspbian image and writes it to a microSD card.

    Based on the instructions from:

        https://www.raspberrypi.org/documentation/installation/installing-images/linux.md
    """
    self.assume_localhost()

    yes = int(yes)
    device_question = 'SD card present at %s? ' % self.env.sd_device
    if not yes and not raw_input(device_question).lower().startswith('y'):
        return

    r = self.local_renderer
    r.local_if_missing(
        fn='{raspbian_image_zip}',
        cmd='wget {raspbian_download_url} -O raspbian_lite_latest.zip')

    r.lenv.img_fn = \
        r.local("unzip -l {raspbian_image_zip} | sed -n 4p | awk '{{print $4}}'", capture=True) or '$IMG_FN'
    r.local('echo {img_fn}')
    r.local('[ ! -f {img_fn} ] && unzip {raspbian_image_zip} {img_fn} || true')
    r.lenv.img_fn = r.local('readlink -f {img_fn}', capture=True)
    r.local('echo {img_fn}')

    with self.settings(warn_only=True):
        r.sudo('[ -d "{sd_media_mount_dir}" ] && umount {sd_media_mount_dir} || true')
    with self.settings(warn_only=True):
        r.sudo('[ -d "{sd_media_mount_dir2}" ] && umount {sd_media_mount_dir2} || true')

    r.pc('Writing the image onto the card.')
    r.sudo('time dd bs=4M if={img_fn} of={sd_device}')

    # Flush all writes to disk.
    r.run('sync')
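# Standalone sketch of how the image filename is extracted from `unzip -l`
# output above (line 4, column 4); the sample listing is hypothetical.
sample = """Archive:  raspbian_lite_latest.zip
  Length      Date    Time    Name
---------  ---------- -----   ----
1387266048  2017-04-10 11:02   2017-04-10-raspbian-jessie-lite.img
---------                     -------
1387266048                     1 file
"""
line4 = sample.splitlines()[3]   # sed -n 4p
img_fn = line4.split()[3]        # awk '{print $4}'
print(img_fn)  # -> 2017-04-10-raspbian-jessie-lite.img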
def init_ubuntu_disk(self, yes=0):
    """
    Downloads the latest Ubuntu image and writes it to a microSD card.

    Based on the instructions from:

        https://wiki.ubuntu.com/ARM/RaspberryPi

    For recommended SD card brands, see:

        http://elinux.org/RPi_SD_cards

    Note, if you get an error like:

        Kernel panic-not syncing: VFS: unable to mount root fs

    that means the SD card is corrupted. Try re-imaging the card or using a different card.
    """
    self.assume_localhost()

    yes = int(yes)

    if not self.dryrun:
        device_question = 'SD card present at %s? ' % self.env.sd_device
        inp = raw_input(device_question).strip()
        print('inp:', inp)
        if not yes and inp and not inp.lower().startswith('y'):
            return

    r = self.local_renderer

    # Confirm the SD card is present.
    r.local('ls {sd_device}')

    # Download the image.
    r.env.ubuntu_image_fn = os.path.abspath(os.path.split(self.env.ubuntu_download_url)[-1])
    r.local('[ ! -f {ubuntu_image_fn} ] && wget {ubuntu_download_url} || true')

    # Ensure the SD card is unmounted.
    with self.settings(warn_only=True):
        r.sudo('[ -d "{sd_media_mount_dir}" ] && umount {sd_media_mount_dir}')
    with self.settings(warn_only=True):
        r.sudo('[ -d "{sd_media_mount_dir2}" ] && umount {sd_media_mount_dir2}')

    r.pc('Writing the image onto the card.')
    r.sudo('xzcat {ubuntu_image_fn} | dd bs=4M of={sd_device}')

    # Flush all writes to disk.
    r.run('sync')
def init_raspbian_vm(self):
    """
    Creates an image for running Raspbian in a QEMU virtual machine.

    Based on the guide at:

        https://github.com/dhruvvyas90/qemu-rpi-kernel/wiki/Emulating-Jessie-image-with-4.1.x-kernel
    """
    r = self.local_renderer

    r.comment('Installing system packages.')
    r.sudo('add-apt-repository ppa:linaro-maintainers/tools')
    r.sudo('apt-get update')
    r.sudo('apt-get install libsdl-dev qemu-system')

    r.comment('Download image.')
    r.local('wget https://downloads.raspberrypi.org/raspbian_lite_latest')
    r.local('unzip raspbian_lite_latest.zip')
    #TODO:fix name?
    #TODO:resize image?

    r.comment('Find start of the Linux ext4 partition.')
    r.local(
        "parted -s 2016-03-18-raspbian-jessie-lite.img unit B print | "
        "awk '/^Number/{{p=1;next}}; p{{gsub(/[^[:digit:]]/, \"\", $2); print $2}}' | sed -n 2p",
        assign_to='START')

    r.local('mkdir -p {raspbian_mount_point}')
    r.sudo('mount -v -o offset=$START -t ext4 {raspbian_image} {raspbian_mount_point}')

    r.comment('Comment out everything in ld.so.preload.')
    r.local("sed -i 's/^/#/g' {raspbian_mount_point}/etc/ld.so.preload")

    r.comment('Comment out entries containing /dev/mmcblk in fstab.')
    r.local("sed -i '/mmcblk/ s?^?#?' {raspbian_mount_point}/etc/fstab")

    r.sudo('umount {raspbian_mount_point}')

    r.comment('Download kernel.')
    r.local('wget https://github.com/dhruvvyas90/qemu-rpi-kernel/blob/master/{raspbian_kernel}?raw=true')
    r.local('mv {raspbian_kernel} {libvirt_images_dir}')

    r.comment('Creating libvirt machine.')
    r.local('virsh define libvirt-raspbian.xml')

    r.comment('You should now be able to boot the VM by running:')
    r.comment('')
    r.comment('    qemu-system-arm -kernel {libvirt_boot_dir}/{raspbian_kernel} '
        '-cpu arm1176 -m 256 -M versatilepb -serial stdio -append "root=/dev/sda2 rootfstype=ext4 rw" '
        '-hda {libvirt_images_dir}/{raspbian_image}')
    r.comment('')
    r.comment('Or by running virt-manager.')
def create_raspbian_vagrant_box(self):
    """
    Creates a box for easily spinning up a virtual machine with Vagrant.

    http://unix.stackexchange.com/a/222907/16477
    https://github.com/pradels/vagrant-libvirt
    """
    r = self.local_renderer

    r.sudo('adduser --disabled-password --gecos "" vagrant')

    # The vagrant user should be able to run sudo commands without a password prompt.
    r.sudo('echo "vagrant ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/vagrant')
    r.sudo('chmod 0440 /etc/sudoers.d/vagrant')

    r.sudo('apt-get update')
    r.sudo('apt-get install -y openssh-server')

    # Install the standard Vagrant public key for the vagrant user.
    r.sudo('mkdir -p /home/vagrant/.ssh')
    r.sudo('chmod 0700 /home/vagrant/.ssh')
    r.sudo('wget --no-check-certificate https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub -O /home/vagrant/.ssh/authorized_keys')
    r.sudo('chmod 0600 /home/vagrant/.ssh/authorized_keys')
    r.sudo('chown -R vagrant /home/vagrant/.ssh')

    # In /etc/ssh/sshd_config, enable PubKeyAuthentication and disable
    # PasswordAuthentication, then restart ssh with: sudo service ssh restart
    r.sudo("sed -i '/AuthorizedKeysFile/s/^#//g' /etc/ssh/sshd_config")
    r.sudo("sed -i '/PasswordAuthentication/s/^#//g' /etc/ssh/sshd_config")
    r.sudo("sed -i 's/PasswordAuthentication yes/PasswordAuthentication no/g' /etc/ssh/sshd_config")

    # Install additional development packages so tools can properly compile and install.
    r.sudo('apt-get upgrade')
    r.sudo('apt-get install -y gcc build-essential')
    #TODO:fix? throws dpkg: error: fgets gave an empty string from `/var/lib/dpkg/triggers/File'
    #r.sudo('apt-get install -y linux-headers-rpi')

    # Make any desired changes and shut down the VM. Then, on the host machine running
    # the guest, copy the raw image from /var/lib/libvirt/images/ to a staging
    # directory, e.g. /tmp/test.
    r.sudo('mkdir /tmp/test')
    r.sudo('cp {libvirt_images_dir}/{raspbian_image} /tmp/test')
    r.sudo('cp {libvirt_boot_dir}/{raspbian_kernel} /tmp/test')

    # Create metadata.json and a Vagrantfile in the staging directory.
    r.render_to_file('rpi/metadata.json', '/tmp/test/metadata.json')
    r.render_to_file('rpi/Vagrantfile', '/tmp/test/Vagrantfile')

    # Convert the raw image to qcow2 format.
    r.sudo('qemu-img convert -f raw -O qcow2 {libvirt_images_dir}/{raspbian_image} {libvirt_images_dir}/{raspbian_image}.qcow2')

    # Rename the qcow2 image to box.img. Note, vagrant-libvirt currently supports only
    # the qcow2 format, and expects the image to be named box.img by default, so don't
    # change the format, just the name.
    r.sudo('mv {libvirt_images_dir}/{raspbian_image}.qcow2 {libvirt_images_dir}/box.img')

    # Create the box.
    r.sudo('cd /tmp/test; tar cvzf custom_box.box ./metadata.json ./Vagrantfile ./{raspbian_kernel} ./box.img')
def configure_hdmi(self):
    """
    Configures HDMI to support hot-plugging, so it'll work even if it wasn't
    plugged in when the Pi was originally powered up.

    Note, this does cause slightly higher power consumption, so if you don't
    need HDMI, don't bother with this.

    http://raspberrypi.stackexchange.com/a/2171/29103
    """
    r = self.local_renderer

    # Use HDMI mode even if no HDMI monitor is detected.
    r.enable_attr(
        filename='/boot/config.txt',
        key='hdmi_force_hotplug',
        value=1,
        use_sudo=True,
    )

    # Use normal HDMI mode (sound will be sent if supported and enabled).
    # Without this line, Raspbmc would switch to DVI (with no audio) mode by default.
    r.enable_attr(
        filename='/boot/config.txt',
        key='hdmi_drive',
        value=2,
        use_sudo=True,
    )
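# A minimal pure-Python sketch of an idempotent key=value setter like the
# enable_attr() calls above (burlap's real implementation differs; this only
# illustrates the end state of /boot/config.txt).
def enable_attr(text, key, value):
    lines = [l for l in text.splitlines() if not l.startswith('%s=' % key)]
    lines.append('%s=%s' % (key, value))
    return '\n'.join(lines) + '\n'

conf = 'disable_overscan=1\nhdmi_force_hotplug=0\n'
conf = enable_attr(conf, 'hdmi_force_hotplug', 1)
conf = enable_attr(conf, 'hdmi_drive', 2)
print(conf)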
def configure_camera(self):
    """
    Enables access to the camera.

        http://raspberrypi.stackexchange.com/questions/14229/how-can-i-enable-the-camera-without-using-raspi-config
        https://mike632t.wordpress.com/2014/06/26/raspberry-pi-camera-setup/

    Afterwards, test with:

        /opt/vc/bin/raspistill --nopreview --output image.jpg

    Check for compatibility with:

        vcgencmd get_camera

    which should show:

        supported=1 detected=1
    """
    #TODO:check per OS? Works on Raspbian Jessie.
    r = self.local_renderer
    if self.env.camera_enabled:
        r.pc('Enabling camera.')

        #TODO:fix, doesn't work on Ubuntu, which uses commented-out values

        # Set start_x=1.
        #r.sudo('if grep "start_x=0" /boot/config.txt; then sed -i "s/start_x=0/start_x=1/g" /boot/config.txt; fi')
        #r.sudo('if grep "start_x" /boot/config.txt; then true; else echo "start_x=1" >> /boot/config.txt; fi')
        r.enable_attr(
            filename='/boot/config.txt',
            key='start_x',
            value=1,
            use_sudo=True,
        )

        # Set gpu_mem=128.
        #r.sudo('if grep "gpu_mem" /boot/config.txt; then true; else echo "gpu_mem=128" >> /boot/config.txt; fi')
        r.enable_attr(
            filename='/boot/config.txt',
            key='gpu_mem',
            value=r.env.gpu_mem,
            use_sudo=True,
        )

        # Compile the Raspberry Pi userland binaries.
        # https://github.com/raspberrypi/userland
        r.run('cd ~; git clone https://github.com/raspberrypi/userland.git; cd userland; ./buildme')
        r.run('touch ~/.bash_aliases')
        #r.run("echo 'PATH=$PATH:/opt/vc/bin\nexport PATH' >> ~/.bash_aliases")
        r.append(r'PATH=$PATH:/opt/vc/bin\nexport PATH', '~/.bash_aliases')
        #r.run("echo 'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/vc/lib\nexport LD_LIBRARY_PATH' >> ~/.bash_aliases")
        r.append(r'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/vc/lib\nexport LD_LIBRARY_PATH', '~/.bash_aliases')
        r.run('source ~/.bashrc')
        r.sudo('ldconfig')

        # Allow our user to access the video device.
        r.sudo("echo 'SUBSYSTEM==\"vchiq\",GROUP=\"video\",MODE=\"0660\"' > /etc/udev/rules.d/10-vchiq-permissions.rules")
        r.sudo("usermod -a -G video {user}")

        r.reboot(wait=300, timeout=60)

        self.test_camera()
    else:
        r.disable_attr(
            filename='/boot/config.txt',
            key='start_x',
            use_sudo=True,
        )
        r.disable_attr(
            filename='/boot/config.txt',
            key='gpu_mem',
            use_sudo=True,
        )
        r.reboot(wait=300, timeout=60)
def fix_lsmod_for_pi3(self):
    """
    Some images purporting to support both the Pi2 and Pi3 use the wrong kernel modules.
    """
    r = self.local_renderer
    r.env.rpi2_conf = '/etc/modules-load.d/rpi2.conf'
    # Note, sed needs -i to edit the file in place.
    r.sudo("sed -i '/bcm2808_rng/d' {rpi2_conf}")
    r.sudo("echo bcm2835_rng >> {rpi2_conf}")
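# Sketch of the module-list rewrite above: the misspelled bcm2808_rng entry is
# dropped and the correct bcm2835_rng hardware-RNG module is appended. The
# sample conf contents are hypothetical.
conf = 'i2c-dev\nbcm2808_rng\n'
lines = [l for l in conf.splitlines() if 'bcm2808_rng' not in l]
lines.append('bcm2835_rng')
print('\n'.join(lines))  # -> i2c-dev / bcm2835_rng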
def pre_deploy(self):
    """
    Runs methods services have requested be run before each deployment.
    """
    for service in self.genv.services:
        service = service.strip().upper()
        funcs = common.service_pre_deployers.get(service)
        if funcs:
            print('Running pre-deployments for service %s...' % (service,))
            for func in funcs:
                func()
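# Minimal sketch of the registry pattern that pre_deploy() and the other hook
# runners below iterate over; the register helper shown here is illustrative,
# not burlap's actual API.
service_pre_deployers = {}

def register_pre_deployer(service, func):
    service_pre_deployers.setdefault(service.strip().upper(), []).append(func)

def stop_apache():
    print('Stopping apache...')

register_pre_deployer('apache', stop_apache)
for func in service_pre_deployers.get('APACHE', []):
    func()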
def deploy(self):
    """
    Applies routine, typically application-level changes to the service.
    """
    for service in self.genv.services:
        service = service.strip().upper()
        funcs = common.service_deployers.get(service)
        if funcs:
            print('Deploying service %s...' % (service,))
            for func in funcs:
                if not self.dryrun:
                    func()
def post_deploy(self):
    """
    Runs methods services have requested be run after each deployment.
    """
    for service in self.genv.services:
        service = service.strip().upper()
        self.vprint('post_deploy:', service)
        funcs = common.service_post_deployers.get(service)
        if funcs:
            self.vprint('Running post-deployments for service %s...' % (service,))
            for func in funcs:
                try:
                    func()
                except Exception as e:
                    print('Post deployment error: %s' % e, file=sys.stderr)
                    print(traceback.format_exc(), file=sys.stderr)
def pre_db_dump(self):
    """
    Runs methods that services have requested be run before each database dump.
    """
    for service in self.genv.services:
        service = service.strip().upper()
        funcs = common.service_pre_db_dumpers.get(service)
        if funcs:
            print('Running pre-database dump for service %s...' % (service,))
            for func in funcs:
                func()
def post_db_dump(self):
    """
    Runs methods that services have requested be run after each database dump.
    """
    for service in self.genv.services:
        service = service.strip().upper()
        funcs = common.service_post_db_dumpers.get(service)
        if funcs:
            print('Running post-database dump for service %s...' % (service,))
            for func in funcs:
                func()
def configure(self):
    """
    Applies one-time settings changes to the host, usually to initialize the service.
    """
    print('env.services:', self.genv.services)
    for service in list(self.genv.services):
        service = service.strip().upper()
        funcs = common.service_configurators.get(service, [])
        if funcs:
            print('!'*80)
            print('Configuring service %s...' % (service,))
            for func in funcs:
                print('Function:', func)
                if not self.dryrun:
                    func()
def get_locale_dict(self, text=None):
    """
    Reads /etc/default/locale and returns a dictionary representing its key pairs.
    """
    text = text or self.cat_locale()
    # Lines have the format: NAME="value".
    return dict(re.findall(r'^([a-zA-Z_]+)\s*=\s*[\'\"]*([0-9a-zA-Z_\.\:\-]+)[\'\"]*', text, re.MULTILINE))
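# Standalone demo of the regex above against a sample /etc/default/locale.
import re

text = 'LANG="en_US.UTF-8"\nLC_ALL="en_US.UTF-8"\n'
pairs = dict(re.findall(
    r'^([a-zA-Z_]+)\s*=\s*[\'\"]*([0-9a-zA-Z_\.\:\-]+)[\'\"]*',
    text, re.MULTILINE))
print(pairs)  # -> {'LANG': 'en_US.UTF-8', 'LC_ALL': 'en_US.UTF-8'}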
def enable_mods(self):
    """
    Enables all modules in the current module list.
    Does not disable any currently enabled modules not in the list.
    """
    r = self.local_renderer
    for mod_name in r.env.mods_enabled:
        with self.settings(warn_only=True):
            self.enable_mod(mod_name)
def optimize_wsgi_processes(self):
    """
    Based on the number of sites per server and the number of resources on the server,
    calculates the optimal number of processes that should be allocated for each WSGI site.
    """
    r = self.local_renderer
    #r.env.wsgi_processes = 5
    r.env.wsgi_server_memory_gb = 8
    verbose = self.verbose
    all_sites = list(self.iter_sites(site=ALL, setter=self.set_site_specifics))
def create_local_renderer(self):
    """
    Instantiates a new local renderer.
    Override this to do any additional initialization.
    """
    r = super(ApacheSatchel, self).create_local_renderer()

    # Dynamically set values based on the target operating system.
    os_version = self.os_version
    apache_specifics = r.env.specifics[os_version.type][os_version.distro]
    r.env.update(apache_specifics)

    return r
def install_auth_basic_user_file(self, site=None):
    """
    Installs users for basic httpd auth.
    """
    r = self.local_renderer

    hostname = self.current_hostname
    target_sites = self.genv.available_sites_by_host.get(hostname, None)

    for _site, site_data in self.iter_sites(site=site, setter=self.set_site_specifics):
        if self.verbose:
            print('~'*80, file=sys.stderr)
            print('Site:', _site, file=sys.stderr)
            print('env.apache_auth_basic:', r.env.auth_basic, file=sys.stderr)

        # Only load site configurations that are allowed for this host.
        if target_sites is not None:
            assert isinstance(target_sites, (tuple, list))
            if _site not in target_sites:
                continue

        if not r.env.auth_basic:
            continue

        assert r.env.auth_basic_users, 'No apache auth users specified.'
        for username, password in r.env.auth_basic_users:
            r.env.auth_basic_username = username
            r.env.auth_basic_password = password
            r.env.apache_site = _site
            r.env.fn = r.format(r.env.auth_basic_authuserfile)
            if self.files.exists(r.env.fn):
                r.sudo('htpasswd -b {fn} {auth_basic_username} {auth_basic_password}')
            else:
                r.sudo('htpasswd -b -c {fn} {auth_basic_username} {auth_basic_password}')
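# Sketch of the htpasswd invocation choice above: -c creates the password file
# on first use, while omitting it appends/updates entries in an existing file.
# This local os.path.exists check is a simplification of the remote
# self.files.exists() call; the sample path and user are hypothetical.
import os

def htpasswd_cmd(fn, username, password):
    flags = '-b' if os.path.exists(fn) else '-b -c'
    return 'htpasswd %s %s %s %s' % (flags, fn, username, password)

print(htpasswd_cmd('/etc/apache2/.htpasswd', 'alice', 'secret'))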
def sync_media(self, sync_set=None, clean=0, iter_local_paths=0):
    """
    Uploads select media to an Apache accessible directory.
    """
    # Ensure a site is selected.
    self.genv.SITE = self.genv.SITE or self.genv.default_site

    r = self.local_renderer

    clean = int(clean)
    self.vprint('Getting site data for %s...' % self.genv.SITE)
    self.set_site_specifics(self.genv.SITE)

    sync_sets = r.env.sync_sets
    if sync_set:
        sync_sets = [sync_set]

    ret_paths = []
    for _sync_set in sync_sets:
        for paths in r.env.sync_sets[_sync_set]:
            r.env.sync_local_path = os.path.abspath(paths['local_path'] % self.genv)
            if paths['local_path'].endswith('/') and not r.env.sync_local_path.endswith('/'):
                r.env.sync_local_path += '/'

            if iter_local_paths:
                ret_paths.append(r.env.sync_local_path)
                continue

            r.env.sync_remote_path = paths['remote_path'] % self.genv

            if clean:
                r.sudo('rm -Rf {apache_sync_remote_path}')

            print('Syncing %s to %s...' % (r.env.sync_local_path, r.env.sync_remote_path))

            r.env.tmp_chmod = paths.get('chmod', r.env.chmod)
            r.sudo('mkdir -p {apache_sync_remote_path}')
            r.sudo('chmod -R {apache_tmp_chmod} {apache_sync_remote_path}')
            r.local('rsync -rvz --progress --recursive --no-p --no-g '
                '--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" {apache_sync_local_path} {user}@{host_string}:{apache_sync_remote_path}')
            r.sudo('chown -R {apache_web_user}:{apache_web_group} {apache_sync_remote_path}')

    if iter_local_paths:
        return ret_paths
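# Hypothetical sync_sets layout consumed by sync_media() above. Each entry maps
# a local path (a trailing slash gives rsync "copy contents" semantics) to a
# remote path; both support %(key)s interpolation against genv. The paths and
# chmod value shown are illustrative only.
sync_sets = {
    'default': [
        {
            'local_path': 'static/media/',            # trailing slash: rsync copies contents
            'remote_path': '/usr/local/%(SITE)s/media',
            'chmod': 775,
        },
    ],
}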
def get_media_timestamp(self):
    """
    Called after a deployment to record any data necessary to detect changes
    for a future deployment.
    """
    from burlap.common import get_last_modified_timestamp
    data = 0
    for path in self.sync_media(iter_local_paths=1):
        # Track the most recent modification time across all media paths.
        data = max(data, get_last_modified_timestamp(path) or data)
    #TODO:hash media names and content
    if self.verbose:
        print('date:', data)
    return data
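# Standalone sketch of a "latest modification time" scan like the one above,
# using os.walk and getmtime (burlap's get_last_modified_timestamp differs in
# detail; this only illustrates the idea).
import os

def last_modified(path):
    latest = 0
    for root, dirs, files in os.walk(path):
        for fn in files:
            latest = max(latest, os.path.getmtime(os.path.join(root, fn)))
    return latest

print(last_modified('.'))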
def record_manifest(self):
    """
    Called after a deployment to record any data necessary to detect changes
    for a future deployment.
    """
    manifest = super(ApacheSatchel, self).record_manifest()
    manifest['available_sites'] = self.genv.available_sites
    manifest['available_sites_by_host'] = self.genv.available_sites_by_host
    manifest['media_timestamp'] = self.get_media_timestamp()
    return manifest
def configure_modevasive(self):
    """
    Installs the mod-evasive Apache module for combating DDoS attacks.

    https://www.linode.com/docs/websites/apache-tips-and-tricks/modevasive-on-apache
    """
    r = self.local_renderer
    if r.env.modevasive_enabled:
        self.install_packages()

        # Write the conf for each Ubuntu version, since they don't conflict.
        fn = r.render_to_file('apache/apache_modevasive.template.conf')

        # Ubuntu 12.04
        r.put(
            local_path=fn,
            remote_path='/etc/apache2/mods-available/mod-evasive.conf',
            use_sudo=True)

        # Ubuntu 14.04
        r.put(
            local_path=fn,
            remote_path='/etc/apache2/mods-available/evasive.conf',
            use_sudo=True)

        self.enable_mod('evasive')
    else:
        #print('self.last_manifest:', self.last_manifest)
        #print('a:', self.last_manifest.apache_modevasive_enabled)
        #print('b:', self.last_manifest.modevasive_enabled)
        if self.last_manifest.modevasive_enabled:
            self.disable_mod('evasive')
def configure_modsecurity(self):
    """
    Installs the mod-security Apache module.

    https://www.modsecurity.org
    """
    r = self.local_renderer
    if r.env.modsecurity_enabled and not self.last_manifest.modsecurity_enabled:
        self.install_packages()

        # Write modsecurity.conf.
        fn = self.render_to_file('apache/apache_modsecurity.template.conf')
        r.put(local_path=fn, remote_path='/etc/modsecurity/modsecurity.conf', use_sudo=True)

        # Write the OWASP rules.
        r.env.modsecurity_download_filename = '/tmp/owasp-modsecurity-crs.tar.gz'
        r.sudo('cd /tmp; wget --output-document={apache_modsecurity_download_filename} {apache_modsecurity_download_url}')
        r.env.modsecurity_download_top = r.sudo(
            "cd /tmp; "
            "tar tzf %(apache_modsecurity_download_filename)s | sed -e 's@/.*@@' | uniq" % self.genv)
        r.sudo('cd /tmp; tar -zxvf %(apache_modsecurity_download_filename)s' % self.genv)
        r.sudo('cd /tmp; cp -R %(apache_modsecurity_download_top)s/* /etc/modsecurity/' % self.genv)
        r.sudo('mv /etc/modsecurity/modsecurity_crs_10_setup.conf.example /etc/modsecurity/modsecurity_crs_10_setup.conf')

        # Activate all base and optional rules.
        r.sudo('rm -f /etc/modsecurity/activated_rules/*')
        r.sudo('cd /etc/modsecurity/base_rules; '
            'for f in * ; do ln -s /etc/modsecurity/base_rules/$f /etc/modsecurity/activated_rules/$f ; done')
        r.sudo('cd /etc/modsecurity/optional_rules; '
            'for f in * ; do ln -s /etc/modsecurity/optional_rules/$f /etc/modsecurity/activated_rules/$f ; done')

        r.env.httpd_conf_append.append('Include "/etc/modsecurity/activated_rules/*.conf"')

        self.enable_mod('evasive')
        self.enable_mod('headers')
    elif not self.env.modsecurity_enabled and self.last_manifest.modsecurity_enabled:
        self.disable_mod('modsecurity')
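# Pure-Python sketch of the rule-activation loop above: every rule file in
# base_rules/ and optional_rules/ gets a symlink in activated_rules/. Running
# this against /etc requires root; it is illustrative, not a drop-in tool.
import os

rules_root = '/etc/modsecurity'
for subdir in ('base_rules', 'optional_rules'):
    src_dir = os.path.join(rules_root, subdir)
    for f in os.listdir(src_dir):
        os.symlink(os.path.join(src_dir, f),
                   os.path.join(rules_root, 'activated_rules', f))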
def configure_modrpaf(self):
    """
    Installs the mod-rpaf Apache module.

    https://github.com/gnif/mod_rpaf
    """
    r = self.local_renderer
    if r.env.modrpaf_enabled:
        self.install_packages()
        self.enable_mod('rpaf')
    else:
        if self.last_manifest.modrpaf_enabled:
            self.disable_mod('mod_rpaf')
def configure_site(self, full=1, site=None, delete_old=0):
    """
    Configures Apache to host one or more websites.
    """
    from burlap import service

    r = self.local_renderer

    print('Configuring Apache...', file=sys.stderr)

    site = site or self.genv.SITE

    if int(delete_old) and site == ALL:
        # Delete all existing enabled and available sites.
        r.sudo('rm -f {sites_available}/*')
        r.sudo('rm -f {sites_enabled}/*')

    if r.env.manage_site_conf:

        # Run an optional customizable command to clear or delete old sites
        # before writing the new ones.
        if r.env.delete_site_command:
            r.sudo(r.env.delete_site_command)

        for _site, site_data in self.iter_sites(site=site, setter=self.set_site_specifics):
            r = self.local_renderer
            #r.env.site = site
            if self.verbose:
                print('-'*80, file=sys.stderr)
                print('Site:', _site, file=sys.stderr)
                print('-'*80, file=sys.stderr)

            r.env.ssl = _site.endswith('_secure')
            r.env.apache_site = _site
            r.env.server_name = r.format(r.env.domain_template)

            # Write the WSGI template.
            if r.env.wsgi_enabled:
                r.pc('Writing WSGI template for site %s...' % _site)
                r.env.wsgi_scriptalias = r.format(r.env.wsgi_scriptalias)
                fn = self.render_to_file(r.env.wsgi_template)
                r.env.wsgi_dir = r.env.remote_dir = os.path.split(r.env.wsgi_scriptalias)[0]
                r.sudo('mkdir -p {remote_dir}')
                r.put(local_path=fn, remote_path=r.env.wsgi_scriptalias, use_sudo=True)

            # Write the site configuration.
            r.pc('Writing site configuration for site %s...' % _site)
            r.env.auth_basic_authuserfile = r.format(self.env.auth_basic_authuserfile)
            r.env.ssl_certificates = list(self.iter_certificates())
            if r.env.server_aliases_template:
                r.env.server_aliases = r.format(r.env.server_aliases_template)
            if r.env.domain_with_sub_template:
                r.env.domain_with_sub = r.format(r.env.domain_with_sub_template)
            if r.env.domain_without_sub_template:
                r.env.domain_without_sub = r.format(r.env.domain_without_sub_template)
            if r.env.domain_template:
                r.env.domain = r.format(r.env.domain_template)
            genv = r.collect_genv()
            genv['current_hostname'] = self.current_hostname
            fn = self.render_to_file(
                self.env.site_template,
                extra=genv,
                formatter=partial(r.format, ignored_variables=self.env.ignored_template_variables))
            r.env.site_conf = _site + '.conf'
            r.env.site_conf_fqfn = os.path.join(r.env.sites_available, r.env.site_conf)
            r.put(local_path=fn, remote_path=r.env.site_conf_fqfn, use_sudo=True)

            self.enable_site(_site)

            self.clear_local_renderer()

    self.enable_mods()

    if int(full):
        # Write the master Apache configuration file.
        if r.env.manage_httpd_conf:
            fn = self.render_to_file('apache/apache_httpd.template.conf')
            r.put(local_path=fn, remote_path=r.env.conf, use_sudo=True)

        # Write the Apache listening-ports configuration.
        if r.env.manage_ports_conf:
            fn = self.render_to_file('apache/apache_ports.template.conf')
            r.put(local_path=fn, remote_path=r.env.ports_path, use_sudo=True)

    r.sudo('chown -R {apache_web_user}:{apache_web_group} {apache_root}')
def maint_up(self):
    """
    Forwards all traffic to a page saying the server is down for maintenance.
    """
    r = self.local_renderer
    fn = self.render_to_file(
        r.env.maintenance_template,
        extra={'current_hostname': self.current_hostname})
    r.put(local_path=fn, remote_path=r.env.maintenance_path, use_sudo=True)
    r.sudo('chown -R {apache_web_user}:{apache_web_group} {maintenance_path}')
def restart(self):
    """
    Supervisor can take a very long time to start and stop, so wait for it.
    """
    n = 60
    sleep_n = int(self.env.max_restart_wait_minutes / 10. * 60)
    for _ in xrange(n):
        self.stop()
        if self.dryrun or not self.is_running():
            break
        print('Waiting for supervisor to stop (%i of %i)...' % (_, n))
        time.sleep(sleep_n)
    self.start()
    for _ in xrange(n):
        if self.dryrun or self.is_running():
            return
        print('Waiting for supervisor to start (%i of %i)...' % (_, n))
        time.sleep(sleep_n)
    raise Exception('Failed to restart service %s!' % self.name)
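# Generic sketch of the poll-with-timeout pattern used by restart() above;
# the predicate, attempt count, and delay are stand-ins.
import time

def wait_until(predicate, attempts=60, delay=5):
    for i in xrange(attempts):
        if predicate():
            return True
        print('Waiting (%i of %i)...' % (i, attempts))
        time.sleep(delay)
    return False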
def record_manifest(self):
    """
    Called after a deployment to record any data necessary to detect changes
    for a future deployment.
    """
    data = super(SupervisorSatchel, self).record_manifest()

    # Celery deploys itself through supervisor, so also monitor its setting
    # changes in the site configs.
    for site_name, site_data in self.genv.sites.items():
        if self.verbose:
            print(site_name, site_data)
        data['celery_has_worker_%s' % site_name] = site_data.get('celery_has_worker', False)

    data['configured'] = True

    # Generate the services list.
    self.write_configs(upload=0)
    #data['services_rendered'] = ''

    return data