desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
def get_keys(self):
    '''
    Return the RSA keypair for the minion, generating and writing it to
    the pki directory first if no private key exists yet.

    :rtype: Crypto.PublicKey.RSA._RSAobj
    :return: The RSA keypair
    '''
    user = self.opts.get(u'user', u'root')
    salt.utils.verify.check_path_traversal(self.opts[u'pki_dir'], user)
    if not os.path.exists(self.rsa_path):
        log.info(u'Generating keys: %s', self.opts[u'pki_dir'])
        gen_keys(self.opts[u'pki_dir'],
                 u'minion',
                 self.opts[u'keysize'],
                 self.opts.get(u'user'))
    # The key exists either way now; load it through a single code path
    # instead of duplicating the read in both branches.
    with salt.utils.files.fopen(self.rsa_path) as f:
        key = RSA.importKey(f.read())
    log.debug(u'Loaded minion key: %s', self.rsa_path)
    return key
|
def gen_token(self, clear_tok):
    '''
    Encrypt a string with the minion private key to verify identity
    with the master.

    :param str clear_tok: A plaintext token to encrypt
    :return: Encrypted token
    :rtype: str
    '''
    keypair = self.get_keys()
    return private_encrypt(keypair, clear_tok)
|
def minion_sign_in_payload(self):
    '''
    Generates the payload used to authenticate with the master
    server. This payload consists of the passed in id_ and the ssh
    public key to encrypt the AES key sent back from the master.

    :return: Payload dictionary
    :rtype: dict
    '''
    payload = {}
    payload[u'cmd'] = u'_auth'
    payload[u'id'] = self.opts[u'id']
    try:
        pubkey_path = os.path.join(self.opts[u'pki_dir'], self.mpub)
        with salt.utils.files.fopen(pubkey_path) as f:
            pub = RSA.importKey(f.read())
        cipher = PKCS1_OAEP.new(pub)
        payload[u'token'] = cipher.encrypt(self.token)
    except Exception as exc:
        # Best effort: the master pubkey may not be cached yet on first
        # auth. Log the reason instead of silently swallowing every error.
        log.debug(u'Unable to encrypt token with the master public key: %s', exc)
    with salt.utils.files.fopen(self.pub_path) as f:
        payload[u'pub'] = f.read()
    return payload
|
def decrypt_aes(self, payload, master_pub=True):
    '''
    This function is used to decrypt the AES seed phrase returned from
    the master server. The seed phrase is decrypted with the SSH RSA
    host key.

    Pass in the encrypted AES key.
    Returns the decrypted AES seed key, a string

    :param dict payload: The incoming payload. This is a dictionary which may have the following keys:
        'aes': The shared AES key
        'enc': The format of the message. ('clear', 'pub', etc)
        'sig': The message signature
        'publish_port': The TCP port which published the message
        'token': The encrypted token used to verify the message.
        'pub_key': The public key of the sender.
    :param bool master_pub: whether a token round-trip is expected because
        the minion already held the master's public key

    :rtype: str
    :return: The decrypted AES seed key; on any verification failure a
        pair of empty strings is returned instead.
    '''
    if self.opts.get(u'auth_trb', False):
        # Debug aid: dump the call stack that triggered this auth cycle
        log.warning(u'Auth Called: %s', u''.join(traceback.format_stack()))
    else:
        log.debug(u'Decrypting the current master AES key')
    key = self.get_keys()
    cipher = PKCS1_OAEP.new(key)
    key_str = cipher.decrypt(payload[u'aes'])
    if (u'sig' in payload):
        m_path = os.path.join(self.opts[u'pki_dir'], self.mpub)
        if os.path.exists(m_path):
            try:
                with salt.utils.files.fopen(m_path) as f:
                    mkey = RSA.importKey(f.read())
            except Exception:
                # Unreadable/corrupt cached master pubkey -> fail closed
                return (u'', u'')
            digest = hashlib.sha256(key_str).hexdigest()
            if six.PY3:
                digest = salt.utils.stringutils.to_bytes(digest)
            # The signature is the sha256 hexdigest of the AES key,
            # encrypted with the master's private key; recover it with
            # the cached public key and compare.
            m_digest = public_decrypt(mkey.publickey(), payload[u'sig'])
            if (m_digest != digest):
                return (u'', u'')
        else:
            # A signature was sent but we have no cached master pubkey
            # to check it against -> fail closed
            return (u'', u'')
    if six.PY3:
        key_str = salt.utils.stringutils.to_str(key_str)
    if (u'_|-' in key_str):
        # AES key and token were packed into one delimited string
        return key_str.split(u'_|-')
    elif (u'token' in payload):
        token = cipher.decrypt(payload[u'token'])
        return (key_str, token)
    elif (not master_pub):
        # No token expected when the minion had no master pubkey yet
        return (key_str, u'')
    return (u'', u'')
|
def verify_pubkey_sig(self, message, sig):
    '''
    Wraps the verify_signature method so we have
    additional checks.

    :rtype: bool
    :return: Success or failure of public key verification
    '''
    if not self.opts[u'master_sign_key_name']:
        log.error(u'Failed to verify the signature of the message because the verification key-pairs name is not defined. Please make sure that master_sign_key_name is defined.')
        return False
    verify_key_name = (self.opts[u'master_sign_key_name'] + u'.pub')
    path = os.path.join(self.opts[u'pki_dir'], verify_key_name)
    if not os.path.isfile(path):
        log.error(u'Verification public key %s does not exist. You need to copy it from the master to the minions pki directory', os.path.basename(path))
        return False
    if verify_signature(path, message, binascii.a2b_base64(sig)):
        log.debug(u'Successfully verified signature of master public key with verification public key %s', verify_key_name)
        return True
    log.debug(u'Failed to verify signature of public key')
    return False
|
def check_auth_deps(self, payload):
    '''
    Checks if both master and minion either sign (master) and
    verify (minion). If one side does not, it should fail.

    :param dict payload: The incoming payload. This is a dictionary which may have the following keys:
        'aes': The shared AES key
        'enc': The format of the message. ('clear', 'pub', 'aes')
        'publish_port': The TCP port which published the message
        'token': The encrypted token used to verify the message.
        'pub_key': The RSA public key of the sender.
    '''
    master_signed = (u'pub_sig' in payload)
    minion_verifies = self.opts[u'verify_master_pubkey_sign']
    # Both sides agree (sign + verify, or neither): fine
    if master_signed and minion_verifies:
        return True
    if not master_signed and not minion_verifies:
        return True
    # Mismatched configuration between master and minion
    if master_signed:
        log.error(u'The masters sent its public-key signature, but signature verification is not enabled on the minion. Either enable signature verification on the minion or disable signing the public key on the master!')
        return False
    log.error(u'The master did not send its public-key signature, but signature verification is enabled on the minion. Either disable signature verification on the minion or enable signing the public on the master!')
    return False
|
def extract_aes(self, payload, master_pub=True):
    '''
    Return the AES key received from the master after the minion has been
    successfully authenticated.

    :param dict payload: The incoming payload. This is a dictionary which may have the following keys:
        'aes': The shared AES key
        'enc': The format of the message. ('clear', 'pub', etc)
        'publish_port': The TCP port which published the message
        'token': The encrypted token used to verify the message.
        'pub_key': The RSA public key of the sender.

    :rtype: str
    :return: The shared AES key received from the master.
    '''
    if not master_pub:
        # No token round-trip to check in this mode
        aes, _token = self.decrypt_aes(payload, master_pub)
        return aes
    try:
        aes, token = self.decrypt_aes(payload, master_pub)
    except Exception:
        log.error(u'The master failed to decrypt the random minion token')
        return u''
    if token != self.token:
        log.error(u'The master failed to decrypt the random minion token')
        return u''
    return aes
|
def verify_master(self, payload, master_pub=True):
    '''
    Verify that the master is the same one that was previously accepted.

    :param dict payload: The incoming payload. This is a dictionary which may have the following keys:
        'aes': The shared AES key
        'enc': The format of the message. ('clear', 'pub', etc)
        'publish_port': The TCP port which published the message
        'token': The encrypted token used to verify the message.
        'pub_key': The RSA public key of the sender.
    :param bool master_pub: Operate as if minion had no master pubkey when it sent auth request, i.e. don't verify
        the minion signature

    :rtype: str
    :return: An empty string on verification failure. On success, the decrypted AES message in the payload.
    '''
    m_pub_fn = os.path.join(self.opts[u'pki_dir'], self.mpub)
    m_pub_exists = os.path.isfile(m_pub_fn)
    if (m_pub_exists and master_pub and (not self.opts[u'open_mode'])):
        # We already trust a cached master pubkey: compare it to the one
        # the master just sent (newline-insensitive)
        with salt.utils.files.fopen(m_pub_fn) as fp_:
            local_master_pub = fp_.read()
        if (payload[u'pub_key'].replace(u'\n', u'').replace(u'\r', u'') != local_master_pub.replace(u'\n', u'').replace(u'\r', u'')):
            # Cached key and presented key differ: only acceptable when
            # the new key's signature can be verified
            if (not self.check_auth_deps(payload)):
                return u''
            if self.opts[u'verify_master_pubkey_sign']:
                if self.verify_signing_master(payload):
                    return self.extract_aes(payload, master_pub=False)
                else:
                    return u''
            else:
                log.error(u"The master key has changed, the salt master could have been subverted, verify salt master's public key")
                return u''
        else:
            # Keys match; optionally still verify the signature each time
            if (not self.check_auth_deps(payload)):
                return u''
            if self.opts[u'always_verify_signature']:
                if self.verify_signing_master(payload):
                    return self.extract_aes(payload)
                else:
                    log.error(u'The masters public could not be verified. Is the verification pubkey %s up to date?', (self.opts[u'master_sign_key_name'] + u'.pub'))
                    return u''
            else:
                return self.extract_aes(payload)
    else:
        # First contact (no cached key), or open_mode, or no token sent
        if (not self.check_auth_deps(payload)):
            return u''
        if self.opts[u'verify_master_pubkey_sign']:
            if self.verify_signing_master(payload):
                return self.extract_aes(payload, master_pub=False)
            else:
                return u''
        else:
            if (not m_pub_exists):
                # Trust-on-first-use: cache the presented master pubkey
                with salt.utils.files.fopen(m_pub_fn, u'wb+') as fp_:
                    fp_.write(salt.utils.stringutils.to_bytes(payload[u'pub_key']))
            return self.extract_aes(payload, master_pub=False)
|
def __new__(cls, opts, io_loop=None):
    '''
    Only create one instance of SAuth per __key()
    '''
    key = cls.__key(opts)
    auth = SAuth.instances.get(key)
    if auth is not None:
        log.debug(u'Re-using SAuth for %s', key)
        return auth
    # Not cached yet: build, initialize and register a new singleton
    log.debug(u'Initializing new SAuth for %s', key)
    auth = object.__new__(cls)
    auth.__singleton_init__(opts)
    SAuth.instances[key] = auth
    return auth
|
def __singleton_init__(self, opts, io_loop=None):
    '''
    Init an Auth instance

    :param dict opts: Options for this server
    :return: Auth instance
    :rtype: Auth
    '''
    self.opts = opts
    # The token must be bytes on PY3 for the RSA encryption round-trip
    if six.PY2:
        self.token = Crypticle.generate_key_string()
    else:
        self.token = salt.utils.stringutils.to_bytes(Crypticle.generate_key_string())
    self.serial = salt.payload.Serial(self.opts)
    self.pub_path = os.path.join(self.opts[u'pki_dir'], u'minion.pub')
    self.rsa_path = os.path.join(self.opts[u'pki_dir'], u'minion.pem')
    # Pick the cached master pubkey filename for this daemon's role
    if (u'syndic_master' in self.opts):
        self.mpub = u'syndic_master.pub'
    elif (u'alert_master' in self.opts):
        self.mpub = u'monitor_master.pub'
    else:
        self.mpub = u'minion_master.pub'
    if (not os.path.isfile(self.pub_path)):
        # Generate the minion keypair on first start
        self.get_keys()
|
def authenticate(self, _=None):
    '''
    Authenticate with the master, this method breaks the functional
    paradigm, it will update the master information from a fresh sign
    in, signing in can occur as often as needed to keep up with the
    revolving master AES key.

    :rtype: Crypticle
    :returns: A crypticle used for encryption operations
    '''
    acceptance_wait_time = self.opts[u'acceptance_wait_time']
    acceptance_wait_time_max = self.opts[u'acceptance_wait_time_max']
    channel = salt.transport.client.ReqChannel.factory(self.opts, crypt=u'clear')
    if (not acceptance_wait_time_max):
        acceptance_wait_time_max = acceptance_wait_time
    # Keep retrying sign-in until the master accepts the key
    while True:
        creds = self.sign_in(channel=channel)
        if (creds == u'retry'):
            if self.opts.get(u'caller'):
                # salt-call: surface the failure and exit immediately
                print(u'Minion failed to authenticate with the master, has the minion key been accepted?')
                sys.exit(2)
            if acceptance_wait_time:
                log.info(u'Waiting %s seconds before retry.', acceptance_wait_time)
                time.sleep(acceptance_wait_time)
            if (acceptance_wait_time < acceptance_wait_time_max):
                # Exponential backoff: double the wait, capped at max
                acceptance_wait_time += acceptance_wait_time
                log.debug(u'Authentication wait time is %s', acceptance_wait_time)
            continue
        break
    self._creds = creds
    self._crypticle = Crypticle(self.opts, creds[u'aes'])
|
def sign_in(self, timeout=60, safe=True, tries=1, channel=None):
    '''
    Send a sign in request to the master, sets the key information and
    returns a dict containing the master publish interface to bind to
    and the decrypted aes key for transport decryption.

    :param int timeout: Number of seconds to wait before timing out the sign-in request
    :param bool safe: If True, do not raise an exception on timeout. Retry instead.
    :param int tries: The number of times to try to authenticate before giving up.
    :param channel: Optional pre-built clear ReqChannel to reuse.

    :raises SaltReqTimeoutError: If the sign-in request has timed out and :param safe: is not set

    :return: Return a string on failure indicating the reason for failure. On success, return a dictionary
        with the publication port and the shared AES key.
    '''
    auth = {}
    # Explicit auth_* options override the call arguments
    auth_timeout = self.opts.get(u'auth_timeout', None)
    if auth_timeout is not None:
        timeout = auth_timeout
    auth_safemode = self.opts.get(u'auth_safemode', None)
    if auth_safemode is not None:
        safe = auth_safemode
    auth_tries = self.opts.get(u'auth_tries', None)
    if auth_tries is not None:
        tries = auth_tries
    m_pub_fn = os.path.join(self.opts[u'pki_dir'], self.mpub)
    auth[u'master_uri'] = self.opts[u'master_uri']
    if not channel:
        channel = salt.transport.client.ReqChannel.factory(self.opts, crypt=u'clear')
    sign_in_payload = self.minion_sign_in_payload()
    try:
        payload = channel.send(sign_in_payload, tries=tries, timeout=timeout)
    except SaltReqTimeoutError as e:
        if safe:
            log.warning(u'SaltReqTimeoutError: %s', e)
            return u'retry'
        raise SaltClientError(u'Attempt to authenticate with the salt master failed with timeout error')
    if u'load' in payload:
        if u'ret' in payload[u'load']:
            if not payload[u'load'][u'ret']:
                if self.opts[u'rejected_retry']:
                    # Fixed typos in the original message ('to to',
                    # 're-authenicate').
                    log.error(
                        u"The Salt Master has rejected this minion's public key.\n"
                        u'To repair this issue, delete the public key for this minion on the Salt Master.\n'
                        u'The Salt Minion will attempt to re-authenticate.'
                    )
                    return u'retry'
                else:
                    log.critical(
                        u"The Salt Master has rejected this minion's public key!\n"
                        u'To repair this issue, delete the public key for this minion on the Salt Master and restart this minion.\n'
                        u'Or restart the Salt Master in open mode to clean out the keys. The Salt Minion will now exit.'
                    )
                    sys.exit(salt.defaults.exitcodes.EX_NOPERM)
            elif payload[u'load'][u'ret'] == u'full':
                # Master is at capacity; caller should back off
                return u'full'
            else:
                log.error(u"The Salt Master has cached the public key for this node. If this is the first time connecting to this master then this key may need to be accepted using 'salt-key -a %s' on the salt master. This salt minion will wait for %s seconds before attempting to re-authenticate.", self.opts[u'id'], self.opts[u'acceptance_wait_time'])
                return u'retry'
    # A token was only sent if we already held the master pubkey
    auth[u'aes'] = self.verify_master(payload, master_pub=(u'token' in sign_in_payload))
    if not auth[u'aes']:
        log.critical(u"The Salt Master server's public key did not authenticate!\nThe master may need to be updated if it is a version of Salt lower than %s, or\nIf you are confident that you are connecting to a valid Salt Master, then remove the master public key and restart the Salt Minion.\nThe master public key can be found at:\n%s", salt.version.__version__, m_pub_fn)
        sys.exit(42)
    # Optional fingerprint pinning of the master public key
    if self.opts.get(u'syndic_master', False):
        syndic_finger = self.opts.get(u'syndic_finger', self.opts.get(u'master_finger', False))
        if syndic_finger:
            if salt.utils.pem_finger(m_pub_fn, sum_type=self.opts[u'hash_type']) != syndic_finger:
                self._finger_fail(syndic_finger, m_pub_fn)
    elif self.opts.get(u'master_finger', False):
        if salt.utils.pem_finger(m_pub_fn, sum_type=self.opts[u'hash_type']) != self.opts[u'master_finger']:
            self._finger_fail(self.opts[u'master_finger'], m_pub_fn)
    auth[u'publish_port'] = payload[u'publish_port']
    return auth
|
def encrypt(self, data):
    '''
    encrypt data with AES-CBC and sign it with HMAC-SHA256

    :param data: plaintext (str on PY2, bytes on PY3)
    :return: iv + ciphertext followed by the HMAC-SHA256 tag
    '''
    (aes_key, hmac_key) = self.keys
    # PKCS#7-style padding: always add 1..AES_BLOCK_SIZE bytes, each
    # equal to the pad length, so decrypt can strip it unambiguously
    pad = (self.AES_BLOCK_SIZE - (len(data) % self.AES_BLOCK_SIZE))
    if six.PY2:
        data = (data + (pad * chr(pad)))
    else:
        data = (data + salt.utils.stringutils.to_bytes((pad * chr(pad))))
    iv_bytes = os.urandom(self.AES_BLOCK_SIZE)
    cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)
    data = (iv_bytes + cypher.encrypt(data))
    # Encrypt-then-MAC: the tag covers iv + ciphertext
    sig = hmac.new(hmac_key, data, hashlib.sha256).digest()
    return (data + sig)
|
def decrypt(self, data):
    '''
    verify HMAC-SHA256 signature and decrypt data with AES-CBC

    :param data: iv + ciphertext followed by the HMAC-SHA256 tag
    :return: the decrypted plaintext with padding stripped
    :raises AuthenticationError: if the HMAC tag does not verify
    '''
    (aes_key, hmac_key) = self.keys
    sig = data[-self.SIG_SIZE:]
    data = data[:-self.SIG_SIZE]
    if six.PY3 and not isinstance(data, bytes):
        data = salt.utils.stringutils.to_bytes(data)
    mac_bytes = hmac.new(hmac_key, data, hashlib.sha256).digest()
    # hmac.compare_digest is the stdlib constant-time comparison; it
    # replaces the hand-rolled XOR loop and also rejects length
    # mismatches, so the separate length check is no longer needed.
    if not hmac.compare_digest(mac_bytes, sig):
        log.debug(u'Failed to authenticate message')
        raise AuthenticationError(u'message authentication failed')
    iv_bytes = data[:self.AES_BLOCK_SIZE]
    data = data[self.AES_BLOCK_SIZE:]
    cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)
    data = cypher.decrypt(data)
    # Strip the padding: the last byte encodes the pad length
    if six.PY2:
        return data[:-ord(data[-1])]
    return data[:-data[-1]]
|
def dumps(self, obj):
    '''
    Serialize and encrypt a python object
    '''
    serialized = self.serial.dumps(obj)
    return self.encrypt(self.PICKLE_PAD + serialized)
|
def loads(self, data, raw=False):
    '''
    Decrypt and un-serialize a python object
    '''
    data = self.decrypt(data)
    # Reject anything that does not carry the expected pickle marker
    if not data.startswith(self.PICKLE_PAD):
        return {}
    return self.serial.loads(data[len(self.PICKLE_PAD):], raw=raw)
|
def find(self, path):
    '''
    Generate filenames in path that satisfy criteria specified in
    the constructor.

    This method is a generator and should be repeatedly called
    until there are no more results.
    '''
    if (self.mindepth < 1):
        # Depth 0: test the starting path itself
        (dirpath, name) = os.path.split(path)
        (match, fstat) = self._check_criteria(dirpath, name, path)
        if match:
            for result in self._perform_actions(path, fstat=fstat):
                (yield result)
    for (dirpath, dirs, files) in os.walk(path):
        relpath = os.path.relpath(dirpath, path)
        # Entries directly under `path` are at depth 1
        depth = (path_depth(relpath) + 1)
        if ((depth >= self.mindepth) and ((self.maxdepth is None) or (self.maxdepth >= depth))):
            for name in (dirs + files):
                fullpath = os.path.join(dirpath, name)
                (match, fstat) = self._check_criteria(dirpath, name, fullpath)
                if match:
                    for result in self._perform_actions(fullpath, fstat=fstat):
                        (yield result)
        if ((self.maxdepth is not None) and (depth > self.maxdepth)):
            # Prune the walk: clearing dirs in-place stops os.walk from
            # descending further
            dirs[:] = []
|
def error(self, msg):
    '''
    error(msg : string)

    Print a usage message incorporating 'msg' to stderr and exit.
    This keeps option parsing exit status uniform for all parsing errors.
    '''
    self.print_usage(sys.stderr)
    message = '{0}: error: {1}\n'.format(self.get_prog_name(), msg)
    self.exit(salt.defaults.exitcodes.EX_USAGE, message)
|
def check_pidfile(self):
    '''
    Report whether a pidfile exists
    '''
    # Alias the import so it does not shadow this method's name locally
    from salt.utils.process import check_pidfile as _check_pidfile
    return _check_pidfile(self.config['pidfile'])
|
def get_pidfile(self):
    '''
    Return a pid contained in a pidfile
    '''
    # Alias the import so it does not shadow this method's name locally
    from salt.utils.process import get_pidfile as _get_pidfile
    return _get_pidfile(self.config['pidfile'])
|
def check_running(self):
    '''
    Check if a pid file exists and if it is associated with
    a running process.

    :rtype: bool

    Fix: the original called ``check_pidfile()`` three times; if the
    pidfile appeared between the first (False) and second call, ``pid``
    was referenced before assignment (NameError). Check once and reuse
    the result.
    '''
    if not self.check_pidfile():
        return False
    pid = self.get_pidfile()
    if not salt.utils.platform.is_windows():
        # On POSIX also make sure we are not looking at our own parent
        return self.is_daemonized(pid) and os.getppid() != pid
    return self.is_daemonized(pid)
|
def _find_raet_minion(self, opts):
    '''
    Returns true if local RAET Minion is available
    '''
    # NOTE(review): indentation in the source dump is ambiguous around the
    # RAET import; this layout assumes the Yard lookup only runs for the
    # minion kind — confirm against upstream.
    yardname = 'manor'
    dirpath = opts['sock_dir']
    role = opts.get('id')
    if (not role):
        emsg = 'Missing role required to setup RAET SaltCaller.'
        logging.getLogger(__name__).error((emsg + '\n'))
        raise ValueError(emsg)
    kind = opts.get('__role')
    if (kind not in kinds.APPL_KINDS):
        emsg = "Invalid application kind = '{0}' for RAET SaltCaller.".format(kind)
        logging.getLogger(__name__).error((emsg + '\n'))
        raise ValueError(emsg)
    if (kind in [kinds.APPL_KIND_NAMES[kinds.applKinds.minion], kinds.APPL_KIND_NAMES[kinds.applKinds.caller]]):
        lanename = '{0}_{1}'.format(role, kind)
    else:
        emsg = "Unsupported application kind '{0}' for RAET SaltCaller.".format(kind)
        logging.getLogger(__name__).error((emsg + '\n'))
        raise ValueError(emsg)
    if (kind == kinds.APPL_KIND_NAMES[kinds.applKinds.minion]):
        from raet.lane.yarding import Yard
        (ha, dirpath) = Yard.computeHa(dirpath, lanename, yardname)
        # A manor yard is a unix socket: it exists but is neither a
        # regular file nor a directory
        if (os.path.exists(ha) and (not os.path.isfile(ha)) and (not os.path.isdir(ha))):
            return True
    return False
|
def __init__(self, *dependencies, **kwargs):
    '''
    The decorator is instantiated with a list of dependencies (string of
    global name)

    An example use of this would be:

        @depends('modulename')
        def test():
            return 'foo'

        OR

        @depends('modulename', fallback_function=function)
        def test():
            return 'foo'
    '''
    log.trace('Depends decorator instantiated with dep list of {0}'.format(dependencies))
    fallback = kwargs.get('fallback_function')
    self.dependencies = dependencies
    self.fallback_function = fallback
|
def __call__(self, function):
    '''
    The decorator is "__call__"d with the function, we take that function
    and determine which module and function name it is to store in the
    class wide depandancy_dict
    '''
    try:
        # The caller's frame gives us the loader-assigned module name;
        # rsplit by the last two dots yields the loader kind and module
        frame = inspect.stack()[1][0]
        (_, kind, mod_name) = frame.f_globals['__name__'].rsplit('.', 2)
        fun_name = function.__name__
        for dep in self.dependencies:
            # Keep the frame so enforce_dependencies can inspect its
            # globals later, plus the optional fallback
            self.dependency_dict[kind][dep][(mod_name, fun_name)] = (frame, self.fallback_function)
    except Exception as exc:
        log.error('Exception encountered when attempting to inspect frame in dependency decorator: {0}'.format(exc))
    # Always return the function unchanged; enforcement happens later
    return function
|
@classmethod
def enforce_dependencies(cls, functions, kind):
    '''
    This is a class global method to enforce the dependencies that you
    currently know about.

    It will modify the "functions" dict and remove/replace modules that
    are missing dependencies.
    '''
    for (dependency, dependent_dict) in six.iteritems(cls.dependency_dict[kind]):
        for ((mod_name, func_name), (frame, fallback_function)) in six.iteritems(dependent_dict):
            # An explicit `True` marks an unconditionally satisfied dep
            if (dependency is True):
                log.trace('Dependency for {0}.{1} exists, not unloading'.format(mod_name, func_name))
                continue
            # Satisfied when the name is importable in the module's
            # globals (or locals) captured at decoration time
            if ((dependency in frame.f_globals) or (dependency in frame.f_locals)):
                log.trace('Dependency ({0}) already loaded inside {1}, skipping'.format(dependency, mod_name))
                continue
            log.trace('Unloading {0}.{1} because dependency ({2}) is not imported'.format(mod_name, func_name, dependency))
            if frame:
                try:
                    # Honor loader alias names (__func_alias__)
                    func_name = frame.f_globals['__func_alias__'][func_name]
                except (AttributeError, KeyError):
                    pass
            mod_key = '{0}.{1}'.format(mod_name, func_name)
            if (mod_key not in functions):
                continue
            try:
                if (fallback_function is not None):
                    # Replace with the fallback rather than removing
                    functions[mod_key] = fallback_function
                else:
                    del functions[mod_key]
            except AttributeError:
                log.trace('{0} already removed, skipping'.format(mod_key))
                continue
|
def __init__(self, globals, version):
    '''
    Constructor.

    :param globals: Module globals. Important for finding out replacement functions
    :param version: Expiration version
    :return:
    '''
    from salt.version import SaltStackVersion, __saltstack_version__
    self._globals = globals
    self._exp_version_name = version
    self._exp_version = SaltStackVersion.from_name(self._exp_version_name)
    # Running version tuple, compared against the expiration version
    self._curr_version = __saltstack_version__.info
    # Deferred exception to raise at call time (set by subclasses)
    self._raise_later = None
    # Resolved target function and its original name (set by __call__)
    self._function = None
    self._orig_f_name = None
|
def _get_args(self, kwargs):
    '''
    Extract function-specific keywords from all of the kwargs.

    :param dict kwargs: keyword arguments; CLI-supplied positional args
        may be packed in a list under '__pub_arg'
    :return: tuple of (positional args list, keyword args dict)
    '''
    _args = list()
    _kwargs = dict()
    if ('__pub_arg' in kwargs):
        for arg_item in kwargs.get('__pub_arg', list()):
            # isinstance instead of `type(...) == dict` so dict
            # subclasses (e.g. OrderedDict) are also treated as kwargs
            if isinstance(arg_item, dict):
                _kwargs.update(arg_item.copy())
            else:
                _args.append(arg_item)
    else:
        _kwargs = kwargs.copy()
    return (_args, _kwargs)
|
def _call_function(self, kwargs):
    '''
    Call target function that has been decorated.

    :param dict kwargs: raw kwargs (positional args packed under '__pub_arg')
    :return: the decorated function's result, or its docstring when it
        was called with an incompatible signature
    :raises CommandExecutionError: if no successor function was resolved
    '''
    if self._raise_later:
        raise self._raise_later
    if self._function:
        (args, kwargs) = self._get_args(kwargs)
        try:
            return self._function(*args, **kwargs)
        except TypeError as error:
            # Bug fix: str.replace needs the function *name*; the
            # original passed the function object, raising TypeError.
            error = str(error).replace(self._function.__name__, self._orig_f_name)
            log.error('Function "{f_name}" was not properly called: {error}'.format(f_name=self._orig_f_name, error=error))
            return self._function.__doc__
        except Exception as error:
            log.error('Unhandled exception occurred in function "{f_name}: {error}'.format(f_name=self._function.__name__, error=error))
            raise error
    else:
        raise CommandExecutionError('Function is deprecated, but the successor function was not found.')
|
def __call__(self, function):
    '''
    Callable method of the decorator object when
    the decorated function is gets called.

    :param function:
    :return:
    '''
    # Record the wrapped target and its original name; subclasses build
    # the actual wrapper on top of this.
    self._function = function
    self._orig_f_name = function.__name__
|
def __init__(self, globals, version, with_successor=None):
    '''
    Constructor of the decorator 'is_deprecated'.

    :param globals: Module globals
    :param version: Version to be deprecated
    :param with_successor: Successor function (optional)
    :return:
    '''
    # Delegate the shared bookkeeping to the base decorator
    _DeprecationDecorator.__init__(self, globals, version)
    self._successor = with_successor
|
def __call__(self, function):
    '''
    Callable method of the decorator object when
    the decorated function is gets called.

    :param function:
    :return: the wrapping function
    '''
    _DeprecationDecorator.__call__(self, function)

    def _decorate(*args, **kwargs):
        '''
        Decorator function.

        :param args:
        :param kwargs:
        :return:
        '''
        if (self._curr_version < self._exp_version):
            # Not yet expired: log a deprecation warning and run anyway
            msg = ['The function "{f_name}" is deprecated and will expire in version "{version_name}".'.format(f_name=self._function.__name__, version_name=self._exp_version_name)]
            if self._successor:
                msg.append('Use successor "{successor}" instead.'.format(successor=self._successor))
            log.warning(' '.join(msg))
        else:
            # Expired: log and refuse to run
            msg = ['The lifetime of the function "{f_name}" expired.'.format(f_name=self._function.__name__)]
            if self._successor:
                msg.append('Please use its successor "{successor}" instead.'.format(successor=self._successor))
            log.warning(' '.join(msg))
            raise CommandExecutionError(' '.join(msg))
        return self._call_function(kwargs)
    return _decorate
|
def __init__(self, globals, version, with_name=None, policy=_DeprecationDecorator.OPT_OUT):
    '''
    Constructor of the decorator 'with_deprecated'

    :param globals:
    :param version:
    :param with_name:
    :param policy:
    :return:
    '''
    # Shared bookkeeping lives in the base decorator
    _DeprecationDecorator.__init__(self, globals, version)
    self._with_name = with_name
    self._policy = policy
|
def _set_function(self, function):
    '''
    Based on the configuration, set to execute an old or a new function.

    :param function: the new (successor) function being decorated
    :return:
    '''
    full_name = '{m_name}.{f_name}'.format(
        m_name=(self._globals.get(self.MODULE_NAME, '') or self._globals['__name__'].split('.')[-1]),
        f_name=function.__name__)
    if full_name.startswith('.'):
        # No module name could be determined; defer the error to call time
        self._raise_later = CommandExecutionError('Module not found for function "{f_name}"'.format(f_name=function.__name__))
    # Bug fix: the defaults were the *string* '{}', which has no .get()
    # and would raise AttributeError below; use real empty dicts.
    opts = self._globals.get('__opts__', {})
    pillar = self._globals.get('__pillar__', {})
    use_deprecated = ((full_name in opts.get(self.CFG_USE_DEPRECATED, list())) or (full_name in pillar.get(self.CFG_USE_DEPRECATED, list())))
    use_superseded = ((full_name in opts.get(self.CFG_USE_SUPERSEDED, list())) or (full_name in pillar.get(self.CFG_USE_SUPERSEDED, list())))
    if (use_deprecated and use_superseded):
        raise SaltConfigurationError("Function '{0}' is mentioned both in deprecated and superseded sections. Please remove any of that.".format(full_name))
    # Old function is looked up by explicit name or the '_name' convention
    old_function = self._globals.get((self._with_name or '_{0}'.format(function.__name__)))
    if (self._policy == self.OPT_IN):
        self._function = (function if use_superseded else old_function)
    else:
        self._function = (old_function if use_deprecated else function)
|
def _is_used_deprecated(self):
    '''
    Returns True, if a component configuration explicitly is
    asking to use an old version of the deprecated function.

    :return: tuple of (is_deprecated_in_use, fully qualified func path)
    '''
    func_path = '{m_name}.{f_name}'.format(
        m_name=(self._globals.get(self.MODULE_NAME, '') or self._globals['__name__'].split('.')[-1]),
        f_name=self._orig_f_name)
    # Bug fix: use empty-dict defaults so a missing __opts__/__pillar__
    # cannot raise AttributeError on the chained .get() (the original
    # called .get('__opts__') with no default in the first two terms).
    opts = self._globals.get('__opts__', {})
    pillar = self._globals.get('__pillar__', {})
    used = (func_path in opts.get(self.CFG_USE_DEPRECATED, list())
            or func_path in pillar.get(self.CFG_USE_DEPRECATED, list())
            or (self._policy == self.OPT_IN
                and func_path not in opts.get(self.CFG_USE_SUPERSEDED, list())
                and func_path not in pillar.get(self.CFG_USE_SUPERSEDED, list())))
    return (used, func_path)
|
def __call__(self, function):
    '''
    Callable method of the decorator object when
    the decorated function is gets called.

    :param function:
    :return: the wrapping function
    '''
    _DeprecationDecorator.__call__(self, function)

    def _decorate(*args, **kwargs):
        '''
        Decorator function.

        :param args:
        :param kwargs:
        :return:
        '''
        # Resolve old vs. new target lazily, per call, since config
        # (__opts__/__pillar__) may not be loaded at decoration time
        self._set_function(function)
        (is_deprecated, func_path) = self._is_used_deprecated()
        if is_deprecated:
            if (self._curr_version < self._exp_version):
                # Deprecated version still within its lifetime: warn only
                msg = list()
                if self._with_name:
                    msg.append('The function "{f_name}" is deprecated and will expire in version "{version_name}".'.format(f_name=((self._with_name.startswith('_') and self._orig_f_name) or self._with_name), version_name=self._exp_version_name))
                    msg.append('Use its successor "{successor}" instead.'.format(successor=self._orig_f_name))
                else:
                    msg.append('The function "{f_name}" is using its deprecated version and will expire in version "{version_name}".'.format(f_name=func_path, version_name=self._exp_version_name))
                log.warning(' '.join(msg))
            else:
                # Expired: log an error and refuse to run
                msg_patt = 'The lifetime of the function "{f_name}" expired.'
                if (('_' + self._orig_f_name) == self._function.__name__):
                    msg = [msg_patt.format(f_name=self._orig_f_name), 'Please turn off its deprecated version in the configuration']
                else:
                    msg = ['Although function "{f_name}" is called, an alias "{f_alias}" is configured as its deprecated version.'.format(f_name=self._orig_f_name, f_alias=(self._with_name or self._orig_f_name)), msg_patt.format(f_name=(self._with_name or self._orig_f_name)), 'Please use its successor "{successor}" instead.'.format(successor=self._orig_f_name)]
                log.error(' '.join(msg))
                raise CommandExecutionError(' '.join(msg))
        return self._call_function(kwargs)
    # Preserve the docstring of the resolved target on the wrapper
    _decorate.__doc__ = self._function.__doc__
    return _decorate
|
def __init__(self, name=None):
    '''
    Store the optional override name under which the decorated function
    will be registered.
    '''
    self.name = name
|
def __call__(self, function):
    '''
    Register ``function`` in the class-wide jinja filter registry under
    its own name (or the override name) and return it unchanged.
    '''
    filter_name = self.name or function.__name__
    # First registration wins; do not overwrite an existing filter
    if filter_name not in self.salt_jinja_filters:
        log.debug(u"Marking '%s' as a jinja filter", filter_name)
        self.salt_jinja_filters[filter_name] = function
    return function
|
def __init__(self, name=None):
    '''
    Store the optional override name under which the decorated function
    will be registered.
    '''
    self.name = name
|
def __call__(self, function):
    '''
    Register ``function`` in the class-wide jinja test registry under
    its own name (or the override name) and return it unchanged.
    '''
    test_name = self.name or function.__name__
    # First registration wins; do not overwrite an existing test
    if test_name not in self.salt_jinja_tests:
        log.debug("Marking '%s' as a jinja test", test_name)
        self.salt_jinja_tests[test_name] = function
    return function
|
def __init__(self, name=None):
    '''
    Store the optional override name under which the decorated function
    will be registered.
    '''
    self.name = name
|
def __call__(self, function):
    '''
    Register ``function`` in the class-wide jinja global registry under
    its own name (or the override name) and return it unchanged.
    '''
    global_name = self.name or function.__name__
    # First registration wins; do not overwrite an existing global
    if global_name not in self.salt_jinja_globals:
        log.debug('Marking "{0}" as a jinja global'.format(global_name))
        self.salt_jinja_globals[global_name] = function
    return function
|
def __enter__(self):
    '''
    Context management protocol. Returns self.
    '''
    return self
|
def __exit__(self, *args):
    '''
    Context management protocol. Calls close()
    '''
    # Release resources regardless of any exception info in *args
    self.close()
|
def file_client(self):
    '''
    Setup and return file_client
    '''
    # Lazily create the client once, then keep handing back the cached one
    if self._file_client:
        return self._file_client
    self._file_client = salt.fileclient.get_file_client(self.opts, self.pillar_rend)
    return self._file_client
|
def __call__(self, id_, requisite='require'):
    '''
    When an object is called it is being used as a requisite
    '''
    req = StateRequisite(requisite, self.module, id_)
    return req
|
def __init__(self, keydata):
    '''
    Init an RSAX931Signer instance

    :param str keydata: The RSA private key in PEM format
    :raises ValueError: if the PEM data cannot be parsed as a private key
    '''
    keydata = salt.utils.stringutils.to_bytes(keydata, 'ascii')
    # Wrap the PEM text in an OpenSSL memory BIO for parsing
    self._bio = libcrypto.BIO_new_mem_buf(keydata, len(keydata))
    self._rsa = c_void_p(libcrypto.RSA_new())
    if (not libcrypto.PEM_read_bio_RSAPrivateKey(self._bio, pointer(self._rsa), None, None)):
        raise ValueError('invalid RSA private key')
|
def sign(self, msg):
    '''
    Sign a message (digest) using the private key

    :param str msg: The message (digest) to sign
    :rtype: str
    :return: The X9.31 signature bytes
    :raises ValueError: if the raw RSA private-encrypt operation fails
    '''
    # Buffer sized to the RSA modulus — the maximum signature length
    buf = create_string_buffer(libcrypto.RSA_size(self._rsa))
    msg = salt.utils.stringutils.to_bytes(msg)
    size = libcrypto.RSA_private_encrypt(len(msg), msg, buf, self._rsa, RSA_X931_PADDING)
    if (size < 0):
        raise ValueError('Unable to encrypt message')
    return buf[0:size]
|
def __init__(self, pubdata):
    '''
    Init an RSAX931Verifier instance

    :param str pubdata: The RSA public key in PEM format
    :raises ValueError: if the PEM data cannot be parsed as a public key
    '''
    pubdata = salt.utils.stringutils.to_bytes(pubdata, 'ascii')
    # Rewrite 'BEGIN RSA PUBLIC KEY' headers into the generic PEM form
    # that PEM_read_bio_RSA_PUBKEY expects
    pubdata = pubdata.replace(six.b('RSA '), six.b(''))
    self._bio = libcrypto.BIO_new_mem_buf(pubdata, len(pubdata))
    self._rsa = c_void_p(libcrypto.RSA_new())
    if (not libcrypto.PEM_read_bio_RSA_PUBKEY(self._bio, pointer(self._rsa), None, None)):
        raise ValueError('invalid RSA public key')
|
def verify(self, signed):
    '''
    Recover the message (digest) from the signature using the public key

    :param str signed: The signature created with the private key
    :rtype: str
    :return: The message (digest) recovered from the signature
    :raises ValueError: if the raw RSA public-decrypt operation fails
    '''
    # Buffer sized to the RSA modulus — the maximum recovered length
    buf = create_string_buffer(libcrypto.RSA_size(self._rsa))
    signed = salt.utils.stringutils.to_bytes(signed)
    size = libcrypto.RSA_public_decrypt(len(signed), signed, buf, self._rsa, RSA_X931_PADDING)
    if (size < 0):
        raise ValueError('Unable to decrypt message')
    return buf[0:size]
|
'Initialize the updates collection. Can be accessed via
``Updates.updates``'
def __init__(self):
    '''
    Initialize the updates collection. Can be accessed via
    ``Updates.updates``.
    '''
    # COM collection object used to accumulate Windows Update entries.
    self.updates = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
|
'Return how many records are in the Microsoft Update Collection
Returns:
int: The number of updates in the collection
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.count()'
def count(self):
    '''
    Return how many records are in the Microsoft Update Collection.

    Returns:
        int: The number of updates in the collection
    '''
    total = self.updates.Count
    return total
|
'Create a dictionary with the details for the updates in the collection.
Returns:
dict: Details about each update
.. code-block:: cfg
List of Updates:
{\'<GUID>\': {\'Title\': <title>,
\'KB\': <KB>,
\'GUID\': <the globally unique identifier for the update>
\'Description\': <description>,
\'Downloaded\': <has the update been downloaded>,
\'Installed\': <has the update been installed>,
\'Mandatory\': <is the update mandatory>,
\'UserInput\': <is user input required>,
\'EULAAccepted\': <has the EULA been accepted>,
\'Severity\': <update severity>,
\'NeedsReboot\': <is the update installed and awaiting reboot>,
\'RebootBehavior\': <will the update require a reboot>,
\'Categories\': [ \'<category 1>\',
\'<category 2>\',
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.list()'
def list(self):
    '''
    Create a dictionary with the details for the updates in the collection.

    Returns:
        dict: Per-update details keyed by update GUID, or the string
        'Nothing to return' when the collection is empty.
    '''
    if self.count() == 0:
        return 'Nothing to return'
    log.debug('Building a detailed report of the results.')
    results = {}
    for update in self.updates:
        guid = update.Identity.UpdateID
        behavior = update.InstallationBehavior
        results[guid] = {
            'guid': guid,
            'Title': str(update.Title),
            'Type': self.update_types[update.Type],
            'Description': update.Description,
            'Downloaded': bool(update.IsDownloaded),
            'Installed': bool(update.IsInstalled),
            'Mandatory': bool(update.IsMandatory),
            'EULAAccepted': bool(update.EulaAccepted),
            'NeedsReboot': bool(update.RebootRequired),
            'Severity': str(update.MsrcSeverity),
            'UserInput': bool(behavior.CanRequestUserInput),
            'RebootBehavior': self.reboot_behavior[behavior.RebootBehavior],
            'KBs': ['KB' + item for item in update.KBArticleIDs],
            'Categories': [item.Name for item in update.Categories],
        }
    return results
|
'Create a dictionary with a summary of the updates in the collection.
Returns:
dict: Summary of the contents of the collection
.. code-block:: cfg
Summary of Updates:
{\'Total\': <total number of updates returned>,
\'Available\': <updates that are not downloaded or installed>,
\'Downloaded\': <updates that are downloaded but not installed>,
\'Installed\': <updates installed (usually 0 unless installed=True)>,
\'Categories\': { <category 1>: <total for that category>,
<category 2>: <total for category 2>,
Code Example:
.. code-block:: python
import salt.utils.win_update
updates = salt.utils.win_update.Updates()
updates.summary()'
def summary(self):
    '''
    Create a dictionary with a summary of the updates in the collection.

    Returns:
        dict: Total/Available/Downloaded/Installed counters plus
        per-Category and per-Severity counts, or the string
        'Nothing to return' when the collection is empty.
    '''
    if self.count() == 0:
        return 'Nothing to return'
    results = {'Total': 0, 'Available': 0, 'Downloaded': 0,
               'Installed': 0, 'Categories': {}, 'Severity': {}}
    for update in self.updates:
        downloaded = salt.utils.is_true(update.IsDownloaded)
        installed = salt.utils.is_true(update.IsInstalled)
        results['Total'] += 1
        # "Available" means neither downloaded nor installed yet.
        if not downloaded and not installed:
            results['Available'] += 1
        if downloaded and not installed:
            results['Downloaded'] += 1
        if installed:
            results['Installed'] += 1
        for category in update.Categories:
            cat_counts = results['Categories']
            cat_counts[category.Name] = cat_counts.get(category.Name, 0) + 1
        if update.MsrcSeverity:
            sev_counts = results['Severity']
            sev_counts[update.MsrcSeverity] = sev_counts.get(update.MsrcSeverity, 0) + 1
    return results
|
'Initialize the session and load all updates into the ``_updates``
collection. This collection is used by the other class functions instead
of querying Windows update (expensive).
Need to look at the possibility of loading this into ``__context__``'
def __init__(self):
    '''
    Initialize the session and load all updates into the ``_updates``
    collection. This collection is used by the other class functions
    instead of querying Windows Update (expensive).
    '''
    # COM must be initialized for the current thread before Dispatch.
    pythoncom.CoInitialize()
    self._session = win32com.client.Dispatch('Microsoft.Update.Session')
    self._updates = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
    # Populate self._updates now; refresh() does the slow WUA search.
    self.refresh()
|
'Get the contents of ``_updates`` (all updates) and puts them in an
Updates class to expose the list and summary functions.
Returns:
Updates: An instance of the Updates class with all updates for the
system.
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
updates = wua.updates()
# To get a list
updates.list()
# To get a summary
updates.summary()'
def updates(self):
    '''
    Copy all cached updates into a fresh ``Updates`` instance so callers
    can use its ``list`` and ``summary`` helpers.

    Returns:
        Updates: An instance of the Updates class with all updates for
        the system.
    '''
    result = Updates()
    collection = result.updates
    for item in self._updates:
        collection.Add(item)
    return result
|
'Refresh the contents of the ``_updates`` collection. This gets all
updates in the Windows Update system and loads them into the collection.
This is the part that is slow.
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
wua.refresh()'
def refresh(self):
    '''
    Refresh the contents of the ``_updates`` collection. This gets all
    software and driver updates known to Windows Update and loads them
    into the collection. This is the slow part.

    :raises CommandExecutionError: if the WUA search raises a COM error
    '''
    search_string = "Type='Software' or Type='Driver'"
    searcher = self._session.CreateUpdateSearcher()
    self._session.ClientApplicationID = 'Salt: Load Updates'
    try:
        results = searcher.Search(search_string)
        if (results.Updates.Count == 0):
            # NOTE(review): on an empty result this returns a string and
            # leaves self._updates holding the previous collection —
            # confirm that is intended.
            log.debug('No Updates found for:\n DCTB  DCTB {0}'.format(search_string))
            return 'No Updates found: {0}'.format(search_string)
    except pywintypes.com_error as error:
        (hr, msg, exc, arg) = error.args
        # exc[5] carries the WUA-specific HRESULT for the failure.
        try:
            failure_code = self.fail_codes[exc[5]]
        except KeyError:
            failure_code = 'Unknown Failure: {0}'.format(error)
        log.error('Search Failed: {0}\n DCTB  DCTB {1}'.format(failure_code, search_string))
        raise CommandExecutionError(failure_code)
    self._updates = results.Updates
|
'Gets a list of all updates available on the system that match the passed
criteria.
Args:
skip_hidden (bool): Skip hidden updates. Default is True
skip_installed (bool): Skip installed updates. Default is True
skip_mandatory (bool): Skip mandatory updates. Default is False
skip_reboot (bool): Skip updates that can or do require reboot.
Default is False
software (bool): Include software updates. Default is True
drivers (bool): Include driver updates. Default is True
categories (list): Include updates that have these categories.
Default is none (all categories).
Categories include the following:
* Critical Updates
* Definition Updates
* Drivers (make sure you set drivers=True)
* Feature Packs
* Security Updates
* Update Rollups
* Updates
* Update Rollups
* Windows 7
* Windows 8.1
* Windows 8.1 drivers
* Windows 8.1 and later drivers
* Windows Defender
severities (list): Include updates that have these severities.
Default is none (all severities).
Severities include the following:
* Critical
* Important
.. note:: All updates are either software or driver updates. If both
``software`` and ``drivers`` is False, nothing will be returned.
Returns:
Updates: An instance of Updates with the results of the search.
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# Gets all updates and shows a summary
updates = wua.available
updates.summary()
# Get a list of Critical updates
updates = wua.available(categories=[\'Critical Updates\'])
updates.list()'
def available(self, skip_hidden=True, skip_installed=True, skip_mandatory=False, skip_reboot=False, software=True, drivers=True, categories=None, severities=None):
    '''
    Gets a list of all updates available on the system that match the
    passed criteria.

    Args:
        skip_hidden (bool): Skip hidden updates. Default is True
        skip_installed (bool): Skip installed updates. Default is True
        skip_mandatory (bool): Skip mandatory updates. Default is False
        skip_reboot (bool): Skip updates that can or do require reboot.
            Default is False
        software (bool): Include software updates (Type == 1). Default True
        drivers (bool): Include driver updates (Type == 2). Default True
        categories (list): Only include updates in one of these category
            names. Default None (all categories).
        severities (list): Only include updates with one of these MSRC
            severities. Default None (all severities).

    Returns:
        Updates: An instance of Updates with the results of the search.
    '''
    updates = Updates()
    found = updates.updates
    # Each filter below rejects (continues past) non-matching updates;
    # anything surviving every filter is added to the result collection.
    for update in self._updates:
        if (salt.utils.is_true(update.IsHidden) and skip_hidden):
            continue
        if (salt.utils.is_true(update.IsInstalled) and skip_installed):
            continue
        if (salt.utils.is_true(update.IsMandatory) and skip_mandatory):
            continue
        # RebootBehavior is 0 (never), 1 (always) or 2 (possible); any
        # nonzero value is treated as "can require a reboot".
        if (salt.utils.is_true(update.InstallationBehavior.RebootBehavior) and skip_reboot):
            continue
        if ((not software) and (update.Type == 1)):
            continue
        if ((not drivers) and (update.Type == 2)):
            continue
        if (categories is not None):
            # Keep the update if ANY of its categories is requested.
            match = False
            for category in update.Categories:
                if (category.Name in categories):
                    match = True
            if (not match):
                continue
        if (severities is not None):
            if (update.MsrcSeverity not in severities):
                continue
        found.Add(update)
    return updates
|
'Search for either a single update or a specific list of updates. GUIDs
are searched first, then KB numbers, and finally Titles.
Args:
search_string (str, list): The search string to use to find the
update. This can be the GUID or KB of the update (preferred). It can
also be the full Title of the update or any part of the Title. A
partial Title search is less specific and can return multiple
results.
Returns:
Updates: An instance of Updates with the results of the search
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# search for a single update and show its details
updates = wua.search(\'KB3194343\')
updates.list()
# search for a list of updates and show their details
updates = wua.search([\'KB3195432\', \'12345678-abcd-1234-abcd-1234567890ab\'])
updates.list()'
def search(self, search_string):
    '''
    Search for either a single update or a specific list of updates.
    GUIDs are matched first, then KB numbers (with or without the ``KB``
    prefix), and finally Title substrings.

    Args:
        search_string (str, list): The search string to use to find the
            update. This can be the GUID or KB of the update (preferred).
            It can also be the full Title of the update or any part of
            the Title. A partial Title search is less specific and can
            return multiple results.

    Returns:
        Updates: An instance of Updates with the results of the search
    '''
    updates = Updates()
    found = updates.updates
    if isinstance(search_string, six.string_types):
        search_string = [search_string]
    if isinstance(search_string, six.integer_types):
        search_string = [str(search_string)]
    for update in self._updates:
        for find in search_string:
            if (find == update.Identity.UpdateID):
                found.Add(update)
                # Stop checking further search terms for this update so
                # it cannot be added to the collection more than once.
                # (The original used ``continue`` here, which moved on to
                # the next term and could re-add the same update.)
                break
            if (find in [('KB' + item) for item in update.KBArticleIDs]):
                found.Add(update)
                break
            if (find in [item for item in update.KBArticleIDs]):
                found.Add(update)
                break
            if (find in update.Title):
                found.Add(update)
                break
    return updates
|
'Download the updates passed in the updates collection. Load the updates
collection using ``search`` or ``available``
Args:
updates (Updates): An instance of the Updates class containing a
the updates to be downloaded.
Returns:
dict: A dictionary containing the results of the download
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# Download KB3195454
updates = wua.search(\'KB3195454\')
results = wua.download(updates)'
def download(self, updates):
    '''
    Download the updates passed in the updates collection. Load the
    updates collection using ``search`` or ``available``.

    Args:
        updates (Updates): An instance of the Updates class containing
            the updates to be downloaded.

    Returns:
        dict: A dictionary containing the results of the download

    Raises:
        CommandExecutionError: if the WUA downloader raises a COM error
    '''
    if (updates.count() == 0):
        ret = {'Success': False, 'Updates': 'Nothing to download'}
        return ret
    downloader = self._session.CreateUpdateDownloader()
    self._session.ClientApplicationID = 'Salt: Download Update'
    download_list = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
    ret = {'Updates': {}}
    # Record per-update state and queue anything not yet downloaded.
    for update in updates.updates:
        uid = update.Identity.UpdateID
        ret['Updates'][uid] = {}
        ret['Updates'][uid]['Title'] = update.Title
        ret['Updates'][uid]['AlreadyDownloaded'] = bool(update.IsDownloaded)
        # The EULA must be accepted before WUA will download the update.
        if (not salt.utils.is_true(update.EulaAccepted)):
            log.debug('Accepting EULA: {0}'.format(update.Title))
            update.AcceptEula()
        if (not salt.utils.is_true(update.IsDownloaded)):
            log.debug('To Be Downloaded: {0}'.format(uid))
            log.debug(' DCTB Title: {0}'.format(update.Title))
            download_list.Add(update)
    if (download_list.Count == 0):
        # Everything was already downloaded: report success.
        ret = {'Success': True, 'Updates': 'Nothing to download'}
        return ret
    downloader.Updates = download_list
    try:
        log.debug('Downloading Updates')
        result = downloader.Download()
    except pywintypes.com_error as error:
        (hr, msg, exc, arg) = error.args
        # exc[5] carries the WUA-specific HRESULT for the failure.
        try:
            failure_code = self.fail_codes[exc[5]]
        except KeyError:
            failure_code = 'Unknown Failure: {0}'.format(error)
        log.error('Download Failed: {0}'.format(failure_code))
        raise CommandExecutionError(failure_code)
    # OperationResultCode values from the WUA API.
    result_code = {0: 'Download Not Started', 1: 'Download In Progress', 2: 'Download Succeeded', 3: 'Download Succeeded With Errors', 4: 'Download Failed', 5: 'Download Aborted'}
    log.debug('Download Complete')
    log.debug(result_code[result.ResultCode])
    ret['Message'] = result_code[result.ResultCode]
    # 2 (succeeded) and 3 (succeeded with errors) both count as success.
    if (result.ResultCode in [2, 3]):
        log.debug('Downloaded Successfully')
        ret['Success'] = True
    else:
        log.debug('Download Failed')
        ret['Success'] = False
    # Attach the per-update result codes.
    for i in range(download_list.Count):
        uid = download_list.Item(i).Identity.UpdateID
        ret['Updates'][uid]['Result'] = result_code[result.GetUpdateResult(i).ResultCode]
    return ret
|
'Install the updates passed in the updates collection. Load the updates
collection using the ``search`` or ``available`` functions. If the
updates need to be downloaded, use the ``download`` function.
Args:
updates (Updates): An instance of the Updates class containing a
the updates to be installed.
Returns:
dict: A dictionary containing the results of the installation
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# install KB3195454
updates = wua.search(\'KB3195454\')
results = wua.download(updates)
results = wua.install(updates)'
def install(self, updates):
    '''
    Install the updates passed in the updates collection. Load the
    updates collection using the ``search`` or ``available`` functions.
    If the updates need to be downloaded, use the ``download`` function.

    Args:
        updates (Updates): An instance of the Updates class containing
            the updates to be installed.

    Returns:
        dict: A dictionary containing the results of the installation

    Raises:
        CommandExecutionError: if the WUA installer raises a COM error
    '''
    if (updates.count() == 0):
        ret = {'Success': False, 'Updates': 'Nothing to install'}
        return ret
    installer = self._session.CreateUpdateInstaller()
    self._session.ClientApplicationID = 'Salt: Install Update'
    install_list = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
    ret = {'Updates': {}}
    # Record per-update state and queue anything not yet installed.
    for update in updates.updates:
        uid = update.Identity.UpdateID
        ret['Updates'][uid] = {}
        ret['Updates'][uid]['Title'] = update.Title
        ret['Updates'][uid]['AlreadyInstalled'] = bool(update.IsInstalled)
        if (not salt.utils.is_true(update.IsInstalled)):
            log.debug('To Be Installed: {0}'.format(uid))
            log.debug(' DCTB Title: {0}'.format(update.Title))
            install_list.Add(update)
    if (install_list.Count == 0):
        # Everything was already installed: report success.
        ret = {'Success': True, 'Updates': 'Nothing to install'}
        return ret
    installer.Updates = install_list
    try:
        log.debug('Installing Updates')
        result = installer.Install()
    except pywintypes.com_error as error:
        (hr, msg, exc, arg) = error.args
        # exc[5] carries the WUA-specific HRESULT for the failure.
        try:
            failure_code = self.fail_codes[exc[5]]
        except KeyError:
            failure_code = 'Unknown Failure: {0}'.format(error)
        log.error('Install Failed: {0}'.format(failure_code))
        raise CommandExecutionError(failure_code)
    # OperationResultCode values from the WUA API.
    result_code = {0: 'Installation Not Started', 1: 'Installation In Progress', 2: 'Installation Succeeded', 3: 'Installation Succeeded With Errors', 4: 'Installation Failed', 5: 'Installation Aborted'}
    log.debug('Install Complete')
    log.debug(result_code[result.ResultCode])
    ret['Message'] = result_code[result.ResultCode]
    # 2 (succeeded) and 3 (succeeded with errors) both count as success.
    if (result.ResultCode in [2, 3]):
        ret['Success'] = True
        ret['NeedsReboot'] = result.RebootRequired
        log.debug('NeedsReboot: {0}'.format(result.RebootRequired))
    else:
        log.debug('Install Failed')
        ret['Success'] = False
    # InstallationBehavior.RebootBehavior codes from the WUA API.
    reboot = {0: 'Never Reboot', 1: 'Always Reboot', 2: 'Poss Reboot'}
    for i in range(install_list.Count):
        uid = install_list.Item(i).Identity.UpdateID
        ret['Updates'][uid]['Result'] = result_code[result.GetUpdateResult(i).ResultCode]
        ret['Updates'][uid]['RebootBehavior'] = reboot[install_list.Item(i).InstallationBehavior.RebootBehavior]
    return ret
|
'Uninstall the updates passed in the updates collection. Load the updates
collection using the ``search`` or ``available`` functions.
.. note:: Starting with Windows 10 the Windows Update Agent is unable to
uninstall updates. An ``Uninstall Not Allowed`` error is returned. If
this error is encountered this function will instead attempt to use
``dism.exe`` to perform the uninstallation. ``dism.exe`` may fail to
to find the KB number for the package. In that case, removal will fail.
Args:
updates (Updates): An instance of the Updates class containing a
the updates to be uninstalled.
Returns:
dict: A dictionary containing the results of the uninstallation
Code Example:
.. code-block:: python
import salt.utils.win_update
wua = salt.utils.win_update.WindowsUpdateAgent()
# uninstall KB3195454
updates = wua.search(\'KB3195454\')
results = wua.uninstall(updates)'
def uninstall(self, updates):
    '''
    Uninstall the updates passed in the updates collection. Load the
    updates collection using the ``search`` or ``available`` functions.

    .. note:: Starting with Windows 10 the Windows Update Agent is unable
        to uninstall updates. An ``Uninstall Not Allowed`` error
        (HRESULT -2145124312) is returned. If this error is encountered
        this function will instead attempt to use ``dism.exe`` to perform
        the uninstallation. ``dism.exe`` may fail to find the KB number
        for the package, in which case removal will fail.

    Args:
        updates (Updates): An instance of the Updates class containing
            the updates to be uninstalled.

    Returns:
        dict: A dictionary containing the results of the uninstallation

    Raises:
        CommandExecutionError: if the WUA uninstall fails for any reason
            other than the Windows-10 restriction, or if the DISM
            fallback fails
    '''
    if (updates.count() == 0):
        ret = {'Success': False, 'Updates': 'Nothing to uninstall'}
        return ret
    installer = self._session.CreateUpdateInstaller()
    self._session.ClientApplicationID = 'Salt: Install Update'
    uninstall_list = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
    ret = {'Updates': {}}
    # Record per-update state and queue anything currently installed.
    for update in updates.updates:
        uid = update.Identity.UpdateID
        ret['Updates'][uid] = {}
        ret['Updates'][uid]['Title'] = update.Title
        ret['Updates'][uid]['AlreadyUninstalled'] = (not bool(update.IsInstalled))
        if salt.utils.is_true(update.IsInstalled):
            log.debug('To Be Uninstalled: {0}'.format(uid))
            log.debug(' DCTB Title: {0}'.format(update.Title))
            uninstall_list.Add(update)
    if (uninstall_list.Count == 0):
        ret = {'Success': False, 'Updates': 'Nothing to uninstall'}
        return ret
    installer.Updates = uninstall_list
    try:
        log.debug('Uninstalling Updates')
        result = installer.Uninstall()
    except pywintypes.com_error as error:
        (hr, msg, exc, arg) = error.args
        # exc[5] carries the WUA-specific HRESULT for the failure.
        try:
            failure_code = self.fail_codes[exc[5]]
        except KeyError:
            failure_code = 'Unknown Failure: {0}'.format(error)
        # -2145124312 == WU_E_UNINSTALL_NOT_ALLOWED (Windows 10+);
        # fall back to removing the package(s) with DISM.
        if (exc[5] == (-2145124312)):
            log.debug('Uninstall Failed with WUA, attempting with DISM')
            try:
                # For each queued update, look up its package name in
                # the DISM package list by KB number and remove it.
                # NOTE(review): the inner loop variable ``item`` shadows
                # the outer ``item`` (the update object), and ``exc`` in
                # the handler below shadows the COM error tuple above.
                for item in uninstall_list:
                    for kb in item.KBArticleIDs:
                        cmd = ['dism', '/Online', '/Get-Packages']
                        pkg_list = self._run(cmd)[0].splitlines()
                        for item in pkg_list:
                            if (('kb' + kb) in item.lower()):
                                pkg = item.split(' : ')[1]
                                ret['DismPackage'] = pkg
                                cmd = ['dism', '/Online', '/Remove-Package', '/PackageName:{0}'.format(pkg), '/Quiet', '/NoRestart']
                                self._run(cmd)
            except CommandExecutionError as exc:
                log.debug('Uninstall using DISM failed')
                log.debug('Command: {0}'.format(' '.join(cmd)))
                log.debug('Error: {0}'.format(str(exc)))
                raise CommandExecutionError('Uninstall using DISM failed:{0}'.format(str(exc)))
            log.debug('Uninstall Completed using DISM')
            ret['Success'] = True
            ret['Message'] = 'Uninstalled using DISM'
            ret['NeedsReboot'] = needs_reboot()
            log.debug('NeedsReboot: {0}'.format(ret['NeedsReboot']))
            # Re-scan so the per-update results reflect the new state.
            self.refresh()
            reboot = {0: 'Never Reboot', 1: 'Always Reboot', 2: 'Poss Reboot'}
            for update in self._updates:
                uid = update.Identity.UpdateID
                for item in uninstall_list:
                    if (item.Identity.UpdateID == uid):
                        if (not update.IsInstalled):
                            ret['Updates'][uid]['Result'] = 'Uninstallation Succeeded'
                        else:
                            ret['Updates'][uid]['Result'] = 'Uninstallation Failed'
                        ret['Updates'][uid]['RebootBehavior'] = reboot[update.InstallationBehavior.RebootBehavior]
            return ret
        log.error('Uninstall Failed: {0}'.format(failure_code))
        raise CommandExecutionError(failure_code)
    # OperationResultCode values from the WUA API.
    result_code = {0: 'Uninstallation Not Started', 1: 'Uninstallation In Progress', 2: 'Uninstallation Succeeded', 3: 'Uninstallation Succeeded With Errors', 4: 'Uninstallation Failed', 5: 'Uninstallation Aborted'}
    log.debug('Uninstall Complete')
    log.debug(result_code[result.ResultCode])
    ret['Message'] = result_code[result.ResultCode]
    # 2 (succeeded) and 3 (succeeded with errors) both count as success.
    if (result.ResultCode in [2, 3]):
        ret['Success'] = True
        ret['NeedsReboot'] = result.RebootRequired
        log.debug('NeedsReboot: {0}'.format(result.RebootRequired))
    else:
        log.debug('Uninstall Failed')
        ret['Success'] = False
    reboot = {0: 'Never Reboot', 1: 'Always Reboot', 2: 'Poss Reboot'}
    for i in range(uninstall_list.Count):
        uid = uninstall_list.Item(i).Identity.UpdateID
        ret['Updates'][uid]['Result'] = result_code[result.GetUpdateResult(i).ResultCode]
        ret['Updates'][uid]['RebootBehavior'] = reboot[uninstall_list.Item(i).InstallationBehavior.RebootBehavior]
    return ret
|
'Internal function for running commands. Used by the uninstall function.
Args:
cmd (str, list): The command to run
Returns:
str: The stdout of the command'
def _run(self, cmd):
    '''
    Internal function for running commands. Used by the uninstall
    function.

    Args:
        cmd (str, list): The command to run

    Returns:
        tuple: The ``(stdout, stderr)`` pair from ``communicate()``
        (callers index ``[0]`` for stdout)

    Raises:
        CommandExecutionError: if the command cannot be launched
    '''
    if isinstance(cmd, six.string_types):
        cmd = salt.utils.args.shlex_split(cmd)
    try:
        log.debug(cmd)
        # Run the argv list directly. The original passed shell=True,
        # which is unnecessary for a list of arguments, adds a shell
        # layer that can interpret metacharacters in the arguments, and
        # on POSIX would execute only cmd[0].
        p = subprocess.Popen(cmd,
                             shell=False,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p.communicate()
    except (OSError, IOError) as exc:
        log.debug('Command Failed: {0}'.format(' '.join(cmd)))
        log.debug('Error: {0}'.format(str(exc)))
        raise CommandExecutionError(exc)
|
'Create a process from a target plus args + kwargs
This will determine if it is a Process class, otherwise it assumes
it is a function'
def add_process(self, tgt, args=None, kwargs=None, name=None):
    '''
    Create a process from ``tgt`` plus args + kwargs and start it.

    If ``tgt`` is a Process subclass it is instantiated directly;
    otherwise it is assumed to be a callable and wrapped in a
    ``multiprocessing.Process``.

    :param tgt: A ``multiprocessing.Process`` subclass or a callable
    :param list args: Positional arguments for ``tgt``
    :param dict kwargs: Keyword arguments for ``tgt``
    :param str name: Process name; derived from ``tgt`` if omitted
    :return: The started process object
    '''
    if (args is None):
        args = []
    if (kwargs is None):
        kwargs = {}
    if salt.utils.platform.is_windows():
        # On Windows, MultiprocessingProcess subclasses need a logging
        # queue passed through so child-process log records reach the
        # parent's handlers.
        if ((type(MultiprocessingProcess) is type(tgt)) and issubclass(tgt, MultiprocessingProcess)):
            need_log_queue = True
        else:
            need_log_queue = False
        if (need_log_queue and ('log_queue' not in kwargs)):
            if hasattr(self, 'log_queue'):
                kwargs['log_queue'] = self.log_queue
            else:
                kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
    if (name is None):
        # Build a dotted name from the target's module/class/function.
        # NOTE(review): the "<type 'type'>" comparison is a Python 2
        # idiom; under Python 3 it always compares unequal.
        if isinstance(tgt, types.FunctionType):
            name = '{0}.{1}'.format(tgt.__module__, tgt.__name__)
        else:
            name = '{0}{1}.{2}'.format(tgt.__module__, ('.{0}'.format(tgt.__class__) if (str(tgt.__class__) != "<type 'type'>") else ''), tgt.__name__)
    if ((type(multiprocessing.Process) is type(tgt)) and issubclass(tgt, multiprocessing.Process)):
        process = tgt(*args, **kwargs)
    else:
        process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs, name=name)
    if isinstance(process, SignalHandlingMultiprocessingProcess):
        # Start with default signal handlers so the child does not
        # inherit this manager's SIGINT/SIGTERM handlers.
        with default_signals(signal.SIGINT, signal.SIGTERM):
            process.start()
    else:
        process.start()
    log.debug("Started '{0}' with pid {1}".format(name, process.pid))
    # Remember everything needed to restart the process if it dies.
    self._process_map[process.pid] = {'tgt': tgt, 'args': args, 'kwargs': kwargs, 'Process': process}
    return process
|
'Create new process (assuming this one is dead), then remove the old one'
def restart_process(self, pid):
    '''
    Create a new process (assuming this one is dead), then remove the old
    one from the process map.
    '''
    if self._restart_processes is False:
        return
    entry = self._process_map[pid]
    log.info('Process {0} ({1}) died with exit status {2}, restarting...'.format(entry['tgt'], pid, entry['Process'].exitcode))
    # Reap the dead process before spawning its replacement.
    entry['Process'].join(1)
    self.add_process(entry['tgt'], entry['args'], entry['kwargs'])
    del self._process_map[pid]
|
'Load and start all available api modules'
@gen.coroutine
def run(self, asynchronous=False):
    '''
    Run the process manager loop: install signal handlers, then
    periodically check (and restart) child processes.

    :param bool asynchronous: When True, yield on ``gen.sleep`` between
        health checks instead of blocking in ``time.sleep``. Renamed
        from ``async``, which became a reserved keyword in Python 3.7
        and made the original definition a SyntaxError.
    '''
    log.debug('Process Manager starting!')
    salt.utils.appendproctitle(self.name)
    # Only install our handlers if nothing else has customized them.
    if (signal.getsignal(signal.SIGTERM) is signal.SIG_DFL):
        signal.signal(signal.SIGTERM, self.kill_children)
    if (signal.getsignal(signal.SIGINT) is signal.SIG_DFL):
        signal.signal(signal.SIGINT, self.kill_children)
    while True:
        log.trace('Process manager iteration')
        try:
            self.check_children()
            if asynchronous:
                (yield gen.sleep(10))
            else:
                time.sleep(10)
            # All children gone: nothing left to supervise.
            if (len(self._process_map) == 0):
                break
        except OSError:
            # OSError is raised if a signal handler is called
            break
        except IOError as exc:
            # IOError with errno of EINTR (4) may be raised
            # when using time.sleep() on Windows.
            if (exc.errno != errno.EINTR):
                raise
            break
|
'Check the children once'
def check_children(self):
    '''
    Check the children once, restarting any that have died.
    '''
    if (self._restart_processes is True):
        # Iterate over a copy: restart_process() deletes the dead pid
        # from self._process_map (and add_process() inserts the new
        # one), and mutating a dict while iterating it raises
        # RuntimeError. kill_children() uses the same .copy() pattern.
        for (pid, mapping) in six.iteritems(self._process_map.copy()):
            if (not mapping['Process'].is_alive()):
                log.trace('Process restart of {0}'.format(pid))
                self.restart_process(pid)
|
'Kill all of the children'
def kill_children(self, *args, **kwargs):
    '''
    Kill all of the children.

    Usable directly and as a SIGTERM/SIGINT handler (``args`` then holds
    the signal number and frame). ``kwargs['retry']`` bounds how many
    times the SIGKILL pass is re-attempted (default 3).
    '''
    # Ignore further signals while we are tearing children down.
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # If this is a child (fork inherited the handler), defer to the
    # handler that was in place when the manager was created.
    if (os.getpid() != self._pid):
        if callable(self._sigterm_handler):
            return self._sigterm_handler(*args)
        elif (self._sigterm_handler is not None):
            return signal.default_int_handler(signal.SIGTERM)(*args)
        else:
            return
    if salt.utils.platform.is_windows():
        if (multiprocessing.current_process().name != 'MainProcess'):
            return
        # taskkill /T kills the whole process tree; discard its output.
        with salt.utils.files.fopen(os.devnull, 'wb') as devnull:
            for (pid, p_map) in six.iteritems(self._process_map):
                subprocess.call(['taskkill', '/F', '/T', '/PID', str(pid)], stdout=devnull, stderr=devnull)
                p_map['Process'].terminate()
    else:
        # Iterate a copy: entries are deleted from the map as they die.
        for (pid, p_map) in six.iteritems(self._process_map.copy()):
            log.trace('Terminating pid {0}: {1}'.format(pid, p_map['Process']))
            if args:
                # Forward the signal we received to the child first.
                try:
                    os.kill(pid, args[0])
                except OSError:
                    pass
            try:
                p_map['Process'].terminate()
            except OSError as exc:
                # ESRCH: already gone; EACCES: not ours to kill.
                if (exc.errno not in (errno.ESRCH, errno.EACCES)):
                    raise
            if (not p_map['Process'].is_alive()):
                try:
                    del self._process_map[pid]
                except KeyError:
                    pass
        # Give children up to self.wait_for_kill seconds to exit cleanly.
        end_time = (time.time() + self.wait_for_kill)
        log.trace('Waiting to kill process manager children')
        while (self._process_map and (time.time() < end_time)):
            for (pid, p_map) in six.iteritems(self._process_map.copy()):
                log.trace('Joining pid {0}: {1}'.format(pid, p_map['Process']))
                p_map['Process'].join(0)
                if (not p_map['Process'].is_alive()):
                    try:
                        del self._process_map[pid]
                    except KeyError:
                        pass
        # Escalate to SIGKILL for anything still alive.
        kill_iterations = 2
        while (kill_iterations >= 0):
            kill_iterations -= 1
            for (pid, p_map) in six.iteritems(self._process_map.copy()):
                if (not p_map['Process'].is_alive()):
                    try:
                        del self._process_map[pid]
                    except KeyError:
                        pass
                    continue
                log.trace('Killing pid {0}: {1}'.format(pid, p_map['Process']))
                try:
                    os.kill(pid, signal.SIGKILL)
                except OSError as exc:
                    log.exception(exc)
                if (not p_map['Process'].is_alive()):
                    try:
                        del self._process_map[pid]
                    except KeyError:
                        pass
        if self._process_map:
            # Some children survived SIGKILL; retry the whole procedure
            # a bounded number of times before giving up.
            available_retries = kwargs.get('retry', 3)
            if (available_retries >= 0):
                log.info('Some processes failed to respect the KILL signal: %s', '; '.join(('Process: {0} (Pid: {1})'.format(v['Process'], k) for (k, v) in self._process_map.items())))
                log.info('kill_children retries left: %s', available_retries)
                kwargs['retry'] = (available_retries - 1)
                return self.kill_children(*args, **kwargs)
            else:
                log.warning('Failed to kill the following processes: %s', '; '.join(('Process: {0} (Pid: {1})'.format(v['Process'], k) for (k, v) in self._process_map.items())))
                log.warning('Salt will either fail to terminate now or leave some zombie processes behind')
|
'Return the names of remote refs (stripped of the remote name) and tags
which map to the branches and tags.'
| def _get_envs_from_ref_paths(self, refs):
| def _check_ref(env_set, rname):
'\n Add the appropriate saltenv(s) to the set\n '
if (rname in self.saltenv_revmap):
env_set.update(self.saltenv_revmap[rname])
else:
env_set.add(('base' if (rname == self.base) else rname))
use_branches = ('branch' in self.ref_types)
use_tags = ('tag' in self.ref_types)
ret = set()
for ref in refs:
ref = re.sub('^refs/', '', ref)
(rtype, rname) = ref.split('/', 1)
if ((rtype == 'remotes') and use_branches):
parted = rname.partition('/')
rname = (parted[2] if parted[2] else parted[0])
_check_ref(ret, rname)
elif ((rtype == 'tags') and use_tags):
_check_ref(ret, rname)
return ret
|
'Programmatically determine config value based on the desired saltenv'
@classmethod
def add_conf_overlay(cls, name):
    '''
    Install a method called ``name`` on the class which programmatically
    determines the config value for that option based on the desired
    saltenv, honoring (in order) all_saltenvs, per-saltenv overrides,
    global per-saltenv config, and finally the plain ``_<name>``
    attribute.
    '''
    def _getconf(self, tgt_env='base'):
        # root/mountpoint values must not carry a trailing separator.
        strip_sep = (lambda x: (x.rstrip(os.sep) if (name in ('root', 'mountpoint')) else x))
        # Only gitfs supports per-saltenv configuration; other roles
        # always use the plain attribute.
        if (self.role != 'gitfs'):
            return strip_sep(getattr(self, ('_' + name)))
        saltenv_conf = self.saltenv.get(tgt_env, {})
        if (name == 'ref'):
            def _get_per_saltenv(tgt_env):
                # Per-remote saltenv config wins over global saltenv
                # config; None if neither defines this option.
                if (name in saltenv_conf):
                    return saltenv_conf[name]
                elif ((tgt_env in self.global_saltenv) and (name in self.global_saltenv[tgt_env])):
                    return self.global_saltenv[tgt_env][name]
                else:
                    return None
            per_saltenv_ref = _get_per_saltenv(tgt_env)
            try:
                # all_saltenvs (if set) overrides any per-saltenv ref.
                all_saltenvs_ref = self.all_saltenvs
                if (per_saltenv_ref and (all_saltenvs_ref != per_saltenv_ref)):
                    log.debug("The per-saltenv configuration has mapped the '%s' branch/tag to saltenv '%s' for %s remote '%s', but this remote has all_saltenvs set to '%s'. The per-saltenv mapping will be ignored in favor of '%s'.", per_saltenv_ref, tgt_env, self.role, self.id, all_saltenvs_ref, all_saltenvs_ref)
                return all_saltenvs_ref
            except AttributeError:
                # all_saltenvs not configured for this remote.
                pass
            if (tgt_env == 'base'):
                return self.base
            elif self.disable_saltenv_mapping:
                if (per_saltenv_ref is None):
                    log.debug("saltenv mapping is diabled for %s remote '%s' and saltenv '%s' is not explicitly mapped", self.role, self.id, tgt_env)
                return per_saltenv_ref
            else:
                # Default mapping: the saltenv name itself is the ref.
                return (per_saltenv_ref or tgt_env)
        # Non-'ref' options: per-remote, then global, then attribute.
        if (name in saltenv_conf):
            return strip_sep(saltenv_conf[name])
        elif ((tgt_env in self.global_saltenv) and (name in self.global_saltenv[tgt_env])):
            return strip_sep(self.global_saltenv[tgt_env][name])
        else:
            return strip_sep(getattr(self, ('_' + name)))
    setattr(cls, name, _getconf)
|
'This function must be overridden in a sub-class'
def add_refspecs(self, *refspecs):
    '''
    Add the passed refspecs to the "origin" remote.

    This function must be overridden in a sub-class.
    '''
    raise NotImplementedError()
|
'Check if the relative root path exists in the checked-out copy of the
remote. Return the full path to that relative root if it does exist,
otherwise return None.'
def check_root(self):
    '''
    Check if the relative root path exists in the checked-out copy of the
    remote. Return the full path to that relative root if it does exist,
    otherwise return None.
    '''
    root_dir = salt.utils.path.join(self.cachedir, self.root()).rstrip(os.sep)
    if os.path.isdir(root_dir):
        return root_dir
    # self.root is a method (installed by add_conf_overlay); the original
    # passed the bound method itself to the log call, which rendered the
    # method repr instead of the path. Log the resolved path instead.
    log.error("Root path '%s' not present in %s remote '%s', skipping.", self.root(), self.role, self.id)
    return None
|
'Remove stale refs so that they are no longer seen as fileserver envs'
def clean_stale_refs(self):
    '''
    Remove stale refs so that they are no longer seen as fileserver envs.

    Runs ``git remote prune origin`` in the cached repo and parses its
    output for the pruned ref names.

    :return: List of pruned ref names (empty if nothing was pruned)
    '''
    cleaned = []
    cmd_str = 'git remote prune origin'
    cmd = subprocess.Popen(shlex.split(cmd_str), close_fds=(not salt.utils.platform.is_windows()), cwd=os.path.dirname(self.gitdir), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = cmd.communicate()[0]
    if six.PY3:
        # Popen returns bytes under Python 3; decode for parsing.
        output = output.decode(__salt_system_encoding__)
    if (cmd.returncode != 0):
        log.warning("Failed to prune stale branches for %s remote '%s'. Output from '%s' follows:\n%s", self.role, self.id, cmd_str, output)
    else:
        # git marks each pruned ref with this prefix in its output.
        marker = ' * [pruned] '
        for line in salt.utils.itertools.split(output, '\n'):
            if line.startswith(marker):
                cleaned.append(line[len(marker):].strip())
    if cleaned:
        log.debug('%s pruned the following stale refs: %s', self.role, ', '.join(cleaned))
    return cleaned
|
'Clear update.lk'
def clear_lock(self, lock_type='update'):
    '''
    Remove the lock file of the given type (default: the update lock).

    :return: Tuple of (success_messages, failure_messages)
    '''
    lock_file = self._get_lock_file(lock_type=lock_type)
    success = []
    failed = []

    def _record_failure(exc):
        msg = 'Unable to remove update lock for {0} ({1}): {2} '.format(self.url, lock_file, exc)
        log.debug(msg)
        failed.append(msg)

    try:
        os.remove(lock_file)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # No lock present; nothing to clear.
            pass
        elif exc.errno == errno.EISDIR:
            # A directory ended up at the lock path somehow; remove it.
            try:
                shutil.rmtree(lock_file)
            except OSError as exc:
                _record_failure(exc)
        else:
            _record_failure(exc)
    else:
        msg = "Removed {0} lock for {1} remote '{2}'".format(lock_type, self.role, self.id)
        log.debug(msg)
        success.append(msg)
    return (success, failed)
|
'Ensure that the configured refspecs are set'
def configure_refspecs(self):
    '''
    Ensure that the configured refspecs are set on the "origin" remote.

    If the repo has refspecs that are not desired, all existing ones are
    unset and the full desired set re-added; otherwise only the missing
    refspecs are added.
    '''
    try:
        refspecs = set(self.get_refspecs())
    except (git.exc.GitCommandError, GitRemoteError) as exc:
        log.error("Failed to get refspecs for %s remote '%s': %s", self.role, self.id, exc)
        return
    desired_refspecs = set(self.refspecs)
    # Refspecs present in the repo but not desired must be removed.
    to_delete = ((refspecs - desired_refspecs) if refspecs else set())
    if to_delete:
        # git config has no per-value delete for multi-valued keys that
        # is simpler here, so unset them all and re-add the desired set.
        cmd_str = 'git config --unset-all remote.origin.fetch'
        cmd = subprocess.Popen(shlex.split(cmd_str), close_fds=(not salt.utils.platform.is_windows()), cwd=os.path.dirname(self.gitdir), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output = cmd.communicate()[0]
        if (cmd.returncode != 0):
            log.error("Failed to unset git config value for %s remote '%s'. Output from '%s' follows:\n%s", self.role, self.id, cmd_str, output)
            return
        to_add = desired_refspecs
    else:
        to_add = (desired_refspecs - refspecs)
    self.add_refspecs(*to_add)
|
'Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
This function requires that a _fetch() function be implemented in a
sub-class.'
def fetch(self):
    '''
    Fetch the repo. If the local copy was updated, return True. If the
    local copy was already up-to-date, return False.

    This function requires that a _fetch() function be implemented in a
    sub-class.
    '''
    try:
        with self.gen_lock(lock_type='update'):
            log.debug("Fetching %s remote '%s'", self.role, self.id)
            return self._fetch()
    except GitLockError as exc:
        if (exc.errno == errno.EEXIST):
            log.warning("Update lock file is present for %s remote '%s', skipping. If this warning persists, it is possible that the update process was interrupted, but the lock could also have been manually set. Removing %s or running 'salt-run cache.clear_git_lock %s type=update' will allow updates to continue for this remote.", self.role, self.id, self._get_lock_file(lock_type='update'), self.role)
            return False
        # Any other lock error is unexpected; re-raise it instead of
        # silently falling through (the original returned None here).
        raise
|
'Place a lock file if (and only if) it does not already exist.'
def _lock(self, lock_type='update', failhard=False):
    '''
    Place a lock file if (and only if) it does not already exist.

    :param str lock_type: Lock flavor ('update' by default)
    :param bool failhard: If True, re-raise when the lock cannot be set
    :return: A success message, or None when the lock was not set and
        failhard is False
    :raises GitLockError: on unexpected I/O errors, or (when failhard)
        when the lock already exists
    '''
    try:
        # O_CREAT|O_EXCL makes creation atomic: fails if the file exists.
        fh_ = os.open(self._get_lock_file(lock_type), ((os.O_CREAT | os.O_EXCL) | os.O_WRONLY))
        with os.fdopen(fh_, 'w'):
            # Write the PID so stale locks can be attributed/cleaned.
            # NOTE(review): os.write on the raw fd while fdopen wraps
            # it works, but mixing the two layers is fragile — confirm.
            os.write(fh_, six.b(str(os.getpid())))
    except (OSError, IOError) as exc:
        if (exc.errno == errno.EEXIST):
            # Lock already present; read the owner PID to decide what
            # to do about it.
            with salt.utils.files.fopen(self._get_lock_file(lock_type), 'r') as fd_:
                try:
                    pid = int(fd_.readline().rstrip())
                except ValueError:
                    # Lock file is empty/corrupt; treat as unowned.
                    pid = 0
            global_lock_key = (self.role + '_global_lock')
            lock_file = self._get_lock_file(lock_type=lock_type)
            if self.opts[global_lock_key]:
                # Global lock enabled: never steal the lock, even if
                # the owning process appears dead (it may belong to
                # another master sharing the cache).
                msg = "{0} is enabled and {1} lockfile {2} is present for {3} remote '{4}'.".format(global_lock_key, lock_type, lock_file, self.role, self.id)
                if pid:
                    msg += ' Process {0} obtained the lock'.format(pid)
                    if (not pid_exists(pid)):
                        msg += ' but this process is not running. The update may have been interrupted. If using multi-master with shared gitfs cache, the lock may have been obtained by another master.'
                log.warning(msg)
                if failhard:
                    raise exc
                return
            elif (pid and pid_exists(pid)):
                # Live owner: respect the lock.
                log.warning('Process %d has a %s %s lock (%s)', pid, self.role, lock_type, lock_file)
                if failhard:
                    raise
                return
            else:
                if pid:
                    # Stale lock from a dead process: clean it up and
                    # retry taking the lock once.
                    log.warning('Process %d has a %s %s lock (%s), but this process is not running. Cleaning up lock file.', pid, self.role, lock_type, lock_file)
                (success, fail) = self.clear_lock()
                if success:
                    return self._lock(lock_type='update', failhard=failhard)
                elif failhard:
                    raise
                return
        else:
            msg = 'Unable to set {0} lock for {1} ({2}): {3} '.format(lock_type, self.id, self._get_lock_file(lock_type), exc)
            log.error(msg, exc_info=True)
            raise GitLockError(exc.errno, msg)
    msg = "Set {0} lock for {1} remote '{2}'".format(lock_type, self.role, self.id)
    log.debug(msg)
    return msg
|
'Place a lock file and report on the success/failure. This is an
interface to be used by the fileserver runner, so it is hard-coded to
perform an update lock. We aren\'t using the gen_lock()
contextmanager here because the lock is meant to stay and not be
automatically removed.'
| def lock(self):
| success = []
failed = []
try:
result = self._lock(lock_type='update')
except GitLockError as exc:
failed.append(exc.strerror)
else:
if (result is not None):
success.append(result)
return (success, failed)
|
@contextlib.contextmanager
def gen_lock(self, lock_type='update'):
    '''
    Set a lock of the given type for the duration of the managed block,
    and clear it automatically on exit.

    :param str lock_type: The type of lock to set ('update' or 'checkout')
    :raises GitLockError: If the lock could not be obtained, or if an
        OSError/IOError/GitLockError escapes the managed block
    '''
    acquired = False
    try:
        self._lock(lock_type=lock_type, failhard=True)
        acquired = True
        yield
    except (OSError, IOError, GitLockError) as exc:
        # Normalize any lock-related failure into a GitLockError
        raise GitLockError(exc.errno, exc.strerror)
    finally:
        # Only clear the lock if we were the ones who set it
        if acquired:
            self.clear_lock(lock_type=lock_type)
|
def init_remote(self):
    '''
    Initialize/attach to the remote repository. Providers (GitPython,
    pygit2) must override this in their subclass.

    :raises NotImplementedError: Always, in this base class.
    '''
    raise NotImplementedError()
|
def checkout(self):
    '''
    Check out the configured ref. Providers must override this in their
    subclass.

    :raises NotImplementedError: Always, in this base class.
    '''
    raise NotImplementedError()
|
def dir_list(self, tgt_env):
    '''
    Return the directories for the given environment. Providers must
    override this in their subclass.

    :raises NotImplementedError: Always, in this base class.
    '''
    raise NotImplementedError()
|
def env_is_exposed(self, tgt_env):
    '''
    Check if an environment is exposed by comparing it against a whitelist
    and blacklist.

    :param str tgt_env: The environment (saltenv) name to check
    :return: True if the environment passes the configured
        whitelist/blacklist filters, False otherwise
    '''
    return salt.utils.check_whitelist_blacklist(tgt_env, whitelist=self.saltenv_whitelist, blacklist=self.saltenv_blacklist)
|
'Provider-specific code for fetching, must be implemented in a
sub-class.'
| def _fetch(self):
| raise NotImplementedError()
|
def envs(self):
    '''
    Return the salt environments exposed by this remote. Providers must
    override this in their subclass.

    :raises NotImplementedError: Always, in this base class.
    '''
    raise NotImplementedError()
|
def file_list(self, tgt_env):
    '''
    Return the files (and symlinks) for the given environment. Providers
    must override this in their subclass.

    :raises NotImplementedError: Always, in this base class.
    '''
    raise NotImplementedError()
|
def find_file(self, path, tgt_env):
    '''
    Locate a file in the given environment. Providers must override this
    in their subclass.

    :raises NotImplementedError: Always, in this base class.
    '''
    raise NotImplementedError()
|
def get_checkout_target(self):
    '''
    Resolve a dynamically-set branch.

    If the configured branch is the special value '__env__', map it to the
    active pillarenv/environment (falling back to 'base', which in turn
    resolves to the role's configured base branch). Otherwise return the
    configured branch unchanged.

    :return: The concrete branch/ref name to check out
    '''
    if self.branch != '__env__':
        return self.branch
    env = self.opts.get('pillarenv') or self.opts.get('environment') or 'base'
    if env == 'base':
        # 'base' maps to e.g. opts['gitfs_base'] / opts['git_pillar_base']
        return self.opts['{0}_base'.format(self.role)]
    return env
|
def get_refspecs(self):
    '''
    Return the refspecs configured on the remote. Providers must override
    this in their subclass.

    :raises NotImplementedError: Always, in this base class.
    '''
    raise NotImplementedError()
|
def get_tree(self, tgt_env):
    '''
    Return a tree object for the specified environment, or None if the
    environment is not exposed or cannot be resolved to a ref.

    :param str tgt_env: The target environment (saltenv)
    :return: A provider tree object, or None
    '''
    if not self.env_is_exposed(tgt_env):
        return None
    resolved_ref = self.ref(tgt_env)
    if resolved_ref is None:
        return None
    # Try each configured ref type in order until one yields a tree
    for ref_type in self.ref_types:
        resolver_name = 'get_tree_from_{0}'.format(ref_type)
        try:
            resolver = getattr(self, resolver_name)
        except AttributeError:
            log.error("%s class is missing function '%s'",
                      self.__class__.__name__, resolver_name)
        else:
            tree = resolver(resolved_ref)
            if tree is not None:
                return tree
    return None
|
def get_url(self):
    '''
    Examine self.id and assign self.url (and self.branch, for git_pillar
    and winrepo remotes, whose IDs may take the form "<branch> <url>").
    '''
    if self.role not in ('git_pillar', 'winrepo'):
        self.url = self.id
        return
    try:
        # ID of the form "<branch> <url>"
        self.branch, self.url = self.id.split(None, 1)
    except ValueError:
        # No branch in the ID; fall back to the configured default branch
        self.branch = self.opts['{0}_branch'.format(self.role)]
        self.url = self.id
|
def setup_callbacks(self):
    '''
    No-op hook. Only the pygit2 provider needs callbacks; defining this in
    the base class keeps the calling code simple.
    '''
    return None
|
def verify_auth(self):
    '''
    Default auth check: no credentials are required. Providers that
    support authentication override this in their subclass.

    :return: True (auth always considered valid in the base class)
    '''
    self.credentials = None
    return True
|
def write_file(self, blob, dest):
    '''
    Write the contents of a blob to a destination path. Providers must
    override this in their subclass.

    :raises NotImplementedError: Always, in this base class.
    '''
    raise NotImplementedError()
|
def add_refspecs(self, *refspecs):
    '''
    Add the specified refspecs to the "origin" remote.

    :param refspecs: One or more refspec strings to add to
        remote.origin.fetch. Failures are logged per-refspec and do not
        abort the remaining additions.
    '''
    for spec in refspecs:
        try:
            self.repo.git.config('--add', 'remote.origin.fetch', spec)
        except git.exc.GitCommandError as exc:
            log.error("Failed to add refspec '%s' to %s remote '%s': %s",
                      spec, self.role, self.id, exc)
        else:
            log.debug("Added refspec '%s' to %s remote '%s'",
                      spec, self.role, self.id)
|
def checkout(self):
    '''
    Checkout the configured branch/tag. We catch an "Exception" class here
    instead of a specific exception class because the exceptions raised by
    GitPython when running these functions vary in different versions of
    GitPython.

    :return: The result of self.check_root() on success, or None if the
        checkout failed or the target ref does not exist on the remote
    '''
    tgt_ref = self.get_checkout_target()
    try:
        head_sha = self.repo.rev_parse('HEAD').hexsha
    except Exception:
        # HEAD may not yet exist (e.g. freshly-initialized repo)
        head_sha = None
    # Try the remote-tracking branch first, then fall back to a tag of
    # the same name.
    for rev_parse_target, checkout_ref in (
            ('origin/' + tgt_ref, 'origin/' + tgt_ref),
            ('tags/' + tgt_ref, 'tags/' + tgt_ref)):
        try:
            target_sha = self.repo.rev_parse(rev_parse_target).hexsha
        except Exception:
            # This ref type does not exist; try the next candidate
            continue
        else:
            if head_sha == target_sha:
                # No need to checkout, we're already up-to-date
                return self.check_root()
        try:
            # Hold a checkout lock so concurrent processes don't fight
            # over the working tree.
            with self.gen_lock(lock_type='checkout'):
                self.repo.git.checkout(checkout_ref)
                log.debug("%s remote '%s' has been checked out to %s", self.role, self.id, checkout_ref)
        except GitLockError as exc:
            if exc.errno == errno.EEXIST:
                # Re-raise with a more descriptive message so the caller
                # knows a checkout is already in progress.
                raise GitLockError(exc.errno, "Checkout lock exists for {0} remote '{1}'".format(self.role, self.id))
            else:
                log.error("Error %d encountered obtaining checkout lock for %s remote '%s'", exc.errno, self.role, self.id)
                return None
        except Exception:
            # Checkout failed for this candidate ref; try the next one
            continue
        return self.check_root()
    log.error("Failed to checkout %s from %s remote '%s': remote ref does not exist", tgt_ref, self.role, self.id)
    return None
|
def init_remote(self):
    '''
    Initialize/attach to a remote using GitPython. Return a boolean
    which will let the calling function know whether or not a new repo was
    initialized by this function.

    :return: True if a new repo or remote was created, False otherwise
    '''
    new = False
    if not os.listdir(self.cachedir):
        # Cachedir is empty: initialize a brand-new repo there
        self.repo = git.Repo.init(self.cachedir)
        new = True
    else:
        # Cachedir is non-empty: attach to the existing repo
        try:
            self.repo = git.Repo(self.cachedir)
        except git.exc.InvalidGitRepositoryError:
            log.error(_INVALID_REPO.format(self.cachedir, self.url, self.role))
            return new
    self.gitdir = salt.utils.path.join(self.repo.working_dir, '.git')
    if not self.repo.remotes:
        # No "origin" remote configured yet; create it
        try:
            self.repo.create_remote('origin', self.url)
        except os.error:
            # Presumably this guards against a race where another process
            # is writing the git config at the same time — TODO confirm.
            pass
        else:
            new = True
    # Keep the repo's http.sslVerify setting in sync with the configured
    # ssl_verify value.
    try:
        ssl_verify = self.repo.git.config('--get', 'http.sslVerify')
    except git.exc.GitCommandError:
        # Setting not present in the repo config
        ssl_verify = ''
    desired_ssl_verify = str(self.ssl_verify).lower()
    if ssl_verify != desired_ssl_verify:
        self.repo.git.config('http.sslVerify', desired_ssl_verify)
    # Refspecs are only configured when the subclass defines them
    if hasattr(self, 'refspecs'):
        self.configure_refspecs()
    return new
|
def dir_list(self, tgt_env):
    '''
    Get list of directories for the target environment using GitPython.

    :param str tgt_env: The target environment (saltenv)
    :return: A set of directory paths (relative to the configured root,
        prefixed with the mountpoint if one is set)
    '''
    dirs = set()
    tree = self.get_tree(tgt_env)
    if not tree:
        # Environment not found or no tree available
        return dirs
    if self.root(tgt_env):
        try:
            # Narrow the tree to the configured root directory
            tree = tree / self.root(tgt_env)
        except KeyError:
            return dirs

        def relpath(path):
            return os.path.relpath(path, self.root(tgt_env))
    else:
        def relpath(path):
            return path

    def add_mountpoint(path):
        return salt.utils.path.join(
            self.mountpoint(tgt_env), path, use_posixpath=True)

    for entry in tree.traverse():
        if isinstance(entry, git.Tree):
            dirs.add(add_mountpoint(relpath(entry.path)))
    if self.mountpoint(tgt_env):
        dirs.add(self.mountpoint(tgt_env))
    return dirs
|
def envs(self):
    '''
    Check the refs and return a list of the ones which can be used as salt
    environments.
    '''
    paths = [ref.path for ref in self.repo.refs]
    return self._get_envs_from_ref_paths(paths)
|
def _fetch(self):
    '''
    Fetch the repo. If the local copy was updated, return True. If the
    local copy was already up-to-date, return False.

    :return: True if new objects were fetched or stale refs were cleaned,
        False otherwise
    '''
    origin = self.repo.remotes[0]
    try:
        fetch_results = origin.fetch()
    except AssertionError:
        # Presumably works around a transient GitPython fetch failure by
        # retrying once — TODO confirm against GitPython issue tracker.
        fetch_results = origin.fetch()
    new_objs = False
    for fetchinfo in fetch_results:
        if fetchinfo.old_commit is not None:
            # Existing ref was advanced to a new commit
            log.debug("%s has updated '%s' for remote '%s' from %s to %s", self.role, fetchinfo.name, self.id, fetchinfo.old_commit.hexsha[:7], fetchinfo.commit.hexsha[:7])
            new_objs = True
        elif fetchinfo.flags in (fetchinfo.NEW_TAG, fetchinfo.NEW_HEAD):
            # Brand-new tag or branch fetched from the remote
            log.debug("%s has fetched new %s '%s' for remote '%s'", self.role, ('tag' if (fetchinfo.flags == fetchinfo.NEW_TAG) else 'head'), fetchinfo.name, self.id)
            new_objs = True
    # Pruning stale refs also counts as a change to the local copy
    cleaned = self.clean_stale_refs()
    return bool(new_objs or cleaned)
|
def file_list(self, tgt_env):
    '''
    Get file list for the target environment using GitPython.

    :param str tgt_env: The target environment (saltenv)
    :return: 2-tuple of (files, symlinks); files is a set of file paths,
        symlinks maps symlink paths to their link targets
    '''
    files = set()
    symlinks = {}
    tree = self.get_tree(tgt_env)
    if not tree:
        # Environment not found or no tree available; return empty objects
        return (files, symlinks)
    if self.root(tgt_env):
        try:
            # Narrow the tree to the configured root directory
            tree = (tree / self.root(tgt_env))
        except KeyError:
            return (files, symlinks)
        relpath = (lambda path: os.path.relpath(path, self.root(tgt_env)))
    else:
        relpath = (lambda path: path)
    add_mountpoint = (lambda path: salt.utils.path.join(self.mountpoint(tgt_env), path, use_posixpath=True))
    for file_blob in tree.traverse():
        if not isinstance(file_blob, git.Blob):
            # Skip trees/submodules; only file blobs are listed
            continue
        file_path = add_mountpoint(relpath(file_blob.path))
        files.add(file_path)
        if stat.S_ISLNK(file_blob.mode):
            # A symlink blob stores its link target as the blob contents;
            # stream it out to record the target.
            stream = six.StringIO()
            file_blob.stream_data(stream)
            stream.seek(0)
            link_tgt = stream.read()
            stream.close()
            symlinks[file_path] = link_tgt
    return (files, symlinks)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.