'Handle incoming messages from underlying TCP streams'
| @tornado.gen.coroutine
def handle_message(self, stream, header, payload):
| try:
try:
payload = self._decode_payload(payload)
except Exception:
stream.write(salt.transport.frame.frame_msg('bad load', header=header))
raise tornado.gen.Return()
if ((not isinstance(payload, dict)) or (not isinstance(payload.get('load'), dict))):
(yield stream.write(salt.transport.frame.frame_msg('payload and load must be a dict', header=header)))
raise tornado.gen.Return()
if ((payload['enc'] == 'clear') and (payload.get('load', {}).get('cmd') == '_auth')):
(yield stream.write(salt.transport.frame.frame_msg(self._auth(payload['load']), header=header)))
raise tornado.gen.Return()
try:
(ret, req_opts) = (yield self.payload_handler(payload))
except Exception as e:
stream.write('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
stream.close()
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if (req_fun == 'send_clear'):
stream.write(salt.transport.frame.frame_msg(ret, header=header))
elif (req_fun == 'send'):
stream.write(salt.transport.frame.frame_msg(self.crypticle.dumps(ret), header=header))
elif (req_fun == 'send_private'):
stream.write(salt.transport.frame.frame_msg(self._encrypt_private(ret, req_opts['key'], req_opts['tgt']), header=header))
else:
log.error('Unknown req_fun {0}'.format(req_fun))
stream.write('Server-side exception handling payload')
stream.close()
except tornado.gen.Return:
raise
except tornado.iostream.StreamClosedError:
log.error('Connection was unexpectedly closed', exc_info=True)
except Exception as exc:
log.error('Unexpected exception occurred: {0}'.format(exc), exc_info=True)
raise tornado.gen.Return()
|
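This handler assumes every message on the wire is a msgpack map with 'head' and 'body' keys, produced by salt.transport.frame.frame_msg. A minimal sketch of that framing using plain msgpack (the exact frame layout is an assumption inferred from handle_stream below, which unpacks framed_msg['head'] and framed_msg['body']):

import msgpack

def frame_msg(body, header=None):
    # Pack one message as a single msgpack map; the receiver's streaming
    # Unpacker yields it back as a dict with 'head' and 'body' keys.
    return msgpack.dumps({'head': header or {}, 'body': body})

wire = frame_msg({'enc': 'clear', 'load': {'cmd': '_auth'}}, header={'mid': 1})
framed = msgpack.unpackb(wire, raw=False)
assert framed['head'] == {'mid': 1}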
'Handle incoming streams and add messages to the incoming queue'
| @tornado.gen.coroutine
def handle_stream(self, stream, address):
| log.trace('Req client {0} connected'.format(address))
self.clients.append((stream, address))
unpacker = msgpack.Unpacker()
try:
while True:
wire_bytes = (yield stream.read_bytes(4096, partial=True))
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
header = framed_msg['head']
self.io_loop.spawn_callback(self.message_handler, stream, header, framed_msg['body'])
except tornado.iostream.StreamClosedError:
log.trace('req client disconnected {0}'.format(address))
self.clients.remove((stream, address))
except Exception as e:
log.trace('other master-side exception: {0}'.format(e))
self.clients.remove((stream, address))
stream.close()
|
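The read loop above works because msgpack's streaming Unpacker buffers partial reads and yields only complete messages, so read_bytes(4096, partial=True) can cut a message anywhere. The pattern in isolation:

import msgpack

unpacker = msgpack.Unpacker(raw=False)
wire = msgpack.dumps({'head': {}, 'body': 'ping'}) * 3

# Feed the wire bytes in arbitrary chunks, as the socket would deliver
# them; the Unpacker yields each complete message and keeps any trailing
# partial bytes buffered for the next feed().
for i in range(0, len(wire), 5):
    unpacker.feed(wire[i:i + 5])
    for framed_msg in unpacker:
        print(framed_msg['body'])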
'Shut down the whole server'
| def shutdown(self):
| for item in self.clients:
(client, address) = item
client.close()
self.clients.remove(item)
|
'Override _create_stream() in TCPClient.
Tornado 4.5 added the kwargs \'source_ip\' and \'source_port\'.
Due to this, use **kwargs to swallow these and any future
kwargs to maintain compatibility.'
| def _create_stream(self, max_buffer_size, af, addr, **kwargs):
| sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_set_tcp_keepalive(sock, self.opts)
stream = tornado.iostream.IOStream(sock, io_loop=self.io_loop, max_buffer_size=max_buffer_size)
return stream.connect(addr)
|
'Ask for this client to reconnect to the origin'
| def connect(self):
| if (hasattr(self, '_connecting_future') and (not self._connecting_future.done())):
future = self._connecting_future
else:
future = tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
if (self.connect_callback is not None):
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
|
'Try to connect for the rest of time!'
| @tornado.gen.coroutine
def _connect(self):
| while True:
if self._closing:
break
try:
self._stream = (yield self._tcp_client.connect(self.host, self.port, ssl_options=self.opts.get('ssl')))
self._connecting_future.set_result(True)
break
except Exception as e:
(yield tornado.gen.sleep(1))
|
'Register a callback for received messages (that we didn\'t initiate)'
| def on_recv(self, callback):
| if (callback is None):
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
|
'Send given message, and return a future'
| def send(self, msg, timeout=None, callback=None, raw=False):
| message_id = self._message_id()
header = {'mid': message_id}
future = tornado.concurrent.Future()
if (callback is not None):
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
self.send_future_map[message_id] = future
if (self.opts.get('detect_mode') is True):
timeout = 1
if (timeout is not None):
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message_id)
self.send_timeout_map[message_id] = send_timeout
if (len(self.send_queue) == 0):
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append((message_id, salt.transport.frame.frame_msg(msg, header=header)))
return future
|
'Bind to the interface specified in the configuration file'
| def _publish_daemon(self, log_queue=None):
| salt.utils.appendproctitle(self.__class__.__name__)
if (log_queue is not None):
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
salt.log.setup.setup_multiprocessing_logging(log_queue)
if (self.io_loop is None):
self.io_loop = tornado.ioloop.IOLoop.current()
pub_server = PubServer(self.opts, io_loop=self.io_loop)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(sock, self.opts)
sock.setblocking(0)
sock.bind((self.opts['interface'], int(self.opts['publish_port'])))
sock.listen(self.backlog)
pub_server.add_socket(sock)
if (self.opts.get('ipc_mode', '') == 'tcp'):
pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
pull_sock = salt.transport.ipc.IPCMessageServer(pull_uri, io_loop=self.io_loop, payload_handler=pub_server.publish_payload)
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
old_umask = os.umask(127)
try:
pull_sock.start()
finally:
os.umask(old_umask)
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
salt.log.setup.shutdown_multiprocessing_logging()
|
'Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing'
| def pre_fork(self, process_manager):
| kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
|
'Publish "load" to minions'
| def publish(self, load):
| payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug('Signing data packet')
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
if (self.opts.get('ipc_mode', '') == 'tcp'):
pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
pub_sock = salt.utils.async.SyncWrapper(salt.transport.ipc.IPCMessageClient, (pull_uri,))
pub_sock.connect()
int_payload = {'payload': self.serial.dumps(payload)}
if (load['tgt_type'] == 'list'):
int_payload['topic_lst'] = load['tgt']
pub_sock.send(int_payload)
|
'Pre-fork we need to create the zmq router device'
| def pre_fork(self, _):
| if ('aes' not in salt.master.SMaster.secrets):
salt.master.SMaster.secrets['aes'] = {'secret': multiprocessing.Array(ctypes.c_char, six.b(salt.crypt.Crypticle.generate_key_string())), 'reload': salt.crypt.Crypticle.generate_key_string}
|
'The server equivalent of ReqChannel.crypted_transfer_decode_dictentry'
| def _encrypt_private(self, ret, dictkey, target):
| pubfn = os.path.join(self.opts['pki_dir'], 'minions', target)
key = salt.crypt.Crypticle.generate_key_string()
pcrypt = salt.crypt.Crypticle(self.opts, key)
try:
with salt.utils.files.fopen(pubfn) as f:
pub = RSA.importKey(f.read())
except (ValueError, IndexError, TypeError):
return self.crypticle.dumps({})
except IOError:
log.error('AES key not found')
return {'error': 'AES key not found'}
pret = {}
cipher = PKCS1_OAEP.new(pub)
if six.PY2:
pret['key'] = cipher.encrypt(key)
else:
pret['key'] = cipher.encrypt(salt.utils.stringutils.to_bytes(key))
pret[dictkey] = pcrypt.dumps((ret if (ret is not False) else {}))
return pret
|
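_encrypt_private is a hybrid scheme: a fresh symmetric key is generated per message, RSA-encrypted with the target minion's public key under PKCS1_OAEP, and the return payload itself is encrypted with that symmetric key. A self-contained sketch of the same shape, with AES-CBC standing in for salt.crypt.Crypticle (an assumption about its internals) and 'pillar' as an illustrative dictkey:

import os
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA

minion_key = RSA.generate(2048)   # normally loaded from pki_dir/minions/<id>
session_key = os.urandom(32)      # per-message symmetric key

# RSA-encrypt the session key so only the target minion can recover it
pret = {'key': PKCS1_OAEP.new(minion_key.publickey()).encrypt(session_key)}

# Symmetrically encrypt the payload under the session key
iv = os.urandom(16)
data = b'secret-return-data'
padded = data + b'\x00' * (16 - len(data) % 16)  # naive padding, illustration only
pret['pillar'] = iv + AES.new(session_key, AES.MODE_CBC, iv).encrypt(padded)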
'Check to see if a fresh AES key is available and update the components
of the worker'
| def _update_aes(self):
| if (salt.master.SMaster.secrets['aes']['secret'].value != self.crypticle.key_string):
self.crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
return True
return False
|
'Authenticate the client, use the sent public key to encrypt the AES key
which was generated at start up.
This method fires an event over the master event manager. The event is
tagged "auth" and returns a dict with information about the auth event.
The steps are: verify that the key we are receiving matches the stored
key; store the key if it is not there; make an RSA key with the pub key;
encrypt the AES key as an encrypted salt.payload; package the return and
return it'
| def _auth(self, load):
| if (not salt.utils.verify.valid_id(self.opts, load['id'])):
log.info('Authentication request from invalid id {id}'.format(**load))
return {'enc': 'clear', 'load': {'ret': False}}
log.info('Authentication request from {id}'.format(**load))
if (self.opts['max_minions'] > 0):
if self.cache_cli:
minions = self.cache_cli.get_cached()
else:
minions = self.ckminions.connected_ids()
if (len(minions) > 1000):
log.info("With large numbers of minions it is advised to enable the ConCache with 'con_cache: True' in the masters configuration file.")
if (not (len(minions) <= self.opts['max_minions'])):
if (load['id'] not in minions):
msg = 'Too many minions connected (max_minions={0}). Rejecting connection from id {1}'.format(self.opts['max_minions'], load['id'])
log.info(msg)
eload = {'result': False, 'act': 'full', 'id': load['id'], 'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return {'enc': 'clear', 'load': {'ret': 'full'}}
auto_reject = self.auto_key.check_autoreject(load['id'])
auto_sign = self.auto_key.check_autosign(load['id'])
pubfn = os.path.join(self.opts['pki_dir'], 'minions', load['id'])
pubfn_pend = os.path.join(self.opts['pki_dir'], 'minions_pre', load['id'])
pubfn_rejected = os.path.join(self.opts['pki_dir'], 'minions_rejected', load['id'])
pubfn_denied = os.path.join(self.opts['pki_dir'], 'minions_denied', load['id'])
if self.opts['open_mode']:
pass
elif os.path.isfile(pubfn_rejected):
log.info('Public key rejected for {0}. Key is present in rejection key dir.'.format(load['id']))
eload = {'result': False, 'id': load['id'], 'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return {'enc': 'clear', 'load': {'ret': False}}
elif os.path.isfile(pubfn):
with salt.utils.files.fopen(pubfn, 'r') as pubfn_handle:
if (pubfn_handle.read().strip() != load['pub'].strip()):
log.error('Authentication attempt from {id} failed, the public keys did not match. This may be an attempt to compromise the Salt cluster.'.format(**load))
with salt.utils.files.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False, 'id': load['id'], 'act': 'denied', 'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return {'enc': 'clear', 'load': {'ret': False}}
elif (not os.path.isfile(pubfn_pend)):
if os.path.isdir(pubfn_pend):
log.info('New public key {id} is a directory'.format(**load))
eload = {'result': False, 'id': load['id'], 'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return {'enc': 'clear', 'load': {'ret': False}}
if auto_reject:
key_path = pubfn_rejected
log.info('New public key for {id} rejected via autoreject_file'.format(**load))
key_act = 'reject'
key_result = False
elif (not auto_sign):
key_path = pubfn_pend
log.info('New public key for {id} placed in pending'.format(**load))
key_act = 'pend'
key_result = True
else:
key_path = None
if (key_path is not None):
with salt.utils.files.fopen(key_path, 'w+') as fp_:
fp_.write(load['pub'])
ret = {'enc': 'clear', 'load': {'ret': key_result}}
eload = {'result': key_result, 'act': key_act, 'id': load['id'], 'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return ret
elif os.path.isfile(pubfn_pend):
if auto_reject:
try:
shutil.move(pubfn_pend, pubfn_rejected)
except (IOError, OSError):
pass
log.info('Pending public key for {id} rejected via autoreject_file'.format(**load))
ret = {'enc': 'clear', 'load': {'ret': False}}
eload = {'result': False, 'act': 'reject', 'id': load['id'], 'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return ret
elif (not auto_sign):
with salt.utils.files.fopen(pubfn_pend, 'r') as pubfn_handle:
if (pubfn_handle.read() != load['pub']):
log.error('Authentication attempt from {id} failed, the public key in pending did not match. This may be an attempt to compromise the Salt cluster.'.format(**load))
with salt.utils.files.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False, 'id': load['id'], 'act': 'denied', 'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return {'enc': 'clear', 'load': {'ret': False}}
else:
log.info('Authentication failed from host {id}, the key is in pending and needs to be accepted with salt-key -a {id}'.format(**load))
eload = {'result': True, 'act': 'pend', 'id': load['id'], 'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return {'enc': 'clear', 'load': {'ret': True}}
else:
with salt.utils.files.fopen(pubfn_pend, 'r') as pubfn_handle:
if (pubfn_handle.read() != load['pub']):
log.error('Authentication attempt from {id} failed, the public keys in pending did not match. This may be an attempt to compromise the Salt cluster.'.format(**load))
with salt.utils.files.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False, 'id': load['id'], 'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return {'enc': 'clear', 'load': {'ret': False}}
else:
os.remove(pubfn_pend)
else:
log.warning('Unaccounted for authentication failure')
eload = {'result': False, 'id': load['id'], 'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return {'enc': 'clear', 'load': {'ret': False}}
log.info('Authentication accepted from {id}'.format(**load))
if ((not os.path.isfile(pubfn)) and (not self.opts['open_mode'])):
with salt.utils.files.fopen(pubfn, 'w+') as fp_:
fp_.write(load['pub'])
elif self.opts['open_mode']:
disk_key = ''
if os.path.isfile(pubfn):
with salt.utils.files.fopen(pubfn, 'r') as fp_:
disk_key = fp_.read()
if (load['pub'] and (load['pub'] != disk_key)):
log.debug('Host key change detected in open mode.')
with salt.utils.files.fopen(pubfn, 'w+') as fp_:
fp_.write(load['pub'])
pub = None
if self.cache_cli:
self.cache_cli.put_cache([load['id']])
try:
with salt.utils.files.fopen(pubfn) as f:
pub = RSA.importKey(f.read())
except (ValueError, IndexError, TypeError) as err:
log.error('Corrupt public key "{0}": {1}'.format(pubfn, err))
return {'enc': 'clear', 'load': {'ret': False}}
cipher = PKCS1_OAEP.new(pub)
ret = {'enc': 'pub', 'pub_key': self.master_key.get_pub_str(), 'publish_port': self.opts['publish_port']}
if self.opts['master_sign_pubkey']:
if self.master_key.pubkey_signature():
log.debug('Adding pubkey signature to auth-reply')
log.debug(self.master_key.pubkey_signature())
ret.update({'pub_sig': self.master_key.pubkey_signature()})
else:
key_pass = salt.utils.sdb.sdb_get(self.opts['signing_key_pass'], self.opts)
log.debug('Signing master public key before sending')
pub_sign = salt.crypt.sign_message(self.master_key.get_sign_paths()[1], ret['pub_key'], key_pass)
ret.update({'pub_sig': binascii.b2a_base64(pub_sign)})
mcipher = PKCS1_OAEP.new(self.master_key.key)
if (self.opts['auth_mode'] >= 2):
if ('token' in load):
try:
mtoken = mcipher.decrypt(load['token'])
aes = '{0}_|-{1}'.format(salt.master.SMaster.secrets['aes']['secret'].value, mtoken)
except Exception:
pass
else:
aes = salt.master.SMaster.secrets['aes']['secret'].value
ret['aes'] = cipher.encrypt(aes)
else:
if ('token' in load):
try:
mtoken = mcipher.decrypt(load['token'])
ret['token'] = cipher.encrypt(mtoken)
except Exception:
pass
aes = salt.master.SMaster.secrets['aes']['secret'].value
ret['aes'] = cipher.encrypt(salt.master.SMaster.secrets['aes']['secret'].value)
digest = hashlib.sha256(aes).hexdigest()
ret['sig'] = salt.crypt.private_encrypt(self.master_key.key, digest)
eload = {'result': True, 'act': 'accept', 'id': load['id'], 'pub': load['pub']}
self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
return ret
|
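In the auth_mode >= 2 branch above, the minion's challenge token is decrypted with the master's private key, appended to the AES key material with the '_|-' delimiter, and the combined string is re-encrypted for the minion. A compressed sketch of that round trip with throwaway keys:

from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA

master = RSA.generate(2048)
minion = RSA.generate(2048)

# Minion side: encrypt a challenge token with the master's public key
load_token = PKCS1_OAEP.new(master.publickey()).encrypt(b'challenge-token')

# Master side: recover the token, fold it into the AES key material,
# then encrypt the result with the minion's public key
mtoken = PKCS1_OAEP.new(master).decrypt(load_token)
aes = b'aes-session-key' + b'_|-' + mtoken
reply = PKCS1_OAEP.new(minion.publickey()).encrypt(aes)

# Minion side: only the holder of the minion private key can recover it
assert PKCS1_OAEP.new(minion).decrypt(reply) == aes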
'Only create one instance of channel per __key()'
| def __new__(cls, opts, **kwargs):
| io_loop = kwargs.get('io_loop')
if (io_loop is None):
zmq.eventloop.ioloop.install()
io_loop = tornado.ioloop.IOLoop.current()
if (io_loop not in cls.instance_map):
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if (obj is None):
log.debug('Initializing new AsyncZeroMQReqChannel for {0}'.format(key))
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
loop_instance_map[key] = obj
log.trace('Inserted key into loop_instance_map id {0} for key {1} and process {2}'.format(id(loop_instance_map), key, os.getpid()))
else:
log.debug('Re-using AsyncZeroMQReqChannel for {0}'.format(key))
return obj
|
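The __new__ above yields one channel per (io_loop, key) pair via a WeakValueDictionary, so instances are shared while referenced and garbage-collected once the last strong reference drops. The pattern reduced to its skeleton:

import weakref

class Channel(object):
    instance_map = {}  # io_loop -> WeakValueDictionary of key -> instance

    def __new__(cls, loop, key):
        per_loop = cls.instance_map.setdefault(loop, weakref.WeakValueDictionary())
        obj = per_loop.get(key)
        if obj is None:
            obj = object.__new__(cls)
            obj.loop, obj.key = loop, key
            per_loop[key] = obj  # weak ref: entry vanishes with the last user
        return obj

a = Channel('loop1', ('master', 4506))
b = Channel('loop1', ('master', 4506))
assert a is b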
'Since the message_client creates sockets and assigns them to the IOLoop we have to
specifically destroy them, since we aren\'t the only ones with references to the FDs'
| def __del__(self):
| if hasattr(self, 'message_client'):
self.message_client.destroy()
else:
log.debug('No message_client attr for AsyncZeroMQReqChannel found. Not destroying sockets.')
|
'Send a load across the wire, with encryption
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
:param dict load: A load to send across the wire
:param int tries: The number of attempts to make before failing
:param int timeout: The number of seconds to wait for a response before failing'
| @tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60, raw=False):
| @tornado.gen.coroutine
def _do_transfer():
data = (yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout, tries=tries))
if data:
data = self.auth.crypticle.loads(data, raw)
if (six.PY3 and (not raw)):
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
if (not self.auth.authenticated):
(yield self.auth.authenticate())
try:
ret = (yield _do_transfer())
except salt.crypt.AuthenticationError:
(yield self.auth.authenticate())
ret = (yield _do_transfer())
raise tornado.gen.Return(ret)
|
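The coroutine above authenticates lazily and retries exactly once on an authentication error, which covers a master restarting (and rotating keys) mid-call. Its control flow, reduced to a standalone sketch with placeholder names:

import tornado.gen

class AuthenticationError(Exception):
    pass

@tornado.gen.coroutine
def crypted_transfer(auth, do_transfer):
    # Authenticate on first use rather than at construction time
    if not auth.authenticated:
        yield auth.authenticate()
    try:
        ret = yield do_transfer()
    except AuthenticationError:
        # The master may have rotated its key; renegotiate once and retry
        yield auth.authenticate()
        ret = yield do_transfer()
    raise tornado.gen.Return(ret)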
'Send a load across the wire in cleartext
:param dict load: A load to send across the wire
:param int tries: The number of attempts to make before failing
:param int timeout: The number of seconds to wait for a response before failing'
| @tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
| ret = (yield self.message_client.send(self._package_load(load), timeout=timeout, tries=tries))
raise tornado.gen.Return(ret)
|
'Send a request, return a future which will complete when we send the message'
| @tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
| if (self.crypt == 'clear'):
ret = (yield self._uncrypted_transfer(load, tries=tries, timeout=timeout))
else:
ret = (yield self._crypted_transfer(load, tries=tries, timeout=timeout, raw=raw))
raise tornado.gen.Return(ret)
|
'Return the master publish port'
| @property
def master_pub(self):
| return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'], port=self.publish_port)
|
'Take the zmq messages, decrypt/decode them into a payload
:param list messages: A list of messages to be decoded'
| @tornado.gen.coroutine
def _decode_messages(self, messages):
| messages_len = len(messages)
if (messages_len == 1):
payload = self.serial.loads(messages[0])
elif (messages_len == 2):
if (messages[0] not in ('broadcast', self.hexid)):
log.debug('Publish received for not this minion: {0}'.format(messages[0]))
raise tornado.gen.Return(None)
payload = self.serial.loads(messages[1])
else:
raise Exception('Invalid number of messages ({0}) in zeromq pubmessage from master'.format(messages_len))
ret = (yield self._decode_payload(payload))
raise tornado.gen.Return(ret)
|
'Return the current zmqstream, creating one if necessary'
| @property
def stream(self):
| if (not hasattr(self, '_stream')):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream
|
'Register a callback for received messages (that we didn\'t initiate)
:param func callback: A function which should be called when data is received'
| def on_recv(self, callback):
| if (callback is None):
return self.stream.on_recv(None)
@tornado.gen.coroutine
def wrap_callback(messages):
payload = (yield self._decode_messages(messages))
if (payload is not None):
callback(payload)
return self.stream.on_recv(wrap_callback)
|
'Multiprocessing target for the zmq queue device'
| def zmq_device(self):
| self.__setup_signals()
salt.utils.appendproctitle('MWorkerQueue')
self.context = zmq.Context(self.opts['worker_threads'])
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if ((self.opts['ipv6'] is True) and hasattr(zmq, 'IPV4ONLY')):
self.clients.setsockopt(zmq.IPV4ONLY, 0)
self.clients.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
if (HAS_ZMQ_MONITOR and self.opts['zmq_monitor']):
import threading
self._monitor = ZeroMQSocketMonitor(self.clients)
t = threading.Thread(target=self._monitor.start_poll)
t.start()
self.workers = self.context.socket(zmq.DEALER)
if (self.opts.get('ipc_mode', '') == 'tcp'):
self.w_uri = 'tcp://127.0.0.1:{0}'.format(self.opts.get('tcp_master_workers', 4515))
else:
self.w_uri = 'ipc://{0}'.format(os.path.join(self.opts['sock_dir'], 'workers.ipc'))
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
if (self.clients.closed or self.workers.closed):
break
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if (exc.errno == errno.EINTR):
continue
raise exc
except (KeyboardInterrupt, SystemExit):
break
|
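The loop above runs a classic ZeroMQ queue device: a ROUTER socket faces the minions' REQ sockets and a DEALER socket fans the requests out to the MWorker processes, with zmq.device shuttling frames in both directions until a socket closes. The topology in miniature (endpoints are placeholders):

import zmq

context = zmq.Context()

clients = context.socket(zmq.ROUTER)   # minions connect here
clients.bind('tcp://127.0.0.1:4506')

workers = context.socket(zmq.DEALER)   # worker processes connect here
workers.bind('ipc:///tmp/workers.ipc')

# Blocks forever, forwarding frames between the two sockets:
# zmq.device(zmq.QUEUE, clients, workers)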
'Cleanly shut down the router socket'
| def close(self):
| if self._closing:
return
log.info('MWorkerQueue under PID %s is closing', os.getpid())
self._closing = True
if (hasattr(self, '_monitor') and (self._monitor is not None)):
self._monitor.stop()
self._monitor = None
if (hasattr(self, '_w_monitor') and (self._w_monitor is not None)):
self._w_monitor.stop()
self._w_monitor = None
if (hasattr(self, 'clients') and (self.clients.closed is False)):
self.clients.close()
if (hasattr(self, 'workers') and (self.workers.closed is False)):
self.workers.close()
if hasattr(self, 'stream'):
self.stream.close()
if (hasattr(self, '_socket') and (self._socket.closed is False)):
self._socket.close()
if (hasattr(self, 'context') and (self.context.closed is False)):
self.context.term()
|
'Pre-fork we need to create the zmq router device
:param func process_manager: An instance of salt.utils.process.ProcessManager'
| def pre_fork(self, process_manager):
| salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
process_manager.add_process(self.zmq_device)
|
'After forking we need to create all of the local sockets to listen to the
router
:param func payload_handler: A function to be called to handle incoming payloads as
they are picked up off the wire
:param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling'
| def post_fork(self, payload_handler, io_loop):
| self.payload_handler = payload_handler
self.io_loop = io_loop
self.context = zmq.Context(1)
self._socket = self.context.socket(zmq.REP)
if (HAS_ZMQ_MONITOR and self.opts['zmq_monitor']):
import threading
self._w_monitor = ZeroMQSocketMonitor(self._socket)
t = threading.Thread(target=self._w_monitor.start_poll)
t.start()
if (self.opts.get('ipc_mode', '') == 'tcp'):
self.w_uri = 'tcp://127.0.0.1:{0}'.format(self.opts.get('tcp_master_workers', 4515))
else:
self.w_uri = 'ipc://{0}'.format(os.path.join(self.opts['sock_dir'], 'workers.ipc'))
log.info('Worker binding to socket {0}'.format(self.w_uri))
self._socket.connect(self.w_uri)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
self.stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
self.stream.on_recv_stream(self.handle_message)
|
'Handle incoming messages from underlying TCP streams
:param ZMQStream stream: A ZeroMQ stream.
See http://zeromq.github.io/pyzmq/api/generated/zmq.eventloop.zmqstream.html
:param dict payload: A payload to process'
| @tornado.gen.coroutine
def handle_message(self, stream, payload):
| try:
payload = self.serial.loads(payload[0])
payload = self._decode_payload(payload)
except Exception as exc:
exc_type = type(exc).__name__
if (exc_type == 'AuthenticationError'):
log.debug('Minion failed to auth to master. Since the payload is encrypted, it is not known which minion failed to authenticate. It is likely that this is a transient failure due to the master rotating its public key.')
else:
log.error('Bad load from minion: %s: %s', exc_type, exc)
stream.send(self.serial.dumps('bad load'))
raise tornado.gen.Return()
if ((not isinstance(payload, dict)) or (not isinstance(payload.get('load'), dict))):
log.error('payload and load must be a dict. Payload was: {0} and load was {1}'.format(payload, payload.get('load')))
stream.send(self.serial.dumps('payload and load must be a dict'))
raise tornado.gen.Return()
if ((payload['enc'] == 'clear') and (payload.get('load', {}).get('cmd') == '_auth')):
stream.send(self.serial.dumps(self._auth(payload['load'])))
raise tornado.gen.Return()
try:
(ret, req_opts) = (yield self.payload_handler(payload))
except Exception as e:
stream.send('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if (req_fun == 'send_clear'):
stream.send(self.serial.dumps(ret))
elif (req_fun == 'send'):
stream.send(self.serial.dumps(self.crypticle.dumps(ret)))
elif (req_fun == 'send_private'):
stream.send(self.serial.dumps(self._encrypt_private(ret, req_opts['key'], req_opts['tgt'])))
else:
log.error('Unknown req_fun {0}'.format(req_fun))
stream.send('Server-side exception handling payload')
raise tornado.gen.Return()
|
'Bind to the interface specified in the configuration file'
| def _publish_daemon(self):
| salt.utils.appendproctitle(self.__class__.__name__)
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUB)
_set_tcp_keepalive(pub_sock, self.opts)
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
except AttributeError:
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if ((self.opts['ipv6'] is True) and hasattr(zmq, 'IPV4ONLY')):
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_sock.setsockopt(zmq.BACKLOG, self.opts.get('zmq_backlog', 1000))
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
pull_sock = context.socket(zmq.PULL)
if (self.opts.get('ipc_mode', '') == 'tcp'):
pull_uri = 'tcp://127.0.0.1:{0}'.format(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = 'ipc://{0}'.format(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))
salt.utils.zeromq.check_ipc_path_max_len(pull_uri)
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
old_umask = os.umask(127)
try:
pull_sock.bind(pull_uri)
finally:
os.umask(old_umask)
try:
while True:
try:
package = pull_sock.recv()
unpacked_package = salt.payload.unpackage(package)
if six.PY3:
unpacked_package = salt.transport.frame.decode_embedded_strs(unpacked_package)
payload = unpacked_package['payload']
if self.opts['zmq_filtering']:
if ('topic_lst' in unpacked_package):
for topic in unpacked_package['topic_lst']:
htopic = hashlib.sha1(topic).hexdigest()
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
else:
pub_sock.send('broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
else:
pub_sock.send(payload)
except zmq.ZMQError as exc:
if (exc.errno == errno.EINTR):
continue
raise exc
except KeyboardInterrupt:
if (pub_sock.closed is False):
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if (pull_sock.closed is False):
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
if (context.closed is False):
context.term()
|
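When zmq_filtering is enabled, the publisher above sends each payload behind a SNDMORE topic frame: the SHA-1 hex digest of a targeted minion id, or the literal 'broadcast'. A minion-side subscriber then gets transport-level filtering by subscribing to exactly those two prefixes; ZeroMQ drops every other topic before user code sees it. A sketch of the subscriber half (endpoint and minion id are placeholders):

import hashlib
import zmq

context = zmq.Context()
sub = context.socket(zmq.SUB)
sub.connect('tcp://127.0.0.1:4505')

# Subscribe to messages hashed for this minion and to broadcasts
hexid = hashlib.sha1(b'minion-1').hexdigest().encode()
sub.setsockopt(zmq.SUBSCRIBE, hexid)
sub.setsockopt(zmq.SUBSCRIBE, b'broadcast')

# Each delivery is then a [topic, payload] multipart message:
# topic, payload = sub.recv_multipart()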
'Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
:param func process_manager: A ProcessManager, from salt.utils.process.ProcessManager'
| def pre_fork(self, process_manager):
| process_manager.add_process(self._publish_daemon)
|
'Publish "load" to minions
:param dict load: A load to be sent across the wire to minions'
| def publish(self, load):
| payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug('Signing data packet')
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
if (self.opts.get('ipc_mode', '') == 'tcp'):
pull_uri = 'tcp://127.0.0.1:{0}'.format(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = 'ipc://{0}'.format(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))
pub_sock.connect(pull_uri)
int_payload = {'payload': self.serial.dumps(payload)}
if (load['tgt_type'] == 'list'):
int_payload['topic_lst'] = load['tgt']
match_targets = ['pcre', 'glob', 'list']
if (self.opts['zmq_filtering'] and (load['tgt_type'] in match_targets)):
match_ids = self.ckminions.check_minions(load['tgt'], tgt_type=load['tgt_type'])
log.debug('Publish Side Match: {0}'.format(match_ids))
int_payload['topic_lst'] = match_ids
pub_sock.send(self.serial.dumps(int_payload))
pub_sock.close()
context.term()
|
'Create an asynchronous message client
:param dict opts: The salt opts dictionary
:param str addr: The interface IP address to bind to
:param int linger: The number of seconds to linger on a ZMQ socket. See
http://api.zeromq.org/2-1:zmq-setsockopt [ZMQ_LINGER]
:param IOLoop io_loop: A Tornado IOLoop event scheduler [tornado.ioloop.IOLoop]'
| def __init__(self, opts, addr, linger=0, io_loop=None):
| self.opts = opts
self.addr = addr
self.linger = linger
if (io_loop is None):
zmq.eventloop.ioloop.install()
self.io_loop = tornado.ioloop.IOLoop.current()
else:
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
self.context = zmq.Context()
self._init_socket()
self.send_queue = []
self.send_future_map = {}
self.send_timeout_map = {}
|
'Handle a message timeout by removing it from the sending queue
and informing the caller
:raises: SaltReqTimeoutError'
| def timeout_message(self, message):
| future = self.send_future_map.pop(message, None)
if (future is not None):
del self.send_timeout_map[message]
if (future.attempts < future.tries):
future.attempts += 1
log.debug('SaltReqTimeoutError, retrying. ({0}/{1})'.format(future.attempts, future.tries))
self.send(message, timeout=future.timeout, tries=future.tries, future=future)
else:
future.set_exception(SaltReqTimeoutError('Message timed out'))
|
'Return a future which will be completed when the message has a response'
| def send(self, message, timeout=None, tries=3, future=None, callback=None, raw=False):
| if (future is None):
future = tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
message = self.serial.dumps(message)
if (callback is not None):
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
self.send_future_map[message] = future
if (self.opts.get('detect_mode') is True):
timeout = 1
if (timeout is not None):
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message)
self.send_timeout_map[message] = send_timeout
if (len(self.send_queue) == 0):
self.io_loop.spawn_callback(self._internal_send_recv)
self.send_queue.append(message)
return future
|
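send() above pairs every queued message with an IOLoop timeout handle, and timeout_message later pops the future and either retries or fails it. The timeout half of that contract in isolation (a minimal sketch; names are placeholders):

import tornado.concurrent
import tornado.ioloop

io_loop = tornado.ioloop.IOLoop.current()
send_future_map = {}
send_timeout_map = {}

def timeout_message(mid):
    # Fires only if no response arrived within the window
    future = send_future_map.pop(mid, None)
    if future is not None:
        del send_timeout_map[mid]
        future.set_exception(Exception('Message timed out'))

def on_response(mid, response):
    # A reply arrived in time: cancel the pending timeout and resolve
    io_loop.remove_timeout(send_timeout_map.pop(mid))
    send_future_map.pop(mid).set_result(response)

future = tornado.concurrent.Future()
send_future_map['m1'] = future
send_timeout_map['m1'] = io_loop.call_later(60, timeout_message, 'm1')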
'Create ZMQ monitor sockets
More information:
http://api.zeromq.org/4-0:zmq-socket-monitor'
| def __init__(self, socket):
| self._socket = socket
self._monitor_socket = self._socket.get_monitor_socket()
self._monitor_stream = None
|
'Backwards compatibility'
| def call_func(self, fun, **kwargs):
| return self.low(fun, kwargs, print_event=kwargs.get('print_event', True), full_return=kwargs.get('full_return', False))
|
'Execute a wheel function through the master network interface (eauth).'
| def master_call(self, **kwargs):
| load = kwargs
load['cmd'] = 'wheel'
interface = self.opts['interface']
if (interface == '0.0.0.0'):
interface = '127.0.0.1'
master_uri = 'tcp://{0}:{1}'.format(salt.utils.ip_bracket(interface), self.opts['ret_port'])
channel = salt.transport.Channel.factory(self.opts, crypt='clear', master_uri=master_uri, usage='master_call')
ret = channel.send(load)
if isinstance(ret, collections.Mapping):
if ('error' in ret):
salt.utils.error.raise_error(**ret['error'])
return ret
|
'Execute a wheel function synchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@wheel``).
.. code-block:: python
>>> wheel.cmd_sync({
\'fun\': \'key.finger\',
\'match\': \'jerry\',
\'eauth\': \'auto\',
\'username\': \'saltdev\',
\'password\': \'saltdev\',
})
{\'minions\': {\'jerry\': \'5d:f6:79:43:5e:d4:42:3f:57:b8:45:a8:7e:a4:6e:ca\'}}'
| def cmd_sync(self, low, timeout=None, full_return=False):
| return self.master_call(**low)
|
'Execute a function asynchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized
.. code-block:: python
>>> wheel.cmd_async({
\'fun\': \'key.finger\',
\'match\': \'jerry\',
\'eauth\': \'auto\',
\'username\': \'saltdev\',
\'password\': \'saltdev\',
})
{\'jid\': \'20131219224744416681\', \'tag\': \'salt/wheel/20131219224744416681\'}'
| def cmd_async(self, low):
| fun = low.pop('fun')
return self.async(fun, low)
|
'Execute a function
.. code-block:: python
>>> wheel.cmd(\'key.finger\', [\'jerry\'])
{\'minions\': {\'jerry\': \'5d:f6:79:43:5e:d4:42:3f:57:b8:45:a8:7e:a4:6e:ca\'}}'
| def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False):
| return super(WheelClient, self).cmd(fun, arg, pub_data, kwarg, print_event, full_return)
|
'Initialize the differ.'
| def __init__(self, current_dict, past_dict):
| (self.current_dict, self.past_dict) = (current_dict, past_dict)
(self.current_keys, self.past_keys) = [set(d.keys()) for d in (current_dict, past_dict)]
self.intersect = self.current_keys.intersection(self.past_keys)
|
'True if the two dicts are the same.'
| def same(self):
| return (self.current_dict == self.past_dict)
|
'Return a set of additions to past_dict.'
| def added(self):
| return (self.current_keys - self.intersect)
|
'Return a set of things removed from past_dict.'
| def removed(self):
| return (self.past_keys - self.intersect)
|
'Return a set of the keys with changed values.'
| def changed(self):
| return set((o for o in self.intersect if (self.past_dict[o] != self.current_dict[o])))
|
'Return a set of the keys with unchanged values.'
| def unchanged(self):
| return set((o for o in self.intersect if (self.past_dict[o] == self.current_dict[o])))
|
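Taken together, the six methods above form a small dict differ built on set arithmetic over the two key sets. Example usage (assuming the class is named DictDiffer, as in salt.utils.dictdiffer):

past = {'a': 1, 'b': 2, 'c': 3}
current = {'b': 2, 'c': 30, 'd': 4}
differ = DictDiffer(current, past)

print(differ.added())      # {'d'}
print(differ.removed())    # {'a'}
print(differ.changed())    # {'c'}
print(differ.unchanged())  # {'b'}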
'returns location in the swagger parameter object'
| @property
def location(self):
| _location = self._paramdict.get('in')
if (_location in _Swagger.SwaggerParameter.LOCATIONS):
return _location
raise ValueError('Unsupported parameter location: {0} in Parameter Object'.format(_location))
|
'returns parameter name in the swagger parameter object'
| @property
def name(self):
| _name = self._paramdict.get('name')
if _name:
if (self.location == 'header'):
return 'method.request.header.{0}'.format(_name)
elif (self.location == 'query'):
return 'method.request.querystring.{0}'.format(_name)
elif (self.location == 'path'):
return 'method.request.path.{0}'.format(_name)
return None
raise ValueError('Parameter must have a name: {0}'.format(_dict_to_json_pretty(self._paramdict)))
|
'returns the name of the schema given the reference in the swagger parameter object'
| @property
def schema(self):
| if (self.location == 'body'):
_schema = self._paramdict.get('schema')
if _schema:
if ('$ref' in _schema):
schema_name = _schema.get('$ref').split('/')[(-1)]
return schema_name
raise ValueError('Body parameter must have a JSON reference to the schema definition due to Amazon API restrictions: {0}'.format(self.name))
raise ValueError('Body parameter must have a schema: {0}'.format(self.name))
return None
|
'returns the name of the schema given the reference in the swagger method response object'
| @property
def schema(self):
| _schema = self._r.get('schema')
if _schema:
if ('$ref' in _schema):
return _schema.get('$ref').split('/')[(-1)]
raise ValueError('Method response must have a JSON reference to the schema definition: {0}'.format(_schema))
return None
|
'returns the headers dictionary in the method response object'
| @property
def headers(self):
| _headers = self._r.get('headers', {})
return _headers
|
'Helper function to determine if the passed code is in the 400~599 range of http error
codes'
| def _is_http_error_rescode(self, code):
| return bool(re.match('^\\s*[45]\\d\\d\\s*$', code))
|
'Helper function to help validate the convention established in the swagger file on how
to handle response code mapping/integration'
| def _validate_error_response_model(self, paths, mods):
| for (path, ops) in paths:
for (opname, opobj) in six.iteritems(ops):
if (opname not in _Swagger.SWAGGER_OPERATION_NAMES):
continue
if ('responses' not in opobj):
raise ValueError('missing mandatory responses field in path item object')
for (rescode, resobj) in six.iteritems(opobj.get('responses')):
if (not self._is_http_error_rescode(str(rescode))):
continue
if ('schema' not in resobj):
raise ValueError('missing schema field in path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobj = resobj.get('schema')
if ('$ref' not in schemaobj):
raise ValueError('missing $ref field under schema in path {0}, op {1}, response {2}'.format(path, opname, rescode))
schemaobjref = schemaobj.get('$ref', '/')
modelname = schemaobjref.split('/')[(-1)]
if (modelname not in mods):
raise ValueError('model schema {0} reference not found under /definitions'.format(schemaobjref))
model = mods.get(modelname)
if (model.get('type') != 'object'):
raise ValueError('model schema {0} must be type object'.format(modelname))
if ('properties' not in model):
raise ValueError('model schema {0} must have properties fields'.format(modelname))
modelprops = model.get('properties')
if ('errorMessage' not in modelprops):
raise ValueError('model schema {0} must have errorMessage as a property to match AWS convention. If pattern is not set, .+ will be used'.format(modelname))
|
'Checks if the lambda function name format contains only known elements
:return: True on success, ValueError raised on error'
| def _validate_lambda_funcname_format(self):
| try:
if self._lambda_funcname_format:
known_kwargs = dict(stage='', api='', resource='', method='')
self._lambda_funcname_format.format(**known_kwargs)
return True
except Exception:
raise ValueError('Invalid lambda_funcname_format {0}. Please review documentation for known substitutable keys'.format(self._lambda_funcname_format))
|
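The probe above works because str.format raises KeyError when a template references a field name that is not supplied, so formatting with the four known keys rejects any unknown placeholder. For example:

known_kwargs = dict(stage='', api='', resource='', method='')

'{stage}_{api}_{resource}_{method}'.format(**known_kwargs)  # accepted
try:
    '{stage}_{tenant}'.format(**known_kwargs)  # unknown key
except KeyError as err:
    print('rejected: {0}'.format(err))  # rejected: 'tenant'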
'High level check/validation of the input swagger file based on
https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
This is not a full schema compliance check; rather, it makes sure that the input file (YAML or
JSON) can be read into a dictionary, and checks the content of the Swagger Object for version
and info.'
| def _validate_swagger_file(self):
| for field in self._cfg:
if ((field not in _Swagger.SWAGGER_OBJ_V2_FIELDS) and (not _Swagger.VENDOR_EXT_PATTERN.match(field))):
raise ValueError('Invalid Swagger Object Field: {0}'.format(field))
for field in _Swagger.SWAGGER_OBJ_V2_FIELDS_REQUIRED:
if (field not in self._cfg):
raise ValueError('Missing Swagger Object Field: {0}'.format(field))
self._swagger_version = self._cfg.get('swagger')
if (self._swagger_version not in _Swagger.SWAGGER_VERSIONS_SUPPORTED):
raise ValueError('Unsupported Swagger version: {0}. Supported versions are {1}'.format(self._swagger_version, _Swagger.SWAGGER_VERSIONS_SUPPORTED))
log.info(type(self._models))
self._validate_error_response_model(self.paths, self._models())
|
'returns md5 hash for the swagger file'
| @property
def md5_filehash(self):
| return self._md5_filehash
|
'returns the swagger info object as a dictionary'
| @property
def info(self):
| info = self._cfg.get('info')
if (not info):
raise ValueError('Info Object has no values')
return info
|
'returns the swagger info object as a pretty printed json string.'
| @property
def info_json(self):
| return _dict_to_json_pretty(self.info)
|
'returns the name of the api'
| @property
def rest_api_name(self):
| return self._api_name
|
'returns the version field in the swagger info object'
| @property
def rest_api_version(self):
| version = self.info.get('version')
if (not version):
raise ValueError('Missing version value in Info Object')
return version
|
'returns an iterator for the models specified in the swagger file'
| def _models(self):
| models = self._cfg.get('definitions')
if (not models):
raise ValueError('Definitions Object has no values, you need to define them in your swagger file')
return models
|
'generator to return the tuple of model and its schema to create on aws.'
| def models(self):
| model_dict = self._build_all_dependencies()
while True:
model = self._get_model_without_dependencies(model_dict)
if (not model):
break
(yield (model, self._models().get(model)))
|
'returns an iterator for the relative resource paths specified in the swagger file'
| @property
def paths(self):
| paths = self._cfg.get('paths')
if (not paths):
raise ValueError('Paths Object has no values, you need to define them in your swagger file')
for path in paths:
if (not path.startswith('/')):
raise ValueError('Path object {0} should start with /. Please fix it'.format(path))
return six.iteritems(paths)
|
'returns the base path field as defined in the swagger file'
| @property
def basePath(self):
| basePath = self._cfg.get('basePath', '')
return basePath
|
'returns the rest api id as returned by AWS on creation of the rest api'
| @property
def restApiId(self):
| return self._restApiId
|
'allows the assignment of the rest api id on creation of the rest api'
| @restApiId.setter
def restApiId(self, restApiId):
| self._restApiId = restApiId
|
'this property returns the unique description in pretty printed json for
a particular api deployment'
| @property
def deployment_label_json(self):
| return _dict_to_json_pretty(self.deployment_label)
|
'this property returns the deployment label dictionary (mainly used by
stage description)'
| @property
def deployment_label(self):
| label = dict()
label['swagger_info_object'] = self.info
label['api_name'] = self.rest_api_name
label['swagger_file'] = os.path.basename(self._swagger_file)
label['swagger_file_md5sum'] = self.md5_filehash
return label
|
'Helper function to find whether there are other stages still associated with a deployment'
| def _one_or_more_stages_remain(self, deploymentId):
| stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages')
return bool(stages)
|
'Helper function to find whether there are deployments left with stages associated'
| def no_more_deployments_remain(self):
| no_more_deployments = True
deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
deploymentId = deployment.get('id')
stages = __salt__['boto_apigateway.describe_api_stages'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('stages')
if stages:
no_more_deployments = False
break
return no_more_deployments
|
'Helper method to find the deployment id that the stage name is currently associated with.'
| def _get_current_deployment_id(self):
| deploymentId = ''
stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage')
if stage:
deploymentId = stage.get('deploymentId')
return deploymentId
|
'Helper method to find the deployment label that the stage_name is currently associated with.'
| def _get_current_deployment_label(self):
| deploymentId = self._get_current_deployment_id()
deployment = __salt__['boto_apigateway.describe_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args).get('deployment')
if deployment:
return deployment.get('description')
return None
|
'Helper method to return the deployment id matching the desired deployment label for
this Swagger object based on the given api_name, swagger_file'
| def _get_desired_deployment_id(self):
| deployments = __salt__['boto_apigateway.describe_api_deployments'](restApiId=self.restApiId, **self._common_aws_args).get('deployments')
if deployments:
for deployment in deployments:
if (deployment.get('description') == self.deployment_label_json):
return deployment.get('id')
return ''
|
'overwrite the given stage_name\'s stage variables with the given stage_variables'
| def overwrite_stage_variables(self, ret, stage_variables):
| res = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args)
if (not res.get('overwrite')):
ret['result'] = False
ret['abort'] = True
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret, 'overwrite_stage_variables', res.get('stage'))
return ret
|
'Helper method to associate the stage_name to the given deploymentId and make this current'
| def _set_current_deployment(self, stage_desc_json, stage_variables):
| stage = __salt__['boto_apigateway.describe_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args).get('stage')
if (not stage):
stage = __salt__['boto_apigateway.create_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, description=stage_desc_json, variables=stage_variables, **self._common_aws_args)
if (not stage.get('stage')):
return {'set': False, 'error': stage.get('error')}
else:
overwrite = __salt__['boto_apigateway.overwrite_api_stage_variables'](restApiId=self.restApiId, stageName=self._stage_name, variables=stage_variables, **self._common_aws_args)
if (not overwrite.get('stage')):
return {'set': False, 'error': overwrite.get('error')}
return __salt__['boto_apigateway.activate_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, deploymentId=self._deploymentId, **self._common_aws_args)
|
'returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description'
| def _resolve_api_id(self):
| apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args).get('restapi')
if apis:
if (len(apis) == 1):
self.restApiId = apis[0].get('id')
else:
raise ValueError('Multiple APIs matching given name {0} and description {1}'.format(self.rest_api_name, self.info_json))
|
'Method to delete the given stage_name. If the current deployment tied to the given
stage_name has no other stages associated with it, the deployment will be removed
as well'
| def delete_stage(self, ret):
| deploymentId = self._get_current_deployment_id()
if deploymentId:
result = __salt__['boto_apigateway.delete_api_stage'](restApiId=self.restApiId, stageName=self._stage_name, **self._common_aws_args)
if (not result.get('deleted')):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_stage, {0}'.format(result.get('error'))
elif (not self._one_or_more_stages_remain(deploymentId)):
result = __salt__['boto_apigateway.delete_api_deployment'](restApiId=self.restApiId, deploymentId=deploymentId, **self._common_aws_args)
if (not result.get('deleted')):
ret['abort'] = True
ret['result'] = False
ret['comment'] = 'delete_stage delete_api_deployment, {0}'.format(result.get('error'))
else:
ret['comment'] = 'stage {0} has been deleted.\n'.format(self._stage_name)
else:
ret['comment'] = 'stage {0} does not exist'.format(self._stage_name)
return ret
|
'this method helps determine if the given stage_name is already on a deployment
label matching the input api_name, swagger_file.
If yes, returns abort with comment indicating already at desired state.
If not, and there are previous deployment labels in AWS matching the given input api_name and
swagger file, indicate to the caller that we only need to reassociate stage_name to the
previously existing deployment label.'
| def verify_api(self, ret):
| if self.restApiId:
deployed_label_json = self._get_current_deployment_label()
if (deployed_label_json == self.deployment_label_json):
ret['comment'] = 'Already at desired state, the stage {0} is already at the desired deployment label:\n{1}'.format(self._stage_name, deployed_label_json)
ret['current'] = True
return ret
else:
self._deploymentId = self._get_desired_deployment_id()
if self._deploymentId:
ret['publish'] = True
return ret
|
'this method ties the given stage_name to a deployment matching the given swagger_file'
| def publish_api(self, ret, stage_variables):
| stage_desc = dict()
stage_desc['current_deployment_label'] = self.deployment_label
stage_desc_json = _dict_to_json_pretty(stage_desc)
if self._deploymentId:
res = self._set_current_deployment(stage_desc_json, stage_variables)
if (not res.get('set')):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret, 'publish_api (reassociate deployment, set stage_variables)', res.get('response'))
else:
res = __salt__['boto_apigateway.create_api_deployment'](restApiId=self.restApiId, stageName=self._stage_name, stageDescription=stage_desc_json, description=self.deployment_label_json, variables=stage_variables, **self._common_aws_args)
if (not res.get('created')):
ret['abort'] = True
ret['result'] = False
ret['comment'] = res.get('error')
else:
ret = _log_changes(ret, 'publish_api (new deployment)', res.get('deployment'))
return ret
|
'Helper method to clean up resources and models if we detected a change in the swagger file
for a stage'
| def _cleanup_api(self):
| resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId, **self._common_aws_args)
if resources.get('resources'):
res = resources.get('resources')[1:]
res.reverse()
for resource in res:
delres = __salt__['boto_apigateway.delete_api_resources'](restApiId=self.restApiId, path=resource.get('path'), **self._common_aws_args)
if (not delres.get('deleted')):
return delres
models = __salt__['boto_apigateway.describe_api_models'](restApiId=self.restApiId, **self._common_aws_args)
if models.get('models'):
for model in models.get('models'):
delres = __salt__['boto_apigateway.delete_api_model'](restApiId=self.restApiId, modelName=model.get('name'), **self._common_aws_args)
if (not delres.get('deleted')):
return delres
return {'deleted': True}
|
'this method creates the top level rest api in AWS apigateway'
| def deploy_api(self, ret):
| if self.restApiId:
res = self._cleanup_api()
if (not res.get('deleted')):
ret['comment'] = 'Failed to clean up restApiId {0}'.format(self.restApiId)
ret['abort'] = True
ret['result'] = False
return ret
return ret
response = __salt__['boto_apigateway.create_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args)
if (not response.get('created')):
ret['result'] = False
ret['abort'] = True
if ('error' in response):
ret['comment'] = 'Failed to create rest api: {0}.'.format(response['error']['message'])
return ret
self.restApiId = response.get('restapi', {}).get('id')
return _log_changes(ret, 'deploy_api', response.get('restapi'))
|
'Method to delete a Rest Api named defined in the swagger file\'s Info Object\'s title value.
ret
a dictionary for returning status to Saltstack'
| def delete_api(self, ret):
| exists_response = __salt__['boto_apigateway.api_exists'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args)
if exists_response.get('exists'):
if __opts__['test']:
ret['comment'] = 'Rest API named {0} is set to be deleted.'.format(self.rest_api_name)
ret['result'] = None
ret['abort'] = True
return ret
delete_api_response = __salt__['boto_apigateway.delete_api'](name=self.rest_api_name, description=_Swagger.AWS_API_DESCRIPTION, **self._common_aws_args)
if (not delete_api_response.get('deleted')):
ret['result'] = False
ret['abort'] = True
if ('error' in delete_api_response):
ret['comment'] = 'Failed to delete rest api: {0}.'.format(delete_api_response['error']['message'])
return ret
ret = _log_changes(ret, 'delete_api', delete_api_response)
else:
ret['comment'] = 'api already absent for swagger file: {0}, desc: {1}'.format(self.rest_api_name, self.info_json)
return ret
|
'Helper function to reference models created on aws apigw'
| def _aws_model_ref_from_swagger_ref(self, r):
| model_name = r.split('/')[(-1)]
return 'https://apigateway.amazonaws.com/restapis/{0}/models/{1}'.format(self.restApiId, model_name)
|
'Helper function to map model schema to aws notation'
| def _update_schema_to_aws_notation(self, schema):
| result = {}
for (k, v) in schema.items():
if (k == '$ref'):
v = self._aws_model_ref_from_swagger_ref(v)
if isinstance(v, dict):
v = self._update_schema_to_aws_notation(v)
result[k] = v
return result
|
'Helper function to build the list of models the given object schema is referencing.'
| def _build_dependent_model_list(self, obj_schema):
| dep_models_list = []
if obj_schema:
obj_schema['type'] = obj_schema.get('type', 'object')
if (obj_schema['type'] == 'array'):
dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {})))
else:
ref = obj_schema.get('$ref')
if ref:
ref_obj_model = ref.split('/')[(-1)]
ref_obj_schema = self._models().get(ref_obj_model)
dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema))
dep_models_list.extend([ref_obj_model])
else:
properties = obj_schema.get('properties')
if properties:
for (_, prop_obj_schema) in six.iteritems(properties):
dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema))
return list(set(dep_models_list))
|
'Helper function to build a map of model to their list of model reference dependencies'
| def _build_all_dependencies(self):
| ret = {}
for (model, schema) in six.iteritems(self._models()):
dep_list = self._build_dependent_model_list(schema)
ret[model] = dep_list
return ret
|
'Helper function to find the next model that should be created'
| def _get_model_without_dependencies(self, models_dict):
| next_model = None
if (not models_dict):
return next_model
for (model, dependencies) in six.iteritems(models_dict):
if (dependencies == []):
next_model = model
break
if (next_model is None):
raise ValueError('incomplete model definitions, models in dependency list not defined: {0}'.format(models_dict))
models_dict.pop(next_model)
for (model, dep_list) in six.iteritems(models_dict):
if (next_model in dep_list):
dep_list.remove(next_model)
return next_model
|
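Together, _build_all_dependencies and _get_model_without_dependencies implement a Kahn-style topological sort: each call picks a model with an empty dependency list, removes it from the map, and strikes it from every remaining list, so referenced models are always created before the models that use them. A standalone sketch with invented model names:

def next_without_dependencies(models_dict):
    # mirror of _get_model_without_dependencies, minus the six compatibility shim
    next_model = None
    for model, deps in models_dict.items():
        if deps == []:
            next_model = model
            break
    if next_model is None:
        raise ValueError('incomplete model definitions: {0}'.format(models_dict))
    models_dict.pop(next_model)
    for deps in models_dict.values():
        if next_model in deps:
            deps.remove(next_model)
    return next_model

deps = {'User': ['Address'], 'Address': [], 'Error': []}
order = []
while deps:
    order.append(next_without_dependencies(deps))
print(order)  # ['Address', 'User', 'Error'] on CPython 3.7+; dependencies always come first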
'Method to deploy the swagger file\'s definition objects and associated schemas to AWS Apigateway as Models
ret
a dictionary for returning status to Saltstack'
| def deploy_models(self, ret):
| for (model, schema) in self.models():
_schema = self._update_schema_to_aws_notation(schema)
_schema.update({'$schema': _Swagger.JSON_SCHEMA_DRAFT_4, 'title': '{0} Schema'.format(model)})
model_exists_response = __salt__['boto_apigateway.api_model_exists'](restApiId=self.restApiId, modelName=model, **self._common_aws_args)
if model_exists_response.get('exists'):
update_model_schema_response = __salt__['boto_apigateway.update_api_model_schema'](restApiId=self.restApiId, modelName=model, schema=_dict_to_json_pretty(_schema), **self._common_aws_args)
if (not update_model_schema_response.get('updated')):
ret['result'] = False
ret['abort'] = True
if ('error' in update_model_schema_response):
ret['comment'] = 'Failed to update existing model {0} with schema {1}, error: {2}'.format(model, _dict_to_json_pretty(schema), update_model_schema_response['error']['message'])
return ret
ret = _log_changes(ret, 'deploy_models', update_model_schema_response)
else:
create_model_response = __salt__['boto_apigateway.create_api_model'](restApiId=self.restApiId, modelName=model, modelDescription=model, schema=_dict_to_json_pretty(_schema), contentType='application/json', **self._common_aws_args)
if (not create_model_response.get('created')):
ret['result'] = False
ret['abort'] = True
if ('error' in create_model_response):
ret['comment'] = 'Failed to create model {0}, schema {1}, error: {2}'.format(model, _dict_to_json_pretty(schema), create_model_response['error']['message'])
return ret
ret = _log_changes(ret, 'deploy_models', create_model_response)
return ret
|
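Before each create/update call, deploy_models wraps the converted schema in a JSON Schema envelope. A sketch of the resulting payload, assuming JSON_SCHEMA_DRAFT_4 is the standard draft-04 URL and _dict_to_json_pretty is a json.dumps wrapper (both are assumptions here):

import json

schema = {'type': 'object', 'properties': {'message': {'type': 'string'}}}  # invented 'Error' model
schema.update({'$schema': 'http://json-schema.org/draft-04/schema#',  # assumed value of JSON_SCHEMA_DRAFT_4
               'title': 'Error Schema'})
print(json.dumps(schema, indent=4, sort_keys=True))  # roughly what _dict_to_json_pretty would emit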
'Helper method to construct the lambda name based on the rule specified in the doc string of
the boto_apigateway.api_present function'
| def _lambda_name(self, resourcePath, httpMethod):
| lambda_name = self._lambda_funcname_format.format(stage=self._stage_name, api=self.rest_api_name, resource=resourcePath, method=httpMethod)
lambda_name = lambda_name.strip()
lambda_name = re.sub('{|}', '', lambda_name)
lambda_name = re.sub('\\s+|/', '_', lambda_name).lower()
return re.sub('_+', '_', lambda_name)
|
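The three-pass normalization in _lambda_name is easiest to see with a worked example; the format string and all names below are hypothetical:

import re

def lambda_name(fmt, stage, api, resource, method):
    # same normalization steps as _lambda_name above
    name = fmt.format(stage=stage, api=api, resource=resource, method=method).strip()
    name = re.sub('{|}', '', name)               # drop braces around path parameters
    name = re.sub(r'\s+|/', '_', name).lower()   # whitespace and slashes become underscores
    return re.sub('_+', '_', name)               # collapse runs of underscores

print(lambda_name('{stage}_{api}_{resource}_{method}', 'dev', 'PetShop', '/users/{userId}', 'GET'))
# dev_petshop_users_userid_get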
'Helper Method to construct the lambda uri for use in method integration'
| def _lambda_uri(self, lambda_name, lambda_region):
| profile = self._common_aws_args.get('profile')
region = self._common_aws_args.get('region')
lambda_region = __utils__['boto3.get_region']('lambda', lambda_region, profile)
apigw_region = __utils__['boto3.get_region']('apigateway', region, profile)
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, **self._common_aws_args)
if (lambda_region != apigw_region):
if (not lambda_desc.get('function')):
# retry in the lambda's own region; key/keyid are passed explicitly to avoid a duplicate 'region' kwarg
lambda_desc = __salt__['boto_lambda.describe_function'](lambda_name, region=lambda_region, key=self._common_aws_args.get('key'), keyid=self._common_aws_args.get('keyid'))
if (not lambda_desc.get('function')):
raise ValueError('Could not find lambda function {0} in regions [{1}, {2}].'.format(lambda_name, lambda_region, apigw_region))
lambda_arn = lambda_desc.get('function').get('FunctionArn')
lambda_uri = 'arn:aws:apigateway:{0}:lambda:path/2015-03-31/functions/{1}/invocations'.format(apigw_region, lambda_arn)
return lambda_uri
|
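The returned value is the standard apigateway lambda-invocation ARN; a worked example with a hypothetical region and function ARN:

apigw_region = 'us-east-1'  # hypothetical
lambda_arn = 'arn:aws:lambda:us-west-2:123456789012:function:dev_petshop_users_get'  # hypothetical
print('arn:aws:apigateway:{0}:lambda:path/2015-03-31/functions/{1}/invocations'.format(apigw_region, lambda_arn))
# arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:123456789012:function:dev_petshop_users_get/invocations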
'Helper function to construct the method request params, models, request_templates and
integration_type values needed to configure method request integration/mappings.'
| def _parse_method_data(self, method_name, method_data):
| method_params = {}
method_models = {}
if ('parameters' in method_data):
for param in method_data['parameters']:
p = _Swagger.SwaggerParameter(param)
if p.name:
method_params[p.name] = True
if p.schema:
method_models['application/json'] = p.schema
request_templates = (_Swagger.REQUEST_OPTION_TEMPLATE if (method_name == 'options') else _Swagger.REQUEST_TEMPLATE)
integration_type = ('MOCK' if (method_name == 'options') else 'AWS')
return {'params': method_params, 'models': method_models, 'request_templates': request_templates, 'integration_type': integration_type}
|
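For orientation, the structure _parse_method_data returns looks roughly like this for a non-options method; the exact request-parameter key produced by SwaggerParameter.name is assumed here, not taken from the source:

parsed = {
    'params': {'method.request.querystring.petId': True},  # assumed key format, invented name
    'models': {'application/json': 'Pet'},                 # present when a parameter carries a schema
    'request_templates': {'application/json': '...'},      # REQUEST_TEMPLATE; REQUEST_OPTION_TEMPLATE for options
    'integration_type': 'AWS',                             # 'MOCK' for options (CORS preflight) methods
}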
'Returns the pattern specified in a response schema'
| def _get_pattern_for_schema(self, schema_name, httpStatus):
| defaultPattern = ('.+' if self._is_http_error_rescode(httpStatus) else '.*')
model = self._models().get(schema_name)
patterns = self._find_patterns(model)
return (patterns[0] if patterns else defaultPattern)
|
'Helper function to construct the method response params, models, and integration_params
values needed to configure method response integration/mappings.'
| def _parse_method_response(self, method_name, method_response, httpStatus):
| method_response_models = {}
method_response_pattern = '.*'
if method_response.schema:
method_response_models['application/json'] = method_response.schema
method_response_pattern = self._get_pattern_for_schema(method_response.schema, httpStatus)
method_response_params = {}
method_integration_response_params = {}
for header in method_response.headers:
response_header = 'method.response.header.{0}'.format(header)
method_response_params[response_header] = False
header_data = method_response.headers.get(header)
method_integration_response_params[response_header] = ("'{0}'".format(header_data.get('default')) if ('default' in header_data) else "'*'")
response_templates = self._get_response_template(method_name, httpStatus)
return {'params': method_response_params, 'models': method_response_models, 'integration_params': method_integration_response_params, 'pattern': method_response_pattern, 'response_templates': response_templates}
|
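The header handling above is the subtle part: every declared response header becomes a method.response.header.* parameter, and the integration side needs a single-quoted literal, defaulting to '*'. A standalone sketch with invented headers:

def map_headers(headers):
    # mirrors the header loop in _parse_method_response
    response_params, integration_params = {}, {}
    for header, data in headers.items():
        key = 'method.response.header.{0}'.format(header)
        response_params[key] = False  # declare the header on the method response
        # integration responses take a literal value, hence the extra quotes
        integration_params[key] = "'{0}'".format(data.get('default')) if 'default' in data else "'*'"
    return response_params, integration_params

print(map_headers({'Access-Control-Allow-Origin': {'default': '*'}, 'X-Request-Id': {}}))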
'Method to create a method for the given resource path, along with its associated
request and response integrations.
ret
a dictionary for returning status to Saltstack
resource_path
the full resource path that the named method_name will be associated with.
method_name
a string that is one of the following values: \'delete\', \'get\', \'head\', \'options\',
\'patch\', \'post\', \'put\'
method_data
the value dictionary for this method in the swagger definition file.
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
\'NONE\' or \'AWS_IAM\''
| def _deploy_method(self, ret, resource_path, method_name, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type):
| method = self._parse_method_data(method_name.lower(), method_data)
if (method_name.lower() == 'options'):
api_key_required = False
authorization_type = 'NONE'
m = __salt__['boto_apigateway.create_api_method'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), authorizationType=authorization_type, apiKeyRequired=api_key_required, requestParameters=method.get('params'), requestModels=method.get('models'), **self._common_aws_args)
if (not m.get('created')):
ret = _log_error_and_abort(ret, m)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method', m)
lambda_uri = ''
if (method_name.lower() != 'options'):
lambda_uri = self._lambda_uri(self._lambda_name(resource_path, method_name), lambda_region=lambda_region)
integration = __salt__['boto_apigateway.create_api_integration'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), integrationType=method.get('integration_type'), integrationHttpMethod='POST', uri=lambda_uri, credentials=lambda_integration_role, requestTemplates=method.get('request_templates'), **self._common_aws_args)
if (not integration.get('created')):
ret = _log_error_and_abort(ret, integration)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration', integration)
if ('responses' in method_data):
for (response, response_data) in six.iteritems(method_data['responses']):
httpStatus = str(response)
method_response = self._parse_method_response(method_name.lower(), _Swagger.SwaggerMethodResponse(response_data), httpStatus)
mr = __salt__['boto_apigateway.create_api_method_response'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, responseParameters=method_response.get('params'), responseModels=method_response.get('models'), **self._common_aws_args)
if (not mr.get('created')):
ret = _log_error_and_abort(ret, mr)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_method_response', mr)
mir = __salt__['boto_apigateway.create_api_integration_response'](restApiId=self.restApiId, resourcePath=resource_path, httpMethod=method_name.upper(), statusCode=httpStatus, selectionPattern=method_response.get('pattern'), responseParameters=method_response.get('integration_params'), responseTemplates=method_response.get('response_templates'), **self._common_aws_args)
if (not mir.get('created')):
ret = _log_error_and_abort(ret, mir)
return ret
ret = _log_changes(ret, '_deploy_method.create_api_integration_response', mir)
else:
raise ValueError('No responses specified for {0} {1}'.format(resource_path, method_name))
return ret
|
'Method to deploy resources defined in the swagger file.
ret
a dictionary for returning status to Saltstack
api_key_required
True or False, whether api key is required to access this method.
lambda_integration_role
name of the IAM role or IAM role arn that Api Gateway will assume when executing
the associated lambda function
lambda_region
the region for the lambda function that Api Gateway will integrate to.
authorization_type
\'NONE\' or \'AWS_IAM\''
| def deploy_resources(self, ret, api_key_required, lambda_integration_role, lambda_region, authorization_type):
| for (path, pathData) in self.paths:
resource = __salt__['boto_apigateway.create_api_resources'](restApiId=self.restApiId, path=path, **self._common_aws_args)
if (not resource.get('created')):
ret = _log_error_and_abort(ret, resource)
return ret
ret = _log_changes(ret, 'deploy_resources', resource)
for (method, method_data) in six.iteritems(pathData):
if (method in _Swagger.SWAGGER_OPERATION_NAMES):
ret = self._deploy_method(ret, path, method, method_data, api_key_required, lambda_integration_role, lambda_region, authorization_type)
return ret
|
'Returns a pretty dictionary meant for command line output.'
| def todict(self):
| return {'Source port': self.srcport, 'Destination port': self.destport, 'Protocol': self.protocol, 'Destination address': self.destaddr}
|
'Should only run once to clean up stale lane uxd files.'
| def action(self):
| if ((not is_windows()) and self.opts.value.get('sock_dir')):
sockdirpath = os.path.abspath(self.opts.value['sock_dir'])
console.concise('Cleaning up uxd files in {0}\n'.format(sockdirpath))
protecteds = self.opts.value.get('raet_cleanup_protecteds', [])
for name in os.listdir(sockdirpath):
path = os.path.join(sockdirpath, name)
if os.path.isdir(path):
continue
(root, ext) = os.path.splitext(name)
if (ext != '.uxd'):
continue
if (not all(root.partition('.'))):
continue
if (path in protecteds):
continue
try:
os.unlink(path)
console.concise('Removed {0}\n'.format(path))
except OSError:
console.concise('Failed removing {0}\n'.format(path))
raise
|
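The all(root.partition('.')) test above is the non-obvious part: str.partition returns a (head, sep, tail) triple, and all() requires every element to be non-empty, so only files whose root looks like '<name>.<rest>' (the lane uxd naming convention) are eligible for removal. A quick illustration:

def is_lane_uxd_root(root):
    # partition returns (head, sep, tail); all three must be non-empty
    return all(root.partition('.'))

print(is_lane_uxd_root('minion.manor'))  # True, eligible for cleanup
print(is_lane_uxd_root('minion'))        # False, no '.' separator
print(is_lane_uxd_root('.manor'))        # False, empty head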
'Update .cluster.clustered share from opts'
| def action(self, **kwa):
| self.clustered.update(value=self.opts.value.get('cluster_mode', False))
|
'Create the process manager'
| def action(self):
| self.proc_mgr.value = salt.utils.process.ProcessManager()
|