def __init__(self, log_catcher):
    """Initialize new SyslogMock instance

    Args:
        log_catcher (object: LogCatcher()): a LogCatcher instance that is
            going to be used by the mock to store captured messages.
    """
    self._log_catcher = log_catcher
    self._cleanup_stale_sockets()
    self._bind_socket()
    self._log_catcher.add_fd(self._socket, log_file='syslog.stdout.log')
def stop(self):
    """Stop the syslog mock and perform the cleanup"""
    self._socket.close()
    os.remove(self.SOCKET_PATH)
@property
def stdout_line_buffer(self):
    """Interface for accessing log lines captured by syslog mock

    Returns:
        This returns a reference to the list that the log catcher
        internally uses to append log lines to. Thus it is *VERY*
        important not to modify this list and to treat it as read-only.
    """
    return self._log_catcher.line_buffer(self._socket)
@staticmethod
def _normalize_line_bytes(line_bytes):
    """Normalize newlines in a given bytes array

    Args:
        line_bytes (b''): bytes array that should be normalized

    Returns:
        Normalized bytes array.
    """
    # endswith() must be given bytes here, not str, since line_bytes is bytes
    if len(line_bytes) >= 2 and line_bytes.endswith(b'\r\n'):
        return line_bytes[:-2]
    if len(line_bytes) and line_bytes.endswith(b'\n'):
        return line_bytes[:-1]
    return line_bytes
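# A quick illustration of the normalization above (hypothetical inputs;
# assuming _normalize_line_bytes is the SyslogMock staticmethod shown here):
#
#   SyslogMock._normalize_line_bytes(b'foo\r\n')  # -> b'foo'
#   SyslogMock._normalize_line_bytes(b'foo\n')    # -> b'foo'
#   SyslogMock._normalize_line_bytes(b'foo')      # -> b'foo'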
def _read_line_from_fd(self):
    """Read a line from the stored file descriptor

    Depending on the socket type, either the datagram or the file
    interface is used.

    Returns:
        A bytes array representing the read bytes.
    """
    if isinstance(self._fd, socket.socket):
        assert self._fd.type == socket.SOCK_DGRAM
        line_bytes = self._fd.recv(self.MAX_LINE_LENGTH)
    else:
        line_bytes = self._fd.readline()
    line_bytes = self._normalize_line_bytes(line_bytes)
    return line_bytes
def _raise_if_source_is_empty(self, event_type):
    """Helper method used for determining if a given log fd is empty or not"""
    if isinstance(self._fd, socket.socket):
        # For sockets, an invalid-descriptor event signals the end of data
        if event_type == select.POLLNVAL:
            raise LogSourceEmpty()
    elif event_type == select.POLLHUP:
        # For plain file descriptors/pipes, hangup signals the end of data
        raise LogSourceEmpty()
def __init__(self, fd, log_file, log_level=None):
    """Initialize new LogWriter instance

    Args:
        fd (obj: python file descriptor): file descriptor from which log
            lines should be read.
        log_file (str): log all the gathered log lines to a file at the
            given location
        log_level (int): log level with which all the log lines should be
            logged; do not log to stdout if None
    """
    self._fd = fd
    self._log_level = log_level
    self._line_buffer = list()
    # Unbuffered binary append: log lines hit the disk immediately
    self._log_fd = open(log_file, 'ab', buffering=0)
def stop(self):
    """Stop LogWriter instance and perform a cleanup

    This method:
    * delimits the end of this LogWriter instance's logging in the log
      file with the help of the `scissors` utf8 character, so that it is
      easier to separate output from subsequent instances of a given
      object (i.e. nginx) in the same log file.
    * closes the log file descriptor
    """
    delimiter = u'\u2704'.encode('utf-8')
    # The separator text must be bytes as well, since delimiter is bytes
    msg = (delimiter * 10) + b' Logging of this instance ends here ' + (delimiter * 10)
    self._append_line_to_log_file(msg)
    self._log_fd.close()
def write(self, event_type):
    """Method used by LogCatcher instance for sending the data to LogWriter for storing

    Args:
        event_type (int): event type as described by the poll() objects
            interface (https://docs.python.org/3/library/select.html#poll-objects)
    """
    self._raise_if_source_is_empty(event_type)
    line_bytes = self._read_line_from_fd()
    if self._log_fd is not None:
        self._append_line_to_log_file(line_bytes)
    line = line_bytes.decode('utf-8', errors='backslashreplace')
    if self._log_level is not None:
        log.log(self._log_level, line)
    self._append_line_to_line_buffer(line)
@property
def line_buffer(self):
    """Expose internal log line buffer

    This method exposes the internal log buffer to the caller.

    Returns:
        A list with each log line as a single element.
    """
    return self._line_buffer
def _monitor_process_outputs(self):
    """A main loop where events are demultiplexed

    The purpose of this function is to monitor all registered file
    descriptors and, in case new data is available, hand off taking care
    of it to the LogWriter instance that is responsible for the given
    file descriptor.

    The poll() call generally seems to be easier to use and better fits
    our use case than e.g. plain select() (no need for a global lock while
    updating FD lists):
    * http://stackoverflow.com/a/25249958
    * http://www.greenend.org.uk/rjk/tech/poll.html
    """
    while True:
        ready_to_read = self._poll.poll(self._POLL_TIMEOUT)
        if not len(ready_to_read) and self._termination_flag.is_set():
            return
        for fd_tuple in ready_to_read:
            fd_no, event = fd_tuple
            writer = self._writers[fd_no]
            try:
                writer.write(event)
            except LogSourceEmpty:
                self._poll.unregister(fd_no)
                writer.stop()
                log.info('LogCatcher unregistered fd `%s`', fd_no)
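# A minimal, self-contained sketch (illustrative only, not part of the
# source) of the poll()-based demultiplexing pattern used above: register
# a pipe, poll with a timeout, and dispatch on the returned (fd, event)
# tuples.
import os
import select

r_fd, w_fd = os.pipe()
poller = select.poll()
poller.register(r_fd, select.POLLIN | select.POLLHUP)
os.write(w_fd, b'hello\n')
for fd_no, event in poller.poll(100):  # timeout in milliseconds
    if event & select.POLLIN:
        print(os.read(fd_no, 1024))    # b'hello\n'
poller.unregister(r_fd)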
def _cleanup_log_dir(self):
    """Remove all the old log files from the log directory

    Removes all the old log files from the log directory before logging
    anything new. This is necessary because we are always appending to the
    log files due to multiple instances being created and destroyed during
    the tests, and appending to old log files could confuse test
    developers.
    """
    for f_name in os.listdir(self._LOG_DIR):
        if f_name == self._GIT_KEEP_FILE:
            log.debug('Skipping git keep-file: `%s`', f_name)
            continue
        f_path = os.path.join(self._LOG_DIR, f_name)
        if os.path.isfile(f_path):
            log.debug('Removing old log `%s`', f_path)
            os.unlink(f_path)
    log.info('Logging path `%s` has been cleaned up', self._LOG_DIR)
def __init__(self):
    """Initialize new LogCatcher object"""
    self._termination_flag = threading.Event()
    self._poll = select.poll()
    self._writers = {}
    self._cleanup_log_dir()
    self._logger_thread = threading.Thread(
        target=self._monitor_process_outputs, name='LogCatcher')
    self._logger_thread.start()
    log.info('LogCatcher thread has started')
def add_fd(self, fd, log_file, log_level=None):
    """Begin handling a new file descriptor

    This method adds the given file descriptor to the set monitored by the
    internal poll() call and creates a new LogWriter instance for it.

    Args:
        fd (obj: python file descriptor): file descriptor from which log
            lines should be read.
        log_file (str): if not None - log all the gathered log lines to a
            file at the given location
        log_level (int): log level with which all the log lines should be
            logged; do not log to stdout if None
    """
    assert fd.fileno() not in self._writers
    if log_file is not None:
        log_path = os.path.join(self._LOG_DIR, log_file)
    else:
        log_path = None
    writer = LogWriter(fd, log_path, log_level)
    self._writers[fd.fileno()] = writer
    self._poll.register(fd, select.POLLIN | select.POLLHUP)
    log.info('LogCatcher registered fd `%d`', fd.fileno())
def stop(self):
    """Stop the LogCatcher instance and perform resource cleanup."""
    self._termination_flag.set()
    while self._logger_thread.is_alive():
        self._logger_thread.join(timeout=0.5)
        log.info('Waiting for LogCatcher thread to exit')
    log.info('LogCatcher thread has terminated, bye!')
def line_buffer(self, fd):
    """Expose the line buffer used for logging data from a given file descriptor

    Args:
        fd (obj: python file descriptor): file descriptor for which the
            log line buffer should be returned

    Returns:
        A list that is used by the LogWriter responsible for handling the
        given file descriptor to store log lines.
    """
    return self._writers[fd.fileno()].line_buffer
def __init__(self, log_catcher):
    """Initialize new subprocess instance.

    Args:
        log_catcher (obj: LogCatcher): a log catcher instance that will be
            handling logs/output created by this subprocess.
    """
    self._env = {}
    self._args = []
    self._log_catcher = log_catcher
@property
def id(self):
    """Identify this subprocess instance

    Return a string that identifies this ManagedSubprocess object
    instance. The plain class name should be good enough for now; we may
    extend it later on.
    """
    return self.__class__.__name__
@property
def stdout(self):
    """Return the stdout file descriptor of this process"""
    assert_msg = '`{}` process must be initialized first'.format(self.id)
    assert self._process is not None, assert_msg
    return self._process.stdout
@property
def stderr(self):
    """Return the stderr file descriptor of this process"""
    assert_msg = '`{}` process must be initialized first'.format(self.id)
    assert self._process is not None, assert_msg
    return self._process.stderr
def start(self):
    """Start a subprocess

    This method makes python actually spawn the subprocess and wait for it
    to finish initializing.
    """
    self._start_subprocess()
    self._register_stdout_stderr_to_logcatcher()
    if not self._wait_for_subprocess_to_finish_init():
        self.stop()
        pytest.exit('Failed to start `{}` process'.format(self.id))
def stop(self):
    """Stop ManagedSubprocess instance and perform a cleanup

    This method makes sure that there are no child processes left after
    the object destruction finalizes. In case a process cannot stop on its
    own, it is forced to using SIGTERM/SIGKILL.
    """
    self._process.poll()
    if self._process.returncode is not None:
        msg_fmt = '`%s` process has already terminated with code `%s`'
        pytest.exit(msg_fmt % (self.id, self._process.returncode))
        return
    log.info('Send SIGINT to `%s` session leader', self.id)
    self._process.send_signal(signal.SIGINT)
    try:
        self._process.wait(self._EXIT_TIMEOUT / 2.0)
    except subprocess.TimeoutExpired:
        log.info('Send SIGTERM to `%s` session leader', self.id)
        self._process.send_signal(signal.SIGTERM)
        try:
            self._process.wait(self._EXIT_TIMEOUT / 2.0)
        except subprocess.TimeoutExpired:
            log.info('Send SIGKILL to all `%s` processes', self.id)
            os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
    log.info('wait() for `%s` session leader to die', self.id)
    self._process.wait()
    log.info('`%s` session leader has terminated', self.id)
def _wait_for_subprocess_to_finish_init(self):
    """Monitor process output for an indication that init is complete

    Using the internal LogCatcher instance, monitor the process output in
    search of self._INIT_COMPLETE_STR in one of the log lines. If found,
    it is assumed that the process has finished init.
    """
    if self._INIT_COMPLETE_STR is None:
        msg_fmt = 'Not waiting for process `%s` to start and assuming that it is already up'
        log.warning(msg_fmt, self.id)
        return True
    deadline = time.time() + self._START_TIMEOUT
    log_buf_pos = 0
    log_buf = self._init_log_buf
    while time.time() < deadline:
        self._process.poll()
        if self._process.returncode is not None:
            msg_fmt = '`%s` process exited prematurely during init'
            log.warning(msg_fmt, self.id)
            return False
        log_buf_end = len(log_buf) - 1
        if log_buf_end >= log_buf_pos:
            for line in log_buf[log_buf_pos:]:
                if self._INIT_COMPLETE_STR in line:
                    log.info('`%s` init process complete', self.id)
                    return True
            log_buf_pos = log_buf_end
        log.debug('Waiting for `%s` to start...', self.id)
        time.sleep(LOG_LINE_SEARCH_INTERVAL)
    msg_fmt = '`%s` failed to start in `%d` seconds'
    log.warning(msg_fmt, self.id, self._START_TIMEOUT)
    return False
@abc.abstractmethod
def _init_log_buf(self):
    """This is just a helper method that inheriting classes override, in
    order to indicate which log buffer should be monitored for
    self._INIT_COMPLETE_STR
    """
    pass
@property
def stdout_line_buffer(self):
    """Return the line buffer where all stdout output of this ManagedSubprocess object resides"""
    return self._log_catcher.line_buffer(self.stdout)
@property
def stderr_line_buffer(self):
    """Return the line buffer where all stderr output of this ManagedSubprocess object resides"""
    return self._log_catcher.line_buffer(self.stderr)
@abc.abstractmethod
def _register_stdout_stderr_to_logcatcher(self):
    """This is just a helper method that inheriting classes override, in
    order to perform customized registration of log outputs/file
    descriptors to the internal LogCatcher instance.
    """
    pass
def _register_stdout_stderr_to_logcatcher(self):
    """Please check ManagedSubprocess's class method description"""
    self._log_catcher.add_fd(self.stdout, log_file='nginx.stdout.log')
    self._log_catcher.add_fd(self.stderr, log_file='nginx.stderr.log')
@property
def _init_log_buf(self):
    """Please check ManagedSubprocess's class method description"""
    return self.stderr_line_buffer
def _set_ar_env_from_val(self, env_name, env_val):
    """Set an environment variable for this AR instance

    Args:
        env_name: name of the environment variable to set
        env_val: value that the new environment variable should have; if
            None - it will be skipped/not set.
    """
    if env_val is None:
        log.info("Not setting env var `%s` as it's None", env_name)
        return
    self._env[env_name] = env_val
def _set_ar_env_from_environment(self, env_name):
    """Set an environment variable for this AR instance based on an
    existing environment variable.

    This function is esp. useful in cases when a certain env. variable
    should be copied from the existing env vars that the pytest runtime
    sees.

    Args:
        env_name: name of the environment variable to set
    """
    env_val = os.environ.get(env_name)
    if env_val is None:
        msg_fmt = '`%s` env var is not set, cannot pass it to subprocess'
        log.warning(msg_fmt, env_name)
        return
    self._env[env_name] = env_val
def _set_ar_cmdline(self):
    """Helper function used to determine Nginx command line variables
    based on how the instance was configured
    """
    openresty_dir = os.environ.get('AR_BIN_DIR')
    assert openresty_dir is not None, "'AR_BIN_DIR' env var is not set!"
    self.binary = os.path.join(openresty_dir, 'nginx', 'sbin', 'nginx')
    config_file_name = 'nginx.{}.conf'.format(self._role)
    self.config_path = os.path.join(openresty_dir, 'nginx', 'conf', config_file_name)
    self._args = [self.binary, '-c', self.config_path, '-g', 'daemon off;']
def _set_ar_env(self, auth_enabled, default_scheme, upstream_mesos, host_ip,
                upstream_marathon, cache_first_poll_delay, cache_poll_period,
                cache_expiration, cache_max_age_soft_limit,
                cache_max_age_hard_limit, cache_backend_request_timeout,
                cache_refresh_lock_timeout):
    """Helper function used to determine Nginx env. variables based on
    how the instance was configured
    """
    self._set_ar_env_from_val('ADMINROUTER_ACTIVATE_AUTH_MODULE', auth_enabled.lower())
    self._set_ar_env_from_val('DEFAULT_SCHEME', default_scheme)
    self._set_ar_env_from_val('UPSTREAM_MESOS', upstream_mesos)
    self._set_ar_env_from_val('HOST_IP', host_ip)
    self._set_ar_env_from_val('UPSTREAM_MARATHON', upstream_marathon)
    self._set_ar_env_from_val('CACHE_FIRST_POLL_DELAY', str(cache_first_poll_delay))
    self._set_ar_env_from_val('CACHE_POLL_PERIOD', str(cache_poll_period))
    self._set_ar_env_from_val('CACHE_EXPIRATION', str(cache_expiration))
    self._set_ar_env_from_val('CACHE_MAX_AGE_SOFT_LIMIT', str(cache_max_age_soft_limit))
    self._set_ar_env_from_val('CACHE_MAX_AGE_HARD_LIMIT', str(cache_max_age_hard_limit))
    self._set_ar_env_from_val('CACHE_BACKEND_REQUEST_TIMEOUT', str(cache_backend_request_timeout))
    self._set_ar_env_from_val('CACHE_REFRESH_LOCK_TIMEOUT', str(cache_refresh_lock_timeout))
    self._set_ar_env_from_environment('AUTH_ERROR_PAGE_DIR_PATH')
def __init__(self,
             auth_enabled='True',
             default_scheme='http://',
             upstream_mesos='http://127.0.0.2:5050',
             host_ip='127.0.0.2',
             upstream_marathon='http://127.0.0.1:8080',
             role='master',
             log_catcher=None,
             cache_first_poll_delay=CACHE_FIRST_POLL_DELAY,
             cache_poll_period=CACHE_POLL_PERIOD,
             cache_expiration=CACHE_EXPIRATION,
             cache_max_age_soft_limit=CACHE_MAX_AGE_SOFT_LIMIT,
             cache_max_age_hard_limit=CACHE_MAX_AGE_HARD_LIMIT,
             cache_backend_request_timeout=CACHE_BACKEND_REQUEST_TIMEOUT,
             cache_refresh_lock_timeout=CACHE_REFRESH_LOCK_TIMEOUT):
    """Initialize new Nginx instance

    Args:
        role ('master'|'agent'): the role of this Nginx instance - either
            AR master or AR agent.
        log_catcher (object: LogCatcher()): a LogCatcher instance that is
            going to be used by the mock to store captured messages.
        auth_enabled (str): translates to `ADMINROUTER_ACTIVATE_AUTH_MODULE`
            env var
        default_scheme (str), upstream_mesos (str), host_ip (str),
        upstream_marathon (str), cache_first_poll_delay (int),
        cache_poll_period (int), cache_backend_request_timeout (int),
        cache_refresh_lock_timeout (int), cache_expiration (int),
        cache_max_age_soft_limit (int), cache_max_age_hard_limit (int):
            translate to `DEFAULT_SCHEME`, `UPSTREAM_MESOS`, `HOST_IP`,
            `UPSTREAM_MARATHON`, `CACHE_FIRST_POLL_DELAY`,
            `CACHE_POLL_PERIOD`, `CACHE_EXPIRATION`,
            `CACHE_BACKEND_REQUEST_TIMEOUT`, `CACHE_REFRESH_LOCK_TIMEOUT`,
            `CACHE_MAX_AGE_SOFT_LIMIT`, `CACHE_MAX_AGE_HARD_LIMIT` env
            vars. Please check the documentation and/or the source code
            and its comments for details.
    """
    assert role in ('master', 'agent'), "wrong value of 'role' param"
    self._role = role
    super().__init__(log_catcher)
    self._set_ar_env(auth_enabled, default_scheme, upstream_mesos, host_ip,
                     upstream_marathon, cache_first_poll_delay,
                     cache_poll_period, cache_expiration,
                     cache_max_age_soft_limit, cache_max_age_hard_limit,
                     cache_backend_request_timeout,
                     cache_refresh_lock_timeout)
    self._set_ar_cmdline()
@property
def env(self):
    """Provide read-only access to the nginx environment"""
    return copy.deepcopy(self._env)
def make_url_from_path(self, path='/exhibitor/some/path'):
    """A helper function used in tests that is meant to abstract the AR
    listen port and provide a single point of change for updating the
    place where all the tests expect AR to listen for requests.
    """
    if self._role == 'master':
        base = 'http://127.0.0.1/'
    else:
        base = 'http://127.0.0.1:61001/'
    if not len(path):
        return base + '/'
    if path[0] != '/':
        return base + path
    return base + path[1:]
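# Illustration of the URL helper above (hypothetical instances; the `ar`
# objects are assumed to be Nginx instances configured as defined earlier):
#
#   master_ar = Nginx(role='master', log_catcher=log_catcher)
#   master_ar.make_url_from_path('/system/health')
#   # -> 'http://127.0.0.1/system/health'
#
#   agent_ar = Nginx(role='agent', log_catcher=log_catcher)
#   agent_ar.make_url_from_path('no/leading/slash')
#   # -> 'http://127.0.0.1:61001/no/leading/slash'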
def _register_stdout_stderr_to_logcatcher(self):
    """Please check ManagedSubprocess's class method description"""
    log_filename = 'vegeta.stdout.log'
    self._log_catcher.add_fd(self.stdout, log_file=log_filename)
    log_filename = 'vegeta.stderr.log'
    self._log_catcher.add_fd(self.stderr, log_file=log_filename)
def __init__(self, log_catcher, target, jwt=None, rate=3):
    """Initialize new Vegeta object

    Only GET for now.

    Args:
        log_catcher (object: LogCatcher()): a LogCatcher instance that is
            going to be used by the mock to store captured messages.
    """
    super().__init__(log_catcher)
    self._cleanup_old_report_file()
    self._setup_targets_file(target, jwt)
    self._args = [self._VEGETA_BIN,
                  'attack',
                  '-output', self._REPORT_FILE,
                  '-targets', self._TARGETS_FILE,
                  '-rate', str(rate),
                  '-duration', '0']
@property
def _init_log_buf(self):
    """Please check ManagedSubprocess's class method description"""
    return self.stdout_line_buffer
def __init__(self,
             ouath_client_id='3yF5TOSzdlI45Q1xspxzeoGBe9fNxm9m',
             ouath_auth_redirector='https://auth.dcos.io',
             secret_key_file_path=os.environ.get('IAM_SHARED_SECRET_FILE_PATH'),
             **base_kwargs):
    """Initialize new AR/Nginx instance

    Args:
        ouath_client_id (str): translates to `OAUTH_CLIENT_ID` env var
        ouath_auth_redirector (str): translates to `OAUTH_AUTH_REDIRECTOR` env var
        secret_key_file_path (str): translates to `SECRET_KEY_FILE_PATH` env var
    """
    NginxBase.__init__(self, **base_kwargs)
    self._set_ar_env_from_val('OAUTH_CLIENT_ID', ouath_client_id)
    self._set_ar_env_from_val('OAUTH_AUTH_REDIRECTOR', ouath_auth_redirector)
    self._set_ar_env_from_val('SECRET_KEY_FILE_PATH', secret_key_file_path)
def _register_endpoints(self, endpoints):
    """Register the given endpoints list with the mock

    This method registers all the endpoints that are going to be managed
    by this Mocker instance.

    Args:
        endpoints (object: [EndpointA, EndpointB,...]): list of endpoints
            that should be registered
    """
    self._endpoints = {}
    for endpoint in endpoints:
        log.info('Registering endpoint `%s`', endpoint.id)
        assert endpoint.id not in self._endpoints
        self._endpoints[endpoint.id] = endpoint
@staticmethod
def _create_common_endpoints():
    """Helper function that takes care of creating/instantiating all the
    endpoints that are common to both EE and Open repositories
    """
    res = []
    res.append(ReflectingUnixSocketEndpoint('/run/dcos/pkgpanda-api.sock'))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=8181))
    res.append(MesosEndpoint(ip='127.0.0.2', port=5050))
    res.append(MesosEndpoint(ip='127.0.0.3', port=5050))
    res.append(MarathonEndpoint(ip='127.0.0.1', port=8080))
    res.append(MarathonEndpoint(ip='127.0.0.2', port=8080))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=7070))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=62080))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.2', port=15001))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.3', port=15002))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=15401,
                                       certfile='/run/dcos/pki/tls/certs/adminrouter.crt',
                                       keyfile='/run/dcos/pki/tls/private/adminrouter.key'))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.4', port=15003))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.2', port=61001))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.3', port=61001))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=16000))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=17000))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18000))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18001))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18002))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=18003))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.15', port=16001))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=16002))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.2', port=80))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.3', port=80))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.4', port=443,
                                       certfile='/run/dcos/pki/tls/certs/adminrouter.crt',
                                       keyfile='/run/dcos/pki/tls/private/adminrouter.key'))
    res.append(ReflectingUnixSocketEndpoint('/run/dcos/dcos-metrics-master.sock'))
    res.append(ReflectingUnixSocketEndpoint('/run/dcos/dcos-log.sock'))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=15055))
    res.append(MesosDnsEndpoint(ip='127.0.0.1', port=8123))
    res.append(ReflectingUnixSocketEndpoint(path='/run/dcos/dcos-metrics-agent.sock'))
    res.append(ReflectingTcpIpEndpoint(ip='127.0.0.1', port=1050))
    res.append(ReflectingUnixSocketEndpoint('/run/dcos/dcos-diagnostics.sock'))
    return res
def __init__(self, extra_endpoints=None):
    """Initialize new MockerBase instance

    Args:
        extra_endpoints (obj: [EndpointA, EndpointB,...]): list of
            endpoints that are unique to the inheriting class/represent
            specific behaviour of a given flavour
    """
    common_endpoints = self._create_common_endpoints()
    # Guard against the default of None, which cannot be concatenated
    if extra_endpoints is None:
        extra_endpoints = []
    endpoints = common_endpoints + extra_endpoints
    self._register_endpoints(endpoints)
def start(self):
    """Start all the endpoints registered with this Mocker instance"""
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for endpoint in self._endpoints.values():
            executor.submit(endpoint.start)
def stop(self):
    """Stop all the endpoints registered with this Mocker instance.

    Usually called right before object destruction.
    """
    with concurrent.futures.ThreadPoolExecutor() as executor:
        for endpoint in self._endpoints.values():
            executor.submit(endpoint.stop)
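# Note on the pattern used by start()/stop() above: leaving the `with`
# block calls executor.shutdown(wait=True), so both methods return only
# after every submitted endpoint.start/endpoint.stop call has finished.
# A minimal standalone illustration (names are illustrative):
import concurrent.futures
import time

def slow_task(i):
    time.sleep(0.1)
    return i

with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = [executor.submit(slow_task, i) for i in range(4)]
# All submitted tasks are guaranteed to be done at this point.
print([f.result() for f in futures])  # [0, 1, 2, 3]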
def reset(self):
    """Reset all the endpoints to their initial state

    Used to make sure that all the tests start with a fresh state/are not
    interfering with each other through the Mocker.
    """
    for endpoint in self._endpoints.values():
        endpoint.reset()
def send_command(self, endpoint_id, func_name, aux_data=None):
    """Reconfigure an endpoint managed by Mocker

    This method reconfigures an endpoint previously started by Mocker.
    The reconfiguration is basically calling the method `func_name`
    belonging to the endpoint `endpoint_id` with the data `aux_data`.

    Args:
        endpoint_id (str): id of the endpoint to reconfigure
        func_name (str): name of the endpoint's function to call
        aux_data (str): auxiliary data to pass to the function

    Returns:
        Depends on the endpoint - it returns anything that the endpoint
        returns.

    Raises:
        KeyError: an endpoint with the given id does not exist
        AttributeError: the endpoint does not define function `func_name`
    """
    endpoint = self._endpoints[endpoint_id]
    f = getattr(endpoint, func_name)
    return f(aux_data)
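# Hypothetical usage of send_command(); the endpoint id follows the
# 'http://<ip>:<port>' convention that TcpIpHttpEndpoint derives from its
# listen address (see TcpIpHttpEndpoint.__init__ further below):
#
#   mocker.send_command(endpoint_id='http://127.0.0.1:8080',
#                       func_name='set_apps_response',
#                       aux_data={'apps': []})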
def __init__(self):
    """Initialize new Mocker instance"""
    extra_endpoints = []
    extra_endpoints.append(IamEndpoint(ip='127.0.0.1', port=8101))
    super().__init__(extra_endpoints)
def _record_request(self):
    """Store all the relevant data of the request into the endpoint context."""
    ctx = self.server.context
    res = {}
    res['method'] = self.command
    res['path'] = self.path
    res['headers'] = self.headers.items()
    res['request_version'] = self.request_version
    if self.headers.get('Content-Length') is not None:
        body_length = int(self.headers.get('Content-Length'))
        res['request_body'] = self.rfile.read(body_length).decode('utf-8')
    else:
        res['request_body'] = None
    res['request_time'] = time.time()
    with ctx.lock:
        ctx.data['requests'].append(res)
    msg_fmt = '[Endpoint `%s`] Request recorded: `%s`'
    log.debug(msg_fmt, ctx.data['endpoint_id'], res)
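# Shape of a single recorded request as assembled above (values are
# illustrative):
#
#   {'method': 'GET',
#    'path': '/v2/apps',
#    'headers': [('Host', '127.0.0.1'), ...],
#    'request_version': 'HTTP/1.1',
#    'request_body': None,
#    'request_time': 1500000000.0}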
def _process_commands(self, blob):
    """Process all the endpoint configuration and execute the things that
    the user requested.

    Please refer to the description of the BaseHTTPRequestHandler class
    for details on the arguments of this method.
    """
    ctx = self.server.context
    if ctx.data['record_requests']:
        self._record_request()
    if ctx.data['encoded_response']:
        msg_fmt = 'Endpoint `%s` sending encoded response `%s` as requested'
        log.debug(msg_fmt, ctx.data['endpoint_id'], ctx.data['encoded_response'])
        self._finalize_request(200, 'text/plain; charset=utf-8',
                               ctx.data['encoded_response'])
        return True
    return super()._process_commands(blob)
def __init__(self, port, ip='', request_handler=RecordingHTTPRequestHandler):
    """Initialize new RecordingTcpIpEndpoint endpoint"""
    super().__init__(request_handler, port, ip)
    self.__context_init()
def record_requests(self, *_):
    """Enable recording of request data by the handler."""
    with self._context.lock:
        self._context.data['record_requests'] = True
def get_recorded_requests(self, *_):
    """Fetch all the recorded request data from the handler"""
    with self._context.lock:
        requests_list_copy = copy.deepcopy(self._context.data['requests'])
    return requests_list_copy
def set_encoded_response(self, aux_data):
    """Make the endpoint respond with the provided data without encoding it

    Arguments:
        aux_data (bytes): encoded bytes array
    """
    with self._context.lock:
        self._context.data['encoded_response'] = aux_data
def erase_recorded_requests(self, *_):
    """Erase all the recorded request data from the handler"""
    with self._context.lock:
        self._context.data['requests'] = list()
def reset(self, *_):
    """Reset the endpoint to the default/initial state."""
    with self._context.lock:
        super().reset()
        self.__context_init()
def __context_init(self):
    """Helper function meant to initialize all the data relevant to this
    particular type of endpoint
    """
    self._context.data['record_requests'] = False
    self._context.data['requests'] = list()
    self._context.data['encoded_response'] = None
def _calculate_response(self, base_path, url_args, body_args=None):
    """Reply with the currently set mock-reply for a given IAM user query.

    Please refer to the description of the BaseHTTPRequestHandler class
    for details on the arguments and return value of this method.

    Raises:
        EndpointException: request URL path is unsupported
    """
    match = self.USERS_QUERY_REGEXP.search(base_path)
    if match:
        return self.__users_permissions_request_handler(match.group(1))
    reflecting_paths = ['/acs/api/v1/reflect/me', '/dcos-metadata/ui-config.json']
    if base_path in reflecting_paths or base_path.startswith('/acs/api/v1/auth/'):
        return self._reflect_request(base_path, url_args, body_args)
    raise EndpointException(
        code=500, content='Path `{}` is not supported yet'.format(base_path))
def __init__(self, port, ip=''):
    """Initialize a new IamEndpoint"""
    super().__init__(port, ip, IamHTTPRequestHandler)
    self.__context_init()
def __context_init(self):
    """Helper function meant to initialize all the data relevant to this
    particular type of endpoint
    """
    self._context.data['allowed'] = True
def _calculate_response(self, base_path, url_args, body_args=None):
    """Reply with a static Mesos state-summary response.

    Please refer to the description of the BaseHTTPRequestHandler class
    for details on the arguments and return value of this method.

    Raises:
        EndpointException: request URL path is unsupported
    """
    if base_path == '/reflect/me':
        return self._reflect_request(base_path, url_args, body_args)
    if base_path != '/master/state-summary':
        msg = 'Path `{}` is not supported yet'.format(base_path)
        blob = msg.encode('utf-8')
        raise EndpointException(code=500, reason=blob)
    ctx = self.server.context
    with ctx.lock:
        blob = self._convert_data_to_blob(ctx.data['endpoint-content'])
    return 200, 'application/json', blob
def reset(self, *_):
    """Reset the endpoint to the default/initial state."""
    with self._context.lock:
        super().reset()
        self.__context_init()
def __context_init(self):
    """Helper function meant to initialize all the data relevant to this
    particular type of endpoint
    """
    self._context.data['endpoint-content'] = copy.deepcopy(INITIAL_STATEJSON)
def enable_extra_agent(self, *_):
    """Change the returned JSON to include an extra agent, one that is by
    default not present in the mocked `/state-summary` output
    """
    with self._context.lock:
        self._context.data['endpoint-content']['slaves'].append(EXTRA_AGENT_DICT)
def set_frameworks_response(self, frameworks):
    """Set the response content for the frameworks section of the
    /state-summary response

    Arguments:
        frameworks (list): a list of framework dicts describing mocked
            frameworks.
    """
    with self._context.lock:
        self._context.data['endpoint-content']['frameworks'] = frameworks
def __init__(self, initial_data=None):
    """Initialize EndpointContext object.

    This data is often manipulated by methods nested across inheritance
    chains, so we need to use RLock() instead of Lock(). The need for the
    lock itself stems from the fact that very often certain keys of the
    context need to be manipulated at the same time/in a synchronized
    manner.

    In some places the code relies on the thread safety/atomicity of some
    of Python's expressions/statements:
    https://docs.python.org/3.6/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
    This is why some of the operations on the EndpointContext dictionary
    are not protected by locks, esp. when it is only about fetching a
    single value from the context dict or storing/appending one there.

    Args:
        initial_data (dict): initial data to initialize the context with
    """
    self.lock = threading.RLock()
    if initial_data is not None:
        self.data = initial_data
    else:
        self.data = {}
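# Why RLock() matters here - a minimal standalone sketch (illustrative,
# not from the source): reset() in a subclass takes the lock and then
# calls the parent's reset(), which acquires the same lock again. A plain
# Lock() would deadlock on the second acquire; an RLock() allows re-entry
# from the same thread.
import threading

class Base:
    def __init__(self):
        self.lock = threading.RLock()

    def reset(self):
        with self.lock:
            pass  # manipulate shared state

class Child(Base):
    def reset(self):
        with self.lock:       # first acquire
            super().reset()   # re-acquires the same RLock - OK

Child().reset()  # completes; with threading.Lock() this would hang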
def __init__(self, endpoint_id):
    """Initialize new Endpoint object

    Args:
        endpoint_id (str): ID of the endpoint that it should identify
            itself with
    """
    initial_data = {'always_bork': False,
                    'endpoint_id': endpoint_id,
                    'always_redirect': False,
                    'redirect_target': None,
                    'always_stall': False,
                    'response_headers': {},
                    'stall_time': 0}
    self._context = EndpointContext(initial_data)
@property
def id(self):
    """Return the ID of the endpoint"""
    return self._context.data['endpoint_id']
def start(self):
    """Start the endpoint's threaded httpd server"""
    log.debug('Starting endpoint `%s`', self.id)
    self._httpd_thread.start()
    self._httpd.startup_done.wait()
def stop(self):
    """Perform cleanup of the endpoint threads

    This method should be used right before destroying the Endpoint
    object. It takes care of stopping the internal httpd server.
    """
    log.debug('Stopping endpoint `%s`', self.id)
    self._httpd.shutdown()
    self._httpd_thread.join()
    self._httpd.server_close()
def reset(self, aux_data=None):
    """Reset the endpoint to the default/good state

    Args:
        aux_data (dict): unused, present only to satisfy the endpoint's
            method interface. See the class description for details.
    """
    del aux_data
    log.debug('Resetting endpoint `%s`', self.id)
    with self._context.lock:
        self._context.data['always_bork'] = False
        self._context.data['always_stall'] = False
        self._context.data['stall_time'] = 0
        self._context.data['always_redirect'] = False
        self._context.data['redirect_target'] = None
def set_response_headers(self, aux_data):
    """Make the endpoint send custom headers in the response

    Args:
        aux_data: a dict with the headers' names/contents as keys/vals
    """
    with self._context.lock:
        self._context.data['response_headers'].update(aux_data)
def always_stall(self, aux_data=None):
    """Make the endpoint always wait a given time before answering the request

    Args:
        aux_data (numeric): time in seconds, as accepted by the
            time.sleep() function
    """
    with self._context.lock:
        self._context.data['always_stall'] = True
        self._context.data['stall_time'] = aux_data
def always_bork(self, aux_data=True):
    """Make the endpoint always respond with an error

    Args:
        aux_data (bool): True or False, depending on whether the endpoint
            should always respond with errors or not.
    """
    self._context.data['always_bork'] = aux_data
def always_redirect(self, aux_data=None):
    """Make the endpoint always respond with a redirect

    Args:
        aux_data (str): target location for the redirect
    """
    with self._context.lock:
        self._context.data['always_redirect'] = True
        self._context.data['redirect_target'] = aux_data
def __init__(self, handler_class, port, ip='', keyfile=None, certfile=None):
    """Initialize new TcpIpHttpEndpoint object

    Args:
        handler_class (obj): a request handler class that will be handling
            requests received by the internal httpd server
        port (int): TCP port that the httpd server will listen on
        ip (str): IP address that the httpd server will listen on; by
            default listen on all addresses
    """
    if certfile is not None and keyfile is not None:
        endpoint_id = 'https://{}:{}'.format(ip, port)
    else:
        endpoint_id = 'http://{}:{}'.format(ip, port)
    super().__init__(endpoint_id)
    self._context.data['listen_ip'] = ip
    self._context.data['listen_port'] = port
    self._context.data['certfile'] = certfile
    self._context.data['keyfile'] = keyfile
    self._handler_class = handler_class
    self.__setup_httpd_thread(ip, port)
def __setup_httpd_thread(self, ip, port):
    """Setup the internal HTTPd server that this endpoint relies on to
    serve requests.
    """
    self._httpd = StatefullHTTPServer(self._context, (ip, port), self._handler_class)
    httpd_thread_name = 'TcpIpHttpdThread-{}'.format(self.id)
    self._httpd_thread = threading.Thread(target=self._httpd.serve_forever,
                                          name=httpd_thread_name)
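# A minimal standalone sketch (illustrative names, plain stdlib classes)
# of the thread-plus-serve_forever pattern these endpoints use:
# serve_forever() blocks in its own thread until shutdown() is called
# from another one.
import http.server
import threading

httpd = http.server.HTTPServer(('127.0.0.1', 0),  # port 0: pick a free port
                               http.server.BaseHTTPRequestHandler)
thread = threading.Thread(target=httpd.serve_forever, name='HttpdThread')
thread.start()
# ... serve requests ...
httpd.shutdown()      # unblocks serve_forever()
thread.join()
httpd.server_close()  # release the listening socket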
def server_bind(self):
    """Override the default server socket bind behaviour to adapt it to
    serving on a Unix socket.

    Please check the documentation of the http.server.HTTPServer class
    for more details.
    """
    socketserver.TCPServer.server_bind(self)
    self.server_name = self.context.data['socket_path']
    self.server_port = 0
def client_address(self):
    """Override the default client_address method to adapt it to serving
    on a Unix socket.

    Without it, logging will break, as a Unix socket has no notion of the
    client's IP address.

    Please check the documentation of the http.server.HTTPServer class
    for more details.
    """
    return (self.context.data['socket_path'], 0)
def __init__(self, handler_class, path, keyfile=None, certfile=None):
    """Initialize new UnixSocketHTTPEndpoint object

    Args:
        handler_class (obj): a request handler class that will be handling
            requests received by the internal httpd server
        path (str): Unix socket path that the internal httpd server will
            listen on
    """
    if certfile is not None and keyfile is not None:
        endpoint_id = 'https://{}'.format(path)
    else:
        endpoint_id = 'http://{}'.format(path)
    super().__init__(endpoint_id)
    self._context.data['socket_path'] = path
    self._context.data['certfile'] = certfile
    self._context.data['keyfile'] = keyfile
    self._handler_class = handler_class
    self.__cleanup_stale_socket(path)
    self.__setup_httpd_thread(path)
def __setup_httpd_thread(self, socket_path):
    """Setup the internal HTTPd server that this endpoint relies on to
    serve requests.

    Args:
        socket_path (str): Unix socket path that the internal httpd server
            will listen on
    """
    self._httpd = UnixSocketStatefulHTTPServer(self._context, socket_path,
                                               self._handler_class)
    httpd_thread_name = 'UnixSocketHttpdThread-{}'.format(self.id)
    self._httpd_thread = threading.Thread(target=self._httpd.serve_forever,
                                          name=httpd_thread_name)
    # 511 == 0o777: make the socket accessible to everybody
    os.chmod(socket_path, 0o777)
def _calculate_response(self, base_path, url_args, body_args=None):
    """Gather all the request data into a single dict and prepare it for
    sending to the client for inspection, irrespective of the request URI.

    Please refer to the description of the BaseHTTPRequestHandler class
    method with the same name for details on the arguments and return
    value of this method.
    """
    return self._reflect_request(base_path, url_args, body_args)
def _calculate_response(self, base_path, url_args, body_args=None):
    """Reply with the currently set mock-reply for a given SRV record query.

    Please refer to the description of the BaseHTTPRequestHandler class
    for details on the arguments and return value of this method.

    Raises:
        EndpointException: request URL path is unsupported
    """
    if base_path == '/v1/reflect/me':
        return self._reflect_request(base_path, url_args, body_args)
    match = self.SRV_QUERY_REGEXP.search(base_path)
    if match:
        return self.__srv_permissions_request_handler(match.group(1))
    raise EndpointException(
        code=500, content='Path `{}` is not supported yet'.format(base_path))
def __srv_permissions_request_handler(self, srvid):
    """Calculate the reply for a given service ID

    Arguments:
        srvid (string): service ID to reply to
    """
    ctx = self.server.context
    if srvid not in ctx.data['services']:
        raise EndpointException(
            code=500, content='Service `{}` is unknown'.format(srvid))
    blob = self._convert_data_to_blob(ctx.data['services'][srvid])
    return 200, 'application/json', blob
def reset(self, *_):
    """Reset the endpoint to the default/initial state."""
    with self._context.lock:
        super().reset()
        self.__context_init()
def set_srv_response(self, srvs):
    """Change the endpoint output so that it responds with a non-default
    MesosDNS srv node.
    """
    with self._context.lock:
        self._context.data['services'] = srvs
def __context_init(self):
    """Helper function meant to initialize all the data relevant to this
    particular type of endpoint
    """
    self._context.data['services'] = copy.deepcopy(INITIAL_SRVDATA)
def _calculate_response(self, base_path, url_args, body_args=None):
    """Reply with an empty list of apps for the '/v2/apps' request

    Please refer to the description of the BaseHTTPRequestHandler class
    for details on the arguments and return value of this method.

    Raises:
        EndpointException: request URL path is unsupported
    """
    if base_path in ['/v2/reflect/me', '/']:
        return self._reflect_request(base_path, url_args, body_args)
    if base_path not in ['/v2/apps', '/v2/leader']:
        msg = 'Path `{}` is not supported yet'.format(base_path)
        blob = msg.encode('utf-8')
        raise EndpointException(code=500, reason=blob)
    ctx = self.server.context
    status = 200
    content_type = 'application/json'
    with ctx.lock:
        if base_path == '/v2/apps':
            blob = self._convert_data_to_blob(ctx.data['endpoint-content'])
        elif base_path == '/v2/leader':
            if ctx.data['leader-content'] is None:
                msg = 'Marathon leader unknown'
                blob = msg.encode('utf-8')
                content_type = 'text/plain; charset=utf-8'
                status = 404
            elif isinstance(ctx.data['leader-content'], str):
                blob = ctx.data['leader-content'].encode('utf-8')
                content_type = 'text/plain; charset=utf-8'
            else:
                blob = self._convert_data_to_blob(ctx.data['leader-content'])
    return status, content_type, blob
def reset(self, *_):
    """Reset the endpoint to the default/initial state."""
    with self._context.lock:
        super().reset()
        self.__context_init()
def set_apps_response(self, apps):
    """Change the response content for the apps endpoint

    Arguments:
        apps (dict): a dict of marathon app dicts describing mocked apps
    """
    with self._context.lock:
        self._context.data['endpoint-content'] = apps
def remove_leader(self, *_):
    """Change the endpoint output so that it simulates the absence of the
    Marathon leader node.
    """
    with self._context.lock:
        self._context.data['leader-content'] = None
def change_leader(self, new_leader):
    """Change the endpoint output so that it responds with a non-default
    Marathon leader node.
    """
    with self._context.lock:
        self._context.data['leader-content'] = {'leader': new_leader}
def break_leader_reply(self, *_):
    """Change the endpoint output so that it responds with a broken reply
    to a query for the Marathon leader node.

    NOTE: We cannot just use `always_bork` here as we need more
    fine-grained control over what we are breaking. E.g. cache tests need
    to have the apps endpoint up and running while testing a broken
    leader endpoint and vice-versa.
    """
    with self._context.lock:
        self._context.data['leader-content'] = 'blah blah buh buh'
def __context_init(self):
    """Helper function meant to initialize all the data relevant to this
    particular type of endpoint
    """
    self._context.data['endpoint-content'] = copy.deepcopy(
        {'apps': [SCHEDULER_APP_ALWAYSTHERE,
                  SCHEDULER_APP_ALWAYSTHERE_NEST1,
                  SCHEDULER_APP_ALWAYSTHERE_NEST2,
                  SCHEDULER_APP_ONLYMARATHON_NEST2]})
    self._context.data['leader-content'] = {'leader': '127.0.0.2:80'}
def send(self, action, install_method, num_errors):
    """Send analytics track data to segmentIO.

    variant: string | open or enterprise
    action: string | preflight, deploy, or postflight
    install_method: string | gui, cli or advanced
    """
    analytics.write_key = '51ybGTeFEFU1xo6u10XMDrr6kATFyRyh'
    config = Config(CONFIG_PATH)
    customer_key = config.hacky_default_get('customer_key', None)
    provider = 'onprem'
    platform = config.hacky_default_get('platform', provider)
    analytics.track(user_id=customer_key,
                    anonymous_id=self.uuid,
                    event='installer',
                    properties={
                        'platform': platform,
                        'provider': provider,
                        'source': 'installer',
                        'variant': os.environ['BOOTSTRAP_VARIANT'],
                        'install_id': self.uuid,
                        'bootstrap_id': os.environ['BOOTSTRAP_ID'],
                        'install_method': install_method,
                        'action_name': action,
                        'errors': num_errors,
                        'customerKey': customer_key,
                    })
    analytics.flush()
def outdated(self, pkg_dir, requirements=None):
    """Has 3 different results:

    `None` - unknown package, or VCS is fixed to a commit
    `False` - package is up-to-date
    `String` - the latest version found
    """
    assert isdir(pkg_dir)
    latest = None
    manifest = self.load_manifest(pkg_dir)
    # A '@' in the directory name marks a package fixed to a specific
    # version/commit; without a VCS URL there is nothing to compare
    if '@' in pkg_dir and '__src_url' not in manifest:
        return None
    if '__src_url' in manifest:
        try:
            vcs = VCSClientFactory.newClient(
                pkg_dir, manifest['__src_url'], silent=True)
        except (AttributeError, exception.PlatformioException):
            return None
        if not vcs.can_be_updated:
            return None
        latest = vcs.get_latest_revision()
    else:
        try:
            latest = self.get_latest_repo_version(
                'id=%d' % manifest['id'] if 'id' in manifest else manifest['name'],
                requirements,
                silent=True)
        except (exception.PlatformioException, ValueError):
            return None
    if not latest:
        return None
    up_to_date = False
    try:
        assert '__src_url' not in manifest
        up_to_date = (semantic_version.Version.coerce(manifest['version'])
                      >= semantic_version.Version.coerce(latest))
    except (AssertionError, ValueError):
        # VCS packages and non-semver versions: fall back to equality
        up_to_date = (latest == manifest['version'])
    return False if up_to_date else latest
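# A short illustration of the version comparison above, using the
# `semantic_version` package (assumed to be the same library the function
# imports): coerce() normalizes loose version strings before comparing.
import semantic_version

assert (semantic_version.Version.coerce('1.10')
        >= semantic_version.Version.coerce('1.9.0'))
# Non-numeric versions (e.g. VCS revisions) raise ValueError, which the
# function above catches and then falls back to plain string equality.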
def __repr__(self):
    """Return the function's docstring."""
    return self.func.__doc__
def __get__(self, obj, objtype):
    """Support instance methods."""
    fn = functools.partial(self.__call__, obj)
    fn.reset = self._reset
    return fn
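# The two methods above are the descriptor half of a classic memoizing
# decorator. A minimal self-contained sketch of the whole pattern
# (illustrative; the original class may differ in details such as the
# `reset` hook):
import functools

class memoize:
    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        if args not in self.cache:
            self.cache[args] = self.func(*args)
        return self.cache[args]

    def __repr__(self):
        return self.func.__doc__

    def __get__(self, obj, objtype):
        # Bind the first argument so decorated instance methods work too.
        return functools.partial(self.__call__, obj)

@memoize
def slow_add(a, b):
    """Add two numbers, caching results."""
    return a + b

assert slow_add(1, 2) == 3  # computed
assert slow_add(1, 2) == 3  # served from the cache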
def delete(self, keys=None):
    """If keys is None, delete expired items"""
    if not keys:
        keys = []
    if not isinstance(keys, list):
        keys = [keys]
    paths_for_delete = [self.get_cache_path(k) for k in keys]
    found = False
    newlines = []
    with open(self._db_path) as fp:
        for line in fp.readlines():
            if '=' not in line:
                continue
            line = line.strip()
            expire, path = line.split('=')
            # Keep entries that are still valid and not scheduled for deletion
            if (time() < int(expire) and isfile(path)
                    and path not in paths_for_delete):
                newlines.append(line)
                continue
            found = True
            if isfile(path):
                try:
                    remove(path)
                    if not listdir(dirname(path)):
                        util.rmtree_(dirname(path))
                except OSError:
                    pass
    if found and self._lock_dbindex():
        with open(self._db_path, 'w') as fp:
            fp.write('\n'.join(newlines) + '\n')
        self._unlock_dbindex()
    return True
def controlLed(self, turnOn):
    """This method is exported as an RPC and can be called by connected clients"""
    if turnOn:
        payload = '1'
    else:
        payload = '0'
    print('Serial TX: {0}'.format(payload))
    self.transport.write(payload)