Please provide a description of the function:def preformat_call(self, api_call): # Remove possible starting slashes or trailing question marks in call. api_call_formatted = api_call.lstrip('/') api_call_formatted = api_call_formatted.rstrip('?') if api_call != api_call_formatted: # Show difference logger.debug('api_call post strip =\n%s' % api_call_formatted) return api_call_formatted
[ " Return properly formatted QualysGuard API call.\n\n " ]
Please provide a description of the function:def format_call(self, api_version, api_call): # Remove possible starting slashes or trailing question marks in call. api_call = api_call.lstrip('/') api_call = api_call.rstrip('?') logger.debug('api_call post strip =\n%s' % api_call) # Make sure call always ends in slash for API v2 calls. if (api_version == 2 and api_call[-1] != '/'): # Add slash. logger.debug('Adding "/" to api_call.') api_call += '/' if api_call in self.api_methods_with_trailing_slash[api_version]: # Add slash. logger.debug('Adding "/" to api_call.') api_call += '/' return api_call
[ " Return properly formatted QualysGuard API call according to api_version etiquette.\n\n " ]
Please provide a description of the function:def format_payload(self, api_version, data): # Check if payload is for API v1 or API v2. if (api_version in (1, 2)): # Check if string type. if type(data) == str: # Convert to dictionary. logger.debug('Converting string to dict:\n%s' % data) # Remove possible starting question mark & ending ampersands. data = data.lstrip('?') data = data.rstrip('&') # Convert to dictionary. data = parse_qs(data) logger.debug('Converted:\n%s' % str(data)) elif api_version in ('am', 'was', 'am2'): if type(data) == etree._Element: logger.debug('Converting lxml.builder.E to string') data = etree.tostring(data) logger.debug('Converted:\n%s' % data) return data
[ " Return appropriate QualysGuard API call.\n\n " ]
Please provide a description of the function:def request(self, api_call, data=None, api_version=None, http_method=None, concurrent_scans_retries=0, concurrent_scans_retry_delay=0): logger.debug('api_call =\n%s' % api_call) logger.debug('api_version =\n%s' % api_version) logger.debug('data %s =\n %s' % (type(data), str(data))) logger.debug('http_method =\n%s' % http_method) logger.debug('concurrent_scans_retries =\n%s' % str(concurrent_scans_retries)) logger.debug('concurrent_scans_retry_delay =\n%s' % str(concurrent_scans_retry_delay)) concurrent_scans_retries = int(concurrent_scans_retries) concurrent_scans_retry_delay = int(concurrent_scans_retry_delay) # # Determine API version. # Preformat call. api_call = self.preformat_call(api_call) if api_version: # API version specified, format API version inputted. api_version = self.format_api_version(api_version) else: # API version not specified, determine automatically. api_version = self.which_api_version(api_call) # # Set up base url. url = self.url_api_version(api_version) # # Set up headers. headers = {"X-Requested-With": "Parag Baxi QualysAPI (python) v%s" % (qualysapi.version.__version__,)} logger.debug('headers =\n%s' % (str(headers))) # Portal API takes in XML text, requiring custom header. if api_version in ('am', 'was', 'am2'): headers['Content-type'] = 'text/xml' # # Set up http request method, if not specified. if not http_method: http_method = self.format_http_method(api_version, api_call, data) logger.debug('http_method =\n%s' % http_method) # # Format API call. api_call = self.format_call(api_version, api_call) logger.debug('api_call =\n%s' % (api_call)) # Append api_call to url. url += api_call # # Format data, if applicable. if data is not None: data = self.format_payload(api_version, data) # Make request at least once (more if concurrent_retry is enabled). retries = 0 # # set a warning threshold for the rate limit rate_warn_threshold = 10 while retries <= concurrent_scans_retries: # Make request. logger.debug('url =\n%s' % (str(url))) logger.debug('data =\n%s' % (str(data))) logger.debug('headers =\n%s' % (str(headers))) if http_method == 'get': # GET logger.debug('GET request.') request = self.session.get(url, params=data, auth=self.auth, headers=headers, proxies=self.proxies) else: # POST logger.debug('POST request.') # Make POST request. request = self.session.post(url, data=data, auth=self.auth, headers=headers, proxies=self.proxies) logger.debug('response headers =\n%s' % (str(request.headers))) # # Remember how many calls the user has left to make against api_call. try: self.rate_limit_remaining[api_call] = int(request.headers['x-ratelimit-remaining']) logger.debug('rate limit for api_call, %s = %s' % (api_call, self.rate_limit_remaining[api_call])) if (self.rate_limit_remaining[api_call] > rate_warn_threshold): logger.debug('rate limit for api_call, %s = %s' % (api_call, self.rate_limit_remaining[api_call])) elif (self.rate_limit_remaining[api_call] <= rate_warn_threshold) and ( self.rate_limit_remaining[api_call] > 0): logger.warning( 'Rate limit is about to be reached (remaining api calls = %s)' % self.rate_limit_remaining[ api_call]) elif self.rate_limit_remaining[api_call] <= 0: logger.critical('ATTENTION! RATE LIMIT HAS BEEN REACHED (remaining api calls = %s)!' % self.rate_limit_remaining[api_call]) except KeyError as e: # Likely a bad api_call. logger.debug(e) pass except TypeError as e: # Likely an asset search api_call. logger.debug(e) pass # Response received. response = request.text logger.debug('response text =\n%s' % (response)) # Keep track of how many retries. retries += 1 # Check for concurrent scans limit. if not ('<responseCode>INVALID_REQUEST</responseCode>' in response and '<errorMessage>You have reached the maximum number of concurrent running scans' in response and '<errorResolution>Please wait until your previous scans have completed</errorResolution>' in response): # Did not hit concurrent scan limit. break else: # Hit concurrent scan limit. logger.critical(response) # If trying again, delay next try by concurrent_scans_retry_delay. if retries <= concurrent_scans_retries: logger.warning('Waiting %d seconds until next try.' % concurrent_scans_retry_delay) time.sleep(concurrent_scans_retry_delay) # Inform user of how many retries. logger.critical('Retry #%d' % retries) else: # Ran out of retries. Let user know. print('Alert! Ran out of concurrent_scans_retries!') logger.critical('Alert! Ran out of concurrent_scans_retries!') return False # Check to see if there was an error. try: request.raise_for_status() except requests.HTTPError as e: # Error print('Error! Received a 4XX client error or 5XX server error response.') print('Content = \n', response) logger.error('Content = \n%s' % response) print('Headers = \n', request.headers) logger.error('Headers = \n%s' % str(request.headers)) request.raise_for_status() if '<RETURN status="FAILED" number="2007">' in response: print('Error! Your IP address is not in the list of secure IPs. Manager must include this IP (QualysGuard VM > Users > Security).') print('Content = \n', response) logger.error('Content = \n%s' % response) print('Headers = \n', request.headers) logger.error('Headers = \n%s' % str(request.headers)) return False return response
[ " Return QualysGuard API response.\n\n " ]
Please provide a description of the function:def travis_after(ini, envlist): # after-all disabled for pull requests if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false': return if not after_config_matches(ini, envlist): return # This is not the one that needs to wait github_token = os.environ.get('GITHUB_TOKEN') if not github_token: print('No GitHub token given.', file=sys.stderr) sys.exit(NO_GITHUB_TOKEN) api_url = os.environ.get('TRAVIS_API_URL', 'https://api.travis-ci.org') build_id = os.environ.get('TRAVIS_BUILD_ID') job_number = os.environ.get('TRAVIS_JOB_NUMBER') try: polling_interval = int(os.environ.get('TRAVIS_POLLING_INTERVAL', 5)) except ValueError: print('Invalid polling interval given: {0}'.format( repr(os.environ.get('TRAVIS_POLLING_INTERVAL'))), file=sys.stderr) sys.exit(INVALID_POLLING_INTERVAL) if not all([api_url, build_id, job_number]): print('Required Travis environment not given.', file=sys.stderr) sys.exit(INCOMPLETE_TRAVIS_ENVIRONMENT) # This may raise an Exception, and it should be printed job_statuses = get_job_statuses( github_token, api_url, build_id, polling_interval, job_number) if not all(job_statuses): print('Some jobs were not successful.') sys.exit(JOBS_FAILED) print('All required jobs were successful.')
[ "Wait for all jobs to finish, then exit successfully." ]
Please provide a description of the function:def after_config_matches(ini, envlist): section = ini.sections.get('travis:after', {}) if not section: return False # Never wait if it's not configured if 'envlist' in section or 'toxenv' in section: if 'toxenv' in section: print('The "toxenv" key of the [travis:after] section is ' 'deprecated in favor of the "envlist" key.', file=sys.stderr) toxenv = section.get('toxenv') required = set(split_env(section.get('envlist', toxenv) or '')) actual = set(envlist) if required - actual: return False # Translate travis requirements to env requirements env_requirements = [ (TRAVIS_FACTORS[factor], value) for factor, value in parse_dict(section.get('travis', '')).items() if factor in TRAVIS_FACTORS ] + [ (name, value) for name, value in parse_dict(section.get('env', '')).items() ] return all([ os.environ.get(name) == value for name, value in env_requirements ])
[ "Determine if this job should wait for the others." ]
Please provide a description of the function:def get_job_statuses(github_token, api_url, build_id, polling_interval, job_number): auth = get_json('{api_url}/auth/github'.format(api_url=api_url), data={'github_token': github_token})['access_token'] while True: build = get_json('{api_url}/builds/{build_id}'.format( api_url=api_url, build_id=build_id), auth=auth) jobs = [job for job in build['jobs'] if job['number'] != job_number and not job['allow_failure']] # Ignore allowed failures if all(job['finished_at'] for job in jobs): break # All the jobs have completed elif any(job['state'] != 'passed' for job in jobs if job['finished_at']): break # Some required job that finished did not pass print('Waiting for jobs to complete: {job_numbers}'.format( job_numbers=[job['number'] for job in jobs if not job['finished_at']])) time.sleep(polling_interval) return [job['state'] == 'passed' for job in jobs]
[ "Wait for all the travis jobs to complete.\n\n Once the other jobs are complete, return a list of booleans,\n indicating whether or not the job was successful. Ignore jobs\n marked \"allow_failure\".\n " ]
Please provide a description of the function:def get_json(url, auth=None, data=None): headers = { 'Accept': 'application/vnd.travis-ci.2+json', 'User-Agent': 'Travis/Tox-Travis-1.0a', # User-Agent must start with "Travis/" in order to work } if auth: headers['Authorization'] = 'token {auth}'.format(auth=auth) params = {} if data: headers['Content-Type'] = 'application/json' params['data'] = json.dumps(data).encode('utf-8') request = urllib2.Request(url, headers=headers, **params) response = urllib2.urlopen(request).read() return json.loads(response.decode('utf-8'))
[ "Make a GET request, and return the response as parsed JSON." ]
Please provide a description of the function:def detect_envlist(ini): # Find the envs that tox knows about declared_envs = get_declared_envs(ini) # Find all the envs for all the desired factors given desired_factors = get_desired_factors(ini) # Reduce desired factors desired_envs = ['-'.join(env) for env in product(*desired_factors)] # Find matching envs return match_envs(declared_envs, desired_envs, passthru=len(desired_factors) == 1)
[ "Default envlist automatically based on the Travis environment." ]
Please provide a description of the function:def autogen_envconfigs(config, envs): prefix = 'tox' if config.toxinipath.basename == 'setup.cfg' else None reader = tox.config.SectionReader("tox", config._cfg, prefix=prefix) distshare_default = "{homedir}/.tox/distshare" reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir) reader.addsubstitutions(toxworkdir=config.toxworkdir) config.distdir = reader.getpath("distdir", "{toxworkdir}/dist") reader.addsubstitutions(distdir=config.distdir) config.distshare = reader.getpath("distshare", distshare_default) reader.addsubstitutions(distshare=config.distshare) try: make_envconfig = tox.config.ParseIni.make_envconfig # tox 3.4.0+ except AttributeError: make_envconfig = tox.config.parseini.make_envconfig # Dig past the unbound method in Python 2 make_envconfig = getattr(make_envconfig, '__func__', make_envconfig) # Create the undeclared envs for env in envs: section = tox.config.testenvprefix + env config.envconfigs[env] = make_envconfig( config, env, section, reader._subs, config)
[ "Make the envconfigs for undeclared envs.\n\n This is a stripped-down version of parseini.__init__ made for making\n an envconfig.\n " ]
Please provide a description of the function:def get_declared_envs(ini): tox_section_name = 'tox:tox' if ini.path.endswith('setup.cfg') else 'tox' tox_section = ini.sections.get(tox_section_name, {}) envlist = split_env(tox_section.get('envlist', [])) # Add additional envs that are declared as sections in the ini section_envs = [ section[8:] for section in sorted(ini.sections, key=ini.lineof) if section.startswith('testenv:') ] return envlist + [env for env in section_envs if env not in envlist]
[ "Get the full list of envs from the tox ini.\n\n This notably also includes envs that aren't in the envlist,\n but are declared by having their own testenv:envname section.\n\n The envs are expected in a particular order. First the ones\n declared in the envlist, then the other testenvs in order.\n " ]
Please provide a description of the function:def get_version_info(): overrides = os.environ.get('__TOX_TRAVIS_SYS_VERSION') if overrides: version, major, minor = overrides.split(',')[:3] major, minor = int(major), int(minor) else: version, (major, minor) = sys.version, sys.version_info[:2] return version, major, minor
[ "Get version info from the sys module.\n\n Override from environment for testing.\n " ]
Please provide a description of the function:def guess_python_env(): version, major, minor = get_version_info() if 'PyPy' in version: return 'pypy3' if major == 3 else 'pypy' return 'py{major}{minor}'.format(major=major, minor=minor)
[ "Guess the default python env to use." ]
Please provide a description of the function:def get_default_envlist(version): if version in ['pypy', 'pypy3']: return version # Assume single digit major and minor versions match = re.match(r'^(\d)\.(\d)(?:\.\d+)?$', version or '') if match: major, minor = match.groups() return 'py{major}{minor}'.format(major=major, minor=minor) return guess_python_env()
[ "Parse a default tox env based on the version.\n\n The version comes from the ``TRAVIS_PYTHON_VERSION`` environment\n variable. If that isn't set or is invalid, then use\n sys.version_info to come up with a reasonable default.\n " ]
Please provide a description of the function:def get_desired_factors(ini): # Find configuration based on known travis factors travis_section = ini.sections.get('travis', {}) found_factors = [ (factor, parse_dict(travis_section[factor])) for factor in TRAVIS_FACTORS if factor in travis_section ] # Backward compatibility with the old tox:travis section if 'tox:travis' in ini.sections: print('The [tox:travis] section is deprecated in favor of' ' the "python" key of the [travis] section.', file=sys.stderr) found_factors.append(('python', ini.sections['tox:travis'])) # Inject any needed autoenv version = os.environ.get('TRAVIS_PYTHON_VERSION') if version: default_envlist = get_default_envlist(version) if not any(factor == 'python' for factor, _ in found_factors): found_factors.insert(0, ('python', {version: default_envlist})) python_factors = [(factor, mapping) for factor, mapping in found_factors if version and factor == 'python'] for _, mapping in python_factors: mapping.setdefault(version, default_envlist) # Convert known travis factors to env factors, # and combine with declared env factors. env_factors = [ (TRAVIS_FACTORS[factor], mapping) for factor, mapping in found_factors ] + [ (name, parse_dict(value)) for name, value in ini.sections.get('travis:env', {}).items() ] # Choose the correct envlists based on the factor values return [ split_env(mapping[os.environ[name]]) for name, mapping in env_factors if name in os.environ and os.environ[name] in mapping ]
[ "Get the list of desired envs per declared factor.\n\n Look at all the accepted configuration locations, and give a list\n of envlists, one for each Travis factor found.\n\n Look in the ``[travis]`` section for the known Travis factors,\n which are backed by environment variable checking behind the\n scenes, but provide a cleaner interface.\n\n Also look for the ``[tox:travis]`` section, which is deprecated,\n and treat it as an additional ``python`` key from the ``[travis]``\n section.\n\n Finally, look for factors based directly on environment variables,\n listed in the ``[travis:env]`` section. Configuration found in the\n ``[travis]`` and ``[tox:travis]`` sections are converted to this\n form under the hood, and are considered in the same way.\n\n Special consideration is given to the ``python`` factor. If this\n factor is set in the environment, then an appropriate configuration\n will be provided automatically if no manual configuration is\n provided.\n\n To allow for the most flexible processing, the envlists provided\n by each factor are not combined after they are selected, but\n instead returned as a list of envlists, and expected to be\n combined as and when appropriate by the caller. This allows for\n special handling based on the number of factors that were found\n to apply to this environment.\n " ]
Please provide a description of the function:def match_envs(declared_envs, desired_envs, passthru): matched = [ declared for declared in declared_envs if any(env_matches(declared, desired) for desired in desired_envs) ] return desired_envs if not matched and passthru else matched
[ "Determine the envs that match the desired_envs.\n\n If ``passthru` is True, and none of the declared envs match the\n desired envs, then the desired envs will be used verbatim.\n\n :param declared_envs: The envs that are declared in the tox config.\n :param desired_envs: The envs desired from the tox-travis config.\n :param bool passthru: Whether to used the ``desired_envs`` as a\n fallback if no declared envs match.\n " ]
Please provide a description of the function:def env_matches(declared, desired): desired_factors = desired.split('-') declared_factors = declared.split('-') return all(factor in declared_factors for factor in desired_factors)
[ "Determine if a declared env matches a desired env.\n\n Rather than simply using the name of the env verbatim, take a\n closer look to see if all the desired factors are fulfilled. If\n the desired factors are fulfilled, but there are other factors,\n it should still match the env.\n " ]
Please provide a description of the function:def override_ignore_outcome(ini): travis_reader = tox.config.SectionReader("travis", ini) return travis_reader.getbool('unignore_outcomes', False)
[ "Decide whether to override ignore_outcomes." ]
Please provide a description of the function:def tox_addoption(parser): parser.add_argument( '--travis-after', dest='travis_after', action='store_true', help='Exit successfully after all Travis jobs complete successfully.') if 'TRAVIS' in os.environ: pypy_version_monkeypatch() subcommand_test_monkeypatch(tox_subcommand_test_post)
[ "Add arguments and needed monkeypatches." ]
Please provide a description of the function:def tox_configure(config): if 'TRAVIS' not in os.environ: return ini = config._cfg # envlist if 'TOXENV' not in os.environ and not config.option.env: envlist = detect_envlist(ini) undeclared = set(envlist) - set(config.envconfigs) if undeclared: print('Matching undeclared envs is deprecated. Be sure all the ' 'envs that Tox should run are declared in the tox config.', file=sys.stderr) autogen_envconfigs(config, undeclared) config.envlist = envlist # Override ignore_outcomes if override_ignore_outcome(ini): for envconfig in config.envconfigs.values(): envconfig.ignore_outcome = False # after if config.option.travis_after: print('The after all feature has been deprecated. Check out Travis\' ' 'build stages, which are a better solution. ' 'See https://tox-travis.readthedocs.io/en/stable/after.html ' 'for more details.', file=sys.stderr)
[ "Check for the presence of the added options." ]
Please provide a description of the function:def parse_dict(value): lines = [line.strip() for line in value.strip().splitlines()] pairs = [line.split(':', 1) for line in lines if line] return dict((k.strip(), v.strip()) for k, v in pairs)
[ "Parse a dict value from the tox config.\n\n .. code-block: ini\n\n [travis]\n python =\n 2.7: py27, docs\n 3.5: py{35,36}\n\n With this config, the value of ``python`` would be parsed\n by this function, and would return::\n\n {\n '2.7': 'py27, docs',\n '3.5': 'py{35,36}',\n }\n\n " ]
Please provide a description of the function:def pypy_version_monkeypatch(): # Travis virtualenvs do not provide `pypy3`, which tox tries to execute. # This doesn't affect Travis python version `pypy3`, as the pyenv pypy3 # is in the PATH. # https://github.com/travis-ci/travis-ci/issues/6304 # Force use of the virtualenv `python`. version = os.environ.get('TRAVIS_PYTHON_VERSION') if version and default_factors and version.startswith('pypy3.3-'): default_factors['pypy3'] = 'python'
[ "Patch Tox to work with non-default PyPy 3 versions." ]
Please provide a description of the function:def switch_to_output(self, value=False, **kwargs): self.direction = digitalio.Direction.OUTPUT self.value = value
[ "Switch the pin state to a digital output with the provided starting\n value (True/False for high or low, default is False/low).\n " ]
Please provide a description of the function:def switch_to_input(self, pull=None, **kwargs): self.direction = digitalio.Direction.INPUT self.pull = pull
[ "Switch the pin state to a digital input with the provided starting\n pull-up resistor state (optional, no pull-up by default). Note that\n pull-down resistors are NOT supported!\n " ]
Please provide a description of the function:def direction(self): if _get_bit(self._mcp.iodir, self._pin): return digitalio.Direction.INPUT return digitalio.Direction.OUTPUT
[ "The direction of the pin, either True for an input or\n False for an output.\n " ]
Please provide a description of the function:def pull(self): if _get_bit(self._mcp.gppu, self._pin): return digitalio.Pull.UP return None
[ "Enable or disable internal pull-up resistors for this pin. A\n value of digitalio.Pull.UP will enable a pull-up resistor, and None will\n disable it. Pull-down resistors are NOT supported!\n " ]
Please provide a description of the function:def get_consumed_read_units_percent( table_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ConsumedReadCapacityUnits') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 consumed_read_units = ( float(metrics[0]['Sum']) / float(lookback_seconds)) else: consumed_read_units = 0 try: table_read_units = dynamodb.get_provisioned_table_read_units( table_name) consumed_read_units_percent = ( float(consumed_read_units) / float(table_read_units) * 100) except JSONResponseError: raise logger.info('{0} - Consumed read units: {1:.2f}%'.format( table_name, consumed_read_units_percent)) return consumed_read_units_percent
[ " Returns the number of consumed read units in percent\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: float -- Number of consumed reads as a\n percentage of provisioned reads\n " ]
Please provide a description of the function:def get_throttled_read_event_count( table_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ReadThrottleEvents') except BotoServerError: raise if metrics: throttled_read_events = int(metrics[0]['Sum']) else: throttled_read_events = 0 logger.info('{0} - Read throttle count: {1:d}'.format( table_name, throttled_read_events)) return throttled_read_events
[ " Returns the number of throttled read events during a given time frame\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: int -- Number of throttled read events during the time period\n " ]
Please provide a description of the function:def get_throttled_by_provisioned_read_event_percent( table_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ReadThrottleEvents') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 throttled_read_events = ( float(metrics[0]['Sum']) / float(lookback_seconds)) else: throttled_read_events = 0 try: table_read_units = dynamodb.get_provisioned_table_read_units( table_name) throttled_by_provisioned_read_percent = ( float(throttled_read_events) / float(table_read_units) * 100) except JSONResponseError: raise logger.info('{0} - Throttled read percent by provision: {1:.2f}%'.format( table_name, throttled_by_provisioned_read_percent)) return throttled_by_provisioned_read_percent
[ " Returns the number of throttled read events in percent\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: float -- Percent of throttled read events by provisioning\n " ]
Please provide a description of the function:def get_throttled_by_consumed_read_percent( table_name, lookback_window_start=15, lookback_period=5): try: metrics1 = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ConsumedReadCapacityUnits') metrics2 = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ReadThrottleEvents') except BotoServerError: raise if metrics1 and metrics2: lookback_seconds = lookback_period * 60 throttled_by_consumed_read_percent = ( ( (float(metrics2[0]['Sum']) / float(lookback_seconds)) / (float(metrics1[0]['Sum']) / float(lookback_seconds)) ) * 100) else: throttled_by_consumed_read_percent = 0 logger.info('{0} - Throttled read percent by consumption: {1:.2f}%'.format( table_name, throttled_by_consumed_read_percent)) return throttled_by_consumed_read_percent
[ " Returns the number of throttled read events in percent of consumption\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: float -- Percent of throttled read events by consumption\n " ]
Please provide a description of the function:def get_consumed_write_units_percent( table_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ConsumedWriteCapacityUnits') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 consumed_write_units = ( float(metrics[0]['Sum']) / float(lookback_seconds)) else: consumed_write_units = 0 try: table_write_units = dynamodb.get_provisioned_table_write_units( table_name) consumed_write_units_percent = ( float(consumed_write_units) / float(table_write_units) * 100) except JSONResponseError: raise logger.info('{0} - Consumed write units: {1:.2f}%'.format( table_name, consumed_write_units_percent)) return consumed_write_units_percent
[ " Returns the number of consumed write units in percent\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: float -- Number of consumed writes as a\n percentage of provisioned writes\n " ]
Please provide a description of the function:def get_throttled_write_event_count( table_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'WriteThrottleEvents') except BotoServerError: raise if metrics: throttled_write_count = int(metrics[0]['Sum']) else: throttled_write_count = 0 logger.info('{0} - Write throttle count: {1:d}'.format( table_name, throttled_write_count)) return throttled_write_count
[ " Returns the number of throttled write events during a given time frame\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: int -- Number of throttled write events during the time period\n " ]
Please provide a description of the function:def get_throttled_by_provisioned_write_event_percent( table_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'WriteThrottleEvents') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 throttled_write_events = float(metrics[0]['Sum']) / float( lookback_seconds) else: throttled_write_events = 0 try: table_write_units = dynamodb.get_provisioned_table_write_units( table_name) throttled_by_provisioned_write_percent = ( float(throttled_write_events) / float(table_write_units) * 100) except JSONResponseError: raise logger.info('{0} - Throttled write percent by provision: {1:.2f}%'.format( table_name, throttled_by_provisioned_write_percent)) return throttled_by_provisioned_write_percent
[ " Returns the number of throttled write events during a given time frame\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: float -- Percent of throttled write events by provisioning\n " ]
Please provide a description of the function:def get_throttled_by_consumed_write_percent( table_name, lookback_window_start=15, lookback_period=5): try: metrics1 = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'ConsumedWriteCapacityUnits') metrics2 = __get_aws_metric( table_name, lookback_window_start, lookback_period, 'WriteThrottleEvents') except BotoServerError: raise if metrics1 and metrics2: lookback_seconds = lookback_period * 60 throttled_by_consumed_write_percent = ( ( (float(metrics2[0]['Sum']) / float(lookback_seconds)) / (float(metrics1[0]['Sum']) / float(lookback_seconds)) ) * 100) else: throttled_by_consumed_write_percent = 0 logger.info( '{0} - Throttled write percent by consumption: {1:.2f}%'.format( table_name, throttled_by_consumed_write_percent)) return throttled_by_consumed_write_percent
[ " Returns the number of throttled write events in percent of consumption\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: float -- Percent of throttled write events by consumption\n " ]
Please provide a description of the function:def __get_aws_metric(table_name, lookback_window_start, lookback_period, metric_name): try: now = datetime.utcnow() start_time = now - timedelta(minutes=lookback_window_start) end_time = now - timedelta( minutes=lookback_window_start - lookback_period) return cloudwatch_connection.get_metric_statistics( period=lookback_period * 60, start_time=start_time, end_time=end_time, metric_name=metric_name, namespace='AWS/DynamoDB', statistics=['Sum'], dimensions={'TableName': table_name}, unit='Count') except BotoServerError as error: logger.error( 'Unknown boto error. Status: "{0}". ' 'Reason: "{1}". Message: {2}'.format( error.status, error.reason, error.message)) raise
[ " Returns a metric list from the AWS CloudWatch service, may return\n None if no metric exists\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type lookback_window_start: int\n :param lookback_window_start: How many minutes to look at\n :type lookback_period: int\n :type lookback_period: Length of the lookback period in minutes\n :type metric_name: str\n :param metric_name: Name of the metric to retrieve from CloudWatch\n :returns: list -- A list of time series data for the given metric, may\n be None if there was no data\n " ]
Please provide a description of the function:def ensure_provisioning( table_name, table_key, gsi_name, gsi_key, num_consec_read_checks, num_consec_write_checks): if get_global_option('circuit_breaker_url') or get_gsi_option( table_key, gsi_key, 'circuit_breaker_url'): if circuit_breaker.is_open(table_name, table_key, gsi_name, gsi_key): logger.warning('Circuit breaker is OPEN!') return (0, 0) logger.info( '{0} - Will ensure provisioning for global secondary index {1}'.format( table_name, gsi_name)) # Handle throughput alarm checks __ensure_provisioning_alarm(table_name, table_key, gsi_name, gsi_key) try: read_update_needed, updated_read_units, num_consec_read_checks = \ __ensure_provisioning_reads( table_name, table_key, gsi_name, gsi_key, num_consec_read_checks) write_update_needed, updated_write_units, num_consec_write_checks = \ __ensure_provisioning_writes( table_name, table_key, gsi_name, gsi_key, num_consec_write_checks) if read_update_needed: num_consec_read_checks = 0 if write_update_needed: num_consec_write_checks = 0 # Handle throughput updates if read_update_needed or write_update_needed: logger.info( '{0} - GSI: {1} - Changing provisioning to {2:d} ' 'read units and {3:d} write units'.format( table_name, gsi_name, int(updated_read_units), int(updated_write_units))) __update_throughput( table_name, table_key, gsi_name, gsi_key, updated_read_units, updated_write_units) else: logger.info( '{0} - GSI: {1} - No need to change provisioning'.format( table_name, gsi_name)) except JSONResponseError: raise except BotoServerError: raise return num_consec_read_checks, num_consec_write_checks
[ " Ensure that provisioning is correct for Global Secondary Indexes\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type table_key: str\n :param table_key: Table configuration option key name\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type gsi_key: str\n :param gsi_key: GSI configuration option key name\n :type num_consec_read_checks: int\n :param num_consec_read_checks: How many consecutive checks have we had\n :type num_consec_write_checks: int\n :param num_consec_write_checks: How many consecutive checks have we had\n :returns: (int, int) -- num_consec_read_checks, num_consec_write_checks\n " ]
Please provide a description of the function:def __ensure_provisioning_reads( table_name, table_key, gsi_name, gsi_key, num_consec_read_checks): if not get_gsi_option(table_key, gsi_key, 'enable_reads_autoscaling'): logger.info( '{0} - GSI: {1} - ' 'Autoscaling of reads has been disabled'.format( table_name, gsi_name)) return False, dynamodb.get_provisioned_gsi_read_units( table_name, gsi_name), 0 update_needed = False try: lookback_window_start = get_gsi_option( table_key, gsi_key, 'lookback_window_start') lookback_period = get_gsi_option( table_key, gsi_key, 'lookback_period') current_read_units = dynamodb.get_provisioned_gsi_read_units( table_name, gsi_name) consumed_read_units_percent = \ gsi_stats.get_consumed_read_units_percent( table_name, gsi_name, lookback_window_start, lookback_period) throttled_read_count = \ gsi_stats.get_throttled_read_event_count( table_name, gsi_name, lookback_window_start, lookback_period) throttled_by_provisioned_read_percent = \ gsi_stats.get_throttled_by_provisioned_read_event_percent( table_name, gsi_name, lookback_window_start, lookback_period) throttled_by_consumed_read_percent = \ gsi_stats.get_throttled_by_consumed_read_percent( table_name, gsi_name, lookback_window_start, lookback_period) reads_upper_threshold = \ get_gsi_option(table_key, gsi_key, 'reads_upper_threshold') reads_lower_threshold = \ get_gsi_option(table_key, gsi_key, 'reads_lower_threshold') increase_reads_unit = \ get_gsi_option(table_key, gsi_key, 'increase_reads_unit') decrease_reads_unit = \ get_gsi_option(table_key, gsi_key, 'decrease_reads_unit') increase_reads_with = \ get_gsi_option(table_key, gsi_key, 'increase_reads_with') decrease_reads_with = \ get_gsi_option(table_key, gsi_key, 'decrease_reads_with') throttled_reads_upper_threshold = \ get_gsi_option( table_key, gsi_key, 'throttled_reads_upper_threshold') min_provisioned_reads = \ get_gsi_option(table_key, gsi_key, 'min_provisioned_reads') max_provisioned_reads = \ get_gsi_option(table_key, gsi_key, 'max_provisioned_reads') num_read_checks_before_scale_down = \ get_gsi_option( table_key, gsi_key, 'num_read_checks_before_scale_down') num_read_checks_reset_percent = \ get_gsi_option(table_key, gsi_key, 'num_read_checks_reset_percent') increase_throttled_by_provisioned_reads_unit = \ get_gsi_option( table_key, gsi_key, 'increase_throttled_by_provisioned_reads_unit') increase_throttled_by_provisioned_reads_scale = \ get_gsi_option( table_key, gsi_key, 'increase_throttled_by_provisioned_reads_scale') increase_throttled_by_consumed_reads_unit = \ get_gsi_option( table_key, gsi_key, 'increase_throttled_by_consumed_reads_unit') increase_throttled_by_consumed_reads_scale = \ get_gsi_option( table_key, gsi_key, 'increase_throttled_by_consumed_reads_scale') increase_consumed_reads_unit = \ get_gsi_option(table_key, gsi_key, 'increase_consumed_reads_unit') increase_consumed_reads_with = \ get_gsi_option(table_key, gsi_key, 'increase_consumed_reads_with') increase_consumed_reads_scale = \ get_gsi_option(table_key, gsi_key, 'increase_consumed_reads_scale') decrease_consumed_reads_unit = \ get_gsi_option(table_key, gsi_key, 'decrease_consumed_reads_unit') decrease_consumed_reads_with = \ get_gsi_option(table_key, gsi_key, 'decrease_consumed_reads_with') decrease_consumed_reads_scale = \ get_gsi_option(table_key, gsi_key, 'decrease_consumed_reads_scale') except JSONResponseError: raise except BotoServerError: raise # Set the updated units to the current read unit value updated_read_units = current_read_units # Reset consecutive reads if num_read_checks_reset_percent is reached if num_read_checks_reset_percent: if consumed_read_units_percent >= num_read_checks_reset_percent: logger.info( '{0} - GSI: {1} - Resetting the number of consecutive ' 'read checks. Reason: Consumed percent {2} is ' 'greater than reset percent: {3}'.format( table_name, gsi_name, consumed_read_units_percent, num_read_checks_reset_percent)) num_consec_read_checks = 0 # Exit if up scaling has been disabled if not get_gsi_option(table_key, gsi_key, 'enable_reads_up_scaling'): logger.debug( '{0} - GSI: {1} - Up scaling event detected. No action taken as ' 'scaling up reads has been disabled in the configuration'.format( table_name, gsi_name)) else: # If local/granular values not specified use global values increase_consumed_reads_unit = \ increase_consumed_reads_unit or increase_reads_unit increase_throttled_by_provisioned_reads_unit = \ increase_throttled_by_provisioned_reads_unit or increase_reads_unit increase_throttled_by_consumed_reads_unit = \ increase_throttled_by_consumed_reads_unit or increase_reads_unit increase_consumed_reads_with = \ increase_consumed_reads_with or increase_reads_with # Initialise variables to store calculated provisioning throttled_by_provisioned_calculated_provisioning = scale_reader( increase_throttled_by_provisioned_reads_scale, throttled_by_provisioned_read_percent) throttled_by_consumed_calculated_provisioning = scale_reader( increase_throttled_by_consumed_reads_scale, throttled_by_consumed_read_percent) consumed_calculated_provisioning = scale_reader( increase_consumed_reads_scale, consumed_read_units_percent) throttled_count_calculated_provisioning = 0 calculated_provisioning = 0 # Increase needed due to high throttled to provisioned ratio if throttled_by_provisioned_calculated_provisioning: if increase_throttled_by_provisioned_reads_unit == 'percent': throttled_by_provisioned_calculated_provisioning = \ calculators.increase_reads_in_percent( current_read_units, throttled_by_provisioned_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_reads'), consumed_read_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) else: throttled_by_provisioned_calculated_provisioning = \ calculators.increase_reads_in_units( current_read_units, throttled_by_provisioned_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_reads'), consumed_read_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) # Increase needed due to high throttled to consumed ratio if throttled_by_consumed_calculated_provisioning: if increase_throttled_by_consumed_reads_unit == 'percent': throttled_by_consumed_calculated_provisioning = \ calculators.increase_reads_in_percent( current_read_units, throttled_by_consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_reads'), consumed_read_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) else: throttled_by_consumed_calculated_provisioning = \ calculators.increase_reads_in_units( current_read_units, throttled_by_consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_reads'), consumed_read_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) # Increase needed due to high CU consumption if consumed_calculated_provisioning: if increase_consumed_reads_unit == 'percent': consumed_calculated_provisioning = \ calculators.increase_reads_in_percent( current_read_units, consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_reads'), consumed_read_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) else: consumed_calculated_provisioning = \ calculators.increase_reads_in_units( current_read_units, consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_reads'), consumed_read_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) elif (reads_upper_threshold and consumed_read_units_percent > reads_upper_threshold and not increase_consumed_reads_scale): if increase_consumed_reads_unit == 'percent': consumed_calculated_provisioning = \ calculators.increase_reads_in_percent( current_read_units, increase_consumed_reads_with, get_gsi_option( table_key, gsi_key, 'max_provisioned_reads'), consumed_read_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) else: consumed_calculated_provisioning = \ calculators.increase_reads_in_units( current_read_units, increase_consumed_reads_with, get_gsi_option( table_key, gsi_key, 'max_provisioned_reads'), consumed_read_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) # Increase needed due to high throttling if (throttled_reads_upper_threshold and throttled_read_count > throttled_reads_upper_threshold): if increase_reads_unit == 'percent': throttled_count_calculated_provisioning = \ calculators.increase_reads_in_percent( updated_read_units, increase_reads_with, get_gsi_option( table_key, gsi_key, 'max_provisioned_reads'), consumed_read_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) else: throttled_count_calculated_provisioning = \ calculators.increase_reads_in_units( updated_read_units, increase_reads_with, get_gsi_option( table_key, gsi_key, 'max_provisioned_reads'), consumed_read_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) # Determine which metric requires the most scaling if (throttled_by_provisioned_calculated_provisioning > calculated_provisioning): calculated_provisioning = \ throttled_by_provisioned_calculated_provisioning scale_reason = ( "due to throttled events by provisioned " "units threshold being exceeded") if (throttled_by_consumed_calculated_provisioning > calculated_provisioning): calculated_provisioning = \ throttled_by_consumed_calculated_provisioning scale_reason = ( "due to throttled events by consumed " "units threshold being exceeded") if consumed_calculated_provisioning > calculated_provisioning: calculated_provisioning = consumed_calculated_provisioning scale_reason = "due to consumed threshold being exceeded" if throttled_count_calculated_provisioning > calculated_provisioning: calculated_provisioning = throttled_count_calculated_provisioning scale_reason = "due to throttled events threshold being exceeded" if calculated_provisioning > current_read_units: logger.info( '{0} - GSI: {1} - Resetting the number of consecutive ' 'read checks. Reason: scale up {2}'.format( table_name, gsi_name, scale_reason)) num_consec_read_checks = 0 update_needed = True updated_read_units = calculated_provisioning # Decrease needed due to low CU consumption if not update_needed: # If local/granular values not specified use global values decrease_consumed_reads_unit = \ decrease_consumed_reads_unit or decrease_reads_unit decrease_consumed_reads_with = \ decrease_consumed_reads_with or decrease_reads_with # Initialise variables to store calculated provisioning consumed_calculated_provisioning = scale_reader_decrease( decrease_consumed_reads_scale, consumed_read_units_percent) calculated_provisioning = None # Exit if down scaling has been disabled if not get_gsi_option(table_key, gsi_key, 'enable_reads_down_scaling'): logger.debug( '{0} - GSI: {1} - Down scaling event detected. ' 'No action taken as scaling ' 'down reads has been disabled in the configuration'.format( table_name, gsi_name)) # Exit if reads == 0% and downscaling has been disabled at 0% elif (consumed_read_units_percent == 0 and not get_gsi_option( table_key, gsi_key, 'allow_scaling_down_reads_on_0_percent')): logger.info( '{0} - GSI: {1} - Down scaling event detected. ' 'No action taken as scaling down reads is not done when' ' usage is at 0%'.format(table_name, gsi_name)) else: if consumed_calculated_provisioning: if decrease_consumed_reads_unit == 'percent': calculated_provisioning = \ calculators.decrease_reads_in_percent( updated_read_units, consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'min_provisioned_reads'), '{0} - GSI: {1}'.format(table_name, gsi_name)) else: calculated_provisioning = \ calculators.decrease_reads_in_units( updated_read_units, consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'min_provisioned_reads'), '{0} - GSI: {1}'.format(table_name, gsi_name)) elif (reads_lower_threshold and consumed_read_units_percent < reads_lower_threshold and not decrease_consumed_reads_scale): if decrease_consumed_reads_unit == 'percent': calculated_provisioning = \ calculators.decrease_reads_in_percent( updated_read_units, decrease_consumed_reads_with, get_gsi_option( table_key, gsi_key, 'min_provisioned_reads'), '{0} - GSI: {1}'.format(table_name, gsi_name)) else: calculated_provisioning = \ calculators.decrease_reads_in_units( updated_read_units, decrease_consumed_reads_with, get_gsi_option( table_key, gsi_key, 'min_provisioned_reads'), '{0} - GSI: {1}'.format(table_name, gsi_name)) if (calculated_provisioning and current_read_units != calculated_provisioning): num_consec_read_checks += 1 if num_consec_read_checks >= num_read_checks_before_scale_down: update_needed = True updated_read_units = calculated_provisioning # Never go over the configured max provisioning if max_provisioned_reads: if int(updated_read_units) > int(max_provisioned_reads): update_needed = True updated_read_units = int(max_provisioned_reads) logger.info( '{0} - GSI: {1} - Will not increase reads over ' 'gsi-max-provisioned-reads ' 'limit ({2} reads)'.format( table_name, gsi_name, updated_read_units)) # Ensure that we have met the min-provisioning if min_provisioned_reads: if int(min_provisioned_reads) > int(updated_read_units): update_needed = True updated_read_units = int(min_provisioned_reads) logger.info( '{0} - GSI: {1} - Increasing reads to ' 'meet gsi-min-provisioned-reads ' 'limit ({2} reads)'.format( table_name, gsi_name, updated_read_units)) if calculators.is_consumed_over_proposed( current_read_units, updated_read_units, consumed_read_units_percent): update_needed = False updated_read_units = current_read_units logger.info( '{0} - GSI: {1} - Consumed is over proposed read units. Will leave ' 'table at current setting.'.format(table_name, gsi_name)) logger.info('{0} - GSI: {1} - Consecutive read checks {2}/{3}'.format( table_name, gsi_name, num_consec_read_checks, num_read_checks_before_scale_down)) return update_needed, updated_read_units, num_consec_read_checks
[ " Ensure that provisioning is correct\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type table_key: str\n :param table_key: Table configuration option key name\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type gsi_key: str\n :param gsi_key: Configuration option key name\n :type num_consec_read_checks: int\n :param num_consec_read_checks: How many consecutive checks have we had\n :returns: (bool, int, int)\n update_needed, updated_read_units, num_consec_read_checks\n " ]
Please provide a description of the function:def __ensure_provisioning_writes( table_name, table_key, gsi_name, gsi_key, num_consec_write_checks): if not get_gsi_option(table_key, gsi_key, 'enable_writes_autoscaling'): logger.info( '{0} - GSI: {1} - ' 'Autoscaling of writes has been disabled'.format( table_name, gsi_name)) return False, dynamodb.get_provisioned_gsi_write_units( table_name, gsi_name), 0 update_needed = False try: lookback_window_start = get_gsi_option( table_key, gsi_key, 'lookback_window_start') lookback_period = get_gsi_option( table_key, gsi_key, 'lookback_period') current_write_units = dynamodb.get_provisioned_gsi_write_units( table_name, gsi_name) consumed_write_units_percent = \ gsi_stats.get_consumed_write_units_percent( table_name, gsi_name, lookback_window_start, lookback_period) throttled_write_count = \ gsi_stats.get_throttled_write_event_count( table_name, gsi_name, lookback_window_start, lookback_period) throttled_by_provisioned_write_percent = \ gsi_stats.get_throttled_by_provisioned_write_event_percent( table_name, gsi_name, lookback_window_start, lookback_period) throttled_by_consumed_write_percent = \ gsi_stats.get_throttled_by_consumed_write_percent( table_name, gsi_name, lookback_window_start, lookback_period) writes_upper_threshold = \ get_gsi_option(table_key, gsi_key, 'writes_upper_threshold') writes_lower_threshold = \ get_gsi_option(table_key, gsi_key, 'writes_lower_threshold') throttled_writes_upper_threshold = \ get_gsi_option( table_key, gsi_key, 'throttled_writes_upper_threshold') increase_writes_unit = \ get_gsi_option(table_key, gsi_key, 'increase_writes_unit') increase_writes_with = \ get_gsi_option(table_key, gsi_key, 'increase_writes_with') decrease_writes_unit = \ get_gsi_option(table_key, gsi_key, 'decrease_writes_unit') decrease_writes_with = \ get_gsi_option(table_key, gsi_key, 'decrease_writes_with') min_provisioned_writes = \ get_gsi_option(table_key, gsi_key, 'min_provisioned_writes') max_provisioned_writes = \ get_gsi_option(table_key, gsi_key, 'max_provisioned_writes') num_write_checks_before_scale_down = \ get_gsi_option( table_key, gsi_key, 'num_write_checks_before_scale_down') num_write_checks_reset_percent = \ get_gsi_option( table_key, gsi_key, 'num_write_checks_reset_percent') increase_throttled_by_provisioned_writes_unit = \ get_gsi_option( table_key, gsi_key, 'increase_throttled_by_provisioned_writes_unit') increase_throttled_by_provisioned_writes_scale = \ get_gsi_option( table_key, gsi_key, 'increase_throttled_by_provisioned_writes_scale') increase_throttled_by_consumed_writes_unit = \ get_gsi_option( table_key, gsi_key, 'increase_throttled_by_consumed_writes_unit') increase_throttled_by_consumed_writes_scale = \ get_gsi_option( table_key, gsi_key, 'increase_throttled_by_consumed_writes_scale') increase_consumed_writes_unit = \ get_gsi_option(table_key, gsi_key, 'increase_consumed_writes_unit') increase_consumed_writes_with = \ get_gsi_option(table_key, gsi_key, 'increase_consumed_writes_with') increase_consumed_writes_scale = \ get_gsi_option( table_key, gsi_key, 'increase_consumed_writes_scale') decrease_consumed_writes_unit = \ get_gsi_option(table_key, gsi_key, 'decrease_consumed_writes_unit') decrease_consumed_writes_with = \ get_gsi_option(table_key, gsi_key, 'decrease_consumed_writes_with') decrease_consumed_writes_scale = \ get_gsi_option( table_key, gsi_key, 'decrease_consumed_writes_scale') except JSONResponseError: raise except BotoServerError: raise # Set the updated units to the current write unit value updated_write_units = current_write_units # Reset consecutive write count if num_write_checks_reset_percent is reached if num_write_checks_reset_percent: if consumed_write_units_percent >= num_write_checks_reset_percent: logger.info( '{0} - GSI: {1} - Resetting the number of consecutive ' 'write checks. Reason: Consumed percent {2} is ' 'greater than reset percent: {3}'.format( table_name, gsi_name, consumed_write_units_percent, num_write_checks_reset_percent)) num_consec_write_checks = 0 # Exit if up scaling has been disabled if not get_gsi_option(table_key, gsi_key, 'enable_writes_up_scaling'): logger.debug( '{0} - GSI: {1} - Up scaling event detected. No action taken as ' 'scaling up writes has been disabled in the configuration'.format( table_name, gsi_name)) else: # If local/granular values not specified use global values increase_consumed_writes_unit = \ increase_consumed_writes_unit or increase_writes_unit increase_throttled_by_provisioned_writes_unit = ( increase_throttled_by_provisioned_writes_unit or increase_writes_unit) increase_throttled_by_consumed_writes_unit = \ increase_throttled_by_consumed_writes_unit or increase_writes_unit increase_consumed_writes_with = \ increase_consumed_writes_with or increase_writes_with # Initialise variables to store calculated provisioning throttled_by_provisioned_calculated_provisioning = scale_reader( increase_throttled_by_provisioned_writes_scale, throttled_by_provisioned_write_percent) throttled_by_consumed_calculated_provisioning = scale_reader( increase_throttled_by_consumed_writes_scale, throttled_by_consumed_write_percent) consumed_calculated_provisioning = scale_reader( increase_consumed_writes_scale, consumed_write_units_percent) throttled_count_calculated_provisioning = 0 calculated_provisioning = 0 # Increase needed due to high throttled to provisioned ratio if throttled_by_provisioned_calculated_provisioning: if increase_throttled_by_provisioned_writes_unit == 'percent': throttled_by_provisioned_calculated_provisioning = \ calculators.increase_writes_in_percent( current_write_units, throttled_by_provisioned_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_writes'), consumed_write_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) else: throttled_by_provisioned_calculated_provisioning = \ calculators.increase_writes_in_units( current_write_units, throttled_by_provisioned_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_writes'), consumed_write_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) # Increase needed due to high throttled to consumed ratio if throttled_by_consumed_calculated_provisioning: if increase_throttled_by_consumed_writes_unit == 'percent': throttled_by_consumed_calculated_provisioning = \ calculators.increase_writes_in_percent( current_write_units, throttled_by_consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_writes'), consumed_write_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) else: throttled_by_consumed_calculated_provisioning = \ calculators.increase_writes_in_units( current_write_units, throttled_by_consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_writes'), consumed_write_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) # Increase needed due to high CU consumption if consumed_calculated_provisioning: if increase_consumed_writes_unit == 'percent': consumed_calculated_provisioning = \ calculators.increase_writes_in_percent( current_write_units, consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_writes'), consumed_write_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) else: consumed_calculated_provisioning = \ calculators.increase_writes_in_units( current_write_units, consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'max_provisioned_writes'), consumed_write_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) elif (writes_upper_threshold and consumed_write_units_percent > writes_upper_threshold and not increase_consumed_writes_scale): if increase_consumed_writes_unit == 'percent': consumed_calculated_provisioning = \ calculators.increase_writes_in_percent( current_write_units, increase_consumed_writes_with, get_gsi_option( table_key, gsi_key, 'max_provisioned_writes'), consumed_write_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) else: consumed_calculated_provisioning = \ calculators.increase_writes_in_units( current_write_units, increase_consumed_writes_with, get_gsi_option( table_key, gsi_key, 'max_provisioned_writes'), consumed_write_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) # Increase needed due to high throttling if (throttled_writes_upper_threshold and throttled_write_count > throttled_writes_upper_threshold): if increase_writes_unit == 'percent': throttled_count_calculated_provisioning = \ calculators.increase_writes_in_percent( updated_write_units, increase_writes_with, get_gsi_option( table_key, gsi_key, 'max_provisioned_writes'), consumed_write_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) else: throttled_count_calculated_provisioning = \ calculators.increase_writes_in_units( updated_write_units, increase_writes_with, get_gsi_option( table_key, gsi_key, 'max_provisioned_writes'), consumed_write_units_percent, '{0} - GSI: {1}'.format(table_name, gsi_name)) # Determine which metric requires the most scaling if (throttled_by_provisioned_calculated_provisioning > calculated_provisioning): calculated_provisioning = \ throttled_by_provisioned_calculated_provisioning scale_reason = ( "due to throttled events by provisioned " "units threshold being exceeded") if (throttled_by_consumed_calculated_provisioning > calculated_provisioning): calculated_provisioning = \ throttled_by_consumed_calculated_provisioning scale_reason = ( "due to throttled events by consumed " "units threshold being exceeded") if consumed_calculated_provisioning > calculated_provisioning: calculated_provisioning = consumed_calculated_provisioning scale_reason = "due to consumed threshold being exceeded" if throttled_count_calculated_provisioning > calculated_provisioning: calculated_provisioning = throttled_count_calculated_provisioning scale_reason = "due to throttled events threshold being exceeded" if calculated_provisioning > current_write_units: logger.info( '{0} - GSI: {1} - Resetting the number of consecutive ' 'write checks. Reason: scale up {2}'.format( table_name, gsi_name, scale_reason)) num_consec_write_checks = 0 update_needed = True updated_write_units = calculated_provisioning # Decrease needed due to low CU consumption if not update_needed: # If local/granular values not specified use global values decrease_consumed_writes_unit = \ decrease_consumed_writes_unit or decrease_writes_unit decrease_consumed_writes_with = \ decrease_consumed_writes_with or decrease_writes_with # Initialise variables to store calculated provisioning consumed_calculated_provisioning = scale_reader_decrease( decrease_consumed_writes_scale, consumed_write_units_percent) calculated_provisioning = None # Exit if down scaling has been disabled if not get_gsi_option( table_key, gsi_key, 'enable_writes_down_scaling'): logger.debug( '{0} - GSI: {1} - Down scaling event detected. ' 'No action taken as scaling ' 'down writes has been disabled in the configuration'.format( table_name, gsi_name)) # Exit if writes == 0% and downscaling has been disabled at 0% elif (consumed_write_units_percent == 0 and not get_gsi_option( table_key, gsi_key, 'allow_scaling_down_writes_on_0_percent')): logger.info( '{0} - GSI: {1} - Down scaling event detected. ' 'No action taken as scaling down writes is not done when' ' usage is at 0%'.format(table_name, gsi_name)) else: if consumed_calculated_provisioning: if decrease_consumed_writes_unit == 'percent': calculated_provisioning = \ calculators.decrease_writes_in_percent( updated_write_units, consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'min_provisioned_writes'), '{0} - GSI: {1}'.format(table_name, gsi_name)) else: calculated_provisioning = \ calculators.decrease_writes_in_units( updated_write_units, consumed_calculated_provisioning, get_gsi_option( table_key, gsi_key, 'min_provisioned_writes'), '{0} - GSI: {1}'.format(table_name, gsi_name)) elif (writes_lower_threshold and consumed_write_units_percent < writes_lower_threshold and not decrease_consumed_writes_scale): if decrease_consumed_writes_unit == 'percent': calculated_provisioning = \ calculators.decrease_writes_in_percent( updated_write_units, decrease_consumed_writes_with, get_gsi_option( table_key, gsi_key, 'min_provisioned_writes'), '{0} - GSI: {1}'.format(table_name, gsi_name)) else: calculated_provisioning = \ calculators.decrease_writes_in_units( updated_write_units, decrease_consumed_writes_with, get_gsi_option( table_key, gsi_key, 'min_provisioned_writes'), '{0} - GSI: {1}'.format(table_name, gsi_name)) if (calculated_provisioning and current_write_units != calculated_provisioning): num_consec_write_checks += 1 if num_consec_write_checks >= \ num_write_checks_before_scale_down: update_needed = True updated_write_units = calculated_provisioning # Never go over the configured max provisioning if max_provisioned_writes: if int(updated_write_units) > int(max_provisioned_writes): update_needed = True updated_write_units = int(max_provisioned_writes) logger.info( '{0} - GSI: {1} - ' 'Will not increase writes over gsi-max-provisioned-writes ' 'limit ({2} writes)'.format( table_name, gsi_name, updated_write_units)) # Ensure that we have met the min-provisioning if min_provisioned_writes: if int(min_provisioned_writes) > int(updated_write_units): update_needed = True updated_write_units = int(min_provisioned_writes) logger.info( '{0} - GSI: {1} - Increasing writes to ' 'meet gsi-min-provisioned-writes ' 'limit ({2} writes)'.format( table_name, gsi_name, updated_write_units)) if calculators.is_consumed_over_proposed(
current_write_units, updated_write_units, consumed_write_units_percent): update_needed = False updated_write_units = current_write_units logger.info( '{0} - GSI: {1} - Consumed is over proposed write units. Will leave ' 'table at current setting.'.format(table_name, gsi_name)) logger.info('{0} - GSI: {1} - Consecutive write checks {2}/{3}'.format( table_name, gsi_name, num_consec_write_checks, num_write_checks_before_scale_down)) return update_needed, updated_write_units, num_consec_write_checks
[ " Ensure that provisioning of writes is correct\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type table_key: str\n :param table_key: Table configuration option key name\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type gsi_key: str\n :param gsi_key: Configuration option key name\n :type num_consec_write_checks: int\n :param num_consec_write_checks: How many consecutive checks have we had\n :returns: (bool, int, int)\n update_needed, updated_write_units, num_consec_write_checks\n " ]
Please provide a description of the function:def __update_throughput( table_name, table_key, gsi_name, gsi_key, read_units, write_units): try: current_ru = dynamodb.get_provisioned_gsi_read_units( table_name, gsi_name) current_wu = dynamodb.get_provisioned_gsi_write_units( table_name, gsi_name) except JSONResponseError: raise # Check table status try: gsi_status = dynamodb.get_gsi_status(table_name, gsi_name) except JSONResponseError: raise logger.debug('{0} - GSI: {1} - GSI status is {2}'.format( table_name, gsi_name, gsi_status)) if gsi_status != 'ACTIVE': logger.warning( '{0} - GSI: {1} - Not performing throughput changes when GSI ' 'status is {2}'.format(table_name, gsi_name, gsi_status)) return # If this setting is True, we will only scale down when # BOTH reads AND writes are low if get_gsi_option(table_key, gsi_key, 'always_decrease_rw_together'): read_units, write_units = __calculate_always_decrease_rw_values( table_name, gsi_name, read_units, current_ru, write_units, current_wu) if read_units == current_ru and write_units == current_wu: logger.info('{0} - GSI: {1} - No changes to perform'.format( table_name, gsi_name)) return dynamodb.update_gsi_provisioning( table_name, table_key, gsi_name, gsi_key, int(read_units), int(write_units))
[ " Update throughput on the GSI\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type table_key: str\n :param table_key: Table configuration option key name\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type gsi_key: str\n :param gsi_key: Configuration option key name\n :type read_units: int\n :param read_units: New read unit provisioning\n :type write_units: int\n :param write_units: New write unit provisioning\n " ]
Please provide a description of the function:def __ensure_provisioning_alarm(table_name, table_key, gsi_name, gsi_key): lookback_window_start = get_gsi_option( table_key, gsi_key, 'lookback_window_start') lookback_period = get_gsi_option( table_key, gsi_key, 'lookback_period') consumed_read_units_percent = gsi_stats.get_consumed_read_units_percent( table_name, gsi_name, lookback_window_start, lookback_period) consumed_write_units_percent = gsi_stats.get_consumed_write_units_percent( table_name, gsi_name, lookback_window_start, lookback_period) reads_upper_alarm_threshold = \ get_gsi_option(table_key, gsi_key, 'reads-upper-alarm-threshold') reads_lower_alarm_threshold = \ get_gsi_option(table_key, gsi_key, 'reads-lower-alarm-threshold') writes_upper_alarm_threshold = \ get_gsi_option(table_key, gsi_key, 'writes-upper-alarm-threshold') writes_lower_alarm_threshold = \ get_gsi_option(table_key, gsi_key, 'writes-lower-alarm-threshold') # Check upper alarm thresholds upper_alert_triggered = False upper_alert_message = [] if 0 < reads_upper_alarm_threshold <= consumed_read_units_percent: upper_alert_triggered = True upper_alert_message.append( '{0} - GSI: {1} - Consumed Read Capacity {2:f}% ' 'was greater than or equal to the upper alarm ' 'threshold {3:f}%\n'.format( table_name, gsi_name, consumed_read_units_percent, reads_upper_alarm_threshold)) if 0 < writes_upper_alarm_threshold <= consumed_write_units_percent: upper_alert_triggered = True upper_alert_message.append( '{0} - GSI: {1} - Consumed Write Capacity {2:f}% ' 'was greater than or equal to the upper alarm ' 'threshold {3:f}%\n'.format( table_name, gsi_name, consumed_write_units_percent, writes_upper_alarm_threshold)) # Check lower alarm thresholds lower_alert_triggered = False lower_alert_message = [] if (reads_lower_alarm_threshold > 0 and consumed_read_units_percent < reads_lower_alarm_threshold): lower_alert_triggered = True lower_alert_message.append( '{0} - GSI: {1} - Consumed Read Capacity {2:f}% ' 'was below the lower alarm threshold {3:f}%\n'.format( table_name, gsi_name, consumed_read_units_percent, reads_lower_alarm_threshold)) if (writes_lower_alarm_threshold > 0 and consumed_write_units_percent < writes_lower_alarm_threshold): lower_alert_triggered = True lower_alert_message.append( '{0} - GSI: {1} - Consumed Write Capacity {2:f}% ' 'was below the lower alarm threshold {3:f}%\n'.format( table_name, gsi_name, consumed_write_units_percent, writes_lower_alarm_threshold)) # Send alert if needed if upper_alert_triggered: logger.info( '{0} - GSI: {1} - Will send high provisioning alert'.format( table_name, gsi_name)) sns.publish_gsi_notification( table_key, gsi_key, ''.join(upper_alert_message), ['high-throughput-alarm'], subject='ALARM: High Throughput for Table {0} - GSI: {1}'.format( table_name, gsi_name)) elif lower_alert_triggered: logger.info( '{0} - GSI: {1} - Will send low provisioning alert'.format( table_name, gsi_name)) sns.publish_gsi_notification( table_key, gsi_key, ''.join(lower_alert_message), ['low-throughput-alarm'], subject='ALARM: Low Throughput for Table {0} - GSI: {1}'.format( table_name, gsi_name)) else: logger.debug( '{0} - GSI: {1} - Throughput alarm thresholds not crossed'.format( table_name, gsi_name))
[ " Ensure that provisioning alarm threshold is not exceeded\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type table_key: str\n :param table_key: Table configuration option key name\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type gsi_key: str\n :param gsi_key: Configuration option key name\n " ]
Please provide a description of the function:def is_open(table_name=None, table_key=None, gsi_name=None, gsi_key=None): logger.debug('Checking circuit breaker status') # Parse the URL to make sure it is OK pattern = re.compile( r'^(?P<scheme>http(s)?://)' r'((?P<username>.+):(?P<password>.+)@){0,1}' r'(?P<url>.*)$' ) url = timeout = None if gsi_name: url = get_gsi_option(table_key, gsi_key, 'circuit_breaker_url') timeout = get_gsi_option(table_key, gsi_key, 'circuit_breaker_timeout') elif table_name: url = get_table_option(table_key, 'circuit_breaker_url') timeout = get_table_option(table_key, 'circuit_breaker_timeout') if not url: url = get_global_option('circuit_breaker_url') timeout = get_global_option('circuit_breaker_timeout') match = pattern.match(url) if not match: logger.error('Malformatted URL: {0}'.format(url)) sys.exit(1) use_basic_auth = False if match.group('username') and match.group('password'): use_basic_auth = True # Make the actual URL to call auth = () if use_basic_auth: url = '{scheme}{url}'.format( scheme=match.group('scheme'), url=match.group('url')) auth = (match.group('username'), match.group('password')) headers = {} if table_name: headers["x-table-name"] = table_name if gsi_name: headers["x-gsi-name"] = gsi_name # Make the actual request try: response = requests.get( url, auth=auth, timeout=timeout / 1000.00, headers=headers) if int(response.status_code) >= 200 and int(response.status_code) < 300: logger.info('Circuit breaker is closed') return False else: logger.warning( 'Circuit breaker returned with status code {0:d}'.format( response.status_code)) except requests.exceptions.SSLError as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.Timeout as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.ConnectionError as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.HTTPError as error: logger.warning('Circuit breaker: {0}'.format(error)) except requests.exceptions.TooManyRedirects as error: logger.warning('Circuit breaker: {0}'.format(error)) except Exception as error: logger.error('Unhandled exception: {0}'.format(error)) logger.error( 'Please file a bug at ' 'https://github.com/sebdah/dynamic-dynamodb/issues') return True
[ " Checks whether the circuit breaker is open\n\n :param table_name: Name of the table being checked\n :param table_key: Configuration key for table\n :param gsi_name: Name of the GSI being checked\n :param gsi_key: Configuration key for the GSI\n :returns: bool -- True if the circuit is open\n " ]
Please provide a description of the function:def __get_connection_cloudwatch(): region = get_global_option('region') try: if (get_global_option('aws_access_key_id') and get_global_option('aws_secret_access_key')): logger.debug( 'Authenticating to CloudWatch using ' 'credentials in configuration file') connection = cloudwatch.connect_to_region( region, aws_access_key_id=get_global_option('aws_access_key_id'), aws_secret_access_key=get_global_option( 'aws_secret_access_key')) else: logger.debug( 'Authenticating using boto\'s authentication handler') connection = cloudwatch.connect_to_region(region) except Exception as err: logger.error('Failed connecting to CloudWatch: {0}'.format(err)) logger.error( 'Please report an issue at: ' 'https://github.com/sebdah/dynamic-dynamodb/issues') raise logger.debug('Connected to CloudWatch in {0}'.format(region)) return connection
[ " Ensure connection to CloudWatch " ]
Please provide a description of the function:def get_consumed_read_units_percent( table_name, gsi_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, gsi_name, lookback_window_start, lookback_period, 'ConsumedReadCapacityUnits') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 consumed_read_units = ( float(metrics[0]['Sum']) / float(lookback_seconds)) else: consumed_read_units = 0 try: gsi_read_units = dynamodb.get_provisioned_gsi_read_units( table_name, gsi_name) consumed_read_units_percent = ( float(consumed_read_units) / float(gsi_read_units) * 100) except JSONResponseError: raise logger.info('{0} - GSI: {1} - Consumed read units: {2:.2f}%'.format( table_name, gsi_name, consumed_read_units_percent)) return consumed_read_units_percent
[ " Returns the number of consumed read units in percent\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: float -- Number of consumed reads as a\n percentage of provisioned reads\n " ]
Please provide a description of the function:def get_throttled_by_provisioned_read_event_percent( table_name, gsi_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, gsi_name, lookback_window_start, lookback_period, 'ReadThrottleEvents') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 throttled_read_events = ( float(metrics[0]['Sum']) / float(lookback_seconds)) else: throttled_read_events = 0 try: gsi_read_units = dynamodb.get_provisioned_gsi_read_units( table_name, gsi_name) throttled_by_provisioned_read_percent = ( float(throttled_read_events) / float(gsi_read_units) * 100) except JSONResponseError: raise logger.info( '{0} - GSI: {1} - Throttled read percent ' 'by provision: {2:.2f}%'.format( table_name, gsi_name, throttled_by_provisioned_read_percent)) return throttled_by_provisioned_read_percent
[ " Returns the number of throttled read events in percent\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: float -- Percent of throttled read events by provisioning\n " ]
Please provide a description of the function:def get_consumed_write_units_percent( table_name, gsi_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, gsi_name, lookback_window_start, lookback_period, 'ConsumedWriteCapacityUnits') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 consumed_write_units = ( float(metrics[0]['Sum']) / float(lookback_seconds)) else: consumed_write_units = 0 try: gsi_write_units = dynamodb.get_provisioned_gsi_write_units( table_name, gsi_name) consumed_write_units_percent = ( float(consumed_write_units) / float(gsi_write_units) * 100) except JSONResponseError: raise logger.info('{0} - GSI: {1} - Consumed write units: {2:.2f}%'.format( table_name, gsi_name, consumed_write_units_percent)) return consumed_write_units_percent
[ " Returns the number of consumed write units in percent\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: float -- Number of consumed writes as a\n percentage of provisioned writes\n " ]
Please provide a description of the function:def get_throttled_write_event_count( table_name, gsi_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, gsi_name, lookback_window_start, lookback_period, 'WriteThrottleEvents') except BotoServerError: raise if metrics: throttled_write_events = int(metrics[0]['Sum']) else: throttled_write_events = 0 logger.info('{0} - GSI: {1} - Write throttle count: {2:d}'.format( table_name, gsi_name, throttled_write_events)) return throttled_write_events
[ " Returns the number of throttled write events during a given time frame\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: int -- Number of throttled write events during the time period\n " ]
Please provide a description of the function:def get_throttled_by_provisioned_write_event_percent( table_name, gsi_name, lookback_window_start=15, lookback_period=5): try: metrics = __get_aws_metric( table_name, gsi_name, lookback_window_start, lookback_period, 'WriteThrottleEvents') except BotoServerError: raise if metrics: lookback_seconds = lookback_period * 60 throttled_write_events = float(metrics[0]['Sum']) / float( lookback_seconds) else: throttled_write_events = 0 try: gsi_write_units = dynamodb.get_provisioned_gsi_write_units( table_name, gsi_name) throttled_by_provisioned_write_percent = ( float(throttled_write_events) / float(gsi_write_units) * 100) except JSONResponseError: raise logger.info( '{0} - GSI: {1} - Throttled write percent ' 'by provision: {2:.2f}%'.format( table_name, gsi_name, throttled_by_provisioned_write_percent)) return throttled_by_provisioned_write_percent
[ " Returns the number of throttled write events during a given time frame\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type lookback_window_start: int\n :param lookback_window_start: Relative start time for the CloudWatch metric\n :type lookback_period: int\n :param lookback_period: Number of minutes to look at\n :returns: float -- Percent of throttled write events by provisioning\n " ]
Please provide a description of the function:def get_tables_and_gsis(): table_names = set() configured_tables = get_configured_tables() not_used_tables = set(configured_tables) # Add regexp table names for table_instance in list_tables(): for key_name in configured_tables: try: if re.match(key_name, table_instance.table_name): logger.debug("Table {0} match with config key {1}".format( table_instance.table_name, key_name)) # Notify users about regexps that match multiple tables if table_instance.table_name in [x[0] for x in table_names]: logger.warning( 'Table {0} matches more than one regexp in config, ' 'skipping this match: "{1}"'.format( table_instance.table_name, key_name)) else: table_names.add( ( table_instance.table_name, key_name )) not_used_tables.discard(key_name) else: logger.debug( "Table {0} did not match with config key {1}".format( table_instance.table_name, key_name)) except re.error: logger.error('Invalid regular expression: "{0}"'.format( key_name)) sys.exit(1) if not_used_tables: logger.warning( 'No tables matching the following configured ' 'tables found: {0}'.format(', '.join(not_used_tables))) return sorted(table_names)
[ " Get a set of tables and gsis and their configuration keys\n\n :returns: set -- A set of tuples (table_name, table_conf_key)\n " ]
Please provide a description of the function:def get_table(table_name):
    try:
        table = Table(table_name, connection=DYNAMODB_CONNECTION)
    except DynamoDBResponseError as error:
        dynamodb_error = error.body['__type'].rsplit('#', 1)[1]
        if dynamodb_error == 'ResourceNotFoundException':
            logger.error(
                '{0} - Table not found'.format(table_name))
        raise
    return table
[ " Return the DynamoDB table\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :returns: boto.dynamodb.table.Table\n " ]
Please provide a description of the function:def get_gsi_status(table_name, gsi_name): try: desc = DYNAMODB_CONNECTION.describe_table(table_name) except JSONResponseError: raise for gsi in desc[u'Table'][u'GlobalSecondaryIndexes']: if gsi[u'IndexName'] == gsi_name: return gsi[u'IndexStatus']
[ " Return the DynamoDB table\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :returns: str\n " ]
Please provide a description of the function:def get_provisioned_gsi_read_units(table_name, gsi_name): try: desc = DYNAMODB_CONNECTION.describe_table(table_name) except JSONResponseError: raise for gsi in desc[u'Table'][u'GlobalSecondaryIndexes']: if gsi[u'IndexName'] == gsi_name: read_units = int( gsi[u'ProvisionedThroughput'][u'ReadCapacityUnits']) break logger.debug( '{0} - GSI: {1} - Currently provisioned read units: {2:d}'.format( table_name, gsi_name, read_units)) return read_units
[ " Returns the number of provisioned read units for the table\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :returns: int -- Number of read units\n " ]
Please provide a description of the function:def get_provisioned_gsi_write_units(table_name, gsi_name): try: desc = DYNAMODB_CONNECTION.describe_table(table_name) except JSONResponseError: raise for gsi in desc[u'Table'][u'GlobalSecondaryIndexes']: if gsi[u'IndexName'] == gsi_name: write_units = int( gsi[u'ProvisionedThroughput'][u'WriteCapacityUnits']) break logger.debug( '{0} - GSI: {1} - Currently provisioned write units: {2:d}'.format( table_name, gsi_name, write_units)) return write_units
[ " Returns the number of provisioned write units for the table\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :returns: int -- Number of write units\n " ]
Please provide a description of the function:def get_provisioned_table_read_units(table_name): try: desc = DYNAMODB_CONNECTION.describe_table(table_name) except JSONResponseError: raise read_units = int( desc[u'Table'][u'ProvisionedThroughput'][u'ReadCapacityUnits']) logger.debug('{0} - Currently provisioned read units: {1:d}'.format( table_name, read_units)) return read_units
[ " Returns the number of provisioned read units for the table\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :returns: int -- Number of read units\n " ]
Please provide a description of the function:def get_provisioned_table_write_units(table_name): try: desc = DYNAMODB_CONNECTION.describe_table(table_name) except JSONResponseError: raise write_units = int( desc[u'Table'][u'ProvisionedThroughput'][u'WriteCapacityUnits']) logger.debug('{0} - Currently provisioned write units: {1:d}'.format( table_name, write_units)) return write_units
[ " Returns the number of provisioned write units for the table\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :returns: int -- Number of write units\n " ]
Please provide a description of the function:def get_table_status(table_name): try: desc = DYNAMODB_CONNECTION.describe_table(table_name) except JSONResponseError: raise return desc[u'Table'][u'TableStatus']
[ " Return the DynamoDB table\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :returns: str\n " ]
Please provide a description of the function:def list_tables(): tables = [] try: table_list = DYNAMODB_CONNECTION.list_tables() while True: for table_name in table_list[u'TableNames']: tables.append(get_table(table_name)) if u'LastEvaluatedTableName' in table_list: table_list = DYNAMODB_CONNECTION.list_tables( table_list[u'LastEvaluatedTableName']) else: break except DynamoDBResponseError as error: dynamodb_error = error.body['__type'].rsplit('#', 1)[1] if dynamodb_error == 'ResourceNotFoundException': logger.error('No tables found') elif dynamodb_error == 'AccessDeniedException': logger.debug( 'Your AWS API keys lack access to listing tables. ' 'That is an issue if you are trying to use regular ' 'expressions in your table configuration.') elif dynamodb_error == 'UnrecognizedClientException': logger.error( 'Invalid security token. Are your AWS API keys correct?') else: logger.error( ( 'Unhandled exception: {0}: {1}. ' 'Please file a bug report at ' 'https://github.com/sebdah/dynamic-dynamodb/issues' ).format( dynamodb_error, error.body['message'])) except JSONResponseError as error: logger.error('Communication error: {0}'.format(error)) sys.exit(1) return tables
[ " Return list of DynamoDB tables available from AWS\n\n :returns: list -- List of DynamoDB tables\n " ]
Please provide a description of the function:def update_table_provisioning( table_name, key_name, reads, writes, retry_with_only_increase=False): table = get_table(table_name) current_reads = int(get_provisioned_table_read_units(table_name)) current_writes = int(get_provisioned_table_write_units(table_name)) # Make sure we aren't scaling down if we turned off downscaling if (not get_table_option(key_name, 'enable_reads_down_scaling') or not get_table_option(key_name, 'enable_writes_down_scaling')): if (not get_table_option(key_name, 'enable_reads_down_scaling') and current_reads > reads): reads = current_reads if (not get_table_option(key_name, 'enable_writes_down_scaling') and current_writes > writes): writes = current_writes # Return if we do not need to scale at all if reads == current_reads and writes == current_writes: logger.info( '{0} - No need to scale up reads nor writes'.format( table_name)) return if retry_with_only_increase: # Ensure that we are only doing increases if current_reads > reads: reads = current_reads if current_writes > writes: writes = current_writes # Return if we do not need to scale at all if reads == current_reads and writes == current_writes: logger.info( '{0} - No need to scale up reads nor writes'.format( table_name)) return logger.info( '{0} - Retrying to update provisioning, excluding any decreases. ' 'Setting new reads to {1} and new writes to {2}'.format( table_name, reads, writes)) # Check that we are in the right time frame maintenance_windows = get_table_option(key_name, 'maintenance_windows') if maintenance_windows: if not __is_table_maintenance_window(table_name, maintenance_windows): logger.warning( '{0} - We are outside a maintenace window. ' 'Will only perform up scaling activites'.format(table_name)) # Ensure that we are only doing increases if current_reads > reads: reads = current_reads if current_writes > writes: writes = current_writes # Return if we do not need to scale up if reads == current_reads and writes == current_writes: logger.info( '{0} - No need to scale up reads nor writes'.format( table_name)) return else: logger.info( '{0} - Current time is within maintenance window'.format( table_name)) logger.info( '{0} - Updating provisioning to {1} reads and {2} writes'.format( table_name, reads, writes)) # Return if dry-run if get_global_option('dry_run'): return try: table.update( throughput={ 'read': reads, 'write': writes }) # See if we should send notifications for scale-down, scale-up or both sns_message_types = [] if current_reads > reads or current_writes > writes: sns_message_types.append('scale-down') if current_reads < reads or current_writes < writes: sns_message_types.append('scale-up') message = [] if current_reads > reads: message.append('{0} - Reads: DOWN from {1} to {2}\n'.format( table_name, current_reads, reads)) elif current_reads < reads: message.append('{0} - Reads: UP from {1} to {2}\n'.format( table_name, current_reads, reads)) if current_writes > writes: message.append('{0} - Writes: DOWN from {1} to {2}\n'.format( table_name, current_writes, writes)) elif current_writes < writes: message.append('{0} - Writes: UP from {1} to {2}\n'.format( table_name, current_writes, writes)) sns.publish_table_notification( key_name, ''.join(message), sns_message_types, subject='Updated provisioning for table {0}'.format(table_name)) except JSONResponseError as error: exception = error.body['__type'].split('#')[1] know_exceptions = [ 'LimitExceededException', 'ValidationException', 'ResourceInUseException'] if exception in 
know_exceptions: logger.warning('{0} - {1}: {2}'.format( table_name, exception, error.body['message'])) else: if 'message' in error.body: msg = error.body['message'] else: msg = error logger.error( ( '{0} - Unhandled exception: {1}: {2}. ' 'Please file a bug report at ' 'https://github.com/sebdah/dynamic-dynamodb/issues' ).format(table_name, exception, msg)) if (not retry_with_only_increase and exception == 'LimitExceededException'): logger.info( '{0} - Will retry to update provisioning ' 'with only increases'.format(table_name)) update_table_provisioning( table_name, key_name, reads, writes, retry_with_only_increase=True)
[ " Update provisioning for a given table\n\n :type table_name: str\n :param table_name: Name of the table\n :type key_name: str\n :param key_name: Configuration option key name\n :type reads: int\n :param reads: New number of provisioned read units\n :type writes: int\n :param writes: New number of provisioned write units\n :type retry_with_only_increase: bool\n :param retry_with_only_increase: Set to True to ensure only increases\n " ]
Please provide a description of the function:def update_gsi_provisioning( table_name, table_key, gsi_name, gsi_key, reads, writes, retry_with_only_increase=False): current_reads = int(get_provisioned_gsi_read_units(table_name, gsi_name)) current_writes = int(get_provisioned_gsi_write_units(table_name, gsi_name)) # Make sure we aren't scaling down if we turned off downscaling if (not get_gsi_option(table_key, gsi_key, 'enable_reads_down_scaling') or not get_gsi_option( table_key, gsi_key, 'enable_writes_down_scaling')): if (not get_gsi_option( table_key, gsi_key, 'enable_reads_down_scaling') and current_reads > reads): reads = current_reads if (not get_gsi_option( table_key, gsi_key, 'enable_writes_down_scaling') and current_writes > writes): writes = current_writes # Return if we do not need to scale at all if reads == current_reads and writes == current_writes: logger.info( '{0} - No need to scale up reads nor writes'.format( table_name)) return if retry_with_only_increase: # Ensure that we are only doing increases if current_reads > reads: reads = current_reads if current_writes > writes: writes = current_writes # Return if we do not need to scale at all if reads == current_reads and writes == current_writes: logger.info( '{0} - GSI: {1} - No need to scale up reads nor writes'.format( table_name, gsi_name)) return logger.info( '{0} - GSI: {1} - Retrying to update provisioning, ' 'excluding any decreases. ' 'Setting new reads to {2} and new writes to {3}'.format( table_name, gsi_name, reads, writes)) # Check that we are in the right time frame m_windows = get_gsi_option(table_key, gsi_key, 'maintenance_windows') if m_windows: if not __is_gsi_maintenance_window(table_name, gsi_name, m_windows): logger.warning( '{0} - GSI: {1} - We are outside a maintenace window. 
' 'Will only perform up scaling activites'.format( table_name, gsi_name)) # Ensure that we are only doing increases if current_reads > reads: reads = current_reads if current_writes > writes: writes = current_writes # Return if we do not need to scale up if reads == current_reads and writes == current_writes: logger.info( '{0} - GSI: {1} - ' 'No need to scale up reads nor writes'.format( table_name, gsi_name)) return else: logger.info( '{0} - GSI: {1} - ' 'Current time is within maintenance window'.format( table_name, gsi_name)) logger.info( '{0} - GSI: {1} - ' 'Updating provisioning to {2} reads and {3} writes'.format( table_name, gsi_name, reads, writes)) # Return if dry-run if get_global_option('dry_run'): return try: DYNAMODB_CONNECTION.update_table( table_name=table_name, global_secondary_index_updates=[ { "Update": { "IndexName": gsi_name, "ProvisionedThroughput": { "ReadCapacityUnits": reads, "WriteCapacityUnits": writes } } } ]) message = [] if current_reads > reads: message.append( '{0} - GSI: {1} - Reads: DOWN from {2} to {3}\n'.format( table_name, gsi_name, current_reads, reads)) elif current_reads < reads: message.append( '{0} - GSI: {1} - Reads: UP from {2} to {3}\n'.format( table_name, gsi_name, current_reads, reads)) if current_writes > writes: message.append( '{0} - GSI: {1} - Writes: DOWN from {2} to {3}\n'.format( table_name, gsi_name, current_writes, writes)) elif current_writes < writes: message.append( '{0} - GSI: {1} - Writes: UP from {2} to {3}\n'.format( table_name, gsi_name, current_writes, writes)) # See if we should send notifications for scale-down, scale-up or both sns_message_types = [] if current_reads > reads or current_writes > writes: sns_message_types.append('scale-down') if current_reads < reads or current_writes < writes: sns_message_types.append('scale-up') sns.publish_gsi_notification( table_key, gsi_key, ''.join(message), sns_message_types, subject='Updated provisioning for GSI {0}'.format(gsi_name)) except JSONResponseError as error: exception = error.body['__type'].split('#')[1] know_exceptions = ['LimitExceededException'] if exception in know_exceptions: logger.warning('{0} - GSI: {1} - {2}: {3}'.format( table_name, gsi_name, exception, error.body['message'])) else: logger.error( ( '{0} - GSI: {1} - Unhandled exception: {2}: {3}. ' 'Please file a bug report at ' 'https://github.com/sebdah/dynamic-dynamodb/issues' ).format( table_name, gsi_name, exception, error.body['message'])) if (not retry_with_only_increase and exception == 'LimitExceededException'): logger.info( '{0} - GSI: {1} - Will retry to update provisioning ' 'with only increases'.format(table_name, gsi_name)) update_gsi_provisioning( table_name, table_key, gsi_name, gsi_key, reads, writes, retry_with_only_increase=True)
[ " Update provisioning on a global secondary index\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type table_key: str\n :param table_key: Table configuration option key name\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type gsi_key: str\n :param gsi_key: GSI configuration option key name\n :type reads: int\n :param reads: Number of reads to provision\n :type writes: int\n :param writes: Number of writes to provision\n :type retry_with_only_increase: bool\n :param retry_with_only_increase: Set to True to ensure only increases\n " ]
Please provide a description of the function:def table_gsis(table_name): try: desc = DYNAMODB_CONNECTION.describe_table(table_name)[u'Table'] except JSONResponseError: raise if u'GlobalSecondaryIndexes' in desc: return desc[u'GlobalSecondaryIndexes'] return []
[ " Returns a list of GSIs for the given table\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :returns: list -- List of GSI names\n " ]
Please provide a description of the function:def __get_connection_dynamodb(retries=3): connected = False region = get_global_option('region') while not connected: if (get_global_option('aws_access_key_id') and get_global_option('aws_secret_access_key')): logger.debug( 'Authenticating to DynamoDB using ' 'credentials in configuration file') connection = dynamodb2.connect_to_region( region, aws_access_key_id=get_global_option('aws_access_key_id'), aws_secret_access_key=get_global_option( 'aws_secret_access_key')) else: logger.debug( 'Authenticating using boto\'s authentication handler') connection = dynamodb2.connect_to_region(region) if not connection: if retries == 0: logger.error('Failed to connect to DynamoDB. Giving up.') raise else: logger.error( 'Failed to connect to DynamoDB. Retrying in 5 seconds') retries -= 1 time.sleep(5) else: connected = True logger.debug('Connected to DynamoDB in {0}'.format(region)) return connection
[ " Ensure connection to DynamoDB\n\n :type retries: int\n :param retries: Number of times to retry to connect to DynamoDB\n " ]
Please provide a description of the function:def __is_gsi_maintenance_window(table_name, gsi_name, maintenance_windows): # Example string '00:00-01:00,10:00-11:00' maintenance_window_list = [] for window in maintenance_windows.split(','): try: start, end = window.split('-', 1) except ValueError: logger.error( '{0} - GSI: {1} - ' 'Malformatted maintenance window'.format(table_name, gsi_name)) return False maintenance_window_list.append((start, end)) now = datetime.datetime.utcnow().strftime('%H%M') for maintenance_window in maintenance_window_list: start = ''.join(maintenance_window[0].split(':')) end = ''.join(maintenance_window[1].split(':')) if now >= start and now <= end: return True return False
[ " Checks that the current time is within the maintenance window\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type gsi_name: str\n :param gsi_name: Name of the GSI\n :type maintenance_windows: str\n :param maintenance_windows: Example: '00:00-01:00,10:00-11:00'\n :returns: bool -- True if within maintenance window\n " ]
Please provide a description of the function:def publish_gsi_notification( table_key, gsi_key, message, message_types, subject=None): topic = get_gsi_option(table_key, gsi_key, 'sns_topic_arn') if not topic: return for message_type in message_types: if (message_type in get_gsi_option(table_key, gsi_key, 'sns_message_types')): __publish(topic, message, subject) return
[ " Publish a notification for a specific GSI\n\n :type table_key: str\n :param table_key: Table configuration option key name\n :type gsi_key: str\n :param gsi_key: Table configuration option key name\n :type message: str\n :param message: Message to send via SNS\n :type message_types: list\n :param message_types:\n List with types:\n - scale-up\n - scale-down\n - high-throughput-alarm\n - low-throughput-alarm\n :type subject: str\n :param subject: Subject to use for e-mail notifications\n :returns: None\n " ]
Please provide a description of the function:def publish_table_notification(table_key, message, message_types, subject=None): topic = get_table_option(table_key, 'sns_topic_arn') if not topic: return for message_type in message_types: if message_type in get_table_option(table_key, 'sns_message_types'): __publish(topic, message, subject) return
[ " Publish a notification for a specific table\n\n :type table_key: str\n :param table_key: Table configuration option key name\n :type message: str\n :param message: Message to send via SNS\n :type message_types: list\n :param message_types:\n List with types:\n - scale-up\n - scale-down\n - high-throughput-alarm\n - low-throughput-alarm\n :type subject: str\n :param subject: Subject to use for e-mail notifications\n :returns: None\n " ]
Please provide a description of the function:def __publish(topic, message, subject=None): try: SNS_CONNECTION.publish(topic=topic, message=message, subject=subject) logger.info('Sent SNS notification to {0}'.format(topic)) except BotoServerError as error: logger.error('Problem sending SNS notification: {0}'.format( error.message)) return
[ " Publish a message to a SNS topic\n\n :type topic: str\n :param topic: SNS topic to publish the message to\n :type message: str\n :param message: Message to send via SNS\n :type subject: str\n :param subject: Subject to use for e-mail notifications\n :returns: None\n " ]
Please provide a description of the function:def __get_connection_SNS(): region = get_global_option('region') try: if (get_global_option('aws_access_key_id') and get_global_option('aws_secret_access_key')): logger.debug( 'Authenticating to SNS using ' 'credentials in configuration file') connection = sns.connect_to_region( region, aws_access_key_id=get_global_option( 'aws_access_key_id'), aws_secret_access_key=get_global_option( 'aws_secret_access_key')) else: logger.debug( 'Authenticating using boto\'s authentication handler') connection = sns.connect_to_region(region) except Exception as err: logger.error('Failed connecting to SNS: {0}'.format(err)) logger.error( 'Please report an issue at: ' 'https://github.com/sebdah/dynamic-dynamodb/issues') raise logger.debug('Connected to SNS in {0}'.format(region)) return connection
[ " Ensure connection to SNS " ]
Please provide a description of the function:def parse(): parser = argparse.ArgumentParser( description='Dynamic DynamoDB - Auto provisioning AWS DynamoDB') parser.add_argument( '-c', '--config', help='Read configuration from a configuration file') parser.add_argument( '--dry-run', action='store_true', help='Run without making any changes to your DynamoDB table') parser.add_argument( '--run-once', action='store_true', help='Run once and then exit Dynamic DynamoDB, instead of looping') parser.add_argument( '--show-config', action='store_true', help='Parse config files, print parsed data and then exit Dynamic DynamoDB') parser.add_argument( '--check-interval', type=int, help=) parser.add_argument( '--log-file', help='Send output to the given log file') parser.add_argument( '--log-level', choices=['debug', 'info', 'warning', 'error'], help='Log level to use (default: info)') parser.add_argument( '--log-config-file', help=( 'Use a custom Python logging configuration file. Overrides both ' '--log-level and --log-file.' )) parser.add_argument( '--version', action='store_true', help='Print current version number') parser.add_argument( '--aws-access-key-id', help="Override Boto configuration with the following AWS access key") parser.add_argument( '--aws-secret-access-key', help="Override Boto configuration with the following AWS secret key") daemon_ag = parser.add_argument_group('Daemon options') daemon_ag.add_argument( '--daemon', help=( 'Run Dynamic DynamoDB in daemon mode. Valid modes are ' '[start|stop|restart|foreground]')) daemon_ag.add_argument( '--instance', default='default', help=( 'Name of the Dynamic DynamoDB instance. ' 'Used to run multiple instances of Dynamic DynamoDB. ' 'Give each instance a unique name and control them separately ' 'with the --daemon flag. (default: default)')) daemon_ag.add_argument( '--pid-file-dir', default='/tmp', help='Directory where pid file is located in. Defaults to /tmp') dynamodb_ag = parser.add_argument_group('DynamoDB options') dynamodb_ag.add_argument( '-r', '--region', help='AWS region to operate in (default: us-east-1') dynamodb_ag.add_argument( '-t', '--table-name', help=( 'Table(s) to target. ' 'The name is treated as a regular expression. ' 'E.g. "^my_table.*$" or "my_table"')) r_scaling_ag = parser.add_argument_group('Read units scaling properties') r_scaling_ag.add_argument( '--reads-upper-threshold', type=int, help=) r_scaling_ag.add_argument( '--throttled-reads-upper-threshold', type=int, help=) r_scaling_ag.add_argument( '--reads-lower-threshold', type=int, help=) r_scaling_ag.add_argument( '--increase-reads-with', type=int, help=) r_scaling_ag.add_argument( '--decrease-reads-with', type=int, help=) r_scaling_ag.add_argument( '--increase-reads-unit', type=str, help='Do you want to scale in percent or units? (default: percent)') r_scaling_ag.add_argument( '--decrease-reads-unit', type=str, help='Do you want to scale in percent or units? 
(default: percent)') r_scaling_ag.add_argument( '--min-provisioned-reads', type=int, help=) r_scaling_ag.add_argument( '--max-provisioned-reads', type=int, help=) r_scaling_ag.add_argument( '--num-read-checks-before-scale-down', type=int, help=) r_scaling_ag.add_argument( '--num-read-checks-reset-percent', type=int, help=) w_scaling_ag = parser.add_argument_group('Write units scaling properties') w_scaling_ag.add_argument( '--writes-upper-threshold', type=int, help=) w_scaling_ag.add_argument( '--throttled-writes-upper-threshold', type=int, help=) w_scaling_ag.add_argument( '--writes-lower-threshold', type=int, help=) w_scaling_ag.add_argument( '--increase-writes-with', type=int, help=) w_scaling_ag.add_argument( '--decrease-writes-with', type=int, help=) w_scaling_ag.add_argument( '--increase-writes-unit', type=str, help='Do you want to scale in percent or units? (default: percent)') w_scaling_ag.add_argument( '--decrease-writes-unit', type=str, help='Do you want to scale in percent or units? (default: percent)') w_scaling_ag.add_argument( '--min-provisioned-writes', type=int, help=) w_scaling_ag.add_argument( '--max-provisioned-writes', type=int, help=) w_scaling_ag.add_argument( '--num-write-checks-before-scale-down', type=int, help=) w_scaling_ag.add_argument( '--num-write-checks-reset-percent', type=int, help=) args = parser.parse_args() # Print the version and quit if args.version: # Read the dynamic-dynamodb.conf configuration file internal_config_file = ConfigParser.RawConfigParser() internal_config_file.optionxform = lambda option: option internal_config_file.read( os.path.abspath( os.path.join( os.path.dirname(__file__), '../dynamic-dynamodb.conf'))) print 'Dynamic DynamoDB version: {0}'.format( internal_config_file.get('general', 'version')) sys.exit(0) # Replace any new values in the configuration configuration = {} for arg in args.__dict__: if args.__dict__.get(arg) is not None: configuration[arg] = args.__dict__.get(arg) return configuration
[ " Parse command line options ", "How many seconds should we wait between\n the checks (default: 300)", "Scale up the reads with --increase-reads-with if\n the currently consumed read units reaches this many\n percent (default: 90)", "Scale up the reads with --increase-reads-with if\n the count of throttled read events exceeds this\n count (default: 0)", "Scale down the reads with --decrease-reads-with if the\n currently consumed read units is as low as this\n percentage (default: 30)", "How much should we increase the read units with?\n (default: 50, max: 100 if --increase-reads-unit = percent)", "How much should we decrease the read units with?\n (default: 50)", "Minimum number of provisioned reads", "Maximum number of provisioned reads", "Number of consecutive checks that must meet criteria\n before a scale down event occurs", "Percentage Value that will cause the num_read_checks_before\n scale_down var to reset back to 0", "Scale up the writes with --increase-writes-with\n if the currently consumed write units reaches this\n many percent (default: 90)", "Scale up the reads with --increase-writes-with if\n the count of throttled write events exceeds this\n count (default: 0)", "Scale down the writes with --decrease-writes-with\n if the currently consumed write units is as low as this\n percentage (default: 30)", "How much should we increase the write units with?\n (default: 50,\n max: 100 if --increase-writes-unit = 'percent')", "How much should we decrease the write units with?\n (default: 50)", "Minimum number of provisioned writes", "Maximum number of provisioned writes", "Number of consecutive checks that must meet criteria\n before a scale down event occurs", "Percentage Value that will cause the num_write_checks_before\n scale_down var to reset back to 0" ]
Please provide a description of the function:def ensure_provisioning( table_name, key_name, num_consec_read_checks, num_consec_write_checks): if get_global_option('circuit_breaker_url') or get_table_option( key_name, 'circuit_breaker_url'): if circuit_breaker.is_open(table_name, key_name): logger.warning('Circuit breaker is OPEN!') return (0, 0) # Handle throughput alarm checks __ensure_provisioning_alarm(table_name, key_name) try: read_update_needed, updated_read_units, num_consec_read_checks = \ __ensure_provisioning_reads( table_name, key_name, num_consec_read_checks) write_update_needed, updated_write_units, num_consec_write_checks = \ __ensure_provisioning_writes( table_name, key_name, num_consec_write_checks) if read_update_needed: num_consec_read_checks = 0 if write_update_needed: num_consec_write_checks = 0 # Handle throughput updates if read_update_needed or write_update_needed: logger.info( '{0} - Changing provisioning to {1:d} ' 'read units and {2:d} write units'.format( table_name, int(updated_read_units), int(updated_write_units))) __update_throughput( table_name, key_name, updated_read_units, updated_write_units) else: logger.info('{0} - No need to change provisioning'.format( table_name)) except JSONResponseError: raise except BotoServerError: raise return num_consec_read_checks, num_consec_write_checks
[ " Ensure that provisioning is correct\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type key_name: str\n :param key_name: Configuration option key name\n :type num_consec_read_checks: int\n :param num_consec_read_checks: How many consecutive checks have we had\n :type num_consec_write_checks: int\n :param num_consec_write_checks: How many consecutive checks have we had\n :returns: (int, int) -- num_consec_read_checks, num_consec_write_checks\n " ]
Please provide a description of the function:def __calculate_always_decrease_rw_values( table_name, read_units, provisioned_reads, write_units, provisioned_writes): if read_units <= provisioned_reads and write_units <= provisioned_writes: return (read_units, write_units) if read_units < provisioned_reads: logger.info( '{0} - Reads could be decreased, but we are waiting for ' 'writes to get lower than the threshold before ' 'scaling down'.format(table_name)) read_units = provisioned_reads elif write_units < provisioned_writes: logger.info( '{0} - Writes could be decreased, but we are waiting for ' 'reads to get lower than the threshold before ' 'scaling down'.format(table_name)) write_units = provisioned_writes return (read_units, write_units)
[ " Calculate values for always-decrease-rw-together\n\n This will only return reads and writes decreases if both reads and writes\n are lower than the current provisioning\n\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type read_units: int\n :param read_units: New read unit provisioning\n :type provisioned_reads: int\n :param provisioned_reads: Currently provisioned reads\n :type write_units: int\n :param write_units: New write unit provisioning\n :type provisioned_writes: int\n :param provisioned_writes: Currently provisioned writes\n :returns: (int, int) -- (reads, writes)\n " ]
Please provide a description of the function:def __ensure_provisioning_reads(table_name, key_name, num_consec_read_checks): if not get_table_option(key_name, 'enable_reads_autoscaling'): logger.info( '{0} - Autoscaling of reads has been disabled'.format(table_name)) return False, dynamodb.get_provisioned_table_read_units(table_name), 0 update_needed = False try: lookback_window_start = get_table_option( key_name, 'lookback_window_start') lookback_period = get_table_option(key_name, 'lookback_period') current_read_units = dynamodb.get_provisioned_table_read_units( table_name) consumed_read_units_percent = \ table_stats.get_consumed_read_units_percent( table_name, lookback_window_start, lookback_period) throttled_read_count = \ table_stats.get_throttled_read_event_count( table_name, lookback_window_start, lookback_period) throttled_by_provisioned_read_percent = \ table_stats.get_throttled_by_provisioned_read_event_percent( table_name, lookback_window_start, lookback_period) throttled_by_consumed_read_percent = \ table_stats.get_throttled_by_consumed_read_percent( table_name, lookback_window_start, lookback_period) reads_upper_threshold = \ get_table_option(key_name, 'reads_upper_threshold') reads_lower_threshold = \ get_table_option(key_name, 'reads_lower_threshold') throttled_reads_upper_threshold = \ get_table_option(key_name, 'throttled_reads_upper_threshold') increase_reads_with = \ get_table_option(key_name, 'increase_reads_with') increase_reads_unit = \ get_table_option(key_name, 'increase_reads_unit') decrease_reads_with = \ get_table_option(key_name, 'decrease_reads_with') decrease_reads_unit = \ get_table_option(key_name, 'decrease_reads_unit') min_provisioned_reads = \ get_table_option(key_name, 'min_provisioned_reads') max_provisioned_reads = \ get_table_option(key_name, 'max_provisioned_reads') num_read_checks_before_scale_down = \ get_table_option(key_name, 'num_read_checks_before_scale_down') num_read_checks_reset_percent = \ get_table_option(key_name, 'num_read_checks_reset_percent') increase_throttled_by_provisioned_reads_unit = \ get_table_option( key_name, 'increase_throttled_by_provisioned_reads_unit') increase_throttled_by_provisioned_reads_scale = \ get_table_option( key_name, 'increase_throttled_by_provisioned_reads_scale') increase_throttled_by_consumed_reads_unit = \ get_table_option( key_name, 'increase_throttled_by_consumed_reads_unit') increase_throttled_by_consumed_reads_scale = \ get_table_option( key_name, 'increase_throttled_by_consumed_reads_scale') increase_consumed_reads_unit = \ get_table_option(key_name, 'increase_consumed_reads_unit') increase_consumed_reads_with = \ get_table_option(key_name, 'increase_consumed_reads_with') increase_consumed_reads_scale = \ get_table_option(key_name, 'increase_consumed_reads_scale') decrease_consumed_reads_unit = \ get_table_option(key_name, 'decrease_consumed_reads_unit') decrease_consumed_reads_with = \ get_table_option(key_name, 'decrease_consumed_reads_with') decrease_consumed_reads_scale = \ get_table_option(key_name, 'decrease_consumed_reads_scale') except JSONResponseError: raise except BotoServerError: raise # Set the updated units to the current read unit value updated_read_units = current_read_units # Reset consecutive reads if num_read_checks_reset_percent is reached if num_read_checks_reset_percent: if consumed_read_units_percent >= num_read_checks_reset_percent: logger.info( '{0} - Resetting the number of consecutive ' 'read checks. 
Reason: Consumed percent {1} is ' 'greater than reset percent: {2}'.format( table_name, consumed_read_units_percent, num_read_checks_reset_percent)) num_consec_read_checks = 0 # Exit if up scaling has been disabled if not get_table_option(key_name, 'enable_reads_up_scaling'): logger.debug( '{0} - Up scaling event detected. No action taken as scaling ' 'up reads has been disabled in the configuration'.format( table_name)) else: # If local/granular values not specified use global values increase_consumed_reads_unit = \ increase_consumed_reads_unit or increase_reads_unit increase_throttled_by_provisioned_reads_unit = \ increase_throttled_by_provisioned_reads_unit or increase_reads_unit increase_throttled_by_consumed_reads_unit = \ increase_throttled_by_consumed_reads_unit or increase_reads_unit increase_consumed_reads_with = \ increase_consumed_reads_with or increase_reads_with # Initialise variables to store calculated provisioning throttled_by_provisioned_calculated_provisioning = scale_reader( increase_throttled_by_provisioned_reads_scale, throttled_by_provisioned_read_percent) throttled_by_consumed_calculated_provisioning = scale_reader( increase_throttled_by_consumed_reads_scale, throttled_by_consumed_read_percent) consumed_calculated_provisioning = scale_reader( increase_consumed_reads_scale, consumed_read_units_percent) throttled_count_calculated_provisioning = 0 calculated_provisioning = 0 # Increase needed due to high throttled to provisioned ratio if throttled_by_provisioned_calculated_provisioning: if increase_throttled_by_provisioned_reads_unit == 'percent': throttled_by_provisioned_calculated_provisioning = \ calculators.increase_reads_in_percent( current_read_units, throttled_by_provisioned_calculated_provisioning, get_table_option(key_name, 'max_provisioned_reads'), consumed_read_units_percent, table_name) else: throttled_by_provisioned_calculated_provisioning = \ calculators.increase_reads_in_units( current_read_units, throttled_by_provisioned_calculated_provisioning, get_table_option(key_name, 'max_provisioned_reads'), consumed_read_units_percent, table_name) # Increase needed due to high throttled to consumed ratio if throttled_by_consumed_calculated_provisioning: if increase_throttled_by_consumed_reads_unit == 'percent': throttled_by_consumed_calculated_provisioning = \ calculators.increase_reads_in_percent( current_read_units, throttled_by_consumed_calculated_provisioning, get_table_option(key_name, 'max_provisioned_reads'), consumed_read_units_percent, table_name) else: throttled_by_consumed_calculated_provisioning = \ calculators.increase_reads_in_units( current_read_units, throttled_by_consumed_calculated_provisioning, get_table_option(key_name, 'max_provisioned_reads'), consumed_read_units_percent, table_name) # Increase needed due to high CU consumption if consumed_calculated_provisioning: if increase_consumed_reads_unit == 'percent': consumed_calculated_provisioning = \ calculators.increase_reads_in_percent( current_read_units, consumed_calculated_provisioning, get_table_option(key_name, 'max_provisioned_reads'), consumed_read_units_percent, table_name) else: consumed_calculated_provisioning = \ calculators.increase_reads_in_units( current_read_units, consumed_calculated_provisioning, get_table_option(key_name, 'max_provisioned_reads'), consumed_read_units_percent, table_name) elif (reads_upper_threshold and consumed_read_units_percent > reads_upper_threshold and not increase_consumed_reads_scale): if increase_consumed_reads_unit == 'percent': 
consumed_calculated_provisioning = \ calculators.increase_reads_in_percent( current_read_units, increase_consumed_reads_with, get_table_option(key_name, 'max_provisioned_reads'), consumed_read_units_percent, table_name) else: consumed_calculated_provisioning = \ calculators.increase_reads_in_units( current_read_units, increase_consumed_reads_with, get_table_option(key_name, 'max_provisioned_reads'), consumed_read_units_percent, table_name) # Increase needed due to high throttling if (throttled_reads_upper_threshold and throttled_read_count > throttled_reads_upper_threshold): if increase_reads_unit == 'percent': throttled_count_calculated_provisioning = \ calculators.increase_reads_in_percent( updated_read_units, increase_reads_with, get_table_option(key_name, 'max_provisioned_reads'), consumed_read_units_percent, table_name) else: throttled_count_calculated_provisioning = \ calculators.increase_reads_in_units( updated_read_units, increase_reads_with, get_table_option(key_name, 'max_provisioned_reads'), consumed_read_units_percent, table_name) # Determine which metric requires the most scaling if (throttled_by_provisioned_calculated_provisioning > calculated_provisioning): calculated_provisioning = \ throttled_by_provisioned_calculated_provisioning scale_reason = ( "due to throttled events by provisioned " "units threshold being exceeded") if (throttled_by_consumed_calculated_provisioning > calculated_provisioning): calculated_provisioning = \ throttled_by_consumed_calculated_provisioning scale_reason = ( "due to throttled events by consumed " "units threshold being exceeded") if consumed_calculated_provisioning > calculated_provisioning: calculated_provisioning = consumed_calculated_provisioning scale_reason = "due to consumed threshold being exceeded" if throttled_count_calculated_provisioning > calculated_provisioning: calculated_provisioning = throttled_count_calculated_provisioning scale_reason = "due to throttled events threshold being exceeded" if calculated_provisioning > current_read_units: logger.info( '{0} - Resetting the number of consecutive ' 'read checks. Reason: scale up {1}'.format( table_name, scale_reason)) num_consec_read_checks = 0 update_needed = True updated_read_units = calculated_provisioning # Decrease needed due to low CU consumption if not update_needed: # If local/granular values not specified use global values decrease_consumed_reads_unit = \ decrease_consumed_reads_unit or decrease_reads_unit decrease_consumed_reads_with = \ decrease_consumed_reads_with or decrease_reads_with # Initialise variables to store calculated provisioning consumed_calculated_provisioning = scale_reader_decrease( decrease_consumed_reads_scale, consumed_read_units_percent) calculated_provisioning = None # Exit if down scaling has been disabled if not get_table_option(key_name, 'enable_reads_down_scaling'): logger.debug( '{0} - Down scaling event detected. No action taken as scaling' ' down reads has been disabled in the configuration'.format( table_name)) # Exit if reads == 0% and downscaling has been disabled at 0% elif (consumed_read_units_percent == 0 and not get_table_option( key_name, 'allow_scaling_down_reads_on_0_percent')): logger.info( '{0} - Down scaling event detected. 
No action taken as scaling' ' down reads is not done when usage is at 0%'.format( table_name)) else: if consumed_calculated_provisioning: if decrease_consumed_reads_unit == 'percent': calculated_provisioning = \ calculators.decrease_reads_in_percent( updated_read_units, consumed_calculated_provisioning, get_table_option( key_name, 'min_provisioned_reads'), table_name) else: calculated_provisioning = \ calculators.decrease_reads_in_units( updated_read_units, consumed_calculated_provisioning, get_table_option( key_name, 'min_provisioned_reads'), table_name) elif (reads_lower_threshold and consumed_read_units_percent < reads_lower_threshold and not decrease_consumed_reads_scale): if decrease_consumed_reads_unit == 'percent': calculated_provisioning = \ calculators.decrease_reads_in_percent( updated_read_units, decrease_consumed_reads_with, get_table_option( key_name, 'min_provisioned_reads'), table_name) else: calculated_provisioning = \ calculators.decrease_reads_in_units( updated_read_units, decrease_consumed_reads_with, get_table_option( key_name, 'min_provisioned_reads'), table_name) if (calculated_provisioning and current_read_units != calculated_provisioning): num_consec_read_checks += 1 if num_consec_read_checks >= num_read_checks_before_scale_down: update_needed = True updated_read_units = calculated_provisioning # Never go over the configured max provisioning if max_provisioned_reads: if int(updated_read_units) > int(max_provisioned_reads): update_needed = True updated_read_units = int(max_provisioned_reads) logger.info( 'Will not increase reads over max-provisioned-reads ' 'limit ({0} reads)'.format(updated_read_units)) # Ensure that we have met the min-provisioning if min_provisioned_reads: if int(min_provisioned_reads) > int(updated_read_units): update_needed = True updated_read_units = int(min_provisioned_reads) logger.info( '{0} - Increasing reads to meet min-provisioned-reads ' 'limit ({1} reads)'.format(table_name, updated_read_units)) if calculators.is_consumed_over_proposed( current_read_units, updated_read_units, consumed_read_units_percent): update_needed = False updated_read_units = current_read_units logger.info( '{0} - Consumed is over proposed read units. Will leave table at ' 'current setting.'.format(table_name)) logger.info('{0} - Consecutive read checks {1}/{2}'.format( table_name, num_consec_read_checks, num_read_checks_before_scale_down)) return update_needed, updated_read_units, num_consec_read_checks
[ " Ensure that provisioning is correct\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type key_name: str\n :param key_name: Configuration option key name\n :type num_consec_read_checks: int\n :param num_consec_read_checks: How many consecutive checks have we had\n :returns: (bool, int, int)\n update_needed, updated_read_units, num_consec_read_checks\n " ]
Please provide a description of the function:def __ensure_provisioning_writes( table_name, key_name, num_consec_write_checks): if not get_table_option(key_name, 'enable_writes_autoscaling'): logger.info( '{0} - Autoscaling of writes has been disabled'.format(table_name)) return False, dynamodb.get_provisioned_table_write_units(table_name), 0 update_needed = False try: lookback_window_start = get_table_option( key_name, 'lookback_window_start') lookback_period = get_table_option(key_name, 'lookback_period') current_write_units = dynamodb.get_provisioned_table_write_units( table_name) consumed_write_units_percent = \ table_stats.get_consumed_write_units_percent( table_name, lookback_window_start, lookback_period) throttled_write_count = \ table_stats.get_throttled_write_event_count( table_name, lookback_window_start, lookback_period) throttled_by_provisioned_write_percent = \ table_stats.get_throttled_by_provisioned_write_event_percent( table_name, lookback_window_start, lookback_period) throttled_by_consumed_write_percent = \ table_stats.get_throttled_by_consumed_write_percent( table_name, lookback_window_start, lookback_period) writes_upper_threshold = \ get_table_option(key_name, 'writes_upper_threshold') writes_lower_threshold = \ get_table_option(key_name, 'writes_lower_threshold') throttled_writes_upper_threshold = \ get_table_option(key_name, 'throttled_writes_upper_threshold') increase_writes_unit = \ get_table_option(key_name, 'increase_writes_unit') increase_writes_with = \ get_table_option(key_name, 'increase_writes_with') decrease_writes_unit = \ get_table_option(key_name, 'decrease_writes_unit') decrease_writes_with = \ get_table_option(key_name, 'decrease_writes_with') min_provisioned_writes = \ get_table_option(key_name, 'min_provisioned_writes') max_provisioned_writes = \ get_table_option(key_name, 'max_provisioned_writes') num_write_checks_before_scale_down = \ get_table_option(key_name, 'num_write_checks_before_scale_down') num_write_checks_reset_percent = \ get_table_option(key_name, 'num_write_checks_reset_percent') increase_throttled_by_provisioned_writes_unit = \ get_table_option( key_name, 'increase_throttled_by_provisioned_writes_unit') increase_throttled_by_provisioned_writes_scale = \ get_table_option( key_name, 'increase_throttled_by_provisioned_writes_scale') increase_throttled_by_consumed_writes_unit = \ get_table_option( key_name, 'increase_throttled_by_consumed_writes_unit') increase_throttled_by_consumed_writes_scale = \ get_table_option( key_name, 'increase_throttled_by_consumed_writes_scale') increase_consumed_writes_unit = \ get_table_option(key_name, 'increase_consumed_writes_unit') increase_consumed_writes_with = \ get_table_option(key_name, 'increase_consumed_writes_with') increase_consumed_writes_scale = \ get_table_option(key_name, 'increase_consumed_writes_scale') decrease_consumed_writes_unit = \ get_table_option(key_name, 'decrease_consumed_writes_unit') decrease_consumed_writes_with = \ get_table_option(key_name, 'decrease_consumed_writes_with') decrease_consumed_writes_scale = \ get_table_option(key_name, 'decrease_consumed_writes_scale') except JSONResponseError: raise except BotoServerError: raise # Set the updated units to the current write unit value updated_write_units = current_write_units # Reset consecutive write count if num_write_checks_reset_percent # is reached if num_write_checks_reset_percent: if consumed_write_units_percent >= num_write_checks_reset_percent: logger.info( '{0} - Resetting the number of consecutive ' 'write checks. 
Reason: Consumed percent {1} is ' 'greater than reset percent: {2}'.format( table_name, consumed_write_units_percent, num_write_checks_reset_percent)) num_consec_write_checks = 0 # Exit if up scaling has been disabled if not get_table_option(key_name, 'enable_writes_up_scaling'): logger.debug( '{0} - Up scaling event detected. No action taken as scaling ' 'up writes has been disabled in the configuration'.format( table_name)) else: # If local/granular values not specified use global values increase_consumed_writes_unit = \ increase_consumed_writes_unit or increase_writes_unit increase_throttled_by_provisioned_writes_unit = ( increase_throttled_by_provisioned_writes_unit or increase_writes_unit) increase_throttled_by_consumed_writes_unit = \ increase_throttled_by_consumed_writes_unit or increase_writes_unit increase_consumed_writes_with = \ increase_consumed_writes_with or increase_writes_with # Initialise variables to store calculated provisioning throttled_by_provisioned_calculated_provisioning = scale_reader( increase_throttled_by_provisioned_writes_scale, throttled_by_provisioned_write_percent) throttled_by_consumed_calculated_provisioning = scale_reader( increase_throttled_by_consumed_writes_scale, throttled_by_consumed_write_percent) consumed_calculated_provisioning = scale_reader( increase_consumed_writes_scale, consumed_write_units_percent) throttled_count_calculated_provisioning = 0 calculated_provisioning = 0 # Increase needed due to high throttled to provisioned ratio if throttled_by_provisioned_calculated_provisioning: if increase_throttled_by_provisioned_writes_unit == 'percent': throttled_by_provisioned_calculated_provisioning = \ calculators.increase_writes_in_percent( current_write_units, throttled_by_provisioned_calculated_provisioning, get_table_option(key_name, 'max_provisioned_writes'), consumed_write_units_percent, table_name) else: throttled_by_provisioned_calculated_provisioning = \ calculators.increase_writes_in_units( current_write_units, throttled_by_provisioned_calculated_provisioning, get_table_option(key_name, 'max_provisioned_writes'), consumed_write_units_percent, table_name) # Increase needed due to high throttled to consumed ratio if throttled_by_consumed_calculated_provisioning: if increase_throttled_by_consumed_writes_unit == 'percent': throttled_by_consumed_calculated_provisioning = \ calculators.increase_writes_in_percent( current_write_units, throttled_by_consumed_calculated_provisioning, get_table_option(key_name, 'max_provisioned_writes'), consumed_write_units_percent, table_name) else: throttled_by_consumed_calculated_provisioning = \ calculators.increase_writes_in_units( current_write_units, throttled_by_consumed_calculated_provisioning, get_table_option(key_name, 'max_provisioned_writes'), consumed_write_units_percent, table_name) # Increase needed due to high CU consumption if consumed_calculated_provisioning: if increase_consumed_writes_unit == 'percent': consumed_calculated_provisioning = \ calculators.increase_writes_in_percent( current_write_units, consumed_calculated_provisioning, get_table_option(key_name, 'max_provisioned_writes'), consumed_write_units_percent, table_name) else: consumed_calculated_provisioning = \ calculators.increase_writes_in_units( current_write_units, consumed_calculated_provisioning, get_table_option(key_name, 'max_provisioned_writes'), consumed_write_units_percent, table_name) elif (writes_upper_threshold and consumed_write_units_percent > writes_upper_threshold and not increase_consumed_writes_scale): if 
increase_consumed_writes_unit == 'percent': consumed_calculated_provisioning = \ calculators.increase_writes_in_percent( current_write_units, increase_consumed_writes_with, get_table_option(key_name, 'max_provisioned_writes'), consumed_write_units_percent, table_name) else: consumed_calculated_provisioning = \ calculators.increase_writes_in_units( current_write_units, increase_consumed_writes_with, get_table_option(key_name, 'max_provisioned_writes'), consumed_write_units_percent, table_name) # Increase needed due to high throttling if (throttled_writes_upper_threshold and throttled_write_count > throttled_writes_upper_threshold): if increase_writes_unit == 'percent': throttled_count_calculated_provisioning = \ calculators.increase_writes_in_percent( updated_write_units, increase_writes_with, get_table_option(key_name, 'max_provisioned_writes'), consumed_write_units_percent, table_name) else: throttled_count_calculated_provisioning = \ calculators.increase_writes_in_units( updated_write_units, increase_writes_with, get_table_option(key_name, 'max_provisioned_writes'), consumed_write_units_percent, table_name) # Determine which metric requires the most scaling if (throttled_by_provisioned_calculated_provisioning > calculated_provisioning): calculated_provisioning = \ throttled_by_provisioned_calculated_provisioning scale_reason = ( "due to throttled events by provisioned " "units threshold being exceeded") if (throttled_by_consumed_calculated_provisioning > calculated_provisioning): calculated_provisioning = \ throttled_by_consumed_calculated_provisioning scale_reason = ( "due to throttled events by consumed " "units threshold being exceeded") if consumed_calculated_provisioning > calculated_provisioning: calculated_provisioning = consumed_calculated_provisioning scale_reason = "due to consumed threshold being exceeded" if throttled_count_calculated_provisioning > calculated_provisioning: calculated_provisioning = throttled_count_calculated_provisioning scale_reason = "due to throttled events threshold being exceeded" if calculated_provisioning > current_write_units: logger.info( '{0} - Resetting the number of consecutive ' 'write checks. Reason: scale up {1}'.format( table_name, scale_reason)) num_consec_write_checks = 0 update_needed = True updated_write_units = calculated_provisioning # Decrease needed due to low CU consumption if not update_needed: # If local/granular values not specified use global values decrease_consumed_writes_unit = \ decrease_consumed_writes_unit or decrease_writes_unit decrease_consumed_writes_with = \ decrease_consumed_writes_with or decrease_writes_with # Initialise variables to store calculated provisioning consumed_calculated_provisioning = scale_reader_decrease( decrease_consumed_writes_scale, consumed_write_units_percent) calculated_provisioning = None # Exit if down scaling has been disabled if not get_table_option(key_name, 'enable_writes_down_scaling'): logger.debug( '{0} - Down scaling event detected. No action taken as scaling' ' down writes has been disabled in the configuration'.format( table_name)) # Exit if writes == 0% and downscaling has been disabled at 0% elif (consumed_write_units_percent == 0 and not get_table_option( key_name, 'allow_scaling_down_writes_on_0_percent')): logger.info( '{0} - Down scaling event detected. 
No action taken as scaling' ' down writes is not done when usage is at 0%'.format( table_name)) # Exit if writes are still throttled elif (throttled_writes_upper_threshold and throttled_write_count > throttled_writes_upper_threshold): logger.info( '{0} - Down scaling event detected. No action taken as there' ' are still throttled writes'.format(table_name)) else: if consumed_calculated_provisioning: if decrease_consumed_writes_unit == 'percent': calculated_provisioning = \ calculators.decrease_writes_in_percent( updated_write_units, consumed_calculated_provisioning, get_table_option( key_name, 'min_provisioned_writes'), table_name) else: calculated_provisioning = \ calculators.decrease_writes_in_units( updated_write_units, consumed_calculated_provisioning, get_table_option( key_name, 'min_provisioned_writes'), table_name) elif (writes_lower_threshold and consumed_write_units_percent < writes_lower_threshold and not decrease_consumed_writes_scale): if decrease_consumed_writes_unit == 'percent': calculated_provisioning = \ calculators.decrease_writes_in_percent( updated_write_units, decrease_consumed_writes_with, get_table_option( key_name, 'min_provisioned_writes'), table_name) else: calculated_provisioning = \ calculators.decrease_writes_in_units( updated_write_units, decrease_consumed_writes_with, get_table_option( key_name, 'min_provisioned_writes'), table_name) if (calculated_provisioning and current_write_units != calculated_provisioning): num_consec_write_checks += 1 if num_consec_write_checks >= \ num_write_checks_before_scale_down: update_needed = True updated_write_units = calculated_provisioning # Never go over the configured max provisioning if max_provisioned_writes: if int(updated_write_units) > int(max_provisioned_writes): update_needed = True updated_write_units = int(max_provisioned_writes) logger.info( 'Will not increase writes over max-provisioned-writes ' 'limit ({0} writes)'.format(updated_write_units)) # Ensure that we have met the min-provisioning if min_provisioned_writes: if int(min_provisioned_writes) > int(updated_write_units): update_needed = True updated_write_units = int(min_provisioned_writes) logger.info( '{0} - Increasing writes to meet min-provisioned-writes ' 'limit ({1} writes)'.format(table_name, updated_write_units)) if calculators.is_consumed_over_proposed( current_write_units, updated_write_units, consumed_write_units_percent): update_needed = False updated_write_units = current_write_units logger.info( '{0} - Consumed is over proposed write units. Will leave table at ' 'current setting.'.format(table_name)) logger.info('{0} - Consecutive write checks {1}/{2}'.format( table_name, num_consec_write_checks, num_write_checks_before_scale_down)) return update_needed, updated_write_units, num_consec_write_checks
[ " Ensure that provisioning of writes is correct\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type key_name: str\n :param key_name: Configuration option key name\n :type num_consec_write_checks: int\n :param num_consec_write_checks: How many consecutive checks have we had\n :returns: (bool, int, int)\n update_needed, updated_write_units, num_consec_write_checks\n " ]
Please provide a description of the function:def __update_throughput(table_name, key_name, read_units, write_units): try: current_ru = dynamodb.get_provisioned_table_read_units(table_name) current_wu = dynamodb.get_provisioned_table_write_units(table_name) except JSONResponseError: raise # Check table status try: table_status = dynamodb.get_table_status(table_name) except JSONResponseError: raise logger.debug('{0} - Table status is {1}'.format(table_name, table_status)) if table_status != 'ACTIVE': logger.warning( '{0} - Not performing throughput changes when table ' 'is {1}'.format(table_name, table_status)) return # If this setting is True, we will only scale down when # BOTH reads AND writes are low if get_table_option(key_name, 'always_decrease_rw_together'): read_units, write_units = __calculate_always_decrease_rw_values( table_name, read_units, current_ru, write_units, current_wu) if read_units == current_ru and write_units == current_wu: logger.info('{0} - No changes to perform'.format(table_name)) return dynamodb.update_table_provisioning( table_name, key_name, int(read_units), int(write_units))
[ " Update throughput on the DynamoDB table\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type key_name: str\n :param key_name: Configuration option key name\n :type read_units: int\n :param read_units: New read unit provisioning\n :type write_units: int\n :param write_units: New write unit provisioning\n " ]
Please provide a description of the function:def __ensure_provisioning_alarm(table_name, key_name): lookback_window_start = get_table_option( key_name, 'lookback_window_start') lookback_period = get_table_option(key_name, 'lookback_period') consumed_read_units_percent = table_stats.get_consumed_read_units_percent( table_name, lookback_window_start, lookback_period) consumed_write_units_percent = table_stats.get_consumed_write_units_percent( table_name, lookback_window_start, lookback_period) reads_upper_alarm_threshold = \ get_table_option(key_name, 'reads_upper_alarm_threshold') reads_lower_alarm_threshold = \ get_table_option(key_name, 'reads_lower_alarm_threshold') writes_upper_alarm_threshold = \ get_table_option(key_name, 'writes_upper_alarm_threshold') writes_lower_alarm_threshold = \ get_table_option(key_name, 'writes_lower_alarm_threshold') # Check upper alarm thresholds upper_alert_triggered = False upper_alert_message = [] if 0 < reads_upper_alarm_threshold <= consumed_read_units_percent: upper_alert_triggered = True upper_alert_message.append( '{0} - Consumed Read Capacity {1:f}% ' 'was greater than or equal to the upper ' 'alarm threshold {2:f}%\n'.format( table_name, consumed_read_units_percent, reads_upper_alarm_threshold)) if 0 < writes_upper_alarm_threshold <= consumed_write_units_percent: upper_alert_triggered = True upper_alert_message.append( '{0} - Consumed Write Capacity {1:f}% ' 'was greater than or equal to the upper alarm ' 'threshold {2:f}%\n'.format( table_name, consumed_write_units_percent, writes_upper_alarm_threshold)) # Check lower alarm thresholds lower_alert_triggered = False lower_alert_message = [] if (reads_lower_alarm_threshold > 0 and consumed_read_units_percent < reads_lower_alarm_threshold): lower_alert_triggered = True lower_alert_message.append( '{0} - Consumed Read Capacity {1:f}% ' 'was below the lower alarm threshold {2:f}%\n'.format( table_name, consumed_read_units_percent, reads_lower_alarm_threshold)) if (writes_lower_alarm_threshold > 0 and consumed_write_units_percent < writes_lower_alarm_threshold): lower_alert_triggered = True lower_alert_message.append( '{0} - Consumed Write Capacity {1:f}% ' 'was below the lower alarm threshold {2:f}%\n'.format( table_name, consumed_write_units_percent, writes_lower_alarm_threshold)) # Send alert if needed if upper_alert_triggered: logger.info( '{0} - Will send high provisioning alert'.format(table_name)) sns.publish_table_notification( key_name, ''.join(upper_alert_message), ['high-throughput-alarm'], subject='ALARM: High Throughput for Table {0}'.format(table_name)) elif lower_alert_triggered: logger.info( '{0} - Will send low provisioning alert'.format(table_name)) sns.publish_table_notification( key_name, ''.join(lower_alert_message), ['low-throughput-alarm'], subject='ALARM: Low Throughput for Table {0}'.format(table_name)) else: logger.debug('{0} - Throughput alarm thresholds not crossed'.format( table_name))
[ " Ensure that provisioning alarm threshold is not exceeded\n\n :type table_name: str\n :param table_name: Name of the DynamoDB table\n :type key_name: str\n :param key_name: Configuration option key name\n " ]
Please provide a description of the function:def get_configuration(): # This is the dict we will return configuration = { 'global': {}, 'logging': {}, 'tables': ordereddict() } # Read the command line options cmd_line_options = command_line_parser.parse() # If a configuration file is specified, read that as well conf_file_options = None if 'config' in cmd_line_options: conf_file_options = config_file_parser.parse( cmd_line_options['config']) # Extract global config configuration['global'] = __get_global_options( cmd_line_options, conf_file_options) # Extract logging config configuration['logging'] = __get_logging_options( cmd_line_options, conf_file_options) # Extract table configuration # If the --table cmd line option is set, it indicates that only table # options from the command line should be used if 'table_name' in cmd_line_options: configuration['tables'] = __get_cmd_table_options(cmd_line_options) else: configuration['tables'] = __get_config_table_options(conf_file_options) # Ensure some basic rules __check_gsi_rules(configuration) __check_logging_rules(configuration) __check_table_rules(configuration) return configuration
[ " Get the configuration from command line and config files " ]
Please provide a description of the function:def __get_cmd_table_options(cmd_line_options): table_name = cmd_line_options['table_name'] options = {table_name: {}} for option in DEFAULT_OPTIONS['table'].keys(): options[table_name][option] = DEFAULT_OPTIONS['table'][option] if option in cmd_line_options: options[table_name][option] = cmd_line_options[option] return options
[ " Get all table options from the command line\n\n :type cmd_line_options: dict\n :param cmd_line_options: Dictionary with all command line options\n :returns: dict -- E.g. {'table_name': {}}\n " ]
Please provide a description of the function:def __get_config_table_options(conf_file_options): options = ordereddict() if not conf_file_options: return options for table_name in conf_file_options['tables']: options[table_name] = {} # Regular table options for option in DEFAULT_OPTIONS['table'].keys(): options[table_name][option] = DEFAULT_OPTIONS['table'][option] if option not in conf_file_options['tables'][table_name]: continue if option == 'sns_message_types': try: raw_list = conf_file_options['tables'][table_name][option] options[table_name][option] = \ [i.strip() for i in raw_list.split(',')] except: print( 'Error parsing the "sns-message-types" ' 'option: {0}'.format( conf_file_options['tables'][table_name][option])) else: options[table_name][option] = \ conf_file_options['tables'][table_name][option] # GSI specific options if 'gsis' in conf_file_options['tables'][table_name]: for gsi_name in conf_file_options['tables'][table_name]['gsis']: for option in DEFAULT_OPTIONS['gsi'].keys(): opt = DEFAULT_OPTIONS['gsi'][option] if 'gsis' not in options[table_name]: options[table_name]['gsis'] = {} if gsi_name not in options[table_name]['gsis']: options[table_name]['gsis'][gsi_name] = {} if (option not in conf_file_options[ 'tables'][table_name]['gsis'][gsi_name]): options[table_name]['gsis'][gsi_name][option] = opt continue if option == 'sns_message_types': try: raw_list = conf_file_options[ 'tables'][table_name]['gsis'][gsi_name][option] opt = [i.strip() for i in raw_list.split(',')] except: print( 'Error parsing the "sns-message-types" ' 'option: {0}'.format( conf_file_options[ 'tables'][table_name][ 'gsis'][gsi_name][option])) else: opt = conf_file_options[ 'tables'][table_name]['gsis'][gsi_name][option] options[table_name]['gsis'][gsi_name][option] = opt return options
[ " Get all table options from the config file\n\n :type conf_file_options: ordereddict\n :param conf_file_options: Dictionary with all config file options\n :returns: ordereddict -- E.g. {'table_name': {}}\n " ]
Please provide a description of the function:def __get_global_options(cmd_line_options, conf_file_options=None): options = {} for option in DEFAULT_OPTIONS['global'].keys(): options[option] = DEFAULT_OPTIONS['global'][option] if conf_file_options and option in conf_file_options: options[option] = conf_file_options[option] if cmd_line_options and option in cmd_line_options: options[option] = cmd_line_options[option] return options
[ " Get all global options\n\n :type cmd_line_options: dict\n :param cmd_line_options: Dictionary with all command line options\n :type conf_file_options: dict\n :param conf_file_options: Dictionary with all config file options\n :returns: dict\n " ]
Please provide a description of the function:def __get_logging_options(cmd_line_options, conf_file_options=None): options = {} for option in DEFAULT_OPTIONS['logging'].keys(): options[option] = DEFAULT_OPTIONS['logging'][option] if conf_file_options and option in conf_file_options: options[option] = conf_file_options[option] if cmd_line_options and option in cmd_line_options: options[option] = cmd_line_options[option] return options
[ " Get all logging options\n\n :type cmd_line_options: dict\n :param cmd_line_options: Dictionary with all command line options\n :type conf_file_options: dict\n :param conf_file_options: Dictionary with all config file options\n :returns: dict\n " ]
Please provide a description of the function:def __check_gsi_rules(configuration): for table_name in configuration['tables']: if 'gsis' not in configuration['tables'][table_name]: continue for gsi_name in configuration['tables'][table_name]['gsis']: gsi = configuration['tables'][table_name]['gsis'][gsi_name] # Check that increase/decrease units is OK valid_units = ['percent', 'units'] if gsi['increase_reads_unit'] not in valid_units: print( 'increase-reads-unit must be set to ' 'either percent or units') sys.exit(1) if gsi['decrease_reads_unit'] not in valid_units: print( 'decrease-reads-unit must be set to ' 'either percent or units') sys.exit(1) if gsi['increase_writes_unit'] not in valid_units: print( 'increase-writes-unit must be set to ' 'either percent or units') sys.exit(1) if gsi['decrease_writes_unit'] not in valid_units: print( 'decrease-writes-unit must be set to ' 'either percent or units') sys.exit(1) if 'increase_consumed_reads_unit' in gsi and gsi['increase_consumed_reads_unit'] and \ gsi['increase_consumed_reads_unit'] not in valid_units: print( 'increase-consumed-reads-unit must be set to ' 'either percent or units, or left unset') sys.exit(1) if 'increase_consumed_writes_unit' in gsi and gsi['increase_consumed_writes_unit'] and \ gsi['increase_consumed_writes_unit'] not in valid_units: print( 'increase-consumed-writes-unit must be set to ' 'either percent or units, or left unset') sys.exit(1) if ('increase_throttled_by_consumed_reads_unit' in gsi and gsi['increase_throttled_by_consumed_reads_unit'] and gsi['increase_throttled_by_consumed_reads_unit'] not in valid_units): print( 'increase-throttled-by-consumed-reads-unit must be set to ' 'either percent or units, or left unset') sys.exit(1) if ('increase_throttled_by_consumed_writes_unit' in gsi and gsi['increase_throttled_by_consumed_writes_unit'] and gsi['increase_throttled_by_consumed_writes_unit'] not in valid_units): print( 'increase-throttled-by-consumed-writes-unit must be set to ' 'either percent or units, or left unset') sys.exit(1) if ('increase_throttled_by_provisioned_reads_unit' in gsi and gsi['increase_throttled_by_provisioned_reads_unit'] and gsi['increase_throttled_by_provisioned_reads_unit'] not in valid_units): print( 'increase-throttled-by-provisioned-reads-unit must be set ' 'to either percent or units, or left unset') sys.exit(1) if ('increase_throttled_by_provisioned_writes_unit' in gsi and gsi['increase_throttled_by_provisioned_writes_unit'] and gsi['increase_throttled_by_provisioned_writes_unit'] not in valid_units): print( 'increase-throttled-by-provisioned-writes-unit must be set ' 'to either percent or units, or left unset') sys.exit(1) # Check lookback-window start if gsi['lookback_window_start'] < 1: print( 'lookback-window-start must be a value higher than 1, ' 'as DynamoDB sends CloudWatch data every minute') sys.exit(1) # Check sns-message-types valid_sns_message_types = [ 'scale-up', 'scale-down', 'high-throughput-alarm', 'low-throughput-alarm'] if gsi['sns_message_types']: for sns_type in gsi['sns_message_types']: if sns_type not in valid_sns_message_types: print('Warning: Invalid sns-message-type: {0}'.format( sns_type)) gsi['sns_message_types'].remove(sns_type) # Ensure values > 1 for some important configuration options options = [ 'reads_lower_threshold', 'reads_upper_threshold', 'increase_reads_with', 'decrease_reads_with', 'writes_lower_threshold', 'writes_upper_threshold', 'increase_writes_with', 'decrease_writes_with', 'min_provisioned_reads', 'max_provisioned_reads', 
'min_provisioned_writes', 'max_provisioned_writes', 'increase_consumed_reads_with', 'increase_consumed_writes_with', 'decrease_consumed_reads_with', 'decrease_consumed_writes_with' ] # Config options without a mandatory default # should be allowed a None value non_default = [ 'increase_consumed_reads_with', 'increase_consumed_writes_with', 'decrease_consumed_reads_with', 'decrease_consumed_writes_with' ] for option in options: if (option in non_default and option in gsi and gsi[option] and gsi[option] < 1): print('{0} may not be lower than 1 for GSI {1}'.format( option, gsi_name)) sys.exit(1) if (option in gsi and option not in non_default and gsi[option] < 1): print('{0} may not be lower than 1 for GSI {1}'.format( option, gsi_name)) sys.exit(1) if (int(gsi['min_provisioned_reads']) > int(gsi['max_provisioned_reads'])): print( 'min-provisioned-reads ({0}) may not be higher than ' 'max-provisioned-reads ({1}) for GSI {2}'.format( gsi['min_provisioned_reads'], gsi['max_provisioned_reads'], gsi_name)) sys.exit(1) elif (int(gsi['min_provisioned_writes']) > int(gsi['max_provisioned_writes'])): print( 'min-provisioned-writes ({0}) may not be higher than ' 'max-provisioned-writes ({1}) for GSI {2}'.format( gsi['min_provisioned_writes'], gsi['max_provisioned_writes'], gsi_name)) sys.exit(1)
[ " Do some basic checks on the configuration " ]
Please provide a description of the function:def __check_logging_rules(configuration): valid_log_levels = [ 'debug', 'info', 'warning', 'error' ] if configuration['logging']['log_level'].lower() not in valid_log_levels: print('Log level must be one of {0}'.format( ', '.join(valid_log_levels))) sys.exit(1)
[ " Check that the logging values are proper " ]
Please provide a description of the function:def __check_table_rules(configuration): for table_name in configuration['tables']: table = configuration['tables'][table_name] # Check that increase/decrease units is OK valid_units = ['percent', 'units'] if table['increase_reads_unit'] not in valid_units: print('increase-reads-unit must be set to either percent or units') sys.exit(1) if table['decrease_reads_unit'] not in valid_units: print('decrease-reads-unit must be set to either percent or units') sys.exit(1) if table['increase_writes_unit'] not in valid_units: print( 'increase-writes-unit must be set to either percent or units') sys.exit(1) if table['decrease_writes_unit'] not in valid_units: print( 'decrease-writes-unit must be set to either percent or units') sys.exit(1) if ('increase_consumed_reads_unit' in table and table['increase_consumed_reads_unit'] and table['increase_consumed_reads_unit'] not in valid_units): print( 'increase-consumed-reads-unit must be set to ' 'either percent or units, or left unset') sys.exit(1) if ('increase_consumed_writes_unit' in table and table['increase_consumed_writes_unit'] and table['increase_consumed_writes_unit'] not in valid_units): print( 'increase-consumed-writes-unit must be set to ' 'either percent or units, or left unset') sys.exit(1) if ('increase_throttled_by_consumed_reads_unit' in table and table['increase_throttled_by_consumed_reads_unit'] and table['increase_throttled_by_consumed_reads_unit'] not in valid_units): print( 'increase-throttled-by-consumed-reads-unit must be set to ' 'either percent or units, or left unset') sys.exit(1) if ('increase_throttled_by_consumed_writes_unit' in table and table['increase_throttled_by_consumed_writes_unit'] and table['increase_throttled_by_consumed_writes_unit'] not in valid_units): print( 'increase-throttled-by-consumed-writes-unit must be set to ' 'either percent or units, or left unset') sys.exit(1) if ('increase_throttled_by_provisioned_reads_unit' in table and table['increase_throttled_by_provisioned_reads_unit'] and table['increase_throttled_by_provisioned_reads_unit'] not in valid_units): print( 'increase-throttled-by-provisioned-reads-unit must be set to ' 'either percent or units, or left unset') sys.exit(1) if ('increase_throttled_by_provisioned_writes_unit' in table and table['increase_throttled_by_provisioned_writes_unit'] and table['increase_throttled_by_provisioned_writes_unit'] not in valid_units): print( 'increase-throttled-by-provisioned-writes-unit must be set to ' 'either percent or units, or left unset') sys.exit(1) # Check lookback-window start if table['lookback_window_start'] < 1: print( 'lookback-window-start must be a value higher than 1, ' 'as DynamoDB sends CloudWatch data every minute') sys.exit(1) # Check sns-message-types valid_sns_message_types = [ 'scale-up', 'scale-down', 'high-throughput-alarm', 'low-throughput-alarm'] if table['sns_message_types']: for sns_type in table['sns_message_types']: if sns_type not in valid_sns_message_types: print('Warning: Invalid sns-message-type: {0}'.format( sns_type)) table['sns_message_types'].remove(sns_type) # Ensure values > 0 for some important configuration options options = [ 'reads_lower_threshold', 'reads_upper_threshold', 'increase_reads_with', 'decrease_reads_with', 'writes_lower_threshold', 'writes_upper_threshold', 'increase_writes_with', 'decrease_writes_with', 'min_provisioned_reads', 'max_provisioned_reads', 'min_provisioned_writes', 'max_provisioned_writes', 'num_read_checks_before_scale_down', 
'num_write_checks_before_scale_down', 'increase_consumed_reads_with', 'increase_consumed_writes_with' ] # Config options without a mandatory default # should be allowed a None value non_default = [ 'increase_consumed_reads_with', 'increase_consumed_writes_with' ] for option in options: if (option in non_default and option in table and table[option] and table[option] < 1): print('{0} may not be lower than 1 for table {1}'.format( option, table_name)) sys.exit(1) if (option in table and option not in non_default and table[option] < 1): print('{0} may not be lower than 1 for table {1}'.format( option, table_name)) sys.exit(1) if (int(table['min_provisioned_reads']) > int(table['max_provisioned_reads'])): print( 'min_provisioned_reads ({0}) may not be higher than ' 'max_provisioned_reads ({1}) for table {2}'.format( table['min_provisioned_reads'], table['max_provisioned_reads'], table_name)) sys.exit(1) elif (int(table['min_provisioned_writes']) > int(table['max_provisioned_writes'])): print( 'min_provisioned_writes ({0}) may not be higher than ' 'max_provisioned_writes ({1}) for table {2}'.format( table['min_provisioned_writes'], table['max_provisioned_writes'], table_name)) sys.exit(1)
[ " Do some basic checks on the configuration " ]
Please provide a description of the function:def decrease_reads_in_percent( current_provisioning, percent, min_provisioned_reads, log_tag): percent = float(percent) decrease = int(float(current_provisioning)*(float(percent)/100)) updated_provisioning = current_provisioning - decrease min_provisioned_reads = __get_min_reads( current_provisioning, min_provisioned_reads, log_tag) if updated_provisioning < min_provisioned_reads: logger.info( '{0} - Reached provisioned reads min limit: {1:d}'.format( log_tag, int(min_provisioned_reads))) return min_provisioned_reads logger.debug( '{0} - Read provisioning will be decreased to {1:d} units'.format( log_tag, int(updated_provisioning))) return updated_provisioning
[ " Decrease the current_provisioning with percent %\n\n :type current_provisioning: int\n :param current_provisioning: The current provisioning\n :type percent: int\n :param percent: How many percent should we decrease with\n :type min_provisioned_reads: int\n :param min_provisioned_reads: Configured min provisioned reads\n :type log_tag: str\n :param log_tag: Prefix for the log\n :returns: int -- New provisioning value\n " ]
Please provide a description of the function:def decrease_reads_in_units( current_provisioning, units, min_provisioned_reads, log_tag): updated_provisioning = int(current_provisioning) - int(units) min_provisioned_reads = __get_min_reads( current_provisioning, min_provisioned_reads, log_tag) if updated_provisioning < min_provisioned_reads: logger.info( '{0} - Reached provisioned reads min limit: {1:d}'.format( log_tag, int(min_provisioned_reads))) return min_provisioned_reads logger.debug( '{0} - Read provisioning will be decreased to {1:d} units'.format( log_tag, int(updated_provisioning))) return updated_provisioning
[ " Decrease the current_provisioning with units units\n\n :type current_provisioning: int\n :param current_provisioning: The current provisioning\n :type units: int\n :param units: How many units should we decrease with\n :returns: int -- New provisioning value\n :type min_provisioned_reads: int\n :param min_provisioned_reads: Configured min provisioned reads\n :type log_tag: str\n :param log_tag: Prefix for the log\n " ]
Please provide a description of the function:def decrease_writes_in_percent( current_provisioning, percent, min_provisioned_writes, log_tag): percent = float(percent) decrease = int(float(current_provisioning)*(float(percent)/100)) updated_provisioning = current_provisioning - decrease min_provisioned_writes = __get_min_writes( current_provisioning, min_provisioned_writes, log_tag) if updated_provisioning < min_provisioned_writes: logger.info( '{0} - Reached provisioned writes min limit: {1:d}'.format( log_tag, int(min_provisioned_writes))) return min_provisioned_writes logger.debug( '{0} - Write provisioning will be decreased to {1:d} units'.format( log_tag, int(updated_provisioning))) return updated_provisioning
[ " Decrease the current_provisioning with percent %\n\n :type current_provisioning: int\n :param current_provisioning: The current provisioning\n :type percent: int\n :param percent: How many percent should we decrease with\n :returns: int -- New provisioning value\n :type min_provisioned_writes: int\n :param min_provisioned_writes: Configured min provisioned writes\n :type log_tag: str\n :param log_tag: Prefix for the log\n " ]
Please provide a description of the function:def decrease_writes_in_units( current_provisioning, units, min_provisioned_writes, log_tag): updated_provisioning = int(current_provisioning) - int(units) min_provisioned_writes = __get_min_writes( current_provisioning, min_provisioned_writes, log_tag) if updated_provisioning < min_provisioned_writes: logger.info( '{0} - Reached provisioned writes min limit: {1:d}'.format( log_tag, int(min_provisioned_writes))) return min_provisioned_writes logger.debug( '{0} - Write provisioning will be decreased to {1:d} units'.format( log_tag, int(updated_provisioning))) return updated_provisioning
[ " Decrease the current_provisioning with units units\n\n :type current_provisioning: int\n :param current_provisioning: The current provisioning\n :type units: int\n :param units: How many units should we decrease with\n :returns: int -- New provisioning value\n :type min_provisioned_writes: int\n :param min_provisioned_writes: Configured min provisioned writes\n :type log_tag: str\n :param log_tag: Prefix for the log\n " ]
Please provide a description of the function:def increase_reads_in_percent( current_provisioning, percent, max_provisioned_reads, consumed_read_units_percent, log_tag): current_provisioning = float(current_provisioning) consumed_read_units_percent = float(consumed_read_units_percent) percent = float(percent) consumption_based_current_provisioning = \ float(math.ceil(current_provisioning*(consumed_read_units_percent/100))) if consumption_based_current_provisioning > current_provisioning: increase = int( math.ceil(consumption_based_current_provisioning*(percent/100))) updated_provisioning = consumption_based_current_provisioning + increase else: increase = int(math.ceil(current_provisioning*(percent/100))) updated_provisioning = current_provisioning + increase if max_provisioned_reads > 0: if updated_provisioning > max_provisioned_reads: logger.info( '{0} - Reached provisioned reads max limit: {1}'.format( log_tag, max_provisioned_reads)) return max_provisioned_reads logger.debug( '{0} - Read provisioning will be increased to {1} units'.format( log_tag, updated_provisioning)) return updated_provisioning
[ " Increase the current_provisioning with percent %\n\n :type current_provisioning: int\n :param current_provisioning: The current provisioning\n :type percent: int\n :param percent: How many percent should we increase with\n :type max_provisioned_reads: int\n :param max_provisioned_reads: Configured max provisioned reads\n :returns: int -- New provisioning value\n :type consumed_read_units_percent: float\n :param consumed_read_units_percent: Number of consumed read units\n :type log_tag: str\n :param log_tag: Prefix for the log\n " ]
Please provide a description of the function:def increase_reads_in_units( current_provisioning, units, max_provisioned_reads, consumed_read_units_percent, log_tag): units = int(units) current_provisioning = float(current_provisioning) consumed_read_units_percent = float(consumed_read_units_percent) consumption_based_current_provisioning = \ int(math.ceil(current_provisioning*(consumed_read_units_percent/100))) if consumption_based_current_provisioning > current_provisioning: updated_provisioning = consumption_based_current_provisioning + units else: updated_provisioning = int(current_provisioning) + units if max_provisioned_reads > 0: if updated_provisioning > max_provisioned_reads: logger.info( '{0} - Reached provisioned reads max limit: {1}'.format( log_tag, max_provisioned_reads)) return max_provisioned_reads logger.debug( '{0} - Read provisioning will be increased to {1:d} units'.format( log_tag, int(updated_provisioning))) return updated_provisioning
[ " Increase the current_provisioning with units units\n\n :type current_provisioning: int\n :param current_provisioning: The current provisioning\n :type units: int\n :param units: How many units should we increase with\n :returns: int -- New provisioning value\n :type max_provisioned_reads: int\n :param max_provisioned_reads: Configured max provisioned reads\n :returns: int -- New provisioning value\n :type consumed_read_units_percent: float\n :param consumed_read_units_percent: Number of consumed read units\n :type log_tag: str\n :param log_tag: Prefix for the log\n " ]
Please provide a description of the function:def increase_writes_in_percent( current_provisioning, percent, max_provisioned_writes, consumed_write_units_percent, log_tag): current_provisioning = float(current_provisioning) consumed_write_units_percent = float(consumed_write_units_percent) percent = float(percent) consumption_based_current_provisioning = \ int(math.ceil(current_provisioning*(consumed_write_units_percent/100))) if consumption_based_current_provisioning > current_provisioning: increase = int( math.ceil(consumption_based_current_provisioning*(percent/100))) updated_provisioning = consumption_based_current_provisioning + increase else: increase = int(math.ceil(current_provisioning*(float(percent)/100))) updated_provisioning = current_provisioning + increase if max_provisioned_writes > 0: if updated_provisioning > max_provisioned_writes: logger.info( '{0} - Reached provisioned writes max limit: {1}'.format( log_tag, max_provisioned_writes)) return max_provisioned_writes logger.debug( '{0} - Write provisioning will be increased to {1:d} units'.format( log_tag, int(updated_provisioning))) return updated_provisioning
[ " Increase the current_provisioning with percent %\n\n :type current_provisioning: int\n :param current_provisioning: The current provisioning\n :type percent: int\n :param percent: How many percent should we increase with\n :returns: int -- New provisioning value\n :type max_provisioned_writes: int\n :param max_provisioned_writes: Configured max provisioned writes\n :type consumed_write_units_percent: float\n :param consumed_write_units_percent: Number of consumed write units\n :type log_tag: str\n :param log_tag: Prefix for the log\n " ]
Please provide a description of the function:def increase_writes_in_units( current_provisioning, units, max_provisioned_writes, consumed_write_units_percent, log_tag): units = int(units) current_provisioning = float(current_provisioning) consumed_write_units_percent = float(consumed_write_units_percent) consumption_based_current_provisioning = \ int(math.ceil(current_provisioning*(consumed_write_units_percent/100))) if consumption_based_current_provisioning > current_provisioning: updated_provisioning = consumption_based_current_provisioning + units else: updated_provisioning = int(current_provisioning) + units if max_provisioned_writes > 0: if updated_provisioning > max_provisioned_writes: logger.info( '{0} - Reached provisioned writes max limit: {1}'.format( log_tag, max_provisioned_writes)) return max_provisioned_writes logger.debug( '{0} - Write provisioning will be increased to {1:d} units'.format( log_tag, int(updated_provisioning))) return updated_provisioning
[ " Increase the current_provisioning with units units\n\n :type current_provisioning: int\n :param current_provisioning: The current provisioning\n :type units: int\n :param units: How many units should we increase with\n :returns: int -- New provisioning value\n :type max_provisioned_writes: int\n :param max_provisioned_writes: Configured max provisioned writes\n :type consumed_write_units_percent: float\n :param consumed_write_units_percent: Number of consumed write units\n :type log_tag: str\n :param log_tag: Prefix for the log\n " ]
Please provide a description of the function:def is_consumed_over_proposed( current_provisioning, proposed_provisioning, consumed_units_percent): consumption_based_current_provisioning = \ int(math.ceil(current_provisioning*(consumed_units_percent/100))) return consumption_based_current_provisioning > proposed_provisioning
[ "\n Determines if the currently consumed capacity is over the proposed capacity\n for this table\n\n :type current_provisioning: int\n :param current_provisioning: The current provisioning\n :type proposed_provisioning: int\n :param proposed_provisioning: New provisioning\n :type consumed_units_percent: float\n :param consumed_units_percent: Percent of consumed units\n :returns: bool - if consumed is over max\n " ]
Please provide a description of the function:def __get_min_reads(current_provisioning, min_provisioned_reads, log_tag): # Fallback value to ensure that we always have at least 1 read reads = 1 if min_provisioned_reads: reads = int(min_provisioned_reads) if reads > int(current_provisioning * 2): reads = int(current_provisioning * 2) logger.debug( '{0} - ' 'Cannot reach min-provisioned-reads as max scale up ' 'is 100% of current provisioning'.format(log_tag)) logger.debug( '{0} - Setting min provisioned reads to {1}'.format( log_tag, reads)) return reads
[ " Get the minimum number of reads to current_provisioning\n\n :type current_provisioning: int\n :param current_provisioning: Current provisioned reads\n :type min_provisioned_reads: int\n :param min_provisioned_reads: Configured min provisioned reads\n :type log_tag: str\n :param log_tag: Prefix for the log\n :returns: int -- Minimum number of reads\n " ]
Please provide a description of the function:def __get_min_writes(current_provisioning, min_provisioned_writes, log_tag): # Fallback value to ensure that we always have at least 1 write writes = 1 if min_provisioned_writes: writes = int(min_provisioned_writes) if writes > int(current_provisioning * 2): writes = int(current_provisioning * 2) logger.debug( '{0} - ' 'Cannot reach min-provisioned-writes as max scale up ' 'is 100% of current provisioning'.format(log_tag)) logger.debug( '{0} - Setting min provisioned writes to {1}'.format( log_tag, writes)) return writes
[ " Get the minimum number of writes to current_provisioning\n\n :type current_provisioning: int\n :param current_provisioning: Current provisioned writes\n :type min_provisioned_writes: int\n :param min_provisioned_writes: Configured min provisioned writes\n :type log_tag: str\n :param log_tag: Prefix for the log\n :returns: int -- Minimum number of writes\n " ]
Please provide a description of the function:def restart(self, *args, **kwargs): self.stop() self.start(*args, **kwargs)
[ " Restart the daemon " ]
Please provide a description of the function:def __parse_options(config_file, section, options): configuration = {} for option in options: try: if option.get('type') == 'str': configuration[option.get('key')] = \ config_file.get(section, option.get('option')) elif option.get('type') == 'int': try: configuration[option.get('key')] = \ config_file.getint(section, option.get('option')) except ValueError: print('Error: Expected an integer value for {0}'.format( option.get('option'))) sys.exit(1) elif option.get('type') == 'float': try: configuration[option.get('key')] = \ config_file.getfloat(section, option.get('option')) except ValueError: print('Error: Expected a float value for {0}'.format( option.get('option'))) sys.exit(1) elif option.get('type') == 'bool': try: configuration[option.get('key')] = \ config_file.getboolean(section, option.get('option')) except ValueError: print('Error: Expected a boolean value for {0}'.format( option.get('option'))) sys.exit(1) elif option.get('type') == 'dict': configuration[option.get('key')] = \ ast.literal_eval( config_file.get(section, option.get('option'))) else: configuration[option.get('key')] = \ config_file.get(section, option.get('option')) except ConfigParser.NoOptionError: if option.get('required'): print('Missing [{0}] option "{1}" in configuration'.format( section, option.get('option'))) sys.exit(1) return configuration
[ " Parse the section options\n\n :type config_file: ConfigParser object\n :param config_file: The config file object to use\n :type section: str\n :param section: Which section to read in the configuration file\n :type options: list of dicts\n :param options:\n A list of options to parse. Example list::\n [{\n 'key': 'aws_access_key_id',\n 'option': 'aws-access-key-id',\n 'required': False,\n 'type': str\n }]\n :returns: dict\n " ]
Please provide a description of the function:def parse(config_path):
    config_path = os.path.expanduser(config_path)

    # Read the configuration file
    config_file = ConfigParser.RawConfigParser()
    config_file.SECTCRE = re.compile(r"\[ *(?P<header>.*) *\]")
    config_file.optionxform = lambda option: option
    config_file.read(config_path)

    # Start from empty dicts so the merge at the end of this function
    # does not raise NameError when [global] or [logging] is missing
    global_config = {}
    logging_config = {}

    #
    # Handle [global]
    #
    if 'global' in config_file.sections():
        global_config = __parse_options(
            config_file,
            'global',
            [
                {
                    'key': 'aws_access_key_id',
                    'option': 'aws-access-key-id',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'aws_secret_access_key',
                    'option': 'aws-secret-access-key-id',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'region',
                    'option': 'region',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'check_interval',
                    'option': 'check-interval',
                    'required': False,
                    'type': 'int'
                },
                {
                    'key': 'circuit_breaker_url',
                    'option': 'circuit-breaker-url',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'circuit_breaker_timeout',
                    'option': 'circuit-breaker-timeout',
                    'required': False,
                    'type': 'float'
                },
            ])

    #
    # Handle [logging]
    #
    if 'logging' in config_file.sections():
        logging_config = __parse_options(
            config_file,
            'logging',
            [
                {
                    'key': 'log_level',
                    'option': 'log-level',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'log_file',
                    'option': 'log-file',
                    'required': False,
                    'type': 'str'
                },
                {
                    'key': 'log_config_file',
                    'option': 'log-config-file',
                    'required': False,
                    'type': 'str'
                }
            ])

    if 'default_options' in config_file.sections():
        # nothing is required in defaults, so we set required to False
        default_config_options = deepcopy(TABLE_CONFIG_OPTIONS)
        for item in default_config_options:
            item['required'] = False

        default_options = __parse_options(
            config_file,
            'default_options',
            default_config_options)

        # if we've got a default, set required to False for table parsing
        for item in TABLE_CONFIG_OPTIONS:
            if item['key'] in default_options:
                item['required'] = False
    else:
        default_options = {}

    #
    # Handle [table: <table_name>]
    #
    table_config = {'tables': ordereddict()}

    # Find the table definitions
    found_table = False
    for current_section in config_file.sections():
        if current_section.rsplit(':', 1)[0] != 'table':
            continue

        found_table = True
        current_table_name = current_section.rsplit(':', 1)[1].strip()
        table_config['tables'][current_table_name] = \
            dict(default_options.items() + __parse_options(
                config_file,
                current_section,
                TABLE_CONFIG_OPTIONS).items())

    if not found_table:
        print('Could not find a [table: <table_name>] section in {0}'.format(
            config_path))
        sys.exit(1)

    # Find GSI definitions - this allows GSIs to be defined before the
    # table definitions; we don't worry about parsing everything twice here
    for current_section in config_file.sections():
        try:
            header1, gsi_key, header2, table_key = current_section.split(' ')
        except ValueError:
            continue

        if header1 != 'gsi:':
            continue

        if table_key not in table_config['tables']:
            print('No table configuration matching {0} found.'.format(
                table_key))
            sys.exit(1)

        if 'gsis' not in table_config['tables'][table_key]:
            table_config['tables'][table_key]['gsis'] = {}

        table_config['tables'][table_key]['gsis'][gsi_key] = \
            ordereddict(default_options.items() + __parse_options(
                config_file,
                current_section,
                TABLE_CONFIG_OPTIONS).items())

    return ordereddict(
        global_config.items() +
        logging_config.items() +
        table_config.items())
[ " Parse the configuration file\n\n :type config_path: str\n :param config_path: Path to the configuration file\n " ]
Please provide a description of the function:def main():
    try:
        if get_global_option('show_config'):
            print json.dumps(config.get_configuration(), indent=2)
        elif get_global_option('daemon'):
            daemon = DynamicDynamoDBDaemon(
                '{0}/dynamic-dynamodb.{1}.pid'.format(
                    get_global_option('pid_file_dir'),
                    get_global_option('instance')))

            if get_global_option('daemon') == 'start':
                logger.debug('Starting daemon')
                try:
                    daemon.start()
                    logger.info('Daemon started')
                except IOError as error:
                    logger.error('Could not create pid file: {0}'.format(error))
                    logger.error('Daemon not started')
            elif get_global_option('daemon') == 'stop':
                logger.debug('Stopping daemon')
                daemon.stop()
                logger.info('Daemon stopped')
                sys.exit(0)

            elif get_global_option('daemon') == 'restart':
                logger.debug('Restarting daemon')
                daemon.restart()
                logger.info('Daemon restarted')

            elif get_global_option('daemon') in ['foreground', 'fg']:
                logger.debug('Starting daemon in foreground')
                daemon.run()
                logger.info('Daemon started in foreground')

            else:
                print(
                    'Valid options for --daemon are start, '
                    'stop, restart, foreground and fg')
                sys.exit(1)
        else:
            if get_global_option('run_once'):
                execute()
            else:
                while True:
                    execute()

    except Exception as error:
        logger.exception(error)
[ " Main function called from dynamic-dynamodb " ]
Please provide a description of the function:def execute():
    boto_server_error_retries = 3

    # Ensure provisioning
    for table_name, table_key in sorted(dynamodb.get_tables_and_gsis()):
        try:
            table_num_consec_read_checks = \
                CHECK_STATUS['tables'][table_name]['reads']
        except KeyError:
            table_num_consec_read_checks = 0

        try:
            table_num_consec_write_checks = \
                CHECK_STATUS['tables'][table_name]['writes']
        except KeyError:
            table_num_consec_write_checks = 0

        try:
            # The return values show how many times the scale-down criteria
            # have been met. This is coupled with the config option
            # "num_intervals_scale_down" to delay the scale-down
            table_num_consec_read_checks, table_num_consec_write_checks = \
                table.ensure_provisioning(
                    table_name,
                    table_key,
                    table_num_consec_read_checks,
                    table_num_consec_write_checks)

            CHECK_STATUS['tables'][table_name] = {
                'reads': table_num_consec_read_checks,
                'writes': table_num_consec_write_checks
            }

            gsi_names = set()
            # Collect GSIs whose names match the configured regexp keys
            for gsi_instance in dynamodb.table_gsis(table_name):
                gsi_name = gsi_instance[u'IndexName']

                try:
                    gsi_keys = get_table_option(table_key, 'gsis').keys()
                except AttributeError:
                    # Continue if there are no GSIs configured
                    continue

                for gsi_key in gsi_keys:
                    try:
                        if re.match(gsi_key, gsi_name):
                            logger.debug(
                                'Table {0} GSI {1} matches '
                                'GSI config key {2}'.format(
                                    table_name, gsi_name, gsi_key))
                            gsi_names.add((gsi_name, gsi_key))
                    except re.error:
                        logger.error(
                            'Invalid regular expression: "{0}"'.format(
                                gsi_key))
                        sys.exit(1)

            for gsi_name, gsi_key in sorted(gsi_names):
                unique_gsi_name = ':'.join([table_name, gsi_name])

                try:
                    gsi_num_consec_read_checks = \
                        CHECK_STATUS['gsis'][unique_gsi_name]['reads']
                except KeyError:
                    gsi_num_consec_read_checks = 0

                try:
                    gsi_num_consec_write_checks = \
                        CHECK_STATUS['gsis'][unique_gsi_name]['writes']
                except KeyError:
                    gsi_num_consec_write_checks = 0

                gsi_num_consec_read_checks, gsi_num_consec_write_checks = \
                    gsi.ensure_provisioning(
                        table_name,
                        table_key,
                        gsi_name,
                        gsi_key,
                        gsi_num_consec_read_checks,
                        gsi_num_consec_write_checks)

                CHECK_STATUS['gsis'][unique_gsi_name] = {
                    'reads': gsi_num_consec_read_checks,
                    'writes': gsi_num_consec_write_checks
                }

        except JSONResponseError as error:
            exception = error.body['__type'].split('#')[1]

            if exception == 'ResourceNotFoundException':
                logger.error(
                    'Table {0} does not exist anymore'.format(table_name))
                continue

        except BotoServerError as error:
            if boto_server_error_retries > 0:
                logger.error(
                    'Unknown boto error. Status: "{0}". '
                    'Reason: "{1}". Message: {2}'.format(
                        error.status,
                        error.reason,
                        error.message))
                logger.error(
                    'Please file a bug report if this error persists')
                boto_server_error_retries -= 1
                continue
            else:
                raise

    # Sleep between the checks
    if not get_global_option('run_once'):
        logger.debug('Sleeping {0} seconds until next check'.format(
            get_global_option('check_interval')))
        time.sleep(get_global_option('check_interval'))
[ " Ensure provisioning " ]
Please provide a description of the function:def init_logging(debug=False, logfile=None): loglevel = logging.DEBUG if debug else logging.INFO logformat = '%(asctime)s %(name)s: %(levelname)s: %(message)s' formatter = logging.Formatter(logformat) stderr = logging.StreamHandler() stderr.setFormatter(formatter) root = logging.getLogger() root.setLevel(loglevel) root.handlers = [stderr] if logfile: fhandler = logging.FileHandler(logfile) fhandler.setFormatter(formatter) root.addHandler(fhandler)
[ "Initialize logging." ]
Please provide a description of the function:def decode_tve_parameter(data):
    (nontve,) = struct.unpack(nontve_header, data[:nontve_header_len])
    if nontve == 1023:
        # Type 1023 is an LLRP Custom Parameter (vendor extension)
        (size,) = struct.unpack(
            '!H', data[nontve_header_len:nontve_header_len+2])
        (subtype,) = struct.unpack('!H', data[size-4:size-2])
        param_name, param_fmt = ext_param_formats[subtype]
        (unpacked,) = struct.unpack(param_fmt, data[size-2:size])
        return {param_name: unpacked}, size

    # decode the TVE field's header (1 bit "reserved" + 7-bit type)
    (msgtype,) = struct.unpack(tve_header, data[:tve_header_len])
    if not msgtype & 0b10000000:
        # not a TV-encoded param
        return None, 0
    msgtype = msgtype & 0x7f
    try:
        param_name, param_fmt = tve_param_formats[msgtype]
        logger.debug('found %s (type=%s)', param_name, msgtype)
    except KeyError:
        return None, 0

    # decode the body
    nbytes = struct.calcsize(param_fmt)
    end = tve_header_len + nbytes
    try:
        unpacked = struct.unpack(param_fmt, data[tve_header_len:end])
        return {param_name: unpacked}, end
    except struct.error:
        return None, 0
[ "Generic byte decoding function for TVE parameters.\n\n Given an array of bytes, tries to interpret a TVE parameter from the\n beginning of the array. Returns the decoded data and the number of bytes\n it read." ]
Please provide a description of the function:def tagReportCallback(llrpMsg): global tagReport tags = llrpMsg.msgdict['RO_ACCESS_REPORT']['TagReportData'] if len(tags): logger.info('saw tag(s): %s', pprint.pformat(tags)) else: logger.info('no tags seen') return for tag in tags: tagReport += tag['TagSeenCount'][0] if "OpSpecResult" in tag: # copy the binary data to the standard output stream data = tag["OpSpecResult"].get("ReadData") if data: if sys.version_info.major < 3: sys.stdout.write(data) else: sys.stdout.buffer.write(data) # bytes logger.debug("hex data: %s", binascii.hexlify(data))
[ "Function to run each time the reader reports seeing tags." ]
Please provide a description of the function:def tag_report_cb(llrp_msg): global numtags tags = llrp_msg.msgdict['RO_ACCESS_REPORT']['TagReportData'] if len(tags): logger.info('saw tag(s): %s', pprint.pformat(tags)) for tag in tags: numtags += tag['TagSeenCount'][0] else: logger.info('no tags seen') return
[ "Function to run each time the reader reports seeing tags." ]