def sentences(quantity=2, as_list=False):
"""Return random sentences."""
result = [sntc.strip() for sntc in
random.sample(get_dictionary('lorem_ipsum'), quantity)]
if as_list:
return result
else:
return ' '.join(result) |
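# A minimal usage sketch for the generator above. In the real package, `random`
# and `get_dictionary` are module-level imports; the tiny dictionary stub below
# is hypothetical and exists only so the example runs standalone.
import random

def get_dictionary(name):
    # hypothetical stand-in for the package's dictionary loader
    return ['Lorem ipsum dolor sit amet.', 'Consectetur adipiscing elit.',
            'Sed do eiusmod tempor incididunt ut labore.']

print(sentences(quantity=2))                # two random sentences joined by a space
print(sentences(quantity=2, as_list=True))  # the same call returned as a list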
def paragraph(separator='\n\n', wrap_start='', wrap_end='',
html=False, sentences_quantity=3):
"""Return a random paragraph."""
return paragraphs(quantity=1, separator=separator, wrap_start=wrap_start,
wrap_end=wrap_end, html=html,
sentences_quantity=sentences_quantity) |
def paragraphs(quantity=2, separator='\n\n', wrap_start='', wrap_end='',
html=False, sentences_quantity=3, as_list=False):
"""Return random paragraphs."""
if html:
wrap_start = '<p>'
wrap_end = '</p>'
separator = '\n\n'
result = []
# Python 2/3 compatibility: xrange is only defined on Python 2
try:
range_fn = xrange
except NameError:
range_fn = range
for _ in range_fn(0, quantity):
result.append(wrap_start + sentences(sentences_quantity) + wrap_end)
if as_list:
return result
else:
return separator.join(result) |
def _to_lower_alpha_only(s):
"""Return a lowercased string with non alphabetic chars removed.
White spaces are not to be removed."""
s = re.sub(r'\n', ' ', s.lower())
return re.sub(r'[^a-z\s]', '', s) |
def characters(quantity=10):
"""Return random characters."""
line = map(_to_lower_alpha_only,
''.join(random.sample(get_dictionary('lorem_ipsum'), quantity)))
return ''.join(line)[:quantity] |
def text(what="sentence", *args, **kwargs):
"""An aggregator for all above defined public methods."""
if what == "character":
return character(*args, **kwargs)
elif what == "characters":
return characters(*args, **kwargs)
elif what == "word":
return word(*args, **kwargs)
elif what == "words":
return words(*args, **kwargs)
elif what == "sentence":
return sentence(*args, **kwargs)
elif what == "sentences":
return sentences(*args, **kwargs)
elif what == "paragraph":
return paragraph(*args, **kwargs)
elif what == "paragraphs":
return paragraphs(*args, **kwargs)
elif what == "title":
return title(*args, **kwargs)
else:
raise NameError('No such method') |
def user_name(with_num=False):
"""Return a random user name.
Basically it's lowercased result of
:py:func:`~forgery_py.forgery.name.first_name()` with a number appended
if `with_num`.
"""
result = first_name()
if with_num:
result += str(random.randint(63, 94))
return result.lower() |
def domain_name():
"""Return a random domain name.
Lowercased result of :py:func:`~forgery_py.forgery.name.company_name()`
plus :py:func:`~top_level_domain()`.
"""
result = random.choice(get_dictionary('company_names')).strip()
result += '.' + top_level_domain()
return result.lower() |
def email_address(user=None):
"""Return random e-mail address in a hopefully imaginary domain.
If `user` is ``None`` :py:func:`~user_name()` will be used. Otherwise it
will be lowercased and will have spaces replaced with ``_``.
Domain name is created using :py:func:`~domain_name()`.
"""
if not user:
user = user_name()
else:
user = user.strip().replace(' ', '_').lower()
return user + '@' + domain_name() |
def account_number():
"""Return a random bank account number."""
account = [random.randint(1, 9) for _ in range(20)]
return "".join(map(str, account)) |
def bik():
"""Return a random bank identification number."""
return '04' + \
''.join([str(random.randint(1, 9)) for _ in range(5)]) + \
str(random.randint(0, 49) + 50) |
def legal_inn():
"""Return a random taxation ID number for a company."""
mask = [2, 4, 10, 3, 5, 9, 4, 6, 8]
inn = [random.randint(1, 9) for _ in range(10)]
weighted = [v * mask[i] for i, v in enumerate(inn[:-1])]
inn[9] = sum(weighted) % 11 % 10
return "".join(map(str, inn)) |
def legal_ogrn():
"""Return a random government registration ID for a company."""
ogrn = "".join(map(str, [random.randint(1, 9) for _ in range(12)]))
ogrn += str((int(ogrn) % 11 % 10))
return ogrn |
def person_inn():
"""Return a random taxation ID number for a natural person."""
mask11 = [7, 2, 4, 10, 3, 5, 9, 4, 6, 8]
mask12 = [3, 7, 2, 4, 10, 3, 5, 9, 4, 6, 8]
inn = [random.randint(1, 9) for _ in range(12)]
# get the 11th digit of the INN
weighted11 = [v * mask11[i] for i, v in enumerate(inn[:-2])]
inn[10] = sum(weighted11) % 11 % 10
# get the 12th digit of the INN
weighted12 = [v * mask12[i] for i, v in enumerate(inn[:-1])]
inn[11] = sum(weighted12) % 11 % 10
return "".join(map(str, inn)) |
def encrypt(password='password', salt=None):
"""
Return SHA1 hexdigest of a password (optionally salted with a string).
"""
if not salt:
salt = str(datetime.utcnow())
try:
# available for python 2.7.8 and python 3.4+
dk = hashlib.pbkdf2_hmac('sha1', password.encode(), salt.encode(), 100000)
hexdigest = binascii.hexlify(dk).decode('utf-8')
except AttributeError:
# see https://pymotw.com/2/hashlib/
# see https://docs.python.org/release/2.5/lib/module-hashlib.html
dk = hashlib.sha1()
dk.update(password.encode() + salt.encode())
hexdigest = dk.hexdigest()
return hexdigest |
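# Usage sketch (assumes the module-level hashlib/binascii/datetime imports shown
# here): with a fixed salt the helper is deterministic, and both the PBKDF2
# branch and the plain-SHA1 fallback yield a 40-character hex digest.
import binascii
import hashlib
from datetime import datetime

digest = encrypt('secret', salt='pepper')
assert digest == encrypt('secret', salt='pepper')
assert len(digest) == 40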
def password(at_least=6, at_most=12, lowercase=True,
uppercase=True, digits=True, spaces=False, punctuation=False):
"""Return a random string for use as a password."""
return text(at_least=at_least, at_most=at_most, lowercase=lowercase,
uppercase=uppercase, digits=digits, spaces=spaces,
punctuation=punctuation) |
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
print("Updating: JednostkaAdministracyjna")
ja_akt_stan=orm.JednostkaAdministracyjna.objects.all().aggregate(Max('stan_na'))['stan_na__max']
orm.JednostkaAdministracyjna.objects.filter(stan_na__exact=ja_akt_stan).update(aktywny=True)
orm.JednostkaAdministracyjna.objects.exclude(stan_na__exact=ja_akt_stan).update(aktywny=False)
print("Updating: Miejscowosc")
m_akt_stan=orm.Miejscowosc.objects.all().aggregate(Max('stan_na'))['stan_na__max']
orm.Miejscowosc.objects.filter(stan_na__exact=m_akt_stan).update(aktywny=True)
orm.Miejscowosc.objects.exclude(stan_na__exact=m_akt_stan).update(aktywny=False)
print("Updating: RodzajMiejsowosci")
rm_akt_stan=orm.RodzajMiejsowosci.objects.all().aggregate(Max('stan_na'))['stan_na__max']
orm.RodzajMiejsowosci.objects.filter(stan_na__exact=rm_akt_stan).update(aktywny=True)
orm.RodzajMiejsowosci.objects.exclude(stan_na__exact=rm_akt_stan).update(aktywny=False)
print("Updating: Ulica")
u_akt_stan=orm.Ulica.objects.all().aggregate(Max('stan_na'))['stan_na__max']
orm.Ulica.objects.filter(stan_na__exact=u_akt_stan).update(aktywny=True)
orm.Ulica.objects.exclude(stan_na__exact=u_akt_stan).update(aktywny=False) |
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
LEN_TYPE = {
7: 'GMI',
4: 'POW',
2: 'WOJ',
}
for ja in orm.JednostkaAdministracyjna.objects.all():
ja.typ = LEN_TYPE[len(ja.id)]
ja.save() |
def case(*, to, **kwargs):
"""Converts an identifier from one case type to another.
An identifier is an ASCII string consisting of letters, digits and underscores, not starting with a digit.
The supported case types are camelCase, PascalCase, snake_case, and CONSTANT_CASE,
identified as camel, pascal, snake, and constant.
The input identifier is given as a keyword argument with one of these names,
and the output type is given as a string in the `to` keyword argument.
If a given string does not conform to the specified case type (such as underscores in camel or pascal case strings,
or double__underscores in general), the result may not be as desired,
although things like snaKe_casE or CONStaNT_CASe will generally work."""
if len(kwargs) != 1:
raise ValueError("expect exactly one source string argument")
[(typ, string)] = kwargs.items()
types = {'pascal', 'camel', 'snake', 'constant'}
if typ not in types:
raise ValueError(f"source string keyword must be one of {types}")
if to not in types:
raise ValueError(f"\"to\" argument must be one of {types}")
def pascal_iter(string):
yield from (m.group(0) for m in re.finditer(r'[A-Z][a-z0-9]*|[a-z0-9]+', string))
def snake_iter(string):
yield from (m.group(2) for m in re.finditer(r'(^|_)([A-Za-z0-9]+)', string))
inputs = {
'pascal': pascal_iter,
'camel': pascal_iter,
'snake': snake_iter,
'constant': snake_iter,
}
def out_fun(sep, case=None, case_fst=None):
if case is None:
case = lambda x: x
if case_fst is None:
case_fst = case
return lambda tokens: sep.join(case_fst(token) if i == 0 else case(token) for i, token in enumerate(tokens))
outputs = {
'pascal': out_fun('', str.capitalize),
'camel': out_fun('', str.capitalize, str.lower),
'snake': out_fun('_', str.lower),
'constant': out_fun('_', str.upper),
}
tokens = inputs[typ](string)
return outputs[to](tokens) |
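# Illustrative conversions with the converter above (re is its only dependency).
import re

assert case(snake='http_response_code', to='camel') == 'httpResponseCode'
assert case(camel='httpResponseCode', to='constant') == 'HTTP_RESPONSE_CODE'
assert case(pascal='XmlHttpRequest', to='snake') == 'xml_http_request'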
def read_stream(schema, stream, *, buffer_size=io.DEFAULT_BUFFER_SIZE):
"""Using a schema, deserialize a stream of consecutive Avro values.
:param str schema: json string representing the Avro schema
:param file-like stream: a buffered stream of binary input
:param int buffer_size: size of bytes to read from the stream each time
:return: yields a sequence of python data structures deserialized
from the stream
"""
reader = _lancaster.Reader(schema)
buf = stream.read(buffer_size)
remainder = b''
while len(buf) > 0:
values, n = reader.read_seq(buf)
yield from values
remainder = buf[n:]
buf = stream.read(buffer_size)
if len(buf) > 0 and len(remainder) > 0:
ba = bytearray()
ba.extend(remainder)
ba.extend(buf)
buf = memoryview(ba).tobytes()
if len(remainder) > 0:
raise EOFError('{} bytes remaining but could not continue reading '
'from stream'.format(len(remainder))) |
def parse_user_defined_metric_classes(config_obj, metric_classes):
"""
Parse the user defined metric class information
:param config_obj: ConfigParser object
:param metric_classes: list of metric classes to be updated
:return:
"""
user_defined_metric_list = config_obj.get('GLOBAL', 'user_defined_metrics').split()
for udm_string in user_defined_metric_list:
try:
metric_name, metric_class_name, metric_file = udm_string.split(':')
except ValueError:
logger.error('Bad user defined metric specified')
continue
module_name = os.path.splitext(os.path.basename(metric_file))[0]
try:
new_module = imp.load_source(module_name, metric_file)
new_class = getattr(new_module, metric_class_name)
if metric_name in metric_classes.keys():
logger.warn('Overriding pre-defined metric class definition for %s', metric_name)
metric_classes[metric_name] = new_class
except ImportError:
logger.error('Could not import user defined metric class. Skipping metric: %s', metric_name)
continue |
def is_valid_url(url):
"""
Check if a given string is in the correct URL format or not
:param str url:
:return: True or False
"""
regex = re.compile(r'^(?:http|ftp)s?://'
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if regex.match(url):
logger.info("URL given as config")
return True
else:
return False |
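# Example checks against the validator above. It logs through a module-level
# `logger`; a basic one is configured here so the sketch runs standalone.
import logging
import re

logger = logging.getLogger(__name__)

assert is_valid_url('https://example.com/naarad/config.cfg')
assert not is_valid_url('/home/user/naarad/config.cfg')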
def download_file(url):
"""
Download a file pointed to by url to a temp file on local disk
:param str url:
:return: local_file
"""
try:
(local_file, headers) = urllib.urlretrieve(url)
except:
sys.exit("ERROR: Problem downloading config file. Please check the URL (" + url + "). Exiting...")
return local_file |
def is_valid_metric_name(metric_name):
"""
check the validity of metric_name in config; the metric_name will be used for creation of sub-dir, so only contains: alphabet, digits , '.', '-' and '_'
:param str metric_name: metric_name
:return: True if valid
"""
reg = re.compile('^[a-zA-Z0-9\.\-\_]+$')
if reg.match(metric_name) and not metric_name.startswith('.'):
return True
else:
return False |
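# Examples: names are limited to letters, digits, '.', '-' and '_', and must not
# start with '.'.
import re

assert is_valid_metric_name('GC-1_old.gen')
assert not is_valid_metric_name('.hidden')
assert not is_valid_metric_name('bad/name')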
def get_run_time_period(run_steps):
"""
This method finds the time range which covers all the Run_Steps
:param run_steps: list of Run_Step objects
:return: tuple of start and end timestamps
"""
init_ts_start = get_standardized_timestamp('now', None)
ts_start = init_ts_start
ts_end = '0'
for run_step in run_steps:
if run_step.ts_start and run_step.ts_end:
if run_step.ts_start < ts_start:
ts_start = run_step.ts_start
if run_step.ts_end > ts_end:
ts_end = run_step.ts_end
if ts_end == '0':
ts_end = None
if ts_start == init_ts_start:
ts_start = None
logger.info('get_run_time_period range returned ' + str(ts_start) + ' to ' + str(ts_end))
return ts_start, ts_end |
def get_rule_strings(config_obj, section):
"""
Extract rule strings from a section
:param config_obj: ConfigParser object
:param section: Section name
:return: tuple of (rule strings dict, remaining section options dict)
"""
rule_strings = {}
kwargs = dict(config_obj.items(section))
for key in list(kwargs.keys()):  # copy the keys since entries are deleted below
if key.endswith('.sla'):
rule_strings[key.replace('.sla', '')] = kwargs[key]
del kwargs[key]
return rule_strings, kwargs |
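# Sketch of how '.sla' suffixed options are split out of a section. Uses the
# Python 2 ConfigParser/StringIO modules to match the surrounding code; the
# section content below is made up for illustration.
import ConfigParser
import StringIO

config_obj = ConfigParser.ConfigParser()
config_obj.optionxform = str  # preserve option-name case so the SLA key survives
config_obj.readfp(StringIO.StringIO('[GC-1]\ninfile = gc.log\nGC.sla = p99<10\n'))
rules, remaining = get_rule_strings(config_obj, 'GC-1')
# rules == {'GC': 'p99<10'}, remaining == {'infile': 'gc.log'}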
def extract_diff_sla_from_config_file(obj, options_file):
"""
Helper function to parse diff config file, which contains SLA rules for diff comparisons
"""
rule_strings = {}
config_obj = ConfigParser.ConfigParser()
config_obj.optionxform = str
config_obj.read(options_file)
for section in config_obj.sections():
rule_strings, kwargs = get_rule_strings(config_obj, section)
for (key, val) in rule_strings.iteritems():
set_sla(obj, section, key, val) |
def parse_basic_metric_options(config_obj, section):
"""
Parse basic options from metric sections of the config
:param config_obj: ConfigParser object
:param section: Section name
:return: all the parsed options
"""
infile = {}
aggr_hosts = None
aggr_metrics = None
ts_start = None
ts_end = None
precision = None
hostname = "localhost"
rule_strings = {}
important_sub_metrics = None
anomaly_detection_metrics = None
try:
if config_obj.has_option(section, 'important_sub_metrics'):
important_sub_metrics = config_obj.get(section, 'important_sub_metrics').split()
config_obj.remove_option(section, 'important_sub_metrics')
if config_obj.has_option(section, 'hostname'):
hostname = config_obj.get(section, 'hostname')
config_obj.remove_option(section, 'hostname')
# 'infile' is not mandatory for aggregate metrics
if config_obj.has_option(section, 'infile'):
infile = config_obj.get(section, 'infile').split()
config_obj.remove_option(section, 'infile')
label = sanitize_string_section_name(section)
if config_obj.has_option(section, 'ts_start'):
ts_start = get_standardized_timestamp(config_obj.get(section, 'ts_start'), None)
config_obj.remove_option(section, 'ts_start')
if config_obj.has_option(section, 'ts_end'):
ts_end = get_standardized_timestamp(config_obj.get(section, 'ts_end'), None)
config_obj.remove_option(section, 'ts_end')
if config_obj.has_option(section, 'precision'):
precision = config_obj.get(section, 'precision')
config_obj.remove_option(section, 'precision')
# support aggregate metrics, which take aggr_hosts and aggr_metrics
if config_obj.has_option(section, 'aggr_hosts'):
aggr_hosts = config_obj.get(section, 'aggr_hosts')
config_obj.remove_option(section, 'aggr_hosts')
if config_obj.has_option(section, 'aggr_metrics'):
aggr_metrics = config_obj.get(section, 'aggr_metrics')
config_obj.remove_option(section, 'aggr_metrics')
if config_obj.has_option(section, 'anomaly_detection_metrics'):
anomaly_detection_metrics = config_obj.get(section, 'anomaly_detection_metrics').split()
config_obj.remove_option(section, 'anomaly_detection_metrics')
rule_strings, other_options = get_rule_strings(config_obj, section)
except ConfigParser.NoOptionError:
logger.exception("Exiting.... some mandatory options are missing from the config file in section: " + section)
sys.exit()
return (hostname, infile, aggr_hosts, aggr_metrics, label, ts_start, ts_end, precision, aggr_metrics, other_options,
rule_strings, important_sub_metrics, anomaly_detection_metrics) |
def parse_metric_section(config_obj, section, metric_classes, metrics, aggregate_metric_classes, outdir_default, resource_path):
"""
Parse a metric section and create a Metric object
:param config_obj: ConfigParser object
:param section: Section name
:param metric_classes: List of valid metric types
:param metrics: List of all regular metric objects (used by aggregate metric)
:param aggregate_metric_classes: List of all valid aggregate metric types
:param outdir_default: Default output directory
:param resource_path: Default resource directory
:return: An initialized Metric object
"""
(hostname, infile, aggr_hosts, aggr_metrics, label, ts_start, ts_end, precision, aggr_metrics, other_options,
rule_strings, important_sub_metrics, anomaly_detection_metrics) = parse_basic_metric_options(config_obj, section)
# TODO: Make user specify metric_type in config and not infer from section
metric_type = section.split('-')[0]
if metric_type in aggregate_metric_classes:
new_metric = initialize_aggregate_metric(section, aggr_hosts, aggr_metrics, metrics, outdir_default, resource_path, label, ts_start, ts_end, rule_strings,
important_sub_metrics, anomaly_detection_metrics, other_options)
else:
new_metric = initialize_metric(section, infile, hostname, aggr_metrics, outdir_default, resource_path, label, ts_start, ts_end, rule_strings,
important_sub_metrics, anomaly_detection_metrics, other_options)
if config_obj.has_option(section, 'ignore') and config_obj.getint(section, 'ignore') == 1:
new_metric.ignore = True
if config_obj.has_option(section, 'calc_metrics'):
new_metric.calc_metrics = config_obj.get(section, 'calc_metrics')
new_metric.precision = precision
return new_metric |
def parse_global_section(config_obj, section):
"""
Parse GLOBAL section in the config to return global settings
:param config_obj: ConfigParser object
:param section: Section name
:return: ts_start and ts_end time
"""
ts_start = None
ts_end = None
if config_obj.has_option(section, 'ts_start'):
ts_start = get_standardized_timestamp(config_obj.get(section, 'ts_start'), None)
config_obj.remove_option(section, 'ts_start')
if config_obj.has_option(section, 'ts_end'):
ts_end = get_standardized_timestamp(config_obj.get(section, 'ts_end'), None)
config_obj.remove_option(section, 'ts_end')
return ts_start, ts_end |
def parse_run_step_section(config_obj, section):
"""
Parse a RUN-STEP section in the config to return a Run_Step object
:param config_obj: ConfigParser objection
:param section: Section name
:return: an initialized Run_Step object
"""
kill_after_seconds = None
try:
run_cmd = config_obj.get(section, 'run_cmd')
run_rank = int(config_obj.get(section, 'run_rank'))
except ConfigParser.NoOptionError:
logger.exception("Exiting.... some mandatory options are missing from the config file in section: " + section)
sys.exit()
except ValueError:
logger.error("Bad run_rank %s specified in section %s, should be integer. Exiting.", config_obj.get(section, 'run_rank'), section)
sys.exit()
if config_obj.has_option(section, 'run_type'):
run_type = config_obj.get(section, 'run_type')
else:
run_type = CONSTANTS.RUN_TYPE_WORKLOAD
if config_obj.has_option(section, 'run_order'):
run_order = config_obj.get(section, 'run_order')
else:
run_order = CONSTANTS.PRE_ANALYSIS_RUN
if config_obj.has_option(section, 'call_type'):
call_type = config_obj.get(section, 'call_type')
else:
call_type = 'local'
if config_obj.has_option(section, 'kill_after_seconds'):
try:
kill_after_seconds = int(config_obj.get(section, 'kill_after_seconds'))
except ValueError:
logger.error("Bad kill_after_seconds %s specified in section %s, should be integer.", config_obj.get(section, 'kill_after_seconds'), section)
if call_type == 'local':
run_step_obj = Local_Cmd(run_type, run_cmd, call_type, run_order, run_rank, kill_after_seconds=kill_after_seconds)
else:
logger.error('Unsupported RUN_STEP supplied, call_type should be local')
run_step_obj = None
return run_step_obj |
def parse_graph_section(config_obj, section, outdir_default, indir_default):
"""
Parse the GRAPH section of the config to extract useful values
:param config_obj: ConfigParser object
:param section: Section name
:param outdir_default: Default output directory passed in args
:param indir_default: Default input directory passed in args
:return: List of options extracted from the GRAPH section
"""
graph_timezone = None
graphing_library = CONSTANTS.DEFAULT_GRAPHING_LIBRARY
crossplots = []
if config_obj.has_option(section, 'graphing_library'):
graphing_library = config_obj.get(section, 'graphing_library')
if config_obj.has_option(section, 'graphs'):
graphs_string = config_obj.get(section, 'graphs')
crossplots = graphs_string.split()
# Supporting both outdir and output_dir
if config_obj.has_option(section, 'outdir'):
outdir_default = config_obj.get(section, 'outdir')
if config_obj.has_option(section, 'output_dir'):
outdir_default = config_obj.get(section, 'output_dir')
if config_obj.has_option(section, 'input_dir'):
indir_default = config_obj.get(section, 'input_dir')
if config_obj.has_option(section, 'graph_timezone'):
graph_timezone = config_obj.get(section, 'graph_timezone')
if graph_timezone not in ("UTC", "PST", "PDT"):
logger.warn('Unsupported timezone ' + graph_timezone + ' specified in option graph_timezone. Will use UTC instead')
graph_timezone = "UTC"
return graphing_library, crossplots, outdir_default, indir_default, graph_timezone |
def parse_report_section(config_obj, section):
"""
parse the [REPORT] section of a config file to extract various reporting options to be passed to the Report object
:param: config_obj : configparser object for the config file passed in to naarad
:param: section: name of the section. 'REPORT' should be passed in here
:return: report_kwargs: dictionary of Reporting options and values specified in config.
"""
report_kwargs = {}
if config_obj.has_option(section, 'stylesheet_includes'):
report_kwargs['stylesheet_includes'] = config_obj.get(section, 'stylesheet_includes')
if config_obj.has_option(section, 'javascript_includes'):
report_kwargs['javascript_includes'] = config_obj.get(section, 'javascript_includes')
if config_obj.has_option(section, 'header_template'):
report_kwargs['header_template'] = config_obj.get(section, 'header_template')
if config_obj.has_option(section, 'footer_template'):
report_kwargs['footer_template'] = config_obj.get(section, 'footer_template')
if config_obj.has_option(section, 'summary_content_template'):
report_kwargs['summary_content_template'] = config_obj.get(section, 'summary_content_template')
if config_obj.has_option(section, 'summary_page_template'):
report_kwargs['summary_page_template'] = config_obj.get(section, 'summary_page_template')
if config_obj.has_option(section, 'metric_page_template'):
report_kwargs['metric_page_template'] = config_obj.get(section, 'metric_page_template')
if config_obj.has_option(section, 'client_charting_template'):
report_kwargs['client_charting_template'] = config_obj.get(section, 'client_charting_template')
if config_obj.has_option(section, 'diff_client_charting_template'):
report_kwargs['diff_client_charting_template'] = config_obj.get(section, 'diff_client_charting_template')
if config_obj.has_option(section, 'diff_page_template'):
report_kwargs['diff_page_template'] = config_obj.get(section, 'diff_page_template')
return report_kwargs |
def calculate_stats(data_list, stats_to_calculate=['mean', 'std'], percentiles_to_calculate=[]):
"""
Calculate statistics for given data.
:param list data_list: List of floats
:param list stats_to_calculate: List of strings with statistics to calculate. Supported stats are defined in constant stats_to_numpy_method_map
:param list percentiles_to_calculate: List of floats that defined which percentiles to calculate.
:return: tuple of dictionaries containing calculated statistics and percentiles
"""
stats_to_numpy_method_map = {
'mean': numpy.mean,
'avg': numpy.mean,
'std': numpy.std,
'standard_deviation': numpy.std,
'median': numpy.median,
'min': numpy.amin,
'max': numpy.amax
}
calculated_stats = {}
calculated_percentiles = {}
if len(data_list) == 0:
return calculated_stats, calculated_percentiles
for stat in stats_to_calculate:
if stat in stats_to_numpy_method_map.keys():
calculated_stats[stat] = stats_to_numpy_method_map[stat](data_list)
else:
logger.error("Unsupported stat : " + str(stat))
for percentile in percentiles_to_calculate:
if isinstance(percentile, float) or isinstance(percentile, int):
calculated_percentiles[percentile] = numpy.percentile(data_list, percentile)
else:
logger.error("Unsupported percentile requested (should be int or float): " + str(percentile))
return calculated_stats, calculated_percentiles |
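# Example: a couple of summary statistics and percentiles for a short series
# (numpy is the only dependency of the helper above).
import numpy

stats, percentiles = calculate_stats([1.0, 2.0, 3.0, 4.0],
                                     stats_to_calculate=['mean', 'max'],
                                     percentiles_to_calculate=[50, 95])
assert stats['mean'] == 2.5
assert stats['max'] == 4.0
assert percentiles[50] == 2.5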
def is_valid_file(filename):
"""
Check if the specified file exists and is not empty
:param filename: full path to the file that needs to be checked
:return: Status, Message
"""
if os.path.exists(filename):
if not os.path.getsize(filename):
logger.warning('%s : file is empty.', filename)
return False
else:
logger.warning('%s : file does not exist.', filename)
return False
return True |
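# Example: a non-empty temp file passes, a missing file is rejected (warnings go
# to the module-level `logger` assumed by the helper).
import logging
import os
import tempfile

logger = logging.getLogger(__name__)

tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(b'some data')
tmp.close()
assert is_valid_file(tmp.name)
os.remove(tmp.name)
assert not is_valid_file(tmp.name)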
def detect_timestamp_format(timestamp):
"""
Given an input timestamp string, determine what format is it likely in.
:param string timestamp: the timestamp string for which we need to determine format
:return: best guess timestamp format
"""
time_formats = {
'epoch': re.compile(r'^[0-9]{10}$'),
'epoch_ms': re.compile(r'^[0-9]{13}$'),
'epoch_fraction': re.compile(r'^[0-9]{10}\.[0-9]{3,9}$'),
'%Y-%m-%d %H:%M:%S': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9] [0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'),
'%Y-%m-%dT%H:%M:%S': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9]T[0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'),
'%Y-%m-%d_%H:%M:%S': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9]_[0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'),
'%Y-%m-%d %H:%M:%S.%f': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9] [0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'),
'%Y-%m-%dT%H:%M:%S.%f': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9]T[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'),
'%Y-%m-%d_%H:%M:%S.%f': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9]_[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'),
'%Y%m%d %H:%M:%S': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9] [0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'),
'%Y%m%dT%H:%M:%S': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9]T[0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'),
'%Y%m%d_%H:%M:%S': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9]_[0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'),
'%Y%m%d %H:%M:%S.%f': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9] [0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'),
'%Y%m%dT%H:%M:%S.%f': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9]T[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'),
'%Y%m%d_%H:%M:%S.%f': re.compile(r'^[0-9]{4}[0-1][0-9][0-3][0-9]_[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'),
'%H:%M:%S': re.compile(r'^[0-2][0-9]:[0-5][0-9]:[0-5][0-9]$'),
'%H:%M:%S.%f': re.compile(r'^[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+$'),
'%Y-%m-%dT%H:%M:%S.%f%z': re.compile(r'^[0-9]{4}-[0-1][0-9]-[0-3][0-9]T[0-2][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]+[+-][0-9]{4}$')
}
for time_format in time_formats:
if re.match(time_formats[time_format], timestamp):
return time_format
return 'unknown' |
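# A few detections with the patterns above (re is assumed at module level).
import re

assert detect_timestamp_format('1404693847') == 'epoch'
assert detect_timestamp_format('2014-07-06 23:24:07') == '%Y-%m-%d %H:%M:%S'
assert detect_timestamp_format('07/06/2014') == 'unknown'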
def get_standardized_timestamp(timestamp, ts_format):
"""
Given a timestamp string, return a time stamp in the epoch ms format. If no date is present in
timestamp then today's date will be added as a prefix before conversion to epoch ms
"""
if not timestamp:
return None
if timestamp == 'now':
timestamp = str(datetime.datetime.now())
if not ts_format:
ts_format = detect_timestamp_format(timestamp)
try:
if ts_format == 'unknown':
logger.error('Unable to determine timestamp format for : %s', timestamp)
return -1
elif ts_format == 'epoch':
ts = int(timestamp) * 1000
elif ts_format == 'epoch_ms':
ts = timestamp
elif ts_format == 'epoch_fraction':
ts = int(timestamp[:10]) * 1000 + int(timestamp[11:])
elif ts_format in ('%H:%M:%S', '%H:%M:%S.%f'):
date_today = str(datetime.date.today())
dt_obj = datetime.datetime.strptime(date_today + ' ' + timestamp, '%Y-%m-%d ' + ts_format)
ts = calendar.timegm(dt_obj.utctimetuple()) * 1000 + dt_obj.microsecond / 1000
else:
dt_obj = datetime.datetime.strptime(timestamp, ts_format)
ts = calendar.timegm(dt_obj.utctimetuple()) * 1000 + dt_obj.microsecond / 1000
except ValueError:
return -1
return str(ts) |
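# Example conversions: epoch seconds and fractional epoch values are both
# promoted to epoch milliseconds and returned as strings.
assert get_standardized_timestamp('1404693847', None) == '1404693847000'
assert get_standardized_timestamp('1404693847.123', None) == '1404693847123'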
def set_sla(obj, metric, sub_metric, rules):
"""
Extract SLAs from a set of rules
"""
if not hasattr(obj, 'sla_map'):
return False
rules_list = rules.split()
for rule in rules_list:
if '<' in rule:
stat, threshold = rule.split('<')
sla = SLA(metric, sub_metric, stat, threshold, 'lt')
elif '>' in rule:
stat, threshold = rule.split('>')
sla = SLA(metric, sub_metric, stat, threshold, 'gt')
else:
if hasattr(obj, 'logger'):
obj.logger.error('Unsupported SLA type defined : ' + rule)
# skip malformed rules; 'stat' is undefined for them
continue
obj.sla_map[metric][sub_metric][stat] = sla
if hasattr(obj, 'sla_list'):
obj.sla_list.append(sla) # TODO : remove this once report has grading done in the metric tables
return True |
def check_slas(metric):
"""
Check all SLAs defined for a metric, mark the metric status as failed on any violation, and write the SLA results to a csv file.
:return: None
"""
if not hasattr(metric, 'sla_map'):
return
for metric_label in metric.sla_map.keys():
for sub_metric in metric.sla_map[metric_label].keys():
for stat_name in metric.sla_map[metric_label][sub_metric].keys():
sla = metric.sla_map[metric_label][sub_metric][stat_name]
if stat_name[0] == 'p' and hasattr(metric, 'calculated_percentiles'):
if sub_metric in metric.calculated_percentiles.keys():
percentile_num = int(stat_name[1:])
if isinstance(percentile_num, float) or isinstance(percentile_num, int):
if percentile_num in metric.calculated_percentiles[sub_metric].keys():
if not sla.check_sla_passed(metric.calculated_percentiles[sub_metric][percentile_num]):
logger.info("Failed SLA for " + sub_metric)
metric.status = CONSTANTS.SLA_FAILED
if hasattr(metric, 'calculated_stats') and sub_metric in metric.calculated_stats.keys():
if stat_name in metric.calculated_stats[sub_metric].keys():
if not sla.check_sla_passed(metric.calculated_stats[sub_metric][stat_name]):
logger.info("Failed SLA for " + sub_metric)
metric.status = CONSTANTS.SLA_FAILED
# Save SLA results in a file
if len(metric.sla_map.keys()) > 0 and hasattr(metric, 'get_sla_csv'):
sla_csv_file = metric.get_sla_csv()
with open(sla_csv_file, 'w') as FH:
for metric_label in metric.sla_map.keys():
for sub_metric in metric.sla_map[metric_label].keys():
for stat, sla in metric.sla_map[metric_label][sub_metric].items():
FH.write('%s\n' % (sla.get_csv_repr())) |
def init_logging(logger, log_file, log_level):
"""
Initialize the naarad logger.
:param: logger: logger object to initialize
:param: log_file: log file name
:param: log_level: log level (debug, info, warn, error)
"""
with open(log_file, 'w'):
pass
numeric_level = getattr(logging, log_level.upper(), None) if log_level else logging.INFO
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % log_level)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(numeric_level)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return CONSTANTS.OK |
def get_argument_parser():
"""
Initialize list of valid arguments accepted by Naarad CLI
:return: arg_parser: argparse.ArgumentParser object initialized with naarad CLI parameters
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-c', '--config', help="file with specifications for each metric and graphs")
arg_parser.add_argument('--start', help="Start time in the format of HH:MM:SS or YYYY-mm-dd_HH:MM:SS")
arg_parser.add_argument('--end', help="End time in the format of HH:MM:SS or YYYY-mm-dd_HH:MM:SS")
arg_parser.add_argument('-i', '--input_dir', help="input directory used to construct full path name of the metric infile")
arg_parser.add_argument('-o', '--output_dir', help="output directory where the plots and Report.html will be generated")
arg_parser.add_argument('-V', '--variables', action="append",
help="User defined variables (in form key=value) for substitution in the config file. "
"Config should have the variable names in format %%(key)s")
arg_parser.add_argument('-s', '--show_config', help="Print config associated with the provided template name", action="store_true")
arg_parser.add_argument('-l', '--log', help="log level")
arg_parser.add_argument('-d', '--diff', nargs=2,
help="Specify the location of two naarad reports to diff separated by a space. Can be local or http(s) "
"locations. The first report is used as a baseline.", metavar=("report-1", "report-2"))
arg_parser.add_argument('-n', '--no_plots',
help="Don't generate plot images. Useful when you only want SLA calculations. Note that on-demand charts can "
"still be generated through client-charting.", action="store_true")
arg_parser.add_argument('-e', '--exit_code', help="optional argument to enable exit_code for naarad", action="store_true")
# TODO(Ritesh) : Print a list of all templates supported with descriptions
# arg_parser.add_argument('-l', '--list_templates', help="List all template configs", action="store_true")
return arg_parser |
def get_variables(args):
"""
Return a dictionary of variables specified at CLI
:param: args: Command Line Arguments namespace
"""
variables_dict = {}
if args.variables:
for var in args.variables:
key, value = var.split('=', 1)
variables_dict[key] = value
return variables_dict |
def validate_arguments(args):
"""
Validate that the necessary argument for normal or diff analysis are specified.
:param: args: Command line arguments namespace
"""
if args.diff:
if not args.output_dir:
logger.error('No Output location specified')
print_usage()
sys.exit(0)
# elif not (args.config and args.output_dir):
elif not args.output_dir:
print_usage()
sys.exit(0) |
def discover_by_name(input_directory, output_directory):
"""
Auto discover metric types from the files that exist in input_directory and return a list of metrics
:param: input_directory: The location to scan for log files
:param: output_directory: The location for the report
"""
metric_list = []
log_files = os.listdir(input_directory)
for log_file in log_files:
if log_file in CONSTANTS.SUPPORTED_FILENAME_MAPPING.keys():
metric_list.append(initialize_metric(CONSTANTS.SUPPORTED_FILENAME_MAPPING[log_file], [log_file], None, [], output_directory, CONSTANTS.RESOURCE_PATH,
CONSTANTS.SUPPORTED_FILENAME_MAPPING[log_file], None, None, {}, None, None, {}))
else:
logger.warning('Unable to determine metric type for file: %s', log_file)
return metric_list |
def initialize_metric(section, infile_list, hostname, aggr_metrics, output_directory, resource_path, label, ts_start, ts_end, rule_strings,
important_sub_metrics, anomaly_detection_metrics, other_options):
"""
Initialize appropriate metric based on type of metric.
:param: section: config section name or auto discovered metric type
:param: infile_list: list of input log files for the metric
:param: hostname: hostname associated with the logs origin
:param: output_directory: report location
:param: resource_path: resource path for report
:param: label: label for config section or auto discovered metric type
:param: ts_start: start time for analysis
:param: ts_end: end time for analysis
:param: rule_strings: list of slas
:param: important_sub_metrics: list of important sub metrics
:param: anomaly_detection_metrics: list of metrics to use for anomaly detection.
:param: other_options: kwargs
:return: metric object
"""
metric = None
metric_type = section.split('-')[0]
if metric_type in metric_classes:
if 'SAR' in metric_type:
metric = metric_classes['SAR'](section, infile_list, hostname, aggr_metrics, output_directory, resource_path, label, ts_start, ts_end,
rule_strings, important_sub_metrics, anomaly_detection_metrics, **other_options)
else:
metric = metric_classes[metric_type](section, infile_list, hostname, aggr_metrics, output_directory, resource_path, label, ts_start, ts_end,
rule_strings, important_sub_metrics, anomaly_detection_metrics, **other_options)
else:
metric = Metric(section, infile_list, hostname, aggr_metrics, output_directory, resource_path, label, ts_start, ts_end, rule_strings,
important_sub_metrics, anomaly_detection_metrics, **other_options)
return metric |
def initialize_aggregate_metric(section, aggr_hosts, aggr_metrics, metrics, outdir_default, resource_path, label, ts_start, ts_end, rule_strings,
important_sub_metrics, anomaly_detection_metrics, other_options):
"""
Initialize aggregate metric
:param: section: config section name
:param: aggr_hosts: list of hostnames to aggregate
:param: aggr_metrics: list of metrics to aggregate
:param: metrics: list of metric objects associated with the current naarad analysis
:param: outdir_default: report location
:param: resource_path: resource path for report
:param: label: label for config section
:param: ts_start: start time for analysis
:param: ts_end: end time for analysis
:param: rule_strings: list of slas
:param: important_sub_metrics: list of important sub metrics
:param: other_options: kwargs
:return: metric object
"""
metric = None
metric_type = section.split('-')[0]
metric = aggregate_metric_classes[metric_type](section, aggr_hosts, aggr_metrics, metrics, outdir_default, resource_path, label, ts_start, ts_end,
rule_strings, important_sub_metrics, anomaly_detection_metrics, **other_options)
return metric |
def graph_csv(output_directory, resource_path, csv_file, plot_title, output_filename, y_label=None, precision=None, graph_height="600", graph_width="1500"):
""" Single metric graphing function """
if not os.path.getsize(csv_file):
return False, ""
y_label = y_label or plot_title
div_id = str(random.random())
div_string = "<div id=\"%s\" style=\"width:%spx; height:%spx;\"></div>" % (div_id, graph_width, graph_height)
script_string = """<script type=\"text/javascript\">
g2 = new Dygraph(
document.getElementById(\"""" + div_id + """"),
\"""" + resource_path + '/' + os.path.basename(csv_file) + """",
{
xValueFormatter: Dygraph.dateString_,
xValueParser: function(x) {
var date_components = x.split(" ");
var supported_format = date_components[0] + 'T' + date_components[1];
if(date_components[1].indexOf(".") == -1)
{
supported_format += ".0";
}
return Date.parse(supported_format);
},
xTicker: Dygraph.dateTicker,
xlabel: "Time",
ylabel: \"""" + y_label + """",
title: \"""" + plot_title + """",
labels: ["Time",\"""" + y_label + """"]
} // options
);
</script>"""
with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file:
div_file.write(div_string + script_string)
# TODO(ritesh): Also generate PNGs if someone needs them separately
return True, os.path.join(output_directory, output_filename + '.div') |
def aggregate_count_over_time(self, metric_store, line_data, transaction_list, aggregate_timestamp):
"""
Organize and store the count of data from the log line into the metric store by metric type, transaction, timestamp
:param dict metric_store: The metric store used to store all the parsed jmeter log data
:param dict line_data: dict with the extracted k:v from the log line
:param list transaction_list: list of transaction to be used for storing the metrics from given line
:param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period
:return: None
"""
for transaction in transaction_list:
if line_data.get('s') == 'true':
all_qps = metric_store['qps']
else:
all_qps = metric_store['eqps']
qps = all_qps[transaction]
if aggregate_timestamp in qps:
qps[aggregate_timestamp] += 1
else:
qps[aggregate_timestamp] = 1
return None |
def aggregate_values_over_time(self, metric_store, line_data, transaction_list, metric_list, aggregate_timestamp):
"""
Organize and store the data from the log line into the metric store by metric type, transaction, timestamp
:param dict metric_store: The metric store used to store all the parsed jmeter log data
:param dict line_data: dict with the extracted k:v from the log line
:param list transaction_list: list of transaction to be used for storing the metrics from given line
:param list metric_list: list of metrics to extract from the log line
:param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period
:return: None
"""
for metric in metric_list:
for transaction in transaction_list:
metric_data = reduce(defaultdict.__getitem__, [metric, transaction, aggregate_timestamp], metric_store)
metric_data.append(float(line_data.get(metric)))
return None |
def average_values_for_plot(self, metric_store, data, averaging_factor):
"""
Create the time series for the various metrics, averaged over the aggregation period being used for plots
:param dict metric_store: The metric store used to store all the parsed jmeter log data
:param dict data: Dict with all the metric data to be output to csv
:param float averaging_factor: averaging factor to be used for calculating the average per second metrics
:return: None
"""
for metric, transaction_store in metric_store.items():
for transaction, time_store in transaction_store.items():
for time_stamp, metric_data in sorted(time_store.items()):
if metric in ['t', 'by']:
data[self.get_csv(transaction, metric)].append(','.join([str(time_stamp), str(sum(map(float, metric_data)) / float(len(metric_data)))]))
if metric == 'by':
metric_store['thr'][transaction][time_stamp] = sum(map(float, metric_data)) / float(averaging_factor * 1024 * 1024 / 8.0)
data[self.get_csv(transaction, 'thr')].append(','.join([str(time_stamp), str(metric_store['thr'][transaction][time_stamp])]))
elif metric in ['qps', 'eqps']:
data[self.get_csv(transaction, metric)].append(','.join([str(time_stamp), str(metric_data / float(averaging_factor))]))
return None |
def calculate_key_stats(self, metric_store):
"""
Calculate key statistics for given data and store in the class variables calculated_stats and calculated_percentiles
calculated_stats:
'mean', 'std', 'median', 'min', 'max'
calculated_percentiles:
range(5,101,5), 99
:param dict metric_store: The metric store used to store all the parsed jmeter log data
:return: none
"""
stats_to_calculate = ['mean', 'std', 'median', 'min', 'max'] # TODO: get input from user
percentiles_to_calculate = list(range(5, 101, 5)) # TODO: get input from user
percentiles_to_calculate.append(99)
for transaction in metric_store['t'].keys():
transaction_key = transaction + '.' + 'ResponseTime'
# For ResponseTime and ResponseSize, each timestamp has a list of values associated with it.
# Using heapq.merge to merge all the lists into a single list to be passed to numpy.
self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \
naarad.utils.calculate_stats(list(heapq.merge(*metric_store['t'][transaction].values())),
stats_to_calculate, percentiles_to_calculate)
self.update_summary_stats(transaction_key)
transaction_key = transaction + '.' + 'qps'
if len(metric_store['qps'][transaction].values()) > 0:
self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \
naarad.utils.calculate_stats(metric_store['qps'][transaction].values(),
stats_to_calculate, percentiles_to_calculate)
self.update_summary_stats(transaction_key)
transaction_key = transaction + '.' + 'ResponseSize'
self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \
naarad.utils.calculate_stats(list(heapq.merge(*metric_store['by'][transaction].values())),
stats_to_calculate, percentiles_to_calculate)
self.update_summary_stats(transaction_key)
if 'eqps' in metric_store.keys() and transaction in metric_store['eqps'].keys():
transaction_key = transaction + '.' + 'ErrorsPerSecond'
self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \
naarad.utils.calculate_stats(metric_store['eqps'][transaction].values(),
stats_to_calculate, percentiles_to_calculate)
self.update_summary_stats(transaction + '.' + 'ErrorsPerSecond')
transaction_key = transaction + '.' + 'DataThroughput'
self.calculated_stats[transaction_key], self.calculated_percentiles[transaction_key] = \
naarad.utils.calculate_stats(metric_store['thr'][transaction].values(),
stats_to_calculate, percentiles_to_calculate)
self.update_summary_stats(transaction_key)
return None |
def parse(self):
"""
Parse the Jmeter file and calculate key stats
:return: status of the metric parse
"""
file_status = True
for infile in self.infile_list:
file_status = file_status and naarad.utils.is_valid_file(infile)
if not file_status:
return False
status = self.parse_xml_jtl(self.aggregation_granularity)
gc.collect()
return status |
def parse_xml_jtl(self, granularity):
"""
Parse Jmeter workload output in XML format and extract overall and per transaction data and key statistics
:param string granularity: The time period over which to aggregate and average the raw data. Valid values are 'hour', 'minute' or 'second'
:return: status of the metric parse
"""
data = defaultdict(list)
processed_data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for input_file in self.infile_list:
logger.info('Processing : %s', input_file)
timestamp_format = None
tree = ElementTree.parse(input_file)
samples = tree.findall('./httpSample') + tree.findall('./sample')
for sample in samples:
if not timestamp_format or timestamp_format == 'unknown':
timestamp_format = naarad.utils.detect_timestamp_format(sample.get('ts'))
if timestamp_format == 'unknown':
continue
ts = naarad.utils.get_standardized_timestamp(sample.get('ts'), timestamp_format)
if ts == -1:
continue
ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
aggregate_timestamp, averaging_factor = self.get_aggregation_timestamp(ts, granularity)
self.aggregate_count_over_time(processed_data, sample, [self._sanitize_label(sample.get('lb')), 'Overall_Summary'], aggregate_timestamp)
self.aggregate_values_over_time(processed_data, sample, [self._sanitize_label(sample.get('lb')), 'Overall_Summary'], ['t', 'by'], aggregate_timestamp)
logger.info('Finished parsing : %s', input_file)
logger.info('Processing metrics for output to csv')
self.average_values_for_plot(processed_data, data, averaging_factor)
logger.info('Writing time series csv')
for csv in data.keys():
self.csv_files.append(csv)
with open(csv, 'w') as csvf:
csvf.write('\n'.join(sorted(data[csv])))
logger.info('Processing raw data for stats')
self.calculate_key_stats(processed_data)
return True |
def put_values_into_data(self, values):
"""
Take the (col, value) in 'values', append value into 'col' in self.data[]
"""
for col, value in values.items():
if col in self.column_csv_map:
out_csv = self.column_csv_map[col]
else:
out_csv = self.get_csv(col) # column_csv_map[] is assigned in get_csv()
self.data[out_csv] = []
self.data[out_csv].append(self.ts + "," + value) |
def process_top_line(self, words):
"""
Process the line starting with "top"
Example log: top - 00:00:02 up 32 days, 7:08, 19 users, load average: 0.00, 0.00, 0.00
"""
self.ts_time = words[2]
self.ts = self.ts_date + ' ' + self.ts_time
self.ts = naarad.utils.get_standardized_timestamp(self.ts, None)
if self.ts_out_of_range(self.ts):
self.ts_valid_lines = False
else:
self.ts_valid_lines = True
up_days = int(words[4])
up_hour_minute = words[6].split(':') # E.g. '4:02,'
up_minutes = int(up_hour_minute[0]) * 60 + int(up_hour_minute[1].split(',')[0])
uptime_minute = up_days * 24 * 60 + up_minutes # Converting days to minutes
values = {}
values['uptime_minute'] = str(uptime_minute)
values['num_users'] = words[7]
values['load_aver_1_minute'] = words[11][:-1]
values['load_aver_5_minute'] = words[12][:-1]
values['load_aver_15_minute'] = words[13]
self.put_values_into_data(values) |
def process_tasks_line(self, words):
"""
Process the line starting with "Tasks:"
Example log: Tasks: 446 total, 1 running, 442 sleeping, 2 stopped, 1 zombie
"""
words = words[1:]
length = len(words) // 2 # The number of pairs
values = {}
for offset in range(length):
k = words[2 * offset + 1].strip(',')
v = words[2 * offset]
values['tasks_' + k] = v
self.put_values_into_data(values) |
def process_cpu_line(self, words):
"""
Process the line starting with "Cpu(s):"
Example log: Cpu(s): 1.3%us, 0.5%sy, 0.0%ni, 98.2%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
"""
values = {}
for word in words[1:]:
val, key = word.split('%')
values['cpu_' + key.strip(',')] = val
self.put_values_into_data(values) |
def convert_to_G(self, word):
"""
Given a size such as '2333M', return the converted value in G
"""
value = 0.0
if word[-1] == 'G' or word[-1] == 'g':
value = float(word[:-1])
elif word[-1] == 'M' or word[-1] == 'm':
value = float(word[:-1]) / 1000.0
elif word[-1] == 'K' or word[-1] == 'k':
value = float(word[:-1]) / 1000.0 / 1000.0
else: # No unit
value = float(word) / 1000.0 / 1000.0 / 1000.0
return str(value) |
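# Quick check of the unit conversion above. convert_to_G does not touch any
# instance state, so a throwaway object stands in for `self` here.
class _DummyTop(object):
    pass

assert convert_to_G(_DummyTop(), '63.998G') == '63.998'
assert convert_to_G(_DummyTop(), '2333M') == str(2333 / 1000.0)
assert convert_to_G(_DummyTop(), '512k') == str(512 / 1000.0 / 1000.0)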
def process_swap_line(self, words):
"""
Process the line starting with "Swap:"
Example log: Swap: 63.998G total, 0.000k used, 63.998G free, 11.324G cached
For each value, needs to convert to 'G' (needs to handle cases of K, M)
"""
words = words[1:]
length = len(words) // 2 # The number of pairs
values = {}
for offset in range(length):
k = words[2 * offset + 1].strip(',')
v = self.convert_to_G(words[2 * offset])
values['swap_' + k] = v
self.put_values_into_data(values) |
def process_individual_command(self, words):
"""
process the individual lines like this:
#PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
29303 root 20 0 35300 2580 1664 R 3.9 0.0 0:00.02 top
11 root RT 0 0 0 0 S 1.9 0.0 0:18.87 migration/2
3702 root 20 0 34884 4192 1692 S 1.9 0.0 31:40.47 cf-serverd
It does not record all processes due to memory concerns; it only records processes of interest (based on user input of PID and COMMAND)
"""
pid_index = self.process_headers.index('PID')
process_index = self.process_headers.index('COMMAND')
pid = words[pid_index]
process = words[process_index]
if pid in self.PID or process in self.COMMAND:
process_name = process.split('/')[0]
values = {}
for word_col in self.process_headers:
word_index = self.process_headers.index(word_col)
if word_col in ['VIRT', 'RES', 'SHR']: # These values need to convert to 'G'
values[process_name + '_' + pid + '_' + word_col] = self.convert_to_G(words[word_index])
elif word_col in ['PR', 'NI', '%CPU', '%MEM']: # These values will be assigned later or ignored
values[process_name + '_' + pid + '_' + word_col.strip('%')] = words[word_index]
uptime_index = self.process_headers.index('TIME+')
uptime = words[uptime_index].split(':')
uptime_sec = float(uptime[0]) * 60 + float(uptime[1])
values[process_name + '_' + pid + '_' + 'TIME'] = str(uptime_sec)
self.put_values_into_data(values) |
def parse(self):
"""
Parse the top output file
Return status of the metric parse
The raw log file is like the following:
2014-06-23
top - 00:00:02 up 18 days, 7:08, 19 users, load average: 0.05, 0.03, 0.00
Tasks: 447 total, 1 running, 443 sleeping, 2 stopped, 1 zombie
Cpu(s): 1.6%us, 0.5%sy, 0.0%ni, 97.9%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Mem: 62.841G total, 15.167G used, 47.675G free, 643.434M buffers
Swap: 63.998G total, 0.000k used, 63.998G free, 11.324G cached
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
1730 root 20 0 4457m 10m 3328 S 1.9 0.0 80:13.45 lwregd
The log lines can be generated by echo $t >> $RESULT/top.out &; top -b -n $COUNT -d $INTERVAL | grep -A 40 '^top' >> $RESULT/top.out &
"""
for infile in self.infile_list:
logger.info('Processing : %s', infile)
status = True
file_status = naarad.utils.is_valid_file(infile)
if not file_status:
return False
with open(infile) as fh:
for line in fh:
words = line.split()
if not words:
continue
# Pattern matches line of '2014-02-03'
if re.match(r'^\d{4}-\d{2}-\d{2}$', line):
self.ts_date = words[0]
continue
prefix_word = words[0].strip()
if prefix_word == 'top':
self.process_top_line(words)
self.saw_pid = False # Turn off the processing of individual process line
elif self.ts_valid_lines:
if prefix_word == 'Tasks:':
self.process_tasks_line(words)
elif prefix_word == 'Cpu(s):':
self.process_cpu_line(words)
elif prefix_word == 'Mem:':
self.process_mem_line(words)
elif prefix_word == 'Swap:':
self.process_swap_line(words)
elif prefix_word == 'PID':
self.saw_pid = True # Turn on the processing of individual process line
self.process_headers = words
else: # Each individual process line
if self.saw_pid and len(words) >= len(self.process_headers): # Only valid process lines
self.process_individual_command(words)
# Putting data in csv files;
for out_csv in self.data.keys(): # All sub_metrics
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
fh.write('\n'.join(self.data[out_csv]))
gc.collect()
return status |
def handle_single_url(url, outdir, outfile=None):
"""
Base function which takes a single url and downloads it to outdir/outfile
:param str url: a full/absolute url, e.g. http://www.cnn.com/log.zip
:param str outdir: the absolute local directory. e.g. /home/user1/tmp/
:param str outfile: (optional) filename stored in local directory. If outfile is not given, extract the filename from url
:return: the local full path name of downloaded url
"""
if not url or type(url) != str \
or not outdir or type(outdir) != str:
logger.error('passed in parameters %s %s are incorrect.' % (url, outdir))
return
if not naarad.utils.is_valid_url(url):
logger.error("passed in url %s is incorrect." % url)
return
if not outfile:
segs = url.split('/')
outfile = segs[-1]
outfile = urllib2.quote(outfile)
output_file = os.path.join(outdir, outfile)
if os.path.exists(output_file):
logger.warn("the %s already exists!" % outfile)
with open(output_file, "w") as fh:
try:
response = urllib2.urlopen(url)
fh.write(response.read())
except urllib2.HTTPError:
logger.error("got HTTPError when retrieving %s" % url)
return
except urllib2.URLError:
logger.error("got URLError when retrieving %s" % url)
return
return output_file |
def stream_url(url):
"""
Read response of specified url into memory and return to caller. No persistence to disk.
:return: response content if accessing the URL succeeds, False otherwise
"""
try:
response = urllib2.urlopen(url)
response_content = response.read()
return response_content
except (urllib2.URLError, urllib2.HTTPError) as e:
logger.error('Unable to access requested URL: %s', url)
return False |
def get_urls_from_seed(url):
"""
Get a list of urls from a seed url.
:param str url: a full/absolute url, e.g. http://www.cnn.com/logs/
:return: a list of full/absolute urls.
"""
if not url or type(url) != str or not naarad.utils.is_valid_url(url):
logger.error("get_urls_from_seed() does not have valid seeding url.")
return
# Extract the host info of "http://host:port/" in case the href urls are relative urls (e.g., /path/gc.log)
# Then join (host info and relative urls) to form the complete urls
base_index = url.find('/', len("https://")) # get the first "/" after http://" or "https://"; handling both cases.
base_url = url[:base_index] # base_url = "http://host:port" or https://host:port" or http://host" (where no port is given)
# Extract the "href" denoted urls
urls = []
try:
response = urllib2.urlopen(url)
hp = HTMLLinkExtractor()
hp.feed(response.read())
urls = hp.links
hp.close()
except urllib2.HTTPError:
logger.error("Got HTTPError when opening the url of %s" % url)
return urls
# Check whether the url is relative or complete
for i in range(len(urls)):
if not urls[i].startswith("http://") and not urls[i].startswith("https://"): # a relative url ?
urls[i] = base_url + urls[i]
return urls |
def download_url_single(inputs, outdir, outfile=None):
"""
Downloads a http(s) url to a local file
:param str inputs: the absolute url
:param str outdir: Required. The local directory to put the downloaded files.
:param str outfile: Optional. If this is given, the downloaded file will be renamed to outfile;
if this is not given, then the local file keeps the original name, as given in the url.
:return: the local full path name of downloaded url
"""
if not inputs or type(inputs) != str or not outdir or type(outdir) != str:
logging.error("The call parameters are invalid.")
return
else:
if not os.path.exists(outdir):
os.makedirs(outdir)
output_file = handle_single_url(inputs, outdir, outfile)
return output_file |
def download_url_regex(inputs, outdir, regex=".*"):
"""
Downloads http(s) urls to a local files
:param str inputs: Required, the seed url
:param str outdir: Required. The local directory to put the downloaded files.
:param str regex: Optional, a regex string. If not given, then all urls will be valid
:return: A list of local full path names (downloaded from inputs)
"""
if not inputs or type(inputs) != str \
or not outdir or type(outdir) != str:
logging.error("The call parameters are invalid.")
return
else:
if not os.path.exists(outdir):
os.makedirs(outdir)
output_files = []
files = get_urls_from_seed(inputs)
for f in files:
if re.compile(regex).match(f):
output_file = handle_single_url(f, outdir)
output_files.append(output_file)
return output_files |
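A hedged usage sketch of the two download helpers above; the import path naarad.httpdownload is assumed from how other parts of this code call into it (e.g. naarad.httpdownload.stream_url), and the urls and output directory are placeholders.

# Hypothetical usage of the helpers above. The module path naarad.httpdownload is assumed
# from calls elsewhere in this code; the urls and output directory are placeholders.
from naarad.httpdownload import download_url_single, download_url_regex

# Fetch one known url, keeping its original file name:
local_file = download_url_single('http://host1:8080/logs/gc.log', '/tmp/naarad-logs')
print(local_file)

# Fetch every .log file linked from a seed page:
local_files = download_url_regex('http://host1:8080/logs/', '/tmp/naarad-logs', regex=r'.*\.log$')
print(local_files)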
def read_csv(csv_name):
"""
Read data from a csv file into a dictionary.
:param str csv_name: path to a csv file.
  :return dict: a dictionary representing the data in the file.
"""
data = {}
if not isinstance(csv_name, (str, unicode)):
raise exceptions.InvalidDataFormat('luminol.utils: csv_name has to be a string!')
with open(csv_name, 'r') as csv_data:
reader = csv.reader(csv_data, delimiter=',', quotechar='|')
for row in reader:
try:
key = to_epoch(row[0])
value = float(row[1])
data[key] = value
except ValueError:
pass
return data |
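read_csv() expects plain timestamp,value rows and silently drops anything whose second column is not numeric. A self-contained sketch of that format (keeping the raw timestamp string as the key, since naarad's to_epoch helper is not shown here):

# Sketch of the timestamp,value layout read_csv() consumes. to_epoch() is naarad-specific,
# so this simplified version keeps the raw timestamp string as the dictionary key.
import csv

sample_rows = [
  '2014-10-29 00:27:42.151,12.5',
  '2014-10-29 00:27:43.151,13.0',
  'timestamp,value',                 # header-like row: float() fails, so it is skipped
]

data = {}
for row in csv.reader(sample_rows, delimiter=',', quotechar='|'):
  try:
    data[row[0]] = float(row[1])
  except ValueError:
    pass

print(sorted(data.items()))
# [('2014-10-29 00:27:42.151', 12.5), ('2014-10-29 00:27:43.151', 13.0)]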
def run(self):
"""
Run the command, infer time period to be used in metric analysis phase.
:return: None
"""
cmd_args = shlex.split(self.run_cmd)
logger.info('Local command RUN-STEP starting with rank %d', self.run_rank)
logger.info('Running subprocess command with following args: ' + str(cmd_args))
# TODO: Add try catch blocks. Kill process on CTRL-C
# Infer time period for analysis. Assume same timezone between client and servers.
self.ts_start = time.strftime("%Y-%m-%d %H:%M:%S")
try:
self.process = subprocess.Popen(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1)
if self.kill_after_seconds:
self.timer = Timer(self.kill_after_seconds, self.kill)
self.timer.start()
# Using 2nd method here to stream output:
# http://stackoverflow.com/questions/2715847/python-read-streaming-input-from-subprocess-communicate
for line in iter(self.process.stdout.readline, b''):
logger.info(line.strip())
self.process.communicate()
except KeyboardInterrupt:
logger.warning('Handling keyboard interrupt (Ctrl-C)')
self.kill()
if self.timer:
self.timer.cancel()
self.ts_end = time.strftime("%Y-%m-%d %H:%M:%S")
logger.info('subprocess finished')
logger.info('run_step started at ' + self.ts_start + ' and ended at ' + self.ts_end) |
def kill(self):
"""
If run_step needs to be killed, this method will be called
:return: None
"""
try:
      logger.info('Trying to terminate run_step...')
self.process.terminate()
time_waited_seconds = 0
while self.process.poll() is None and time_waited_seconds < CONSTANTS.SECONDS_TO_KILL_AFTER_SIGTERM:
time.sleep(0.5)
time_waited_seconds += 0.5
if self.process.poll() is None:
self.process.kill()
logger.warning('Waited %d seconds for run_step to terminate. Killing now....', CONSTANTS.SECONDS_TO_KILL_AFTER_SIGTERM)
    except OSError as e:
logger.error('Error while trying to kill the subprocess: %s', e) |
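The shutdown logic in kill() is the usual terminate-then-kill escalation: send SIGTERM, wait a bounded time, and only then SIGKILL. A standalone sketch of the same pattern, with a hard-coded grace period standing in for CONSTANTS.SECONDS_TO_KILL_AFTER_SIGTERM:

# Standalone sketch of the terminate-then-kill escalation used by kill(). The grace period
# default below is an assumed stand-in for CONSTANTS.SECONDS_TO_KILL_AFTER_SIGTERM.
import subprocess
import time


def stop_process(process, grace_seconds=10):
  """Terminate a subprocess.Popen object, escalating to kill() after grace_seconds."""
  process.terminate()                     # SIGTERM
  waited = 0
  while process.poll() is None and waited < grace_seconds:
    time.sleep(0.5)
    waited += 0.5
  if process.poll() is None:              # still running: force it
    process.kill()                        # SIGKILL


if __name__ == '__main__':
  proc = subprocess.Popen(['sleep', '60'])
  stop_process(proc, grace_seconds=2)
  print('exit code: %s' % proc.poll())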
def copy_local_includes(self):
"""
Copy local js/css includes from naarad resources to the report/resources directory
:return: None
"""
resource_folder = self.get_resources_location()
for stylesheet in self.stylesheet_includes:
if ('http' not in stylesheet) and naarad.utils.is_valid_file(os.path.join(resource_folder, stylesheet)):
shutil.copy(os.path.join(resource_folder, stylesheet), self.resource_directory)
for javascript in self.javascript_includes:
if ('http' not in javascript) and naarad.utils.is_valid_file(os.path.join(resource_folder, javascript)):
shutil.copy(os.path.join(resource_folder, javascript), self.resource_directory)
return None |
def generate_client_charting_page(self, data_sources):
"""
    Create the client charting page for the diff report, with time series data from the two diffed reports.
    :param data_sources: list of time series data sources to include in the client charting page
    :return: generated html to be written to disk
"""
if not os.path.exists(self.resource_directory):
os.makedirs(self.resource_directory)
self.copy_local_includes()
template_loader = FileSystemLoader(self.get_resources_location())
template_environment = Environment(loader=template_loader)
client_html = template_environment.get_template(CONSTANTS.TEMPLATE_HEADER).render(custom_stylesheet_includes=CONSTANTS.STYLESHEET_INCLUDES,
custom_javascript_includes=CONSTANTS.JAVASCRIPT_INCLUDES,
resource_path=self.resource_path,
report_title='naarad diff report') + '\n'
client_html += template_environment.get_template(CONSTANTS.TEMPLATE_DIFF_CLIENT_CHARTING).render(data_series=data_sources,
resource_path=self.resource_path) + '\n'
client_html += template_environment.get_template(CONSTANTS.TEMPLATE_FOOTER).render()
return client_html |
def generate_diff_html(self):
"""
Generate the summary diff report html from template
:return: generated html to be written to disk
"""
if not os.path.exists(self.resource_directory):
os.makedirs(self.resource_directory)
self.copy_local_includes()
div_html = ''
for plot_div in sorted(self.plot_files):
with open(plot_div, 'r') as div_file:
div_html += '\n' + div_file.read()
template_loader = FileSystemLoader(self.get_resources_location())
template_environment = Environment(loader=template_loader)
template_environment.filters['sanitize_string'] = naarad.utils.sanitize_string
diff_html = template_environment.get_template(CONSTANTS.TEMPLATE_HEADER).render(custom_stylesheet_includes=CONSTANTS.STYLESHEET_INCLUDES,
custom_javascript_includes=CONSTANTS.JAVASCRIPT_INCLUDES,
resource_path=self.resource_path,
report_title='naarad diff report') + '\n'
diff_html += template_environment.get_template(CONSTANTS.TEMPLATE_DIFF_PAGE).render(diff_data=self.diff_data, plot_div_content=div_html,
reports=self.reports, sla_failure_list=self.sla_failure_list,
sla_map=self.sla_map) + '\n'
diff_html += template_environment.get_template(CONSTANTS.TEMPLATE_FOOTER).render()
return diff_html |
def discover(self, metafile):
"""
    Determine what summary stats, time series, and CDF csv exist for the reports that need to be diffed.
    :param metafile: the meta file listing the csv files of a given type (stats / time series / CDF)
    :return: boolean: whether the summary stats / time series / CDF csv summary was successfully located
"""
for report in self.reports:
if report.remote_location == 'local':
        if naarad.utils.is_valid_file(os.path.join(report.location, self.resource_path, metafile)):
          with open(os.path.join(report.location, self.resource_path, metafile), 'r') as meta_file:
if metafile == CONSTANTS.STATS_CSV_LIST_FILE:
report.stats = meta_file.readlines()[0].split(',')
elif metafile == CONSTANTS.PLOTS_CSV_LIST_FILE:
report.datasource = meta_file.readlines()[0].split(',')
elif metafile == CONSTANTS.CDF_PLOTS_CSV_LIST_FILE:
report.cdf_datasource = meta_file.readlines()[0].split(',')
else:
report.status = 'NO_SUMMARY_STATS'
self.status = 'ERROR'
          logger.error('Unable to access summary stats file for report: %s', report.label)
return False
else:
stats_url = report.remote_location + '/' + self.resource_path + '/' + metafile
meta_file_data = naarad.httpdownload.stream_url(stats_url)
if meta_file_data:
if metafile == CONSTANTS.STATS_CSV_LIST_FILE:
report.stats = meta_file_data.split(',')
elif metafile == CONSTANTS.PLOTS_CSV_LIST_FILE:
report.datasource = meta_file_data.split(',')
elif metafile == CONSTANTS.CDF_PLOTS_CSV_LIST_FILE:
report.cdf_datasource = meta_file_data.split(',')
else:
report.status = 'NO_SUMMARY_STATS'
self.status = 'ERROR'
          logger.error('No summary stats available for report: %s', report.label)
return False
return True |
def collect_datasources(self):
"""
Identify what time series exist in both the diffed reports and download them to the diff report resources directory
:return: True/False : return status of whether the download of time series resources succeeded.
"""
report_count = 0
if self.status != 'OK':
return False
diff_datasource = sorted(set(self.reports[0].datasource) & set(self.reports[1].datasource))
if diff_datasource:
self.reports[0].datasource = diff_datasource
self.reports[1].datasource = diff_datasource
else:
self.status = 'NO_COMMON_STATS'
logger.error('No common metrics were found between the two reports')
return False
for report in self.reports:
report.label = report_count
report_count += 1
report.local_location = os.path.join(self.resource_directory, str(report.label))
try:
os.makedirs(report.local_location)
      except OSError as exception:
        if exception.errno != errno.EEXIST:
raise
if report.remote_location != 'local':
naarad.httpdownload.download_url_list(map(lambda x: report.remote_location + '/' + self.resource_path + '/' + x + '.csv', report.datasource),
report.local_location)
else:
for filename in report.datasource:
try:
            shutil.copy(os.path.join(report.location, self.resource_path, filename + '.csv'), report.local_location)
          except IOError:
            continue
return True |
def plot_diff(self, graphing_library='matplotlib'):
"""
Generate CDF diff plots of the submetrics
"""
diff_datasource = sorted(set(self.reports[0].datasource) & set(self.reports[1].datasource))
graphed = False
for submetric in diff_datasource:
baseline_csv = naarad.utils.get_default_csv(self.reports[0].local_location, (submetric + '.percentiles'))
current_csv = naarad.utils.get_default_csv(self.reports[1].local_location, (submetric + '.percentiles'))
      if not (naarad.utils.is_valid_file(baseline_csv) and naarad.utils.is_valid_file(current_csv)):
continue
baseline_plot = PD(input_csv=baseline_csv, csv_column=1, series_name=submetric, y_label=submetric, precision=None, graph_height=600, graph_width=1200,
graph_type='line', plot_label='baseline', x_label='Percentiles')
current_plot = PD(input_csv=current_csv, csv_column=1, series_name=submetric, y_label=submetric, precision=None, graph_height=600, graph_width=1200,
graph_type='line', plot_label='current', x_label='Percentiles')
graphed, div_file = Diff.graphing_modules[graphing_library].graph_data_on_the_same_graph([baseline_plot, current_plot],
os.path.join(self.output_directory, self.resource_path),
self.resource_path, (submetric + '.diff'))
if graphed:
self.plot_files.append(div_file)
return True |
def check_sla(self, sla, diff_metric):
"""
    Check whether the SLA has passed or failed for the given diff metric and record any failure
    :param sla: the SLA definition to check against
    :param dict diff_metric: dict containing the 'absolute_diff' and 'percent_diff' values
    :return: False if the diff value could not be parsed as a float, True otherwise
"""
try:
      if sla.display == '%':
diff_val = float(diff_metric['percent_diff'])
else:
diff_val = float(diff_metric['absolute_diff'])
except ValueError:
return False
if not (sla.check_sla_passed(diff_val)):
self.sla_failures += 1
self.sla_failure_list.append(DiffSLAFailure(sla, diff_metric))
return True |
def generate(self):
"""
Generate a diff report from the reports specified.
:return: True/False : return status of whether the diff report generation succeeded.
"""
if (self.discover(CONSTANTS.STATS_CSV_LIST_FILE) and self.discover(CONSTANTS.PLOTS_CSV_LIST_FILE) and self.discover(CONSTANTS.CDF_PLOTS_CSV_LIST_FILE) and
self.collect() and self.collect_datasources() and self.collect_cdf_datasources()):
for stats in self.reports[0].stats:
metric_label = stats.replace('.stats.csv', '')
stats_0 = os.path.join(self.reports[0].local_location, stats)
stats_1 = os.path.join(self.reports[1].local_location, stats)
report0_stats = {}
report1_stats = {}
if naarad.utils.is_valid_file(stats_0) and naarad.utils.is_valid_file(stats_1):
report0 = csv.DictReader(open(stats_0))
for row in report0:
report0_stats[row[CONSTANTS.SUBMETRIC_HEADER]] = row
        report0_stats['__headers__'] = report0.fieldnames
report1 = csv.DictReader(open(stats_1))
for row in report1:
report1_stats[row[CONSTANTS.SUBMETRIC_HEADER]] = row
        report1_stats['__headers__'] = report1.fieldnames
common_stats = sorted(set(report0_stats['__headers__']) & set(report1_stats['__headers__']))
common_submetrics = sorted(set(report0_stats.keys()) & set(report1_stats.keys()))
for submetric in common_submetrics:
if submetric != '__headers__':
for stat in common_stats:
if stat != CONSTANTS.SUBMETRIC_HEADER:
diff_metric = reduce(defaultdict.__getitem__, [stats.split('.')[0], submetric, stat], self.diff_data)
diff_metric[0] = float(report0_stats[submetric][stat])
diff_metric[1] = float(report1_stats[submetric][stat])
diff_metric['absolute_diff'] = naarad.utils.normalize_float_for_display(diff_metric[1] - diff_metric[0])
if diff_metric[0] == 0:
if diff_metric['absolute_diff'] == '0.0':
diff_metric['percent_diff'] = 0.0
else:
diff_metric['percent_diff'] = 'N/A'
else:
diff_metric['percent_diff'] = naarad.utils.normalize_float_for_display((diff_metric[1] - diff_metric[0]) * 100 / diff_metric[0])
# check whether there is a SLA failure
if ((metric_label in self.sla_map.keys()) and (submetric in self.sla_map[metric_label].keys()) and
(stat in self.sla_map[metric_label][submetric].keys())):
self.check_sla(self.sla_map[metric_label][submetric][stat], diff_metric)
else:
return False
self.plot_diff()
diff_html = ''
if self.diff_data:
diff_html = self.generate_diff_html()
client_html = self.generate_client_charting_page(self.reports[0].datasource)
if diff_html != '':
with open(os.path.join(self.output_directory, CONSTANTS.DIFF_REPORT_FILE), 'w') as diff_file:
diff_file.write(diff_html)
with open(os.path.join(self.output_directory, CONSTANTS.CLIENT_CHARTING_FILE), 'w') as client_file:
client_file.write(client_html)
return True |
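The per-stat arithmetic inside generate() reduces to an absolute diff plus a percent diff, with a special case for a zero baseline. A worked standalone sketch of just that calculation, with round() standing in for naarad.utils.normalize_float_for_display():

# Worked sketch of the per-stat diff computed inside generate(); round() stands in for
# naarad.utils.normalize_float_for_display().
def diff_stat(baseline, current):
  result = {0: baseline, 1: current}
  result['absolute_diff'] = round(current - baseline, 2)
  if baseline == 0:
    # A zero baseline makes percent change undefined unless nothing changed at all.
    result['percent_diff'] = 0.0 if result['absolute_diff'] == 0 else 'N/A'
  else:
    result['percent_diff'] = round((current - baseline) * 100.0 / baseline, 2)
  return result


print(diff_stat(200.0, 250.0))  # absolute_diff 50.0, percent_diff 25.0
print(diff_stat(0.0, 3.0))      # percent_diff 'N/A' because the baseline is zero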
def get_aggregation_timestamp(self, timestamp, granularity='second'):
"""
    Return an aggregated timestamp and averaging factor, derived from the raw epoch time based on the granularity preference passed in.
    :param string timestamp: timestamp from the log line
    :param string granularity: aggregation granularity used for plots.
    :return: tuple of (aggregate_timestamp used for metrics aggregation in all functions, averaging factor in seconds)
"""
if granularity is None or granularity.lower() == 'none':
return int(timestamp), 1
elif granularity == 'hour':
return (int(timestamp) / (3600 * 1000)) * 3600 * 1000, 3600
elif granularity == 'minute':
return (int(timestamp) / (60 * 1000)) * 60 * 1000, 60
else:
return (int(timestamp) / 1000) * 1000, 1 |
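The bucketing above is integer floor division of an epoch-millisecond timestamp by the granularity, scaled back up; the second return value is the averaging factor in seconds. A small worked sketch of the same arithmetic (written with // so it behaves identically on Python 2 and 3):

# Sketch of the epoch-millisecond bucketing done by get_aggregation_timestamp(); // makes
# the floor division explicit so the arithmetic is the same on Python 2 and 3.
def aggregate_timestamp(ts_ms, granularity='second'):
  ts_ms = int(ts_ms)
  if granularity is None or granularity.lower() == 'none':
    return ts_ms, 1
  elif granularity == 'hour':
    return (ts_ms // (3600 * 1000)) * 3600 * 1000, 3600
  elif granularity == 'minute':
    return (ts_ms // (60 * 1000)) * 60 * 1000, 60
  else:
    return (ts_ms // 1000) * 1000, 1


ts = 1414542462151  # an epoch timestamp in milliseconds
print(aggregate_timestamp(ts, 'minute'))  # (1414542420000, 60): floor to the minute, average over 60s
print(aggregate_timestamp(ts))            # (1414542462000, 1): floor to the second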
def aggregate_count_over_time(self, metric_store, groupby_name, aggregate_timestamp):
"""
    Organize and store the count of data from the log line into the metric store by column, group name, timestamp
:param dict metric_store: The metric store used to store all the parsed the log data
:param string groupby_name: the group name that the log line belongs to
:param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period
:return: None
"""
all_qps = metric_store['qps']
qps = all_qps[groupby_name]
if aggregate_timestamp in qps:
qps[aggregate_timestamp] += 1
else:
qps[aggregate_timestamp] = 1
return None |
def aggregate_values_over_time(self, metric_store, data, groupby_name, column_name, aggregate_timestamp):
"""
Organize and store the data from the log line into the metric store by metric type, transaction, timestamp
:param dict metric_store: The metric store used to store all the parsed log data
:param string data: column data in the log line
:param string groupby_name: the group that the data belongs to
:param string column_name: the column name of the data
:param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period
:return: None
"""
    # When grouping is enabled, also record the data point under an Overall_summary group
if self.groupby:
metric_data = reduce(defaultdict.__getitem__, [column_name, 'Overall_summary', aggregate_timestamp], metric_store)
metric_data.append(float(data))
metric_data = reduce(defaultdict.__getitem__, [column_name, groupby_name, aggregate_timestamp], metric_store)
metric_data.append(float(data))
return None |
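The reduce(defaultdict.__getitem__, ...) idiom used above walks a nested defaultdict path, creating intermediate levels on the way down. A standalone sketch of a three-level metric store (column -> group -> timestamp -> list of samples) exercised the same way:

# Sketch of the nested-defaultdict metric store the aggregation methods write into:
# metric_store[column][group][aggregate_timestamp] -> list of float samples.
from collections import defaultdict
from functools import reduce  # needed on Python 3; functools.reduce also exists on Python 2.6+

metric_store = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

# reduce() with defaultdict.__getitem__ descends the key path, creating each level on the way.
samples = reduce(defaultdict.__getitem__, ['latency', 'Overall_summary', 1414542462000], metric_store)
samples.append(12.5)
samples.append(13.0)

print(metric_store['latency']['Overall_summary'][1414542462000])  # [12.5, 13.0]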
def average_values_for_plot(self, metric_store, data, averaging_factor):
"""
Create the time series for the various metrics, averaged over the aggregation period being used for plots
:param dict metric_store: The metric store used to store all the parsed log data
:param dict data: Dict with all the metric data to be output to csv
:param float averaging_factor: averaging factor to be used for calculating the average per second metrics
:return: None
"""
for column, groups_store in metric_store.items():
for group, time_store in groups_store.items():
for time_stamp, column_data in sorted(time_store.items()):
if column in ['qps']:
if self.groupby:
data[self.get_csv(column, group)].append(','.join([str(time_stamp), str(column_data / float(averaging_factor))]))
else:
data[self.get_csv(column)].append(','.join([str(time_stamp), str(column_data / float(averaging_factor))]))
else:
if self.groupby:
data[self.get_csv(column, group)].append(','.join([str(time_stamp), str(sum(map(float, column_data)) / float(len(column_data)))]))
else:
data[self.get_csv(column)].append(','.join([str(time_stamp), str(sum(map(float, column_data)) / float(len(column_data)))]))
return None |
def calc_key_stats(self, metric_store):
"""
Calculate stats such as percentile and mean
:param dict metric_store: The metric store used to store all the parsed log data
:return: None
"""
stats_to_calculate = ['mean', 'std', 'min', 'max'] # TODO: get input from user
percentiles_to_calculate = range(0, 100, 1) # TODO: get input from user
for column, groups_store in metric_store.items():
for group, time_store in groups_store.items():
data = metric_store[column][group].values()
if self.groupby:
column_name = group + '.' + column
else:
column_name = column
if column.startswith('qps'):
self.calculated_stats[column_name], self.calculated_percentiles[column_name] = naarad.utils.calculate_stats(data, stats_to_calculate, percentiles_to_calculate)
else:
self.calculated_stats[column_name], self.calculated_percentiles[column_name] = naarad.utils.calculate_stats(list(heapq.merge(*data)), stats_to_calculate,
percentiles_to_calculate)
self.update_summary_stats(column_name) |
def calculate_stats(self):
"""
Calculate stats with different function depending on the metric type:
Data is recorded in memory for base metric type, and use calculate_base_metric_stats()
Data is recorded in CSV file for other metric types, and use calculate_other_metric_stats()
"""
metric_type = self.metric_type.split('-')[0]
if metric_type in naarad.naarad_imports.metric_classes or metric_type in naarad.naarad_imports.aggregate_metric_classes:
self.calculate_other_metric_stats()
else:
self.calculate_base_metric_stats() |
def plot_timeseries(self, graphing_library='matplotlib'):
"""
plot timeseries for sub-metrics
"""
if self.groupby:
plot_data = {}
# plot time series data for submetrics
for out_csv in sorted(self.csv_files, reverse=True):
csv_filename = os.path.basename(out_csv)
transaction_name = ".".join(csv_filename.split('.')[1:-1])
if transaction_name in self.anomalies.keys():
highlight_regions = self.anomalies[transaction_name]
else:
highlight_regions = None
# The last element is .csv, don't need that in the name of the chart
column = csv_filename.split('.')[-2]
transaction_name = ' '.join(csv_filename.split('.')[1:-2])
plot = PD(input_csv=out_csv, csv_column=1, series_name=transaction_name + '.' + column,
y_label=column + ' (' + self.sub_metric_description[column] + ')', precision=None, graph_height=500, graph_width=1200, graph_type='line',
highlight_regions=highlight_regions)
if transaction_name in plot_data:
plot_data[transaction_name].append(plot)
else:
plot_data[transaction_name] = [plot]
for transaction in plot_data:
graphed, div_file = Metric.graphing_modules[graphing_library].graph_data(plot_data[transaction], self.resource_directory, self.resource_path,
self.label + '.' + transaction)
if graphed:
self.plot_files.append(div_file)
else:
graphed = False
for out_csv in self.csv_files:
csv_filename = os.path.basename(out_csv)
transaction_name = ".".join(csv_filename.split('.')[1:-1])
if transaction_name in self.anomalies.keys():
highlight_regions = self.anomalies[transaction_name]
else:
highlight_regions = None
# The last element is .csv, don't need that in the name of the chart
column = self.csv_column_map[out_csv]
column = naarad.utils.sanitize_string(column)
graph_title = '.'.join(csv_filename.split('.')[0:-1])
if self.sub_metric_description and column in self.sub_metric_description.keys():
graph_title += ' (' + self.sub_metric_description[column] + ')'
if self.sub_metric_unit and column in self.sub_metric_unit.keys():
plot_data = [PD(input_csv=out_csv, csv_column=1, series_name=graph_title, y_label=column + ' (' + self.sub_metric_unit[column] + ')',
precision=None, graph_height=600, graph_width=1200, graph_type='line', highlight_regions=highlight_regions)]
else:
plot_data = [PD(input_csv=out_csv, csv_column=1, series_name=graph_title, y_label=column, precision=None, graph_height=600, graph_width=1200,
graph_type='line', highlight_regions=highlight_regions)]
graphed, div_file = Metric.graphing_modules[graphing_library].graph_data(plot_data, self.resource_directory, self.resource_path, graph_title)
if graphed:
self.plot_files.append(div_file)
return True |
def check_important_sub_metrics(self, sub_metric):
"""
    Check whether the given sub metric is in the important_sub_metrics list
"""
if not self.important_sub_metrics:
return False
if sub_metric in self.important_sub_metrics:
return True
items = sub_metric.split('.')
if items[-1] in self.important_sub_metrics:
return True
return False |
def plot_cdf(self, graphing_library='matplotlib'):
"""
plot CDF for important sub-metrics
"""
graphed = False
for percentile_csv in self.percentiles_files:
csv_filename = os.path.basename(percentile_csv)
# The last element is .csv, don't need that in the name of the chart
column = self.csv_column_map[percentile_csv.replace(".percentiles.", ".")]
if not self.check_important_sub_metrics(column):
continue
column = naarad.utils.sanitize_string(column)
graph_title = '.'.join(csv_filename.split('.')[0:-1])
if self.sub_metric_description and column in self.sub_metric_description.keys():
graph_title += ' (' + self.sub_metric_description[column] + ')'
if self.sub_metric_unit and column in self.sub_metric_unit.keys():
plot_data = [PD(input_csv=percentile_csv, csv_column=1, series_name=graph_title, x_label='Percentiles',
y_label=column + ' (' + self.sub_metric_unit[column] + ')', precision=None, graph_height=600, graph_width=1200, graph_type='line')]
else:
plot_data = [PD(input_csv=percentile_csv, csv_column=1, series_name=graph_title, x_label='Percentiles', y_label=column, precision=None,
graph_height=600, graph_width=1200, graph_type='line')]
graphed, div_file = Metric.graphing_modules[graphing_library].graph_data_on_the_same_graph(plot_data, self.resource_directory,
self.resource_path, graph_title)
if graphed:
self.plot_files.append(div_file)
return True |
def graph(self, graphing_library='matplotlib'):
"""
graph generates two types of graphs
'time': generate a time-series plot for all submetrics (the x-axis is a time series)
'cdf': generate a CDF plot for important submetrics (the x-axis shows percentiles)
"""
logger.info('Using graphing_library {lib} for metric {name}'.format(lib=graphing_library, name=self.label))
self.plot_cdf(graphing_library)
self.plot_timeseries(graphing_library)
return True |
def detect_anomaly(self):
"""
Detect anomalies in the timeseries data for the submetrics specified in the config file. Identified anomalies are
    stored in self.anomalies as well as written to a .anomalies.csv file used by the client charting page. Anomaly
detection uses the luminol library (http://pypi.python.org/pypi/luminol)
"""
if not self.anomaly_detection_metrics or len(self.anomaly_detection_metrics) <= 0:
return
for submetric in self.anomaly_detection_metrics:
csv_file = self.get_csv(submetric)
if naarad.utils.is_valid_file(csv_file):
detector = anomaly_detector.AnomalyDetector(csv_file)
anomalies = detector.get_anomalies()
if len(anomalies) <= 0:
          continue
self.anomalies[submetric] = anomalies
anomaly_csv_file = os.path.join(self.resource_directory, self.label + '.' + submetric + '.anomalies.csv')
with open(anomaly_csv_file, 'w') as FH:
for anomaly in anomalies:
FH.write(",".join([str(anomaly.anomaly_score), str(anomaly.start_timestamp), str(anomaly.end_timestamp), str(anomaly.exact_timestamp)]))
FH.write('\n') |
def _get_tuple(self, fields):
"""
    :param fields: a list which contains 0, 1, or 2 values
    :return: a tuple of two values, each defaulting to '' when missing
"""
v1 = ''
v2 = ''
if len(fields) > 0:
v1 = fields[0]
if len(fields) > 1:
v2 = fields[1]
return v1, v2 |
def _extract_input_connections(self):
"""
Given user input of interested connections, it will extract the info and output a list of tuples.
- input can be multiple values, separated by space;
- either host or port is optional
- it may be just one end,
- e.g., "host1<->host2 host3<-> host1:port1<->host2"
:return: None
"""
for con in self.connections:
      ends = con.strip().split('<->') # e.g., ['host1:port1', 'host2']
ends = filter(None, ends) # Remove '' elements
      if len(ends) == 0:
        continue
      host1, port1 = self._get_tuple(ends[0].split(':'))
      host2 = ''
      port2 = ''
      if len(ends) > 1:
        host2, port2 = self._get_tuple(ends[1].split(':'))
      self.input_connections.append((host1, port1, host2, port2)) |
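A standalone sketch of the connection-spec parsing described in the docstring above, turning a string like 'host1:port1<->host2 host3<->' into (host1, port1, host2, port2) tuples with '' for any missing piece:

# Standalone sketch of parsing connection filters such as "host1:port1<->host2 host3<->".
# Missing hosts/ports default to '' so later matching can treat them as wildcards.
def split_end(end):
  parts = end.split(':')
  host = parts[0] if len(parts) > 0 else ''
  port = parts[1] if len(parts) > 1 else ''
  return host, port


def parse_connections(spec):
  connections = []
  for con in spec.split():
    ends = [end for end in con.strip().split('<->') if end]
    if not ends:
      continue
    host1, port1 = split_end(ends[0])
    host2, port2 = ('', '')
    if len(ends) > 1:
      host2, port2 = split_end(ends[1])
    connections.append((host1, port1, host2, port2))
  return connections


print(parse_connections('host1:port1<->host2 host3<->'))
# [('host1', 'port1', 'host2', ''), ('host3', '', '', '')]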
def _extract_input_processes(self):
"""
Given user input of interested processes, it will extract the info and output a list of tuples.
- input can be multiple values, separated by space;
- either pid or process_name is optional
- e.g., "10001/python 10002/java cpp"
:return: None
"""
for proc in self.processes:
ends = proc.split('/')
pid, name = self._get_tuple(ends)
self.input_processes.append((pid, name)) |
def _match_host_port(self, host, port, cur_host, cur_port):
"""
Determine whether user-specified (host,port) matches current (cur_host, cur_port)
:param host,port: The user input of (host,port)
:param cur_host, cur_port: The current connection
    :return: True or False
"""
# if host is '', true; if not '', it should prefix-match cur_host
host_match = False
if not host:
host_match = True
elif cur_host.startswith(host): # allow for partial match
host_match = True
# if port is '', true; if not '', it should exactly match cur_port
port_match = False
if not port:
port_match = True
elif port == cur_port:
port_match = True
return host_match and port_match |
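The matching rule above is: an empty value acts as a wildcard, hosts match on prefix, and ports must match exactly. A compact sketch of that rule with a few worked cases:

# Sketch of the wildcard/prefix/exact rule used by _match_host_port(): '' matches anything,
# hosts match on prefix, ports must match exactly.
def match_host_port(host, port, cur_host, cur_port):
  host_match = (not host) or cur_host.startswith(host)
  port_match = (not port) or (port == cur_port)
  return host_match and port_match


print(match_host_port('host1', '', 'host1.prod.example.com', '21567'))      # True: prefix match, wildcard port
print(match_host_port('host1', '8080', 'host1.prod.example.com', '21567'))  # False: port differs
print(match_host_port('', '', 'anything', '1234'))                          # True: both wildcards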
def _match_processes(self, pid, name, cur_process):
"""
Determine whether user-specified "pid/processes" contain this process
:param pid: The user input of pid
:param name: The user input of process name
    :param cur_process: current process info string, e.g., '1234/firefox'
    :return: True or False; (if both pid and process name are given, then both of them need to match)
"""
cur_pid, cur_name = self._get_tuple(cur_process.split('/'))
pid_match = False
if not pid:
pid_match = True
elif pid == cur_pid:
pid_match = True
name_match = False
if not name:
name_match = True
elif name == cur_name:
name_match = True
return pid_match and name_match |
def _check_connection(self, local_end, remote_end, process):
"""
Check whether the connection is of interest or not
:param local_end: Local connection end point, e.g., 'host1:port1'
:param remote_end: Remote connection end point, e.g., 'host2:port2'
    :param process: Current connection's process info, e.g., '1234/firefox'
:return: a tuple of (local_end, remote_end, True/False); e.g. ('host1_23232', 'host2_2222', True)
"""
# check tcp end points
cur_host1, cur_port1 = self._get_tuple(local_end.split(':'))
cur_host2, cur_port2 = self._get_tuple(remote_end.split(':'))
    # check whether the connection is of interest by matching against the user-specified connections
host_port_is_interested = False
for (host1, port1, host2, port2) in self.input_connections:
if self._match_host_port(host1, port1, cur_host1, cur_port1) and self._match_host_port(host2, port2, cur_host2, cur_port2):
host_port_is_interested = True
break
if self._match_host_port(host1, port1, cur_host2, cur_port2) and self._match_host_port(host2, port2, cur_host1, cur_port1):
host_port_is_interested = True
break
    # check whether the connection is of interest by matching against the process names given in the config
process_is_interested = False
for pid, name in self.input_processes:
if self._match_processes(pid, name, process):
process_is_interested = True
break
return cur_host1 + '_' + cur_port1, cur_host2 + '_' + cur_port2, host_port_is_interested and process_is_interested |
def _add_data_line(self, data, col, value, ts):
"""
Append the data point to the dictionary of "data"
:param data: The dictionary containing all data
:param col: The sub-metric name e.g. 'host1_port1.host2_port2.SendQ'
    :param value: the metric value as a string (e.g., queue size taken from the log line)
:param ts: timestamp
:return: None
"""
if col in self.column_csv_map:
out_csv = self.column_csv_map[col]
else:
out_csv = self.get_csv(col) # column_csv_map[] is assigned in get_csv()
data[out_csv] = []
data[out_csv].append(ts + "," + value) |
def parse(self):
"""
Parse the netstat output file
:return: status of the metric parse
"""
    # sample netstat output: 2014-04-02 15:44:02.86612 tcp 9600 0 host1.localdomain.com.:21567 remote.remotedomain.com:51168 ESTABLISHED pid/process
data = {} # stores the data of each sub-metric
for infile in self.infile_list:
logger.info('Processing : %s', infile)
timestamp_format = None
with open(infile) as fh:
for line in fh:
if 'ESTABLISHED' not in line:
continue
words = line.split()
          if len(words) < 9 or words[2] != 'tcp': # words[8] (pid/process) is required below
continue
ts = words[0] + " " + words[1]
if not timestamp_format or timestamp_format == 'unknown':
timestamp_format = naarad.utils.detect_timestamp_format(ts)
if timestamp_format == 'unknown':
continue
ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
if self.ts_out_of_range(ts):
continue
# filtering based on user input; (local socket, remote socket, pid/process)
local_end, remote_end, interested = self._check_connection(words[5], words[6], words[8])
if interested:
self._add_data_line(data, local_end + '.' + remote_end + '.RecvQ', words[3], ts)
self._add_data_line(data, local_end + '.' + remote_end + '.SendQ', words[4], ts)
# post processing, putting data in csv files;
    for out_csv in data.keys():
      self.csv_files.append(out_csv)
      with open(out_csv, 'w') as fh:
        fh.write('\n'.join(sorted(data[out_csv])))
return True |
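The column indices used in this parser ([3] Recv-Q, [4] Send-Q, [5] local socket, [6] remote socket, [8] pid/process) follow directly from the timestamp-prefixed netstat line shown in the comment above. A small standalone sketch of that split, using a made-up sample line:

# Sketch of splitting a timestamp-prefixed netstat line into the fields parse() uses;
# the sample line is made up but mirrors the format in the comment above.
sample = ('2014-04-02 15:44:02.86612 tcp 9600 0 '
          'host1.localdomain.com.:21567 remote.remotedomain.com:51168 ESTABLISHED 1234/python')

words = sample.split()
record = {
  'timestamp': words[0] + ' ' + words[1],
  'recv_q': int(words[3]),
  'send_q': int(words[4]),
  'local_end': words[5],
  'remote_end': words[6],
  'process': words[8],  # pid/process, e.g. '1234/python'
}
print(sorted(record.items()))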
def get_csv(self, cpu, device=None):
"""
Returns the CSV file related to the given metric. The metric is determined by the cpu and device.
The cpu is the CPU as in the interrupts file for example CPU12.
The metric is a combination of the CPU and device. The device consists of IRQ #, the irq device ASCII name.
CPU0 CPU1
2014-10-29 00:27:42.15161 59: 29 2 IR-IO-APIC-edge timer
^ ^ ^ ^ ^
| | | | |
IRQ# Value Value IRQ Device Ascii Name
This would produce a metric CPU0.timer-IRQ59 and CPU1.timer-IRQ59 so one per IRQ per CPU.
:param cpu: The name of the cpu given as CPU#.
:param device: The device name as given by the system. <ASCII name>-IRQ<IRQ #>
:return: The CSV file for the metric.
"""
cpu = naarad.utils.sanitize_string(cpu)
if device is None:
outcsv = os.path.join(self.resource_directory, "{0}.{1}.csv".format(self.label, cpu))
self.csv_column_map[outcsv] = cpu
else:
device = naarad.utils.sanitize_string(device)
outcsv = os.path.join(self.resource_directory, "{0}.{1}.{2}.csv".format(self.label, cpu, device))
self.csv_column_map[outcsv] = cpu + '.' + device
return outcsv |
def find_header(self, infile):
"""
Parses the file and tries to find the header line. The header line has format:
2014-10-29 00:28:42.15161 CPU0 CPU1 CPU2 CPU3 ...
So should always have CPU# for each core. This function verifies a good header and
returns the list of CPUs that exist from the header.
:param infile: The opened file in read mode to find the header.
:return cpus: A list of the core names so in this example ['CPU0', 'CPU1', ...]
"""
cpus = []
for line in infile: # Pre-processing - Try to find header
if not self.is_header_line(line):
continue
# Verifying correctness of the header
cpu_header = line.split()
for cpu_h in cpu_header[2:]:
if not cpu_h.startswith('CPU'):
cpus = [] # Bad header so reset to nothing
break
else:
cpus.append(cpu_h)
if len(cpus) > 0: # We found the header
break
return cpus |
def parse(self):
"""
Processes the files for each IRQ and each CPU in terms of the differences.
Also produces accumulated interrupt count differences for each set of Ethernet IRQs.
Generally Ethernet has 8 TxRx IRQs thus all are combined so that one can see the overall interrupts being generated by the NIC.
Simplified Interrupt File Format: (See examples for example log)
CPU0 CPU1
2014-10-29 00:27:42.15161 59: 29 2 IR-IO-APIC-edge timer
2014-10-29 00:27:42.15161 60: 2123 0 IR-PCI-MSI-edge eth0
CPU0 CPU1
2014-10-29 00:27:42.15161 59: 29 2 IR-IO-APIC-edge timer
2014-10-29 00:27:42.15161 60: 2123 0 IR-PCI-MSI-edge eth0
:returns: True or False whether parsing was successful or not.
"""
if not os.path.isdir(self.outdir):
os.makedirs(self.outdir)
if not os.path.isdir(self.resource_directory):
os.makedirs(self.resource_directory)
data = {}
for input_file in self.infile_list:
logger.info('Processing : %s', input_file)
timestamp_format = None
with open(input_file, 'r') as infile:
# Get the header for this file
cpus = self.find_header(infile)
if len(cpus) == 0: # Make sure we have header otherwise go to next file
logger.error("Header not found for file: %s", input_file)
continue
# Parse the actual file after header
prev_data = None # Stores the previous interval's log data
curr_data = {} # Stores the current interval's log data
eth_data = {}
for line in infile:
if self.is_header_line(line): # New section so save old and aggregate ETH
prev_data = curr_data
curr_data = {}
# New section so store the collected Ethernet data
# Example Aggregate metric: PROCINTERRUPTS.AGGREGATE.eth0
for eth in eth_data:
outcsv = self.get_csv('AGGREGATE', eth)
if outcsv not in data:
data[outcsv] = []
data[outcsv].append(ts + ',' + str(eth_data[eth]))
eth_data = {}
continue
words = line.split()
if len(words) <= 4: # Does not have any CPU data so skip
continue
# Process timestamp or determine timestamp
ts = words[0] + " " + words[1]
if not timestamp_format or timestamp_format == 'unknown':
timestamp_format = naarad.utils.detect_timestamp_format(ts)
if timestamp_format == 'unknown':
continue
ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
if self.ts_out_of_range(ts): # See if time is in range
continue
# Process data lines
          # Note that some IRQs such as ERR and MIS do not have a device or an ascii name
          device = words[2].strip(':') # Get IRQ Number/Name
          if re.match(r"\d+", device):
# Devices with digits need ASCII name if exists
if (4 + len(cpus)) < len(words):
device = words[4 + len(cpus)] + "-IRQ" + device
else:
device = "IRQ" + device
else:
# For devices with IRQ # that aren't digits then has description
device = "-".join(words[(3 + len(cpus)):]) + "-IRQ" + device
# Deal with each column worth of data
for (cpu, datum) in zip(cpus, words[3:]):
if self.CPUS and cpu not in self.CPUS: # Skip if config defines which CPUs to look at
continue
outcsv = self.get_csv(cpu, device)
curr_data[outcsv] = int(datum)
if outcsv in data:
datum = int(datum) - prev_data[outcsv] # prev_data exists since outcsv exists in data
else:
data[outcsv] = []
datum = 0 # First data point is set to 0
# Store data point
data[outcsv].append(ts + ',' + str(datum))
# Deal with accumulating aggregate data for Ethernet
m = re.search("(?P<eth>eth\d)", device)
if m:
eth = m.group('eth')
if eth not in eth_data:
eth_data[eth] = 0
eth_data[eth] += datum
# Post processing, putting data in csv files
      for out_csv in data.keys():
        self.csv_files.append(out_csv)
        with open(out_csv, 'w') as csvf:
          csvf.write('\n'.join(sorted(data[out_csv])))
return True |
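/proc/interrupts counters are cumulative, so the parser above emits 0 for the first sample of each metric and current-minus-previous for every later one. A minimal standalone sketch of that delta logic, with made-up readings:

# Standalone sketch of turning cumulative interrupt counters into per-interval deltas,
# the way parse() does with prev_data / curr_data; the readings below are made up.
def cumulative_to_deltas(samples):
  """samples: list of (timestamp, cumulative_count) pairs; returns (timestamp, delta) pairs."""
  deltas = []
  previous = None
  for ts, count in samples:
    if previous is None:
      deltas.append((ts, 0))        # the first data point is pinned to 0
    else:
      deltas.append((ts, count - previous))
    previous = count
  return deltas


readings = [('00:27:42', 2123), ('00:27:43', 2160), ('00:27:44', 2210)]
print(cumulative_to_deltas(readings))
# [('00:27:42', 0), ('00:27:43', 37), ('00:27:44', 50)]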
def parse(self):
"""
Parse the vmstat file
:return: status of the metric parse
"""
file_status = True
for input_file in self.infile_list:
file_status = file_status and naarad.utils.is_valid_file(input_file)
if not file_status:
return False
status = True
data = {} # stores the data of each column
for input_file in self.infile_list:
logger.info('Processing : %s', input_file)
timestamp_format = None
with open(input_file) as fh:
for line in fh:
          words = line.split() # [0] is the date; [1] is the time; [2] is the field name; [3] is the value; [4] is the unit (if present)
if len(words) < 3:
continue
ts = words[0] + " " + words[1]
if not timestamp_format or timestamp_format == 'unknown':
timestamp_format = naarad.utils.detect_timestamp_format(ts)
if timestamp_format == 'unknown':
continue
ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
if self.ts_out_of_range(ts):
continue
col = words[2].strip(':')
# only process sub_metrics specified in config.
if self.sub_metrics and col not in self.sub_metrics:
continue
# add unit to metric description; most of the metrics have 'KB'; a few others do not have unit, they are in number of pages
if len(words) > 4 and words[4]:
unit = words[4]
else:
unit = 'pages'
self.sub_metric_unit[col] = unit
# stores the values in data[] before finally writing out
if col in self.column_csv_map:
out_csv = self.column_csv_map[col]
else:
out_csv = self.get_csv(col) # column_csv_map[] is assigned in get_csv()
data[out_csv] = []
data[out_csv].append(ts + "," + words[3])
# post processing, putting data in csv files;
    for out_csv in data.keys():
      self.csv_files.append(out_csv)
      with open(out_csv, 'w') as fh:
        fh.write('\n'.join(sorted(data[out_csv])))
return status |