def _extract_transform_colander_schema(self, args):
    """
    Extract schema from view args and transform it using
    the pipeline of schema transformers

    :param args: Arguments from the view decorator.
    :rtype: colander.MappingSchema()
    :returns: View schema cloned and transformed
    """
    schema = args.get('schema', colander.MappingSchema())
    if not isinstance(schema, colander.Schema):
        schema = schema()
    schema = schema.clone()
    for transformer in self.schema_transformers:
        schema = transformer(schema, args)
    return schema
def main():
    """Creates arguments and parses user input"""
    parser = argparse.ArgumentParser(
        description=_('Uploads selected file to working pomf.se clone'))
    parser.add_argument('files', metavar='file', nargs='*', type=str,
                        help=_('Files to upload'))
    parser.add_argument('-c', metavar='host_number', type=int, dest='host',
                        default=None,
                        help=_('The number (0-n) of the selected host (default is random)'))
    parser.add_argument('-l', dest='only_link', action='store_const',
                        const=True, default=False,
                        help=_('Changes output to just link to the file'))
    parser.add_argument('-e', dest='encrypt', action='store_const',
                        const=True, default=False,
                        help=_('Encrypts then uploads the files.'))
    parser.add_argument('-d', dest='decrypt', action='store_const',
                        const=True, default=False,
                        help=_('Decrypts files from links with encrypted files'))
    parser.add_argument('-j', dest="local_list", default=False,
                        help=_('Path to a local list file'))
    parser.add_argument('-s', dest="show_list", action='store_const',
                        const=True, default=False,
                        help=_('Show the host list (will not upload your files when called)'))
    parser.add_argument('-m', dest='limit_size', action='store_const',
                        const=True, default=False,
                        help=_('Do not upload file if it exceeds the certain host limit'))
    parser.add_argument('-nc', dest='no_cloudflare', action='store_const',
                        const=True, default=False,
                        help=_('Do not use hosts which use Cloudflare.'))
    parser.add_argument('--log-file', metavar="LOGFILE", dest="logfile",
                        default="~/limf.log",
                        help=_("The location of log file"))
    parser.add_argument('--log', dest='log', action="store_const",
                        const=True, default=False,
                        help=_("Enables the logging feature, default logfile is ~/limf.log"))
    args = parser.parse_args()

    try:
        if args.local_list:
            clone_list = retrieve_local_host_list(args.local_list)
        else:
            clone_list = retrieve_online_host_list()

        if len(min(clone_list, key=len)) < 5 and (args.limit_size or args.no_cloudflare):
            print(_("For newer options, please update your host_list."))
            exit()

        if args.host and not(0 <= args.host < len(clone_list)):
            print(generate_host_string(clone_list))
            exit()

        parse_arguments(args, clone_list)
    except FileNotFoundError:
        print(_('Please enter a valid file.'))
def convert(self, schema_node, definition_handler):
    """
    Convert node schema into a parameter object.
    """
    converted = {
        'name': schema_node.name,
        'in': self._in,
        'required': schema_node.required
    }
    if schema_node.description:
        converted['description'] = schema_node.description
    if schema_node.default:
        converted['default'] = schema_node.default

    schema = definition_handler(schema_node)
    # Parameters shouldn't have a title
    schema.pop('title', None)
    converted.update(schema)

    if schema.get('type') == 'array':
        converted['items'] = {'type': schema['items']['type']}

    return converted
def cornice_enable_openapi_view( config, api_path='/api-explorer/swagger.json', permission=NO_PERMISSION_REQUIRED, route_factory=None, **kwargs): """ :param config: Pyramid configurator object :param api_path: where to expose swagger JSON definition view :param permission: pyramid permission for those views :param route_factory: factory for context object for those routes :param kwargs: kwargs that will be passed to CorniceSwagger's `generate()` This registers and configures the view that serves api definitions """ config.registry.settings['cornice_swagger.spec_kwargs'] = kwargs config.add_route('cornice_swagger.open_api_path', api_path, factory=route_factory) config.add_view('cornice_swagger.views.open_api_json_view', renderer='json', permission=permission, route_name='cornice_swagger.open_api_path')
def cornice_enable_openapi_explorer( config, api_explorer_path='/api-explorer', permission=NO_PERMISSION_REQUIRED, route_factory=None, **kwargs): """ :param config: Pyramid configurator object :param api_explorer_path: where to expose Swagger UI interface view :param permission: pyramid permission for those views :param route_factory: factory for context object for those routes This registers and configures the view that serves api explorer """ config.add_route('cornice_swagger.api_explorer_path', api_explorer_path, factory=route_factory) config.add_view('cornice_swagger.views.swagger_ui_template_view', permission=permission, route_name='cornice_swagger.api_explorer_path')
def trim(docstring):
    """
    Convert tabs to spaces, and remove the extra spaces / tabs
    that are in front of the text in docstrings.

    Implementation taken from http://www.python.org/dev/peps/pep-0257/
    """
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = six.u(docstring).expandtabs().splitlines()
    lines = [line.strip() for line in lines]
    res = six.u('\n').join(lines)
    return res
def merge_dicts(base, changes):
    """Merge `changes` into `base` recursively, without overwriting existing values.

    :param base: the dict that will be altered.
    :param changes: changes to update base.
    """
    for k, v in changes.items():
        if isinstance(v, dict):
            merge_dicts(base.setdefault(k, {}), v)
        else:
            base.setdefault(k, v)
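A minimal usage sketch for merge_dicts, with hypothetical values and assuming the function above is importable: keys already present in base win, missing ones are filled in from changes.

# Hypothetical example: defaults are merged in without clobbering user settings.
user_cfg = {'db': {'host': 'localhost'}, 'debug': True}
defaults = {'db': {'host': 'db.example.org', 'port': 5432}, 'debug': False}

merge_dicts(user_cfg, defaults)
assert user_cfg == {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}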
def get_transition_viewset_method(transition_name, **kwargs):
    '''
    Create a viewset method for the provided `transition_name`
    '''
    @detail_route(methods=['post'], **kwargs)
    def inner_func(self, request, pk=None, **kwargs):
        object = self.get_object()
        transition_method = getattr(object, transition_name)

        transition_method(by=self.request.user)

        if self.save_after_transition:
            object.save()

        serializer = self.get_serializer(object)
        return Response(serializer.data)

    return inner_func
def get_viewset_transition_action_mixin(model, **kwargs):
    '''
    Find all transitions defined on `model`, then create a corresponding
    viewset action method for each and apply it to `Mixin`. Finally, return
    `Mixin`
    '''
    instance = model()

    class Mixin(object):
        save_after_transition = True

    transitions = instance.get_all_status_transitions()
    transition_names = set(x.name for x in transitions)
    for transition_name in transition_names:
        setattr(
            Mixin,
            transition_name,
            get_transition_viewset_method(transition_name, **kwargs)
        )

    return Mixin
def fresh_cookies(ctx, mold=''): """Refresh the project from the original cookiecutter template.""" mold = mold or "https://github.com/Springerle/py-generic-project.git" # TODO: URL from config tmpdir = os.path.join(tempfile.gettempdir(), "cc-upgrade-pygments-markdown-lexer") if os.path.isdir('.git'): # TODO: Ensure there are no local unstashed changes pass # Make a copy of the new mold version if os.path.isdir(tmpdir): shutil.rmtree(tmpdir) if os.path.exists(mold): shutil.copytree(mold, tmpdir, ignore=shutil.ignore_patterns( ".git", ".svn", "*~", )) else: ctx.run("git clone {} {}".format(mold, tmpdir)) # Copy recorded "cookiecutter.json" into mold shutil.copy2("project.d/cookiecutter.json", tmpdir) with pushd('..'): ctx.run("cookiecutter --no-input {}".format(tmpdir)) if os.path.exists('.git'): ctx.run("git status")
def ci(ctx): """Perform continuous integration tasks.""" opts = [''] # 'tox' makes no sense in Travis if os.environ.get('TRAVIS', '').lower() == 'true': opts += ['test.pytest'] else: opts += ['test.tox'] ctx.run("invoke --echo --pty clean --all build --docs check --reports{}".format(' '.join(opts)))
def _build_metadata(): # pylint: disable=too-many-locals, too-many-branches "Return project's metadata as a dict." # Handle metadata in package source expected_keys = ('url', 'version', 'license', 'author', 'author_email', 'long_description', 'keywords') metadata = {} with io.open(srcfile('src', package_name, '__init__.py'), encoding='utf-8') as handle: pkg_init = handle.read() # Get default long description from docstring metadata['long_description'] = re.search(r'^"""(.+?)^"""$', pkg_init, re.DOTALL|re.MULTILINE).group(1) for line in pkg_init.splitlines(): match = re.match(r"""^__({0})__ += (?P<q>['"])(.+?)(?P=q)$""".format('|'.join(expected_keys)), line) if match: metadata[match.group(1)] = match.group(3) if not all(i in metadata for i in expected_keys): raise RuntimeError("Missing or bad metadata in '{0}' package: {1}" .format(name, ', '.join(sorted(set(expected_keys) - set(metadata.keys()))),)) text = metadata['long_description'].strip() if text: metadata['description'], text = text.split('.', 1) metadata['description'] = ' '.join(metadata['description'].split()).strip() + '.' # normalize whitespace metadata['long_description'] = textwrap.dedent(text).strip() metadata['keywords'] = metadata['keywords'].replace(',', ' ').strip().split() # Load requirements files requirements_files = dict( install = 'requirements.txt', setup = 'setup-requirements.txt', test = 'test-requirements.txt', ) requires = {} for key, filename in requirements_files.items(): requires[key] = [] if os.path.exists(srcfile(filename)): with io.open(srcfile(filename), encoding='utf-8') as handle: for line in handle: line = line.strip() if line and not line.startswith('#'): if any(line.startswith(i) for i in ('-e', 'http://', 'https://')): line = line.split('#egg=')[1] requires[key].append(line) if not any('pytest' == re.split('[\t ,<=>]', i.lower())[0] for i in requires['test']): requires['test'].append('pytest') # add missing requirement # CLI entry points console_scripts = [] for path, dirs, files in os.walk(srcfile('src', package_name)): dirs = [i for i in dirs if not i.startswith('.')] if '__main__.py' in files: path = path[len(srcfile('src') + os.sep):] appname = path.split(os.sep)[-1] with io.open(srcfile('src', path, '__main__.py'), encoding='utf-8') as handle: for line in handle.readlines(): match = re.match(r"""^__app_name__ += (?P<q>['"])(.+?)(?P=q)$""", line) if match: appname = match.group(2) console_scripts.append('{0} = {1}.__main__:cli'.format(appname, path.replace(os.sep, '.'))) # Add some common files to EGG-INFO candidate_files = [ 'LICENSE', 'NOTICE', 'README', 'README.md', 'README.rst', 'README.txt', 'CHANGES', 'CHANGELOG', 'debian/changelog', ] data_files = defaultdict(list) for filename in candidate_files: if os.path.exists(srcfile(filename)): data_files['EGG-INFO'].append(filename) # Complete project metadata classifiers = [] for classifiers_txt in ('classifiers.txt', 'project.d/classifiers.txt'): classifiers_txt = srcfile(classifiers_txt) if os.path.exists(classifiers_txt): with io.open(classifiers_txt, encoding='utf-8') as handle: classifiers = [i.strip() for i in handle if i.strip() and not i.startswith('#')] break entry_points.setdefault('console_scripts', []).extend(console_scripts) metadata.update(dict( name = name, package_dir = {'': 'src'}, packages = find_packages(srcfile('src'), exclude=['tests']), data_files = data_files.items(), zip_safe = False, include_package_data = True, install_requires = requires['install'], setup_requires = requires['setup'], tests_require = requires['test'], 
classifiers = classifiers, cmdclass = dict( test = PyTest, ), entry_points = entry_points, )) return metadata
def py_hash(key, num_buckets):
    """Generate a number in the range [0, num_buckets).

    Args:
        key (int): The key to hash.
        num_buckets (int): Number of buckets to use.

    Returns:
        The bucket number `key` computes to.

    Raises:
        ValueError: If `num_buckets` is not a positive number.
    """
    b, j = -1, 0

    if num_buckets < 1:
        raise ValueError('num_buckets must be a positive number')

    while j < num_buckets:
        b = int(j)
        key = ((key * long(2862933555777941757)) + 1) & 0xffffffffffffffff
        j = float(b + 1) * (float(1 << 31) / float((key >> 33) + 1))

    return int(b)
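A small, hypothetical usage sketch of the jump-consistent-hash function above. It assumes a Python 2 interpreter, since the implementation uses long (on Python 3 you would substitute int); NUM_SHARDS and shard_for are illustrative names, not part of the original code.

# Hypothetical sharding helper built on py_hash.
NUM_SHARDS = 8

def shard_for(key):
    # Deterministic: the same key always maps to the same shard for a
    # fixed NUM_SHARDS, and growing the shard count only remaps ~1/n of keys.
    return py_hash(key, NUM_SHARDS)

assert shard_for(12345) == shard_for(12345)
assert 0 <= shard_for(12345) < NUM_SHARDS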
def setup(app):
    """ Initializer for Sphinx extension API.

        See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
    """
    lexer = MarkdownLexer()
    for alias in lexer.aliases:
        app.add_lexer(alias, lexer)

    return dict(version=__version__)
def load(self):
    """Return a dict of stats."""
    ret = {}

    # Read the mdstat file
    with open(self.get_path(), 'r') as f:
        # lines is a list of lines (with \n)
        lines = f.readlines()

    # First line: get the personalities
    # The "Personalities" line tells you what RAID level the kernel currently supports.
    # This can be changed by either changing the raid modules or recompiling the kernel.
    # Possible personalities include:
    # [raid0] [raid1] [raid4] [raid5] [raid6] [linear] [multipath] [faulty]
    ret['personalities'] = self.get_personalities(lines[0])

    # Lines from the second to the one before last: array definitions
    ret['arrays'] = self.get_arrays(lines[1:-1], ret['personalities'])

    # Save the file content as is for the __str__ method
    self.content = reduce(lambda x, y: x + y, lines)

    return ret
def get_personalities(self, line):
    """Return a list of personalities read from the input line."""
    return [split('\W+', i)[1] for i in line.split(':')[1].split(' ') if i.startswith('[')]
def get_arrays(self, lines, personalities=[]): """Return a dict of arrays.""" ret = {} i = 0 while i < len(lines): try: # First array line: get the md device md_device = self.get_md_device_name(lines[i]) except IndexError: # No array detected pass else: # Array detected if md_device is not None: # md device line ret[md_device] = self.get_md_device(lines[i], personalities) # md config/status line i += 1 ret[md_device].update(self.get_md_status(lines[i])) i += 1 return ret
def get_md_device(self, line, personalities=[]):
    """Return a dict of the md device defined in the line."""
    ret = {}

    splitted = split('\W+', line)
    # Raid status
    # Active or 'started'. An inactive array is usually faulty.
    # Stopped arrays aren't visible here.
    ret['status'] = splitted[1]
    if splitted[2] in personalities:
        # Raid type (ex: RAID5)
        ret['type'] = splitted[2]
        # Array's components
        ret['components'] = self.get_components(line, with_type=True)
    else:
        # Raid type (ex: RAID5)
        ret['type'] = None
        # Array's components
        ret['components'] = self.get_components(line, with_type=False)

    return ret
def get_md_status(self, line):
    """Return a dict of the md status defined in the line."""
    ret = {}

    splitted = split('\W+', line)
    if len(splitted) < 7:
        ret['available'] = None
        ret['used'] = None
        ret['config'] = None
    else:
        # The final 2 entries on this line: [n/m] [UUUU_]
        # [n/m] means that ideally the array would have n devices however,
        # currently, m devices are in use.
        # Obviously when m >= n then things are good.
        ret['available'] = splitted[-4]
        ret['used'] = splitted[-3]
        # [UUUU_] represents the status of each device, either U for up or _ for down.
        ret['config'] = splitted[-2]

    return ret
def get_components(self, line, with_type=True):
    """Return a dict of the components in the line.

    key: device name (ex: 'sdc1')
    value: device role number
    """
    ret = {}

    # Ignore (F) (see test 08)
    line2 = reduce(lambda x, y: x + y, split('\(.+\)', line))
    if with_type:
        splitted = split('\W+', line2)[3:]
    else:
        splitted = split('\W+', line2)[2:]
    ret = dict(zip(splitted[0::2], splitted[1::2]))

    return ret
def register_receivers(app, config):
    """Register signal receivers which send events."""
    for event_name, event_config in config.items():
        event_builders = [
            obj_or_import_string(func)
            for func in event_config.get('event_builders', [])
        ]

        signal = obj_or_import_string(event_config['signal'])
        signal.connect(
            EventEmmiter(event_name, event_builders), sender=app, weak=False
        )
def check(self, query):
    """
    :param query:
    """
    if query.get_type() != Keyword.DELETE:
        return Ok(True)
    return Err("Delete queries are forbidden.")
def set_scheduled(self):
    """
    Returns True if state was successfully changed from idle to scheduled.
    """
    with self._idle_lock:
        if self._idle:
            self._idle = False
            return True
        return False
def post(self, **kwargs):
    """Get statistics."""
    data = request.get_json(force=False)
    if data is None:
        data = {}
    result = {}
    for query_name, config in data.items():
        if config is None or not isinstance(config, dict) \
                or (set(config.keys()) != {'stat', 'params'}
                    and set(config.keys()) != {'stat'}):
            raise InvalidRequestInputError(
                'Invalid Input. It should be of the form '
                '{ STATISTIC_NAME: { "stat": STAT_TYPE, '
                '"params": STAT_PARAMS }}'
            )
        stat = config['stat']
        params = config.get('params', {})
        try:
            query_cfg = current_stats.queries[stat]
        except KeyError:
            raise UnknownQueryError(stat)

        permission = current_stats.permission_factory(stat, params)
        if permission is not None and not permission.can():
            message = ('You do not have a permission to query the '
                       'statistic "{}" with those '
                       'parameters'.format(stat))
            if current_user.is_authenticated:
                abort(403, message)
            abort(401, message)
        try:
            query = query_cfg.query_class(**query_cfg.query_config)
            result[query_name] = query.run(**params)
        except ValueError as e:
            raise InvalidRequestInputError(e.args[0])
        except NotFoundError as e:
            return None
    return self.make_response(result)
def check(self, query):
    """
    :param query:
    """
    if query.get_type() not in {Keyword.SELECT, Keyword.DELETE}:
        # Only select and delete queries deal with time durations
        # All others are not affected by this rule. Bailing out.
        return Ok(True)

    datapoints = query.get_datapoints()
    if datapoints <= self.max_datapoints:
        return Ok(True)

    return Err(("Expecting {} datapoints from that query, which is above the threshold! "
                "Set a date range (e.g. where time > now() - 24h), "
                "increase grouping (e.g. group by time(24h)) "
                "or limit the number of datapoints (e.g. limit 100)").format(datapoints))
def _get_oldest_event_timestamp(self):
    """Search for the oldest event timestamp."""
    # Retrieve the oldest event in order to start aggregation
    # from there
    query_events = Search(
        using=self.client,
        index=self.event_index
    )[0:1].sort(
        {'timestamp': {'order': 'asc'}}
    )
    result = query_events.execute()

    # There might not be any events yet if the first event has been
    # indexed but the indices have not been refreshed yet.
    if len(result) == 0:
        return None
    return parser.parse(result[0]['timestamp'])
def get_bookmark(self):
    """Get last aggregation date."""
    if not Index(self.aggregation_alias, using=self.client).exists():
        if not Index(self.event_index, using=self.client).exists():
            return datetime.date.today()
        return self._get_oldest_event_timestamp()

    # retrieve the oldest bookmark
    query_bookmark = Search(
        using=self.client,
        index=self.aggregation_alias,
        doc_type=self.bookmark_doc_type
    )[0:1].sort(
        {'date': {'order': 'desc'}}
    )
    bookmarks = query_bookmark.execute()
    # if no bookmark is found but the index exists, the bookmark was somehow
    # lost or never written, so restart from the beginning
    if len(bookmarks) == 0:
        return self._get_oldest_event_timestamp()

    # change it to doc_id_suffix
    bookmark = datetime.datetime.strptime(bookmarks[0].date, self.doc_id_suffix)
    return bookmark
def set_bookmark(self): """Set bookmark for starting next aggregation.""" def _success_date(): bookmark = { 'date': self.new_bookmark or datetime.datetime.utcnow(). strftime(self.doc_id_suffix) } yield dict(_index=self.last_index_written, _type=self.bookmark_doc_type, _source=bookmark) if self.last_index_written: bulk(self.client, _success_date(), stats_only=True)
def _format_range_dt(self, d): """Format range filter datetime to the closest aggregation interval.""" if not isinstance(d, six.string_types): d = d.isoformat() return '{0}||/{1}'.format( d, self.dt_rounding_map[self.aggregation_interval])
def agg_iter(self, lower_limit=None, upper_limit=None): """Aggregate and return dictionary to be indexed in ES.""" lower_limit = lower_limit or self.get_bookmark().isoformat() upper_limit = upper_limit or ( datetime.datetime.utcnow().replace(microsecond=0).isoformat()) aggregation_data = {} self.agg_query = Search(using=self.client, index=self.event_index).\ filter('range', timestamp={ 'gte': self._format_range_dt(lower_limit), 'lte': self._format_range_dt(upper_limit)}) # apply query modifiers for modifier in self.query_modifiers: self.agg_query = modifier(self.agg_query) hist = self.agg_query.aggs.bucket( 'histogram', 'date_histogram', field='timestamp', interval=self.aggregation_interval ) terms = hist.bucket( 'terms', 'terms', field=self.aggregation_field, size=0 ) top = terms.metric( 'top_hit', 'top_hits', size=1, sort={'timestamp': 'desc'} ) for dst, (metric, src, opts) in self.metric_aggregation_fields.items(): terms.metric(dst, metric, field=src, **opts) results = self.agg_query.execute() index_name = None for interval in results.aggregations['histogram'].buckets: interval_date = datetime.datetime.strptime( interval['key_as_string'], '%Y-%m-%dT%H:%M:%S') for aggregation in interval['terms'].buckets: aggregation_data['timestamp'] = interval_date.isoformat() aggregation_data[self.aggregation_field] = aggregation['key'] aggregation_data['count'] = aggregation['doc_count'] if self.metric_aggregation_fields: for f in self.metric_aggregation_fields: aggregation_data[f] = aggregation[f]['value'] doc = aggregation.top_hit.hits.hits[0]['_source'] for destination, source in self.copy_fields.items(): if isinstance(source, six.string_types): aggregation_data[destination] = doc[source] else: aggregation_data[destination] = source( doc, aggregation_data ) index_name = 'stats-{0}-{1}'.\ format(self.event, interval_date.strftime( self.index_name_suffix)) self.indices.add(index_name) yield dict(_id='{0}-{1}'. format(aggregation['key'], interval_date.strftime( self.doc_id_suffix)), _index=index_name, _type=self.aggregation_doc_type, _source=aggregation_data) self.last_index_written = index_name
def run(self, start_date=None, end_date=None, update_bookmark=True): """Calculate statistics aggregations.""" # If no events have been indexed there is nothing to aggregate if not Index(self.event_index, using=self.client).exists(): return lower_limit = start_date or self.get_bookmark() # Stop here if no bookmark could be estimated. if lower_limit is None: return upper_limit = min( end_date or datetime.datetime.max, # ignore if `None` datetime.datetime.utcnow().replace(microsecond=0), datetime.datetime.combine( lower_limit + datetime.timedelta(self.batch_size), datetime.datetime.min.time()) ) while upper_limit <= datetime.datetime.utcnow(): self.indices = set() self.new_bookmark = upper_limit.strftime(self.doc_id_suffix) bulk(self.client, self.agg_iter(lower_limit, upper_limit), stats_only=True, chunk_size=50) # Flush all indices which have been modified current_search_client.indices.flush( index=','.join(self.indices), wait_if_ongoing=True ) if update_bookmark: self.set_bookmark() self.indices = set() lower_limit = lower_limit + datetime.timedelta(self.batch_size) upper_limit = min( end_date or datetime.datetime.max, # ignore if `None`` datetime.datetime.utcnow().replace(microsecond=0), lower_limit + datetime.timedelta(self.batch_size) ) if lower_limit > upper_limit: break
def list_bookmarks(self, start_date=None, end_date=None, limit=None): """List the aggregation's bookmarks.""" query = Search( using=self.client, index=self.aggregation_alias, doc_type=self.bookmark_doc_type ).sort({'date': {'order': 'desc'}}) range_args = {} if start_date: range_args['gte'] = self._format_range_dt( start_date.replace(microsecond=0)) if end_date: range_args['lte'] = self._format_range_dt( end_date.replace(microsecond=0)) if range_args: query = query.filter('range', date=range_args) return query[0:limit].execute() if limit else query.scan()
def delete(self, start_date=None, end_date=None): """Delete aggregation documents.""" aggs_query = Search( using=self.client, index=self.aggregation_alias, doc_type=self.aggregation_doc_type ).extra(_source=False) range_args = {} if start_date: range_args['gte'] = self._format_range_dt( start_date.replace(microsecond=0)) if end_date: range_args['lte'] = self._format_range_dt( end_date.replace(microsecond=0)) if range_args: aggs_query = aggs_query.filter('range', timestamp=range_args) bookmarks_query = Search( using=self.client, index=self.aggregation_alias, doc_type=self.bookmark_doc_type ).sort({'date': {'order': 'desc'}}) if range_args: bookmarks_query = bookmarks_query.filter('range', date=range_args) def _delete_actions(): for query in (aggs_query, bookmarks_query): affected_indices = set() for doc in query.scan(): affected_indices.add(doc.meta.index) yield dict(_index=doc.meta.index, _op_type='delete', _id=doc.meta.id, _type=doc.meta.doc_type) current_search_client.indices.flush( index=','.join(affected_indices), wait_if_ongoing=True) bulk(self.client, _delete_actions(), refresh=True)
def parse(self, group_by_stmt):
    """
    Extract the data resolution of a query in seconds
    E.g. "group by time(99s)" => 99

    :param group_by_stmt: A raw InfluxDB group by statement
    """
    if not group_by_stmt:
        return Resolution.MAX_RESOLUTION

    m = self.GROUP_BY_TIME_PATTERN.match(group_by_stmt)
    if not m:
        return None

    value = int(m.group(1))
    unit = m.group(2)
    resolution = self.convert_to_seconds(value, unit)

    # We can't have a higher resolution than the max resolution
    return max(resolution, Resolution.MAX_RESOLUTION)
def get(self, timeout=None):
    """
    Return value on success, or raise exception on failure.
    """
    result = None
    try:
        result = self._result.get(True, timeout=timeout)
    except Empty:
        raise Timeout()
    if isinstance(result, Failure):
        six.reraise(*result.exc_info)
    else:
        return result
def _events_process(event_types=None, eager=False): """Process stats events.""" event_types = event_types or list(current_stats.enabled_events) if eager: process_events.apply((event_types,), throw=True) click.secho('Events processed successfully.', fg='green') else: process_events.delay(event_types) click.secho('Events processing task sent...', fg='yellow')
def _aggregations_process(aggregation_types=None, start_date=None, end_date=None, update_bookmark=False, eager=False): """Process stats aggregations.""" aggregation_types = (aggregation_types or list(current_stats.enabled_aggregations)) if eager: aggregate_events.apply( (aggregation_types,), dict(start_date=start_date, end_date=end_date, update_bookmark=update_bookmark), throw=True) click.secho('Aggregations processed successfully.', fg='green') else: aggregate_events.delay( aggregation_types, start_date=start_date, end_date=end_date) click.secho('Aggregations processing task sent...', fg='yellow')
def _aggregations_delete(aggregation_types=None, start_date=None, end_date=None): """Delete computed aggregations.""" aggregation_types = (aggregation_types or list(current_stats.enabled_aggregations)) for a in aggregation_types: aggr_cfg = current_stats.aggregations[a] aggregator = aggr_cfg.aggregator_class( name=aggr_cfg.name, **aggr_cfg.aggregator_config) aggregator.delete(start_date, end_date)
def _aggregations_list_bookmarks(aggregation_types=None, start_date=None, end_date=None, limit=None): """List aggregation bookmarks.""" aggregation_types = (aggregation_types or list(current_stats.enabled_aggregations)) for a in aggregation_types: aggr_cfg = current_stats.aggregations[a] aggregator = aggr_cfg.aggregator_class( name=aggr_cfg.name, **aggr_cfg.aggregator_config) bookmarks = aggregator.list_bookmarks(start_date, end_date, limit) click.echo('{}:'.format(a)) for b in bookmarks: click.echo(' - {}'.format(b.date))
def _events_config(self): """Load events configuration.""" # import iter_entry_points here so that it can be mocked in tests result = {} for ep in iter_entry_points( group=self.entry_point_group_events): for cfg in ep.load()(): if cfg['event_type'] not in self.enabled_events: continue elif cfg['event_type'] in result: raise DuplicateEventError( 'Duplicate event {0} in entry point ' '{1}'.format(cfg['event_type'], ep.name)) # Update the default configuration with env/overlay config. cfg.update( self.enabled_events[cfg['event_type']] or {} ) result[cfg['event_type']] = cfg return result
def _aggregations_config(self):
    """Load aggregation configurations."""
    result = {}
    for ep in iter_entry_points(group=self.entry_point_group_aggs):
        for cfg in ep.load()():
            if cfg['aggregation_name'] not in self.enabled_aggregations:
                continue
            elif cfg['aggregation_name'] in result:
                raise DuplicateAggregationError(
                    'Duplicate aggregation {0} in entry point '
                    '{1}'.format(cfg['aggregation_name'], ep.name))
            # Update the default configuration with env/overlay config.
            cfg.update(
                self.enabled_aggregations[cfg['aggregation_name']] or {}
            )
            result[cfg['aggregation_name']] = cfg
    return result
def _queries_config(self):
    """Load queries configuration."""
    result = {}
    for ep in iter_entry_points(group=self.entry_point_group_queries):
        for cfg in ep.load()():
            if cfg['query_name'] not in self.enabled_queries:
                continue
            elif cfg['query_name'] in result:
                raise DuplicateQueryError(
                    'Duplicate query {0} in entry point '
                    '{1}'.format(cfg['query_name'], ep.name))
            # Update the default configuration with env/overlay config.
            cfg.update(
                self.enabled_queries[cfg['query_name']] or {}
            )
            result[cfg['query_name']] = cfg
    return result
def publish(self, event_type, events):
    """Publish events."""
    assert event_type in self.events
    current_queues.queues['stats-{}'.format(event_type)].publish(events)
def consume(self, event_type, no_ack=True, payload=True):
    """Consume all pending events."""
    assert event_type in self.events
    return current_queues.queues['stats-{}'.format(event_type)].consume(
        payload=payload)
def init_app(self, app, entry_point_group_events='invenio_stats.events', entry_point_group_aggs='invenio_stats.aggregations', entry_point_group_queries='invenio_stats.queries'): """Flask application initialization.""" self.init_config(app) state = _InvenioStatsState( app, entry_point_group_events=entry_point_group_events, entry_point_group_aggs=entry_point_group_aggs, entry_point_group_queries=entry_point_group_queries ) self._state = app.extensions['invenio-stats'] = state if app.config['STATS_REGISTER_RECEIVERS']: signal_receivers = {key: value for key, value in app.config.get('STATS_EVENTS', {}).items() if 'signal' in value} register_receivers(app, signal_receivers) return state
def tell(self, message, sender=no_sender): """ Send a message to this actor. Asynchronous fire-and-forget. :param message: The message to send. :type message: Any :param sender: The sender of the message. If provided it will be made available to the receiving actor via the :attr:`Actor.sender` attribute. :type sender: :class:`Actor` """ if sender is not no_sender and not isinstance(sender, ActorRef): raise ValueError("Sender must be actor reference") self._cell.send_message(message, sender)
def get_anonymization_salt(ts):
    """Get the anonymization salt based on the event timestamp's day."""
    salt_key = 'stats:salt:{}'.format(ts.date().isoformat())
    salt = current_cache.get(salt_key)
    if not salt:
        salt_bytes = os.urandom(32)
        salt = b64encode(salt_bytes).decode('utf-8')
        current_cache.set(salt_key, salt, timeout=60 * 60 * 24)
    return salt
def get_geoip(ip):
    """Lookup country for IP address."""
    reader = geolite2.reader()
    ip_data = reader.get(ip) or {}
    return ip_data.get('country', {}).get('iso_code')
def get_user():
    """User information.

    .. note::

       **Privacy note** A user's IP address, user agent string, and user id
       (if logged in) are sent to a message queue, where they are stored for
       about 5 minutes. The information is used to:

       - Detect robot visits from the user agent string.
       - Generate an anonymized visitor id (using a random salt per day).
       - Detect the user's host country based on the IP address.

       The information is then discarded.
    """
    return dict(
        ip_address=request.remote_addr,
        user_agent=request.user_agent.string,
        user_id=(
            current_user.get_id() if current_user.is_authenticated else None
        ),
        session_id=session.get('sid_s')
    )
def default_permission_factory(query_name, params):
    """Default permission factory.

    It enables by default the statistics if they don't have a dedicated
    permission factory.
    """
    from invenio_stats import current_stats
    if current_stats.queries[query_name].permission_factory is None:
        return AllowAllPermission
    else:
        return current_stats.queries[query_name].permission_factory(
            query_name, params
        )
def load_config():
    """
    Load settings from default config and optionally
    overwrite with config file and commandline parameters
    (in that order).
    """
    # We start with the default config
    config = flatten(default_config.DEFAULT_CONFIG)

    # Read commandline arguments
    cli_config = flatten(parse_args())

    if "configfile" in cli_config:
        logging.info("Reading config file {}".format(cli_config['configfile']))
        configfile = parse_configfile(cli_config['configfile'])
        config = overwrite_config(config, configfile)

    # Parameters from commandline take precedence over all others
    config = overwrite_config(config, cli_config)

    # Set verbosity level
    if 'verbose' in config:
        if config['verbose'] == 1:
            logging.getLogger().setLevel(logging.INFO)
        elif config['verbose'] > 1:
            logging.getLogger().setLevel(logging.DEBUG)

    return ObjectView(config)
def parse_configfile(configfile):
    """
    Read settings from file

    :param configfile:
    """
    with open(configfile) as f:
        try:
            return yaml.safe_load(f)
        except Exception as e:
            logging.fatal("Could not load default config file: %s", e)
            exit(-1)
def register_templates(): """Register elasticsearch templates for events.""" event_templates = [current_stats._events_config[e] ['templates'] for e in current_stats._events_config] aggregation_templates = [current_stats._aggregations_config[a] ['templates'] for a in current_stats._aggregations_config] return event_templates + aggregation_templates
def check(self, query):
    """
    :param query:
    """
    if query.get_type() in {Keyword.LIST, Keyword.DROP}:
        series = query.series_stmt
    else:
        series = query.from_stmt

    if len(series) >= self.min_series_name_length:
        return Ok(True)
    return Err("Series name too short. Please be more precise.")
def process_events(event_types): """Index statistics events.""" results = [] for e in event_types: processor = current_stats.events[e].processor_class( **current_stats.events[e].processor_config) results.append((e, processor.run())) return results
def aggregate_events(aggregations, start_date=None, end_date=None, update_bookmark=True): """Aggregate indexed events.""" start_date = dateutil_parse(start_date) if start_date else None end_date = dateutil_parse(end_date) if end_date else None results = [] for a in aggregations: aggr_cfg = current_stats.aggregations[a] aggregator = aggr_cfg.aggregator_class( name=aggr_cfg.name, **aggr_cfg.aggregator_config) results.append(aggregator.run(start_date, end_date, update_bookmark)) return results
def ask(actor, message): """ Send a message to `actor` and return a :class:`Future` holding a possible reply. To receive a result, the actor MUST send a reply to `sender`. :param actor: :type actor: :class:`ActorRef`. :param message: :type message: :type: Any :return: A future holding the result. """ sender = PromiseActorRef() actor.tell(message, sender) return sender.promise.future
def get_queries(parameters):
    """
    Get a list of all queries (q=... parameters) from a URL parameter string

    :param parameters: The url parameter list
    """
    parsed_params = urlparse.parse_qs(parameters)
    if 'q' not in parsed_params:
        return []

    queries = parsed_params['q']
    # Check if only one query string is given;
    # in this case make it a list
    if not isinstance(queries, list):
        queries = [queries]
    return queries
def _handle_request(self, scheme, netloc, path, headers, body=None, method="GET"): """ Run the actual request """ backend_url = "{}://{}{}".format(scheme, netloc, path) try: response = self.http_request.request(backend_url, method=method, body=body, headers=dict(headers)) self._return_response(response) except Exception as e: body = "Invalid response from backend: '{}' Server might be busy".format(e.message) logging.debug(body) self.send_error(httplib.SERVICE_UNAVAILABLE, body)
def send_error(self, code, message=None):
    """
    Send and log plain text error reply.

    :param code:
    :param message:
    """
    message = message.strip()
    self.log_error("code %d, message %s", code, message)
    self.send_response(code)
    self.send_header("Content-Type", "text/plain")
    self.send_header('Connection', 'close')
    self.end_headers()
    if message:
        self.wfile.write(message)
def _return_response(self, response): """ :type result: HTTPResponse """ self.filter_headers(response.msg) if "content-length" in response.msg: del response.msg["content-length"] self.send_response(response.status, response.reason) for header_key, header_value in response.msg.items(): self.send_header(header_key, header_value) body = response.read() self.send_header('Content-Length', str(len(body))) self.end_headers() self.wfile.write(body)
def anonymize_user(doc):
    """Preprocess an event by anonymizing user information.

    The anonymization is done by removing fields that can uniquely identify a
    user, such as the user's ID, session ID, IP address and User Agent, and
    hashing them to produce a ``visitor_id`` and ``unique_session_id``. To
    further secure the method, a randomly generated 32-byte salt is used, that
    expires after 24 hours and is discarded. The salt values are stored in
    Redis (or whichever backend Invenio-Cache uses). The ``unique_session_id``
    is calculated in the same way as the ``visitor_id``, with the only
    difference that it also takes into account the hour of the event. All of
    these rules effectively mean that a user can have a unique ``visitor_id``
    for each day and a unique ``unique_session_id`` for each hour of a day.

    This session ID generation process was designed according to the
    `Project COUNTER Code of Practice
    <https://www.projectcounter.org/code-of-practice-sections/general-information/>`_.

    In addition to that, the country of the user is extracted from the IP
    address as an ISO 3166-1 alpha-2 two-letter country code (e.g. "CH" for
    Switzerland).
    """
    ip = doc.pop('ip_address', None)
    if ip:
        doc.update({'country': get_geoip(ip)})

    user_id = doc.pop('user_id', '')
    session_id = doc.pop('session_id', '')
    user_agent = doc.pop('user_agent', '')

    # A 'User Session' is defined as activity by a user in a period of
    # one hour. timeslice represents the hour of the day in which
    # the event has been generated and together with user info it determines
    # the 'User Session'
    timestamp = arrow.get(doc.get('timestamp'))
    timeslice = timestamp.strftime('%Y%m%d%H')
    salt = get_anonymization_salt(timestamp)

    visitor_id = hashlib.sha224(salt.encode('utf-8'))
    # TODO: include random salt here, that changes once a day.
    # m.update(random_salt)
    if user_id:
        visitor_id.update(user_id.encode('utf-8'))
    elif session_id:
        visitor_id.update(session_id.encode('utf-8'))
    elif ip and user_agent:
        vid = '{}|{}|{}'.format(ip, user_agent, timeslice)
        visitor_id.update(vid.encode('utf-8'))
    else:
        # TODO: add random data?
        pass

    unique_session_id = hashlib.sha224(salt.encode('utf-8'))
    if user_id:
        sid = '{}|{}'.format(user_id, timeslice)
        unique_session_id.update(sid.encode('utf-8'))
    elif session_id:
        sid = '{}|{}'.format(session_id, timeslice)
        unique_session_id.update(sid.encode('utf-8'))
    elif ip and user_agent:
        sid = '{}|{}|{}'.format(ip, user_agent, timeslice)
        unique_session_id.update(sid.encode('utf-8'))

    doc.update(dict(
        visitor_id=visitor_id.hexdigest(),
        unique_session_id=unique_session_id.hexdigest()
    ))

    return doc
def hash_id(iso_timestamp, msg):
    """Generate event id, optimized for ES."""
    return '{0}-{1}'.format(
        iso_timestamp,
        hashlib.sha1(
            msg.get('unique_id').encode('utf-8') +
            str(msg.get('visitor_id')).encode('utf-8')
        ).hexdigest()
    )
def actionsiter(self): """Iterator.""" for msg in self.queue.consume(): try: for preproc in self.preprocessors: msg = preproc(msg) if msg is None: break if msg is None: continue suffix = arrow.get(msg.get('timestamp')).strftime(self.suffix) ts = parser.parse(msg.get('timestamp')) # Truncate timestamp to keep only seconds. This is to improve # elasticsearch performances. ts = ts.replace(microsecond=0) msg['timestamp'] = ts.isoformat() # apply timestamp windowing in order to group events too close # in time if self.double_click_window > 0: timestamp = mktime(utc.localize(ts).utctimetuple()) ts = ts.fromtimestamp( timestamp // self.double_click_window * self.double_click_window ) yield dict( _id=hash_id(ts.isoformat(), msg), _op_type='index', _index='{0}-{1}'.format(self.index, suffix), _type=self.doctype, _source=msg, ) except Exception: current_app.logger.exception(u'Error while processing event')
def run(self): """Process events queue.""" return elasticsearch.helpers.bulk( self.client, self.actionsiter(), stats_only=True, chunk_size=50 )
def parse(duration_seconds, resolution_seconds=Resolution.MAX_RESOLUTION, limit=None):
    """
    num_datapoints = min(duration/resolution, limit)

    :param duration_seconds: Time duration (in seconds) for which datapoints should be returned
    :param resolution_seconds: Time interval (in seconds) between data points
    :param limit: Maximum number of datapoints to return
    """
    if not duration_seconds or duration_seconds < 0:
        return 0

    if not resolution_seconds or resolution_seconds <= 0:
        return None

    num_datapoints = duration_seconds / resolution_seconds
    if limit:
        num_datapoints = min(int(limit), num_datapoints)
    return int(math.ceil(num_datapoints))
def create_series(self, num_series, batch_size=5000): """ Write one data point for each series name to initialize the series :param num_series: Number of different series names to create :param batch_size: Number of series to create at the same time :return: """ datapoints = [] for _ in range(num_series): name = self.dummy_seriesname() datapoints.append(self.create_datapoint(name, ["value"], [[1]])) for data in tqdm(self.batch(datapoints, batch_size)): self.client.write_points(data)
def write_points(self, series_name, start_date, end_date, resolution=10, batch_size=5000): """ Create sample datapoints between two dates with the given resolution (in seconds) :param series_name: :param start_date: :param end_date: :param resolution: :param batch_size: """ start_ts = int(start_date.strftime("%s")) end_ts = int(end_date.strftime("%s")) range_seconds = end_ts - start_ts num_datapoints = range_seconds / resolution timestamps = [start_ts + i * resolution for i in range(num_datapoints)] columns = ["time", "value"] for batch in tqdm(self.batch(timestamps, batch_size)): points = [] for timestamp in batch: point = random.randint(1, 100) points.append([timestamp, point]) datapoint = self.create_datapoint(series_name, columns, points) self.client.write_points([datapoint])
def register_events(): """Register sample events.""" return [ dict( event_type='file-download', templates='invenio_stats.contrib.file_download', processor_class=EventsIndexer, processor_config=dict( preprocessors=[ flag_robots, anonymize_user, build_file_unique_id ])), dict( event_type='record-view', templates='invenio_stats.contrib.record_view', processor_class=EventsIndexer, processor_config=dict( preprocessors=[ flag_robots, anonymize_user, build_record_unique_id ])) ]
def register_aggregations(): """Register sample aggregations.""" return [dict( aggregation_name='file-download-agg', templates='invenio_stats.contrib.aggregations.aggr_file_download', aggregator_class=StatAggregator, aggregator_config=dict( client=current_search_client, event='file-download', aggregation_field='unique_id', aggregation_interval='day', copy_fields=dict( file_key='file_key', bucket_id='bucket_id', file_id='file_id', ), metric_aggregation_fields={ 'unique_count': ('cardinality', 'unique_session_id', {'precision_threshold': 1000}), 'volume': ('sum', 'size', {}), }, )), dict( aggregation_name='record-view-agg', templates='invenio_stats.contrib.aggregations.aggr_record_view', aggregator_class=StatAggregator, aggregator_config=dict( client=current_search_client, event='record-view', aggregation_field='unique_id', aggregation_interval='day', copy_fields=dict( record_id='record_id', pid_type='pid_type', pid_value='pid_value', ), metric_aggregation_fields={ 'unique_count': ('cardinality', 'unique_session_id', {'precision_threshold': 1000}), }, ))]
def register_queries(): """Register queries.""" return [ dict( query_name='bucket-file-download-histogram', query_class=ESDateHistogramQuery, query_config=dict( index='stats-file-download', doc_type='file-download-day-aggregation', copy_fields=dict( bucket_id='bucket_id', file_key='file_key', ), required_filters=dict( bucket_id='bucket_id', file_key='file_key', ) ) ), dict( query_name='bucket-file-download-total', query_class=ESTermsQuery, query_config=dict( index='stats-file-download', doc_type='file-download-day-aggregation', copy_fields=dict( # bucket_id='bucket_id', ), required_filters=dict( bucket_id='bucket_id', ), aggregated_fields=['file_key'] ) ), ]
def check(self, query):
    """
    :param query:
    """
    if query.get_type() not in {Keyword.SELECT}:
        # Bailing out for non select queries
        return Ok(True)

    if query.get_resolution() > 0:
        return Ok(True)

    return Err("Group by statements need a positive time value (e.g. time(10s))")
def declare_queues(): """Index statistics events.""" return [dict(name='stats-{0}'.format(event['event_type']), exchange=current_stats.exchange) for event in current_stats._events_config.values()]
def parse(self, raw_query_string): """ Parse a raw query string into fields :param raw_query_string: Raw InfluxDB query string """ self._reset() if not isinstance(raw_query_string, basestring): return None query_string = self._cleanup(raw_query_string) parts = self._split(query_string) parts = self._sanitize_keywords(parts) tokens = self._tokenize(parts) if tokens: # Run subparsers to analyze parts of the query self.parsed_resolution = self._parse_resolution(tokens) self.parsed_time = self._parse_time(tokens) self.parsed_time_overlap = self._parse_duration(self.parsed_time) self.parsed_datapoints = self._parse_datapoints( self.parsed_time_overlap.timespan_seconds(), self.parsed_resolution, self.parse_keyword(Keyword.LIMIT, tokens) ) return self.create_query_object(tokens)
def create_query_object(self, tokens): """ Analyze query tokens and create an InfluxDBStatement from them Return None on error :param tokens: A list of InfluxDB query tokens """ try: query_type = tokens['type'] return getattr(self, 'create_%s_query' % query_type)(tokens) except (KeyError, TypeError): return self.invalid_query(tokens)
def create_select_query(self, tokens): """ Parse tokens of select query :param tokens: A list of InfluxDB query tokens """ if not tokens[Keyword.SELECT]: return None if not tokens[Keyword.FROM]: return None return SelectQuery( self.parse_keyword(Keyword.SELECT, tokens), self.parse_keyword(Keyword.FROM, tokens), where_stmt=self.parse_keyword(Keyword.WHERE, tokens), limit_stmt=self.parse_keyword(Keyword.LIMIT, tokens), group_by_stmt=self.parse_group(tokens), duration=self.parsed_time_overlap.timespan_seconds(), resolution=self.parsed_resolution, time_ranges=self.parsed_time, time_overlap=self.parsed_time_overlap, datapoints=self.parsed_datapoints )
def create_list_query(self, tokens): """ Parse tokens of list query :param tokens: A list of InfluxDB query tokens """ if not tokens[Keyword.SERIES]: # A list series keyword is allowed # without a series name or regex tokens[Keyword.SERIES] = '' return ListQuery(self.parse_keyword(Keyword.SERIES, tokens))
def create_drop_query(self, tokens): """ Parse tokens of drop query :param tokens: A list of InfluxDB query tokens """ if not tokens[Keyword.SERIES]: return None return DropQuery(self.parse_keyword(Keyword.SERIES, tokens))
def create_delete_query(self, tokens): """ Parse tokens of delete query :param tokens: A list of InfluxDB query tokens """ # From keyword is required if not tokens[Keyword.FROM]: return None where_stmt = self.parse_keyword(Keyword.WHERE, tokens) if where_stmt: if not where_stmt.startswith('time'): return None return DeleteQuery( self.parse_keyword(Keyword.FROM, tokens), self.parse_keyword(Keyword.WHERE, tokens) )
def _parse_time(self, tokens): """ Parse the date range for the query E.g. WHERE time > now() - 48h AND time < now() - 24h would result in DateRange(datetime_start, datetime_end) where datetime_start would be parsed from now() - 48h and datetime_end would be parsed from now() - 24h :param tokens: :return: """ return self.time_parser.parse(self.parse_keyword(Keyword.WHERE, tokens))
def _parse_resolution(self, tokens): """ Parse resolution from the GROUP BY statement. E.g. GROUP BY time(10s) would mean a 10 second resolution :param tokens: :return: """ return self.resolution_parser.parse(self.parse_keyword(Keyword.GROUP_BY, tokens))
def _parse_datapoints(self, parsed_duration, parsed_resolution, limit):
    """
    Parse the number of datapoints of a query.
    This can be calculated from the given duration and resolution of the query.
    E.g. if the query has a duration of 2*60*60 = 7200 seconds and
    a resolution of 10 seconds then the number of datapoints would be
    7200/10 => 720 datapoints.

    :param parsed_duration:
    :param parsed_resolution:
    :param limit:
    :return:
    """
    return self.datapoints_parser.parse(parsed_duration, parsed_resolution, limit)
def check(self, query): """ :param query: """ if query.get_type() not in {Keyword.SELECT}: # Only select queries need to be checked here # All others are not affected by this rule. Bailing out. return Ok(True) earliest_date = query.get_earliest_date() if earliest_date >= self.min_start_date: return Ok(True) if query.limit_stmt: return Ok(True) return Err(("Querying for data before {} is prohibited. " "Your beginning date is {}, which is before that.").format(self.min_start_date.strftime("%Y-%m-%d"), earliest_date))
def extract_date(self, date):
    """Extract date from string if necessary.

    :returns: the extracted date.
    """
    if isinstance(date, six.string_types):
        try:
            date = dateutil.parser.parse(date)
        except ValueError:
            raise ValueError(
                'Invalid date format for statistic {}.'.format(self.query_name)
            )
    if not isinstance(date, datetime):
        raise TypeError(
            'Invalid date type for statistic {}.'.format(self.query_name)
        )
    return date
def validate_arguments(self, interval, start_date, end_date, **kwargs):
    """Validate query arguments."""
    if interval not in self.allowed_intervals:
        raise InvalidRequestInputError(
            'Invalid aggregation time interval for statistic {}.'.format(
                self.query_name)
        )
    if set(kwargs) < set(self.required_filters):
        raise InvalidRequestInputError(
            'Missing one of the required parameters {0} in '
            'query {1}'.format(set(self.required_filters.keys()),
                               self.query_name)
        )
def build_query(self, interval, start_date, end_date, **kwargs): """Build the elasticsearch query.""" agg_query = Search(using=self.client, index=self.index, doc_type=self.doc_type)[0:0] if start_date is not None or end_date is not None: time_range = {} if start_date is not None: time_range['gte'] = start_date.isoformat() if end_date is not None: time_range['lte'] = end_date.isoformat() agg_query = agg_query.filter( 'range', **{self.time_field: time_range}) for modifier in self.query_modifiers: agg_query = modifier(agg_query, **kwargs) base_agg = agg_query.aggs.bucket( 'histogram', 'date_histogram', field=self.time_field, interval=interval ) for destination, (metric, field, opts) in self.metric_fields.items(): base_agg.metric(destination, metric, field=field, **opts) if self.copy_fields: base_agg.metric( 'top_hit', 'top_hits', size=1, sort={'timestamp': 'desc'} ) for query_param, filtered_field in self.required_filters.items(): if query_param in kwargs: agg_query = agg_query.filter( 'term', **{filtered_field: kwargs[query_param]} ) return agg_query
def process_query_result(self, query_result, interval, start_date, end_date): """Build the result using the query result.""" def build_buckets(agg): """Build recursively result buckets.""" bucket_result = dict( key=agg['key'], date=agg['key_as_string'], ) for metric in self.metric_fields: bucket_result[metric] = agg[metric]['value'] if self.copy_fields and agg['top_hit']['hits']['hits']: doc = agg['top_hit']['hits']['hits'][0]['_source'] for destination, source in self.copy_fields.items(): if isinstance(source, six.string_types): bucket_result[destination] = doc[source] else: bucket_result[destination] = source(bucket_result, doc) return bucket_result # Add copy_fields buckets = query_result['aggregations']['histogram']['buckets'] return dict( interval=interval, key_type='date', start_date=start_date.isoformat() if start_date else None, end_date=end_date.isoformat() if end_date else None, buckets=[build_buckets(b) for b in buckets] )
def validate_arguments(self, start_date, end_date, **kwargs):
    """Validate query arguments."""
    if set(kwargs) < set(self.required_filters):
        raise InvalidRequestInputError(
            'Missing one of the required parameters {0} in '
            'query {1}'.format(set(self.required_filters.keys()),
                               self.query_name)
        )
def build_query(self, start_date, end_date, **kwargs): """Build the elasticsearch query.""" agg_query = Search(using=self.client, index=self.index, doc_type=self.doc_type)[0:0] if start_date is not None or end_date is not None: time_range = {} if start_date is not None: time_range['gte'] = start_date.isoformat() if end_date is not None: time_range['lte'] = end_date.isoformat() agg_query = agg_query.filter( 'range', **{self.time_field: time_range}) for modifier in self.query_modifiers: agg_query = modifier(agg_query, **kwargs) base_agg = agg_query.aggs def _apply_metric_aggs(agg): for dst, (metric, field, opts) in self.metric_fields.items(): agg.metric(dst, metric, field=field, **opts) _apply_metric_aggs(base_agg) if self.aggregated_fields: cur_agg = base_agg for term in self.aggregated_fields: cur_agg = cur_agg.bucket(term, 'terms', field=term, size=0) _apply_metric_aggs(cur_agg) if self.copy_fields: base_agg.metric( 'top_hit', 'top_hits', size=1, sort={'timestamp': 'desc'} ) for query_param, filtered_field in self.required_filters.items(): if query_param in kwargs: agg_query = agg_query.filter( 'term', **{filtered_field: kwargs[query_param]} ) return agg_query
def process_query_result(self, query_result, start_date, end_date): """Build the result using the query result.""" def build_buckets(agg, fields, bucket_result): """Build recursively result buckets.""" # Add metric results for current bucket for metric in self.metric_fields: bucket_result[metric] = agg[metric]['value'] if fields: current_level = fields[0] bucket_result.update(dict( type='bucket', field=current_level, key_type='terms', buckets=[build_buckets(b, fields[1:], dict(key=b['key'])) for b in agg[current_level]['buckets']] )) return bucket_result # Add copy_fields aggs = query_result['aggregations'] result = dict( start_date=start_date.isoformat() if start_date else None, end_date=end_date.isoformat() if end_date else None, ) if self.copy_fields and aggs['top_hit']['hits']['hits']: doc = aggs['top_hit']['hits']['hits'][0]['_source'] for destination, source in self.copy_fields.items(): if isinstance(source, six.string_types): result[destination] = doc[source] else: result[destination] = source(result, doc) return build_buckets(aggs, self.aggregated_fields, result)
def run(self, start_date=None, end_date=None, **kwargs): """Run the query.""" start_date = self.extract_date(start_date) if start_date else None end_date = self.extract_date(end_date) if end_date else None self.validate_arguments(start_date, end_date, **kwargs) agg_query = self.build_query(start_date, end_date, **kwargs) query_result = agg_query.execute().to_dict() res = self.process_query_result(query_result, start_date, end_date) return res
def handle_error(self, request, client_address):
    """
    Overwrite error handling to suppress socket/ssl related errors

    :param client_address: Address of client
    :param request: Request causing an error
    """
    cls, e = sys.exc_info()[:2]
    if cls is socket.error or cls is ssl.SSLError:
        pass
    else:
        return HTTPServer.handle_error(self, request, client_address)
def file_download_event_builder(event, sender_app, obj=None, **kwargs): """Build a file-download event.""" event.update(dict( # When: timestamp=datetime.datetime.utcnow().isoformat(), # What: bucket_id=str(obj.bucket_id), file_id=str(obj.file_id), file_key=obj.key, size=obj.file.size, referrer=request.referrer, # Who: **get_user() )) return event
def record_view_event_builder(event, sender_app, pid=None, record=None, **kwargs): """Build a record-view event.""" event.update(dict( # When: timestamp=datetime.datetime.utcnow().isoformat(), # What: record_id=str(record.id), pid_type=pid.pid_type, pid_value=str(pid.pid_value), referrer=request.referrer, # Who: **get_user() )) return event
def main():
    """
    Setup consumer
    """
    config = loader.load_config()

    if config.version:
        show_version()
    if config.show_rules:
        show_rules()

    if not config.configfile and not (hasattr(config, "status") or hasattr(config, "stop")):
        show_configfile_warning()

    # Check if we have permissions to open the log file.
    check_write_permissions(config.logfile)
    start_proxy(config)
def check_write_permissions(file):
    """
    Check if we can write to the given file

    Otherwise since we might detach the process to run in the background
    we might never find out that writing failed and get an ugly
    exit message on startup. For example:
    ERROR: Child exited immediately with non-zero exit code 127

    So we catch this error upfront and print a nicer error message
    with a hint on how to fix it.
    """
    try:
        open(file, 'a')
    except IOError:
        print("Can't open file {}. "
              "Please grant write permissions or change the path in your config".format(file))
        sys.exit(1)
def show_rules():
    """
    Show the list of available rules and quit

    :return:
    """
    from rules.loader import import_rules
    from rules.rule_list import all_rules

    rules = import_rules(all_rules)
    print("")
    for name, rule in rules.iteritems():
        heading = "{} (`{}`)".format(rule.description(), name)
        print("#### {} ####".format(heading))
        for line in rule.reason():
            print(line)
        print("")
    sys.exit(0)
def start_proxy(config):
    """
    Start the http proxy

    :param config:
    :return:
    """
    protector = Protector(config.rules, config.whitelist)
    protector_daemon = ProtectorDaemon(config=config, protector=protector)

    daemon = daemonocle.Daemon(
        pidfile=config.pidfile,
        detach=(not config.foreground),
        shutdown_callback=shutdown,
        worker=protector_daemon.run
    )
    daemon.do_action(config.command)
def batches(iterable, n=1):
    """
    From http://stackoverflow.com/a/8290508/270334

    :param n:
    :param iterable:
    """
    l = len(iterable)
    for ndx in range(0, l, n):
        yield iterable[ndx:min(ndx + n, l)]
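A small usage sketch of the batches generator above (values are hypothetical): it yields consecutive slices of at most n items, with a shorter final chunk if the length is not a multiple of n.

# Split a list of ids into chunks of at most 3 items.
ids = list(range(8))
chunks = list(batches(ids, n=3))
assert chunks == [[0, 1, 2], [3, 4, 5], [6, 7]]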
def _is_root():
    """Checks if the user is root."""
    import os
    import ctypes
    try:
        return os.geteuid() == 0
    except AttributeError:
        return ctypes.windll.shell32.IsUserAnAdmin() != 0
    return False