Code (string, lengths 103 to 85.9k) | Summary (list, lengths 0 to 94)
---|---|
Please provide a description of the function:def feed_backend_arthur(backend_name, backend_params):
# Always get pending items from arthur for all data sources
feed_arthur()
logger.debug("Items available for %s", arthur_items.keys())
# Get only the items for the backend
if not get_connector_from_name(backend_name):
raise RuntimeError("Unknown backend %s" % backend_name)
connector = get_connector_from_name(backend_name)
klass = connector[3] # BackendCmd for the connector
backend_cmd = init_backend(klass(*backend_params))
tag = backend_cmd.backend.tag
logger.debug("Getting items for %s.", tag)
if tag in arthur_items:
logger.debug("Found items for %s.", tag)
for item in arthur_items[tag]:
yield item | [
" Feed Ocean with backend data collected from arthur redis queue"
]
|
Please provide a description of the function:def feed_backend(url, clean, fetch_archive, backend_name, backend_params,
es_index=None, es_index_enrich=None, project=None, arthur=False,
es_aliases=None, projects_json_repo=None):
backend = None
repo = {'backend_name': backend_name, 'backend_params': backend_params} # repository data to be stored in conf
if es_index:
clean = False # don't remove index, it could be shared
if not get_connector_from_name(backend_name):
raise RuntimeError("Unknown backend %s" % backend_name)
connector = get_connector_from_name(backend_name)
klass = connector[3] # BackendCmd for the connector
try:
logger.info("Feeding Ocean from %s (%s)", backend_name, es_index)
if not es_index:
logger.error("Raw index not defined for %s", backend_name)
repo['repo_update_start'] = datetime.now().isoformat()
# perceval backends fetch params
offset = None
from_date = None
category = None
latest_items = None
filter_classified = None
backend_cmd = klass(*backend_params)
parsed_args = vars(backend_cmd.parsed_args)
init_args = find_signature_parameters(backend_cmd.BACKEND,
parsed_args)
if backend_cmd.archive_manager and fetch_archive:
archive = Archive(parsed_args['archive_path'])
else:
archive = backend_cmd.archive_manager.create_archive() if backend_cmd.archive_manager else None
init_args['archive'] = archive
backend_cmd.backend = backend_cmd.BACKEND(**init_args)
backend = backend_cmd.backend
ocean_backend = connector[1](backend, fetch_archive=fetch_archive, project=project)
elastic_ocean = get_elastic(url, es_index, clean, ocean_backend, es_aliases)
ocean_backend.set_elastic(elastic_ocean)
ocean_backend.set_projects_json_repo(projects_json_repo)
if fetch_archive:
signature = inspect.signature(backend.fetch_from_archive)
else:
signature = inspect.signature(backend.fetch)
if 'from_date' in signature.parameters:
try:
# Support perceval pre and post BackendCommand refactoring
from_date = backend_cmd.from_date
except AttributeError:
from_date = backend_cmd.parsed_args.from_date
if 'offset' in signature.parameters:
try:
offset = backend_cmd.offset
except AttributeError:
offset = backend_cmd.parsed_args.offset
if 'category' in signature.parameters:
try:
category = backend_cmd.category
except AttributeError:
try:
category = backend_cmd.parsed_args.category
except AttributeError:
pass
if 'filter_classified' in signature.parameters:
try:
filter_classified = backend_cmd.parsed_args.filter_classified
except AttributeError:
pass
if 'latest_items' in signature.parameters:
try:
latest_items = backend_cmd.latest_items
except AttributeError:
latest_items = backend_cmd.parsed_args.latest_items
# fetch params support
if arthur:
# If using arthur just provide the items generator to be used
# to collect the items and upload to Elasticsearch
aitems = feed_backend_arthur(backend_name, backend_params)
ocean_backend.feed(arthur_items=aitems)
else:
params = {}
if latest_items:
params['latest_items'] = latest_items
if category:
params['category'] = category
if filter_classified:
params['filter_classified'] = filter_classified
if from_date and (from_date.replace(tzinfo=None) != parser.parse("1970-01-01")):
params['from_date'] = from_date
if offset:
params['from_offset'] = offset
ocean_backend.feed(**params)
except Exception as ex:
if backend:
logger.error("Error feeding ocean from %s (%s): %s", backend_name, backend.origin, ex, exc_info=True)
else:
logger.error("Error feeding ocean %s", ex, exc_info=True)
logger.info("Done %s ", backend_name) | [
" Feed Ocean with backend data "
]
|
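A minimal usage sketch of `feed_backend()` follows; all concrete values (Elasticsearch URL, repository URL, index names) are hypothetical placeholders and the import path is an assumption, not taken from the original text.

```python
from grimoire_elk.elk import feed_backend  # assumed module path

feed_backend(url="http://localhost:9200",                     # Elasticsearch URL
             clean=False,                                     # keep an existing raw index
             fetch_archive=False,                             # fetch live data, not from archive
             backend_name="git",                              # any backend known to get_connector_from_name()
             backend_params=["https://example.org/repo.git"],
             es_index="git_raw",                              # raw (Ocean) index
             es_index_enrich="git_enriched")
```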
Please provide a description of the function:def get_items_from_uuid(uuid, enrich_backend, ocean_backend):
# logger.debug("Getting items for merged uuid %s " % (uuid))
uuid_fields = enrich_backend.get_fields_uuid()
terms = "" # all terms with uuids in the enriched item
for field in uuid_fields:
terms += """ {"term": { "%s": { "value": "%s" } }} """ % (field, uuid)
terms += ","
terms = terms[:-1] # remove last , for last item
query = """ {"query": { "bool": { "should": [%s] }}} """ % (terms)
url_search = enrich_backend.elastic.index_url + "/_search"
url_search += "?size=1000" # TODO get all items
r = requests_ses.post(url_search, data=query)
eitems = r.json()['hits']['hits']
if len(eitems) == 0:
# logger.warning("No enriched items found for uuid: %s " % (uuid))
return []
items_ids = []
for eitem in eitems:
item_id = enrich_backend.get_item_id(eitem)
# For one item several eitems could be generated
if item_id not in items_ids:
items_ids.append(item_id)
# Time to get the items
logger.debug("Items to be renriched for merged uuids: %s" % (",".join(items_ids)))
url_mget = ocean_backend.elastic.index_url + "/_mget"
items_ids_query = ""
for item_id in items_ids:
items_ids_query += '{"_id" : "%s"}' % (item_id)
items_ids_query += ","
items_ids_query = items_ids_query[:-1] # remove last , for last item
query = '{"docs" : [%s]}' % (items_ids_query)
r = requests_ses.post(url_mget, data=query)
res_items = r.json()['docs']
items = []
for res_item in res_items:
if res_item['found']:
items.append(res_item["_source"])
return items | [
" Get all items that include uuid ",
"\n {\"term\": {\n \"%s\": {\n \"value\": \"%s\"\n }\n }}\n ",
"\n {\"query\": { \"bool\": { \"should\": [%s] }}}\n "
]
|
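For reference, the sketch below shows the shape of the two request bodies `get_items_from_uuid()` assembles, using a hypothetical uuid, field names and item ids (the exact uuid fields come from `enrich_backend.get_fields_uuid()`).

```python
# Body sent to /_search (terms built from the term template, one per uuid field)
search_body = '''
{"query": { "bool": { "should": [
    {"term": { "author_uuid": { "value": "0000abcd" } }},
    {"term": { "assignee_uuid": { "value": "0000abcd" } }}
]}}}
'''

# Body sent to /_mget with the item ids collected from the enriched hits
mget_body = '{"docs" : [{"_id" : "item-1"},{"_id" : "item-2"}]}'
```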
Please provide a description of the function:def refresh_identities(enrich_backend, author_field=None, author_values=None):
def update_items(new_filter_author):
for eitem in enrich_backend.fetch(new_filter_author):
roles = None
try:
roles = enrich_backend.roles
except AttributeError:
pass
new_identities = enrich_backend.get_item_sh_from_id(eitem, roles)
eitem.update(new_identities)
yield eitem
logger.debug("Refreshing identities fields from %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
total = 0
max_ids = enrich_backend.elastic.max_items_clause
logger.debug('Refreshing identities')
if author_field is None:
# No filter, update all items
for item in update_items(None):
yield item
total += 1
else:
to_refresh = []
for author_value in author_values:
to_refresh.append(author_value)
if len(to_refresh) > max_ids:
filter_author = {"name": author_field,
"value": to_refresh}
for item in update_items(filter_author):
yield item
total += 1
to_refresh = []
if len(to_refresh) > 0:
filter_author = {"name": author_field,
"value": to_refresh}
for item in update_items(filter_author):
yield item
total += 1
logger.info("Total eitems refreshed for identities fields %i", total) | [
"Refresh identities in enriched index.\n\n Retrieve items from the enriched index corresponding to enrich_backend,\n and update their identities information, with fresh data from the\n SortingHat database.\n\n Instead of the whole index, only items matching the filter_author\n filter are fitered, if that parameters is not None.\n\n :param enrich_backend: enriched backend to update\n :param author_field: field to match items authored by a user\n :param author_values: values of the authored field to match items\n "
]
|
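The sketch below shows how the `refresh_identities()` generator is consumed elsewhere in this module (see the `do_refresh_identities` branch of `enrich_backend()` further down); `enrich_backend` here is assumed to be an already-initialised enriched connector and the uuid value is a placeholder.

```python
field_id = enrich_backend.get_field_unique_id()
eitems = refresh_identities(enrich_backend, 'author_uuid', ['0000abcd'])
enrich_backend.elastic.bulk_upload(eitems, field_id)  # re-upload refreshed items in bulk
```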
Please provide a description of the function:def get_ocean_backend(backend_cmd, enrich_backend, no_incremental,
filter_raw=None, filter_raw_should=None):
if no_incremental:
last_enrich = None
else:
last_enrich = get_last_enrich(backend_cmd, enrich_backend, filter_raw=filter_raw)
logger.debug("Last enrichment: %s", last_enrich)
backend = None
connector = get_connectors()[enrich_backend.get_connector_name()]
if backend_cmd:
backend_cmd = init_backend(backend_cmd)
backend = backend_cmd.backend
signature = inspect.signature(backend.fetch)
if 'from_date' in signature.parameters:
ocean_backend = connector[1](backend, from_date=last_enrich)
elif 'offset' in signature.parameters:
ocean_backend = connector[1](backend, offset=last_enrich)
else:
if last_enrich:
ocean_backend = connector[1](backend, from_date=last_enrich)
else:
ocean_backend = connector[1](backend)
else:
# We can have params for non perceval backends also
params = enrich_backend.backend_params
if params:
try:
date_pos = params.index('--from-date')
last_enrich = parser.parse(params[date_pos + 1])
except ValueError:
pass
if last_enrich:
ocean_backend = connector[1](backend, from_date=last_enrich)
else:
ocean_backend = connector[1](backend)
if filter_raw:
ocean_backend.set_filter_raw(filter_raw)
if filter_raw_should:
ocean_backend.set_filter_raw_should(filter_raw_should)
return ocean_backend | [
" Get the ocean backend configured to start from the last enriched date "
]
|
Please provide a description of the function:def do_studies(ocean_backend, enrich_backend, studies_args, retention_time=None):
for study in enrich_backend.studies:
selected_studies = [(s['name'], s['params']) for s in studies_args if s['type'] == study.__name__]
for (name, params) in selected_studies:
logger.info("Starting study: %s, params %s", name, str(params))
try:
study(ocean_backend, enrich_backend, **params)
except Exception as e:
logger.error("Problem executing study %s, %s", name, str(e))
raise e
# identify studies which creates other indexes. If the study is onion,
# it can be ignored since the index is recreated every week
if name.startswith('enrich_onion'):
continue
index_params = [p for p in params if 'out_index' in p]
for ip in index_params:
index_name = params[ip]
elastic = get_elastic(enrich_backend.elastic_url, index_name)
elastic.delete_items(retention_time) | [
"Execute studies related to a given enrich backend. If `retention_time` is not None, the\n study data is deleted based on the number of minutes declared in `retention_time`.\n\n :param ocean_backend: backend to access raw items\n :param enrich_backend: backend to access enriched items\n :param retention_time: maximum number of minutes wrt the current date to retain the data\n :param studies_args: list of studies to be executed\n "
]
|
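The code above implies the shape of the `studies_args` argument: a list of dicts with `name`, `type` (matching a study function `__name__`) and `params` (keyword arguments for the study). The label and the output index below are hypothetical examples.

```python
studies_args = [
    {
        "name": "enrich_onion:git",           # label used in log messages
        "type": "enrich_onion",               # must match a study function __name__
        "params": {"out_index": "git_onion"}  # keyword args passed to the study
    }
]
```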
Please provide a description of the function:def enrich_backend(url, clean, backend_name, backend_params, cfg_section_name,
ocean_index=None,
ocean_index_enrich=None,
db_projects_map=None, json_projects_map=None,
db_sortinghat=None,
no_incremental=False, only_identities=False,
github_token=None, studies=False, only_studies=False,
url_enrich=None, events_enrich=False,
db_user=None, db_password=None, db_host=None,
do_refresh_projects=False, do_refresh_identities=False,
author_id=None, author_uuid=None, filter_raw=None,
filters_raw_prefix=None, jenkins_rename_file=None,
unaffiliated_group=None, pair_programming=False,
node_regex=False, studies_args=None, es_enrich_aliases=None,
last_enrich_date=None, projects_json_repo=None):
backend = None
enrich_index = None
if ocean_index or ocean_index_enrich:
clean = False # don't remove index, it could be shared
if do_refresh_projects or do_refresh_identities:
clean = False # refresh works over the existing enriched items
if not get_connector_from_name(backend_name):
raise RuntimeError("Unknown backend %s" % backend_name)
connector = get_connector_from_name(backend_name)
klass = connector[3] # BackendCmd for the connector
try:
backend = None
backend_cmd = None
if klass:
# Data is retrieved from Perceval
backend_cmd = init_backend(klass(*backend_params))
backend = backend_cmd.backend
if ocean_index_enrich:
enrich_index = ocean_index_enrich
else:
if not ocean_index:
ocean_index = backend_name + "_" + backend.origin
enrich_index = ocean_index + "_enrich"
if events_enrich:
enrich_index += "_events"
enrich_backend = connector[2](db_sortinghat, db_projects_map, json_projects_map,
db_user, db_password, db_host)
enrich_backend.set_params(backend_params)
# store the cfg section name in the enrich backend to recover the corresponding project name in projects.json
enrich_backend.set_cfg_section_name(cfg_section_name)
enrich_backend.set_from_date(last_enrich_date)
if url_enrich:
elastic_enrich = get_elastic(url_enrich, enrich_index, clean, enrich_backend, es_enrich_aliases)
else:
elastic_enrich = get_elastic(url, enrich_index, clean, enrich_backend, es_enrich_aliases)
enrich_backend.set_elastic(elastic_enrich)
if github_token and backend_name == "git":
enrich_backend.set_github_token(github_token)
if jenkins_rename_file and backend_name == "jenkins":
enrich_backend.set_jenkins_rename_file(jenkins_rename_file)
if unaffiliated_group:
enrich_backend.unaffiliated_group = unaffiliated_group
if pair_programming:
enrich_backend.pair_programming = pair_programming
if node_regex:
enrich_backend.node_regex = node_regex
# The filter raw is needed to be able to assign the project value to an enriched item
# see line 544, grimoire_elk/enriched/enrich.py (fltr = eitem['origin'] + ' --filter-raw=' + self.filter_raw)
if filter_raw:
enrich_backend.set_filter_raw(filter_raw)
elif filters_raw_prefix:
enrich_backend.set_filter_raw_should(filters_raw_prefix)
enrich_backend.set_projects_json_repo(projects_json_repo)
ocean_backend = get_ocean_backend(backend_cmd, enrich_backend,
no_incremental, filter_raw,
filters_raw_prefix)
if only_studies:
logger.info("Running only studies (no SH and no enrichment)")
do_studies(ocean_backend, enrich_backend, studies_args)
elif do_refresh_projects:
logger.info("Refreshing project field in %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
field_id = enrich_backend.get_field_unique_id()
eitems = refresh_projects(enrich_backend)
enrich_backend.elastic.bulk_upload(eitems, field_id)
elif do_refresh_identities:
author_attr = None
author_values = None
if author_id:
author_attr = 'author_id'
author_values = [author_id]
elif author_uuid:
author_attr = 'author_uuid'
author_values = [author_uuid]
logger.info("Refreshing identities fields in %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
field_id = enrich_backend.get_field_unique_id()
eitems = refresh_identities(enrich_backend, author_attr, author_values)
enrich_backend.elastic.bulk_upload(eitems, field_id)
else:
clean = False # Don't remove ocean index when enrich
elastic_ocean = get_elastic(url, ocean_index, clean, ocean_backend)
ocean_backend.set_elastic(elastic_ocean)
logger.info("Adding enrichment data to %s",
enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url))
if db_sortinghat and enrich_backend.has_identities():
# FIXME: This step won't be done from enrich in the future
total_ids = load_identities(ocean_backend, enrich_backend)
logger.info("Total identities loaded %i ", total_ids)
if only_identities:
logger.info("Only SH identities added. Enrich not done!")
else:
# Enrichment for the new items once SH update is finished
if not events_enrich:
enrich_count = enrich_items(ocean_backend, enrich_backend)
if enrich_count is not None:
logger.info("Total items enriched %i ", enrich_count)
else:
enrich_count = enrich_items(ocean_backend, enrich_backend, events=True)
if enrich_count is not None:
logger.info("Total events enriched %i ", enrich_count)
if studies:
do_studies(ocean_backend, enrich_backend, studies_args)
except Exception as ex:
if backend:
logger.error("Error enriching ocean from %s (%s): %s",
backend_name, backend.origin, ex, exc_info=True)
else:
logger.error("Error enriching ocean %s", ex, exc_info=True)
logger.info("Done %s ", backend_name) | [
" Enrich Ocean index "
]
|
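A minimal sketch of an `enrich_backend()` call follows; argument values are hypothetical placeholders and the import path is an assumption.

```python
from grimoire_elk.elk import enrich_backend  # assumed module path

enrich_backend(url="http://localhost:9200",
               clean=False,
               backend_name="git",
               backend_params=["https://example.org/repo.git"],
               cfg_section_name="git",
               ocean_index="git_raw",
               ocean_index_enrich="git_enriched",
               db_sortinghat="shdb",          # enables identity loading/enrichment
               db_user="root", db_password="", db_host="mariadb")
```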
Please provide a description of the function:def delete_orphan_unique_identities(es, sortinghat_db, current_data_source, active_data_sources):
def get_uuids_in_index(target_uuids):
page = es.search(
index=IDENTITIES_INDEX,
scroll="360m",
size=SIZE_SCROLL_IDENTITIES_INDEX,
body={
"query": {
"bool": {
"filter": [
{
"terms": {
"sh_uuid": target_uuids
}
}
]
}
}
}
)
hits = []
if page['hits']['total'] != 0:
hits = page['hits']['hits']
return hits
def delete_unique_identities(target_uuids):
count = 0
for uuid in target_uuids:
success = SortingHat.remove_unique_identity(sortinghat_db, uuid)
count = count + 1 if success else count
return count
def delete_identities(unique_ident, data_sources):
count = 0
for ident in unique_ident.identities:
if ident.source not in data_sources:
success = SortingHat.remove_identity(sortinghat_db, ident.id)
count = count + 1 if success else count
return count
def has_identities_in_data_sources(unique_ident, data_sources):
in_active = False
for ident in unique_ident.identities:
if ident.source in data_sources:
in_active = True
break
return in_active
deleted_unique_identities = 0
deleted_identities = 0
uuids_to_process = []
# Collect all unique identities
for unique_identity in SortingHat.unique_identities(sortinghat_db):
# Remove a unique identity if all its identities are in non active data source
if not has_identities_in_data_sources(unique_identity, active_data_sources):
deleted_unique_identities += delete_unique_identities([unique_identity.uuid])
continue
# Remove the identities of non active data source for a given unique identity
deleted_identities += delete_identities(unique_identity, active_data_sources)
# Process only the unique identities that include the current data source, since
# it may be that unique identities in other data source have not been
# added yet to IDENTITIES_INDEX
if not has_identities_in_data_sources(unique_identity, [current_data_source]):
continue
# Add the uuid to the list to check its existence in the IDENTITIES_INDEX
uuids_to_process.append(unique_identity.uuid)
# Process the uuids in block of SIZE_SCROLL_IDENTITIES_INDEX
if len(uuids_to_process) != SIZE_SCROLL_IDENTITIES_INDEX:
continue
# Find which uuids to be processed exist in IDENTITIES_INDEX
results = get_uuids_in_index(uuids_to_process)
uuids_found = [item['_source']['sh_uuid'] for item in results]
# Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX
orphan_uuids = set(uuids_to_process) - set(uuids_found)
# Delete the orphan uuids from SortingHat
deleted_unique_identities += delete_unique_identities(orphan_uuids)
# Reset the list
uuids_to_process = []
# Check that no uuids have been left to process
if uuids_to_process:
# Find which uuids to be processed exist in IDENTITIES_INDEX
results = get_uuids_in_index(uuids_to_process)
uuids_found = [item['_source']['sh_uuid'] for item in results]
# Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX
orphan_uuids = set(uuids_to_process) - set(uuids_found)
# Delete the orphan uuids from SortingHat
deleted_unique_identities += delete_unique_identities(orphan_uuids)
logger.debug("[identities retention] Total orphan unique identities deleted from SH: %i",
deleted_unique_identities)
logger.debug("[identities retention] Total identities in non-active data sources deleted from SH: %i",
deleted_identities) | [
"Delete all unique identities which appear in SortingHat, but not in the IDENTITIES_INDEX.\n\n :param es: ElasticSearchDSL object\n :param sortinghat_db: instance of the SortingHat database\n :param current_data_source: current data source\n :param active_data_sources: list of active data sources\n ",
"Find a set of uuids in IDENTITIES_INDEX and return them if exist.\n\n :param target_uuids: target uuids\n ",
"Delete a list of uuids from SortingHat.\n\n :param target_uuids: uuids to be deleted\n ",
"Remove the identities in non active data sources.\n\n :param unique_ident: unique identity object\n :param data_sources: target data sources\n ",
"Check if a unique identity has identities in a set of data sources.\n\n :param unique_ident: unique identity object\n :param data_sources: target data sources\n "
]
|
Please provide a description of the function:def delete_inactive_unique_identities(es, sortinghat_db, before_date):
page = es.search(
index=IDENTITIES_INDEX,
scroll="360m",
size=SIZE_SCROLL_IDENTITIES_INDEX,
body={
"query": {
"range": {
"last_seen": {
"lte": before_date
}
}
}
}
)
sid = page['_scroll_id']
scroll_size = page['hits']['total']
if scroll_size == 0:
logging.warning("[identities retention] No inactive identities found in %s after %s!",
IDENTITIES_INDEX, before_date)
return
count = 0
while scroll_size > 0:
for item in page['hits']['hits']:
to_delete = item['_source']['sh_uuid']
success = SortingHat.remove_unique_identity(sortinghat_db, to_delete)
# increment the number of deleted identities only if the corresponding command was successful
count = count + 1 if success else count
page = es.scroll(scroll_id=sid, scroll='60m')
sid = page['_scroll_id']
scroll_size = len(page['hits']['hits'])
logger.debug("[identities retention] Total inactive identities deleted from SH: %i", count) | [
"Select the unique identities not seen before `before_date` and\n delete them from SortingHat.\n\n :param es: ElasticSearchDSL object\n :param sortinghat_db: instance of the SortingHat database\n :param before_date: datetime str to filter the identities\n "
]
|
Please provide a description of the function:def retain_identities(retention_time, es_enrichment_url, sortinghat_db, data_source, active_data_sources):
before_date = get_diff_current_date(minutes=retention_time)
before_date_str = before_date.isoformat()
es = Elasticsearch([es_enrichment_url], timeout=120, max_retries=20, retry_on_timeout=True, verify_certs=False)
# delete the unique identities which have not been seen after `before_date`
delete_inactive_unique_identities(es, sortinghat_db, before_date_str)
# delete the unique identities for a given data source which are not in the IDENTITIES_INDEX
delete_orphan_unique_identities(es, sortinghat_db, data_source, active_data_sources) | [
"Select the unique identities not seen before `retention_time` and\n delete them from SortingHat. Furthermore, it deletes also the orphan unique identities,\n those ones stored in SortingHat but not in IDENTITIES_INDEX.\n\n :param retention_time: maximum number of minutes wrt the current date to retain the identities\n :param es_enrichment_url: URL of the ElasticSearch where the enriched data is stored\n :param sortinghat_db: instance of the SortingHat database\n :param data_source: target data source (e.g., git, github, slack)\n :param active_data_sources: list of active data sources\n "
]
|
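A sketch of an identities-retention call is shown below; the values are hypothetical and `sortinghat_db` is assumed to be an already-open SortingHat database handle.

```python
retain_identities(retention_time=60 * 24 * 90,              # keep identities seen in the last 90 days
                  es_enrichment_url="http://localhost:9200",
                  sortinghat_db=sortinghat_db,
                  data_source="git",
                  active_data_sources=["git", "github"])
```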
Please provide a description of the function:def init_backend(backend_cmd):
try:
backend_cmd.backend
except AttributeError:
parsed_args = vars(backend_cmd.parsed_args)
init_args = find_signature_parameters(backend_cmd.BACKEND,
parsed_args)
backend_cmd.backend = backend_cmd.BACKEND(**init_args)
return backend_cmd | [
"Init backend within the backend_cmd"
]
|
Please provide a description of the function:def populate_identities_index(es_enrichment_url, enrich_index):
class Mapping(BaseMapping):
@staticmethod
def get_elastic_mappings(es_major):
mapping = """ { "properties": { "sh_uuid": { "type": "keyword" }, "last_seen": { "type": "date" } } } """
return {"items": mapping}
# identities index
mapping_identities_index = Mapping()
elastic_identities = get_elastic(es_enrichment_url, IDENTITIES_INDEX, mapping=mapping_identities_index)
# enriched index
elastic_enrich = get_elastic(es_enrichment_url, enrich_index)
# collect mapping attributes in enriched index
attributes = elastic_enrich.all_properties()
# select attributes coming from SortingHat (*_uuid except git_uuid)
sh_uuid_attributes = [attr for attr in attributes if attr.endswith('_uuid') and not attr.startswith('git_')]
enriched_items = ElasticItems(None)
enriched_items.elastic = elastic_enrich
logger.debug("[identities-index] Start adding identities to %s", IDENTITIES_INDEX)
identities = []
for eitem in enriched_items.fetch(ignore_incremental=True):
for sh_uuid_attr in sh_uuid_attributes:
if sh_uuid_attr not in eitem:
continue
if not eitem[sh_uuid_attr]:
continue
identity = {
'sh_uuid': eitem[sh_uuid_attr],
'last_seen': datetime_utcnow().isoformat()
}
identities.append(identity)
if len(identities) == elastic_enrich.max_items_bulk:
elastic_identities.bulk_upload(identities, 'sh_uuid')
identities = []
if len(identities) > 0:
elastic_identities.bulk_upload(identities, 'sh_uuid')
logger.debug("[identities-index] End adding identities to %s", IDENTITIES_INDEX) | [
"Save the identities currently in use in the index IDENTITIES_INDEX.\n\n :param es_enrichment_url: url of the ElasticSearch with enriched data\n :param enrich_index: name of the enriched index\n ",
"Get Elasticsearch mapping.\n\n :param es_major: major version of Elasticsearch, as string\n :returns: dictionary with a key, 'items', with the mapping\n ",
"\n {\n \"properties\": {\n \"sh_uuid\": {\n \"type\": \"keyword\"\n },\n \"last_seen\": {\n \"type\": \"date\"\n }\n }\n }\n "
]
|
Please provide a description of the function:def safe_index(cls, unique_id):
index = unique_id
if unique_id:
index = unique_id.replace("/", "_").lower()
return index | [
" Return a valid elastic index generated from unique_id "
]
|
Please provide a description of the function:def _check_instance(url, insecure):
res = grimoire_con(insecure).get(url)
if res.status_code != 200:
logger.error("Didn't get 200 OK from url %s", url)
raise ElasticConnectException
else:
try:
version_str = res.json()['version']['number']
version_major = version_str.split('.')[0]
return version_major
except Exception:
logger.error("Could not read proper welcome message from url %s",
ElasticSearch.anonymize_url(url))
logger.error("Message read: %s", res.text)
raise ElasticConnectException | [
"Checks if there is an instance of Elasticsearch in url.\n\n Actually, it checks if GET on the url returns a JSON document\n with a field tagline \"You know, for search\",\n and a field version.number.\n\n :value url: url of the instance to check\n :value insecure: don't verify ssl connection (boolean)\n :returns: major version of Ellasticsearch, as string.\n "
]
|
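For illustration, the welcome document `_check_instance()` parses looks like the sketch below, abbreviated to the fields actually used (`version.number`) plus the tagline mentioned in the docstring; the version value is just an example.

```python
welcome = {
    "version": {"number": "6.8.6"},        # any x.y.z version string
    "tagline": "You know, for search"
}
major = welcome['version']['number'].split('.')[0]   # -> "6"
```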
Please provide a description of the function:def safe_put_bulk(self, url, bulk_json):
headers = {"Content-Type": "application/x-ndjson"}
try:
res = self.requests.put(url + '?refresh=true', data=bulk_json, headers=headers)
res.raise_for_status()
except UnicodeEncodeError:
# Related to body.encode('iso-8859-1'). mbox data
logger.error("Encondig error ... converting bulk to iso-8859-1")
bulk_json = bulk_json.encode('iso-8859-1', 'ignore')
res = self.requests.put(url, data=bulk_json, headers=headers)
res.raise_for_status()
result = res.json()
failed_items = []
if result['errors']:
# Due to multiple errors that may be thrown when inserting bulk data, only the first error is returned
failed_items = [item['index'] for item in result['items'] if 'error' in item['index']]
error = str(failed_items[0]['error'])
logger.error("Failed to insert data to ES: %s, %s", error, self.anonymize_url(url))
inserted_items = len(result['items']) - len(failed_items)
# The exception is currently not thrown to avoid stopping ocean uploading processes
try:
if failed_items:
raise ELKError(cause=error)
except ELKError:
pass
logger.debug("%i items uploaded to ES (%s)", inserted_items, self.anonymize_url(url))
return inserted_items | [
" Bulk PUT controlling unicode issues "
]
|
Please provide a description of the function:def all_es_aliases(self):
r = self.requests.get(self.url + "/_aliases", headers=HEADER_JSON, verify=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.warning("Something went wrong when retrieving aliases on %s.",
self.anonymize_url(self.index_url))
logger.warning(ex)
return
aliases = []
for index in r.json().keys():
aliases.extend(list(r.json()[index]['aliases'].keys()))
aliases = list(set(aliases))
return aliases | [
"List all aliases used in ES"
]
|
Please provide a description of the function:def list_aliases(self):
# check alias doesn't exist
r = self.requests.get(self.index_url + "/_alias", headers=HEADER_JSON, verify=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.warning("Something went wrong when retrieving aliases on %s.",
self.anonymize_url(self.index_url))
logger.warning(ex)
return
aliases = r.json()[self.index]['aliases']
return aliases | [
"List aliases linked to the index"
]
|
Please provide a description of the function:def add_alias(self, alias):
aliases = self.list_aliases()
if alias in aliases:
logger.debug("Alias %s already exists on %s.", alias, self.anonymize_url(self.index_url))
return
# add alias
alias_data = """ { "actions": [ { "add": { "index": "%s", "alias": "%s" } } ] } """ % (self.index, alias)
r = self.requests.post(self.url + "/_aliases", headers=HEADER_JSON, verify=False, data=alias_data)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.warning("Something went wrong when adding an alias on %s. Alias not set.",
self.anonymize_url(self.index_url))
logger.warning(ex)
return
logger.info("Alias %s created on %s.", alias, self.anonymize_url(self.index_url)) | [
"Add an alias to the index set in the elastic obj\n\n :param alias: alias to add\n\n :returns: None\n ",
"\n {\n \"actions\": [\n {\n \"add\": {\n \"index\": \"%s\",\n \"alias\": \"%s\"\n }\n }\n ]\n }\n "
]
|
Please provide a description of the function:def bulk_upload(self, items, field_id):
current = 0
new_items = 0 # total items added with bulk
bulk_json = ""
if not items:
return new_items
url = self.index_url + '/items/_bulk'
logger.debug("Adding items to %s (in %i packs)", self.anonymize_url(url), self.max_items_bulk)
task_init = time()
for item in items:
if current >= self.max_items_bulk:
task_init = time()
new_items += self.safe_put_bulk(url, bulk_json)
current = 0
json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
logger.debug("bulk packet sent (%.2f sec, %i total, %.2f MB)"
% (time() - task_init, new_items, json_size))
bulk_json = ""
data_json = json.dumps(item)
bulk_json += '{"index" : {"_id" : "%s" } }\n' % (item[field_id])
bulk_json += data_json + "\n" # Bulk document
current += 1
if current > 0:
new_items += self.safe_put_bulk(url, bulk_json)
json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
logger.debug("bulk packet sent (%.2f sec prev, %i total, %.2f MB)"
% (time() - task_init, new_items, json_size))
return new_items | [
"Upload in controlled packs items to ES using bulk API"
]
|
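The ndjson payload built by `bulk_upload()` has the shape sketched below for two items keyed by a hypothetical `field_id` of `uuid`: one action line plus one document line per item, each terminated by a newline.

```python
bulk_json = (
    '{"index" : {"_id" : "item-1" } }\n'
    '{"uuid": "item-1", "origin": "https://example.org/repo.git"}\n'
    '{"index" : {"_id" : "item-2" } }\n'
    '{"uuid": "item-2", "origin": "https://example.org/repo.git"}\n'
)
```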
Please provide a description of the function:def get_last_date(self, field, filters_=[]):
'''
:field: field with the data
:filters_: additional filters to find the date
'''
last_date = self.get_last_item_field(field, filters_=filters_)
return last_date | []
|
Please provide a description of the function:def get_last_offset(self, field, filters_=[]):
'''
:field: field with the data
:filters_: additional filters to find the date
'''
offset = self.get_last_item_field(field, filters_=filters_, offset=True)
return offset | []
|
Please provide a description of the function:def get_last_item_field(self, field, filters_=[], offset=False):
'''
:field: field with the data
:filters_: additional filters to find the date
:offset: Return offset field instead of date field
'''
last_value = None
url = self.index_url
url += "/_search"
if filters_ is None:
filters_ = []
terms = []
for filter_ in filters_:
if not filter_:
continue
term = '''{"term" : { "%s" : "%s"}}''' % (filter_['name'], filter_['value'])
terms.append(term)
data_query = '''"query": {"bool": {"filter": [%s]}},''' % (','.join(terms))
data_agg = '''
"aggs": {
"1": {
"max": {
"field": "%s"
}
}
}
''' % field
data_json = '''
{ "size": 0, %s %s
} ''' % (data_query, data_agg)
logger.debug("%s %s", self.anonymize_url(url), data_json)
headers = {"Content-Type": "application/json"}
res = self.requests.post(url, data=data_json, headers=headers)
res.raise_for_status()
res_json = res.json()
if 'aggregations' in res_json:
last_value = res_json["aggregations"]["1"]["value"]
if offset:
if last_value is not None:
last_value = int(last_value)
else:
if "value_as_string" in res_json["aggregations"]["1"]:
last_value = res_json["aggregations"]["1"]["value_as_string"]
last_value = parser.parse(last_value)
else:
last_value = res_json["aggregations"]["1"]["value"]
if last_value:
try:
last_value = unixtime_to_datetime(last_value)
except ValueError:
# last_value is in microsecs
last_value = unixtime_to_datetime(last_value / 1000)
return last_value | []
|
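Assembled from the pieces above, the query body sent by `get_last_item_field()` looks like this for the field `metadata__updated_on` and a single hypothetical `origin` filter:

```python
data_json = '''
{ "size": 0,
  "query": {"bool": {"filter": [{"term" : { "origin" : "https://example.org/repo.git"}}]}},
  "aggs": {
    "1": {
      "max": {
        "field": "metadata__updated_on"
      }
    }
  }
}
'''
```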
Please provide a description of the function:def delete_items(self, retention_time, time_field="metadata__updated_on"):
if retention_time is None:
logger.debug("[items retention] Retention policy disabled, no items will be deleted.")
return
if retention_time <= 0:
logger.debug("[items retention] Minutes to retain must be greater than 0.")
return
before_date = get_diff_current_date(minutes=retention_time)
before_date_str = before_date.isoformat()
es_query = '''
{
"query": {
"range": {
"%s": {
"lte": "%s"
}
}
}
}
''' % (time_field, before_date_str)
r = self.requests.post(self.index_url + "/_delete_by_query?refresh",
data=es_query, headers=HEADER_JSON, verify=False)
try:
r.raise_for_status()
r_json = r.json()
logger.debug("[items retention] %s items deleted from %s before %s.",
r_json['deleted'], self.anonymize_url(self.index_url), before_date)
except requests.exceptions.HTTPError as ex:
logger.error("[items retention] Error deleted items from %s.", self.anonymize_url(self.index_url))
logger.error(ex)
return | [
"Delete documents updated before a given date\n\n :param retention_time: maximum number of minutes wrt the current date to retain the data\n :param time_field: time field to delete the data\n "
]
|
Please provide a description of the function:def all_properties(self):
properties = {}
r = self.requests.get(self.index_url + "/_mapping", headers=HEADER_JSON, verify=False)
try:
r.raise_for_status()
r_json = r.json()
if 'items' not in r_json[self.index]['mappings']:
return properties
if 'properties' not in r_json[self.index]['mappings']['items']:
return properties
properties = r_json[self.index]['mappings']['items']['properties']
except requests.exceptions.HTTPError as ex:
logger.error("Error all attributes for %s.", self.anonymize_url(self.index_url))
logger.error(ex)
return
return properties | [
"Get all properties of a given index"
]
|
Please provide a description of the function:def get_kibiter_version(url):
config_url = '.kibana/config/_search'
# Avoid having // in the URL because ES will fail
if url[-1] != '/':
url += "/"
url += config_url
r = requests.get(url)
r.raise_for_status()
if len(r.json()['hits']['hits']) == 0:
logger.error("Can not get the Kibiter version")
return None
version = r.json()['hits']['hits'][0]['_id']
# 5.4.0-SNAPSHOT
major_version = version.split(".", 1)[0]
return major_version | [
"\n Return kibiter major number version\n\n The url must point to the Elasticsearch used by Kibiter\n "
]
|
Please provide a description of the function:def get_params_parser():
parser = argparse.ArgumentParser(usage=ARTHUR_USAGE_MSG,
description=ARTHUR_DESC_MSG,
epilog=ARTHUR_EPILOG_MSG,
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False)
ElasticOcean.add_params(parser)
parser.add_argument('-h', '--help', action='help',
help=argparse.SUPPRESS)
parser.add_argument('-g', '--debug', dest='debug',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument("--no_incremental", action='store_true',
help="don't use last state for data source")
parser.add_argument("--fetch_cache", action='store_true',
help="Use cache for item retrieval")
parser.add_argument("--enrich", action='store_true',
help="Enrich items after retrieving")
parser.add_argument("--enrich_only", action='store_true',
help="Only enrich items (DEPRECATED, use --only-enrich)")
parser.add_argument("--only-enrich", dest='enrich_only', action='store_true',
help="Only enrich items")
parser.add_argument("--filter-raw", dest='filter_raw',
help="Filter raw items. Format: field:value")
parser.add_argument("--filters-raw-prefix", nargs='*',
help="Filter raw items with prefix filter. Format: field:value field:value ...")
parser.add_argument("--events-enrich", dest='events_enrich', action='store_true',
help="Enrich events in items")
parser.add_argument('--index', help="Ocean index name")
parser.add_argument('--index-enrich', dest="index_enrich", help="Ocean enriched index name")
parser.add_argument('--db-user', help="User for db connection (default to root)",
default="root")
parser.add_argument('--db-password', help="Password for db connection (default empty)",
default="")
parser.add_argument('--db-host', help="Host for db connection (default to mariadb)",
default="mariadb")
parser.add_argument('--db-projects-map', help="Projects Mapping DB")
parser.add_argument('--json-projects-map', help="Projects Mapping JSON file")
parser.add_argument('--project', help="Project for the repository (origin)")
parser.add_argument('--refresh-projects', action='store_true', help="Refresh projects in enriched items")
parser.add_argument('--db-sortinghat', help="SortingHat DB")
parser.add_argument('--only-identities', action='store_true', help="Only add identities to SortingHat DB")
parser.add_argument('--refresh-identities', action='store_true', help="Refresh identities in enriched items")
parser.add_argument('--author_id', nargs='*', help="Field author_ids to be refreshed")
parser.add_argument('--author_uuid', nargs='*', help="Field author_uuids to be refreshed")
parser.add_argument('--github-token', help="If provided, github usernames will be retrieved in git enrich.")
parser.add_argument('--jenkins-rename-file', help="CSV mapping file with nodes renamed schema.")
parser.add_argument('--studies', action='store_true', help="Execute studies after enrichment.")
parser.add_argument('--only-studies', action='store_true', help="Execute only studies.")
parser.add_argument('--bulk-size', default=1000, type=int,
help="Number of items per bulk request to Elasticsearch.")
parser.add_argument('--scroll-size', default=100, type=int,
help="Number of items to get from Elasticsearch when scrolling.")
parser.add_argument('--arthur', action='store_true', help="Read items from arthur redis queue")
parser.add_argument('--pair-programming', action='store_true', help="Do pair programming in git enrich")
parser.add_argument('--studies-list', nargs='*', help="List of studies to be executed")
parser.add_argument('backend', help=argparse.SUPPRESS)
parser.add_argument('backend_args', nargs=argparse.REMAINDER,
help=argparse.SUPPRESS)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser | [
"Parse command line arguments"
]
|
Please provide a description of the function:def get_params():
parser = get_params_parser()
args = parser.parse_args()
if not args.enrich_only and not args.only_identities and not args.only_studies:
if not args.index:
# Check that the raw index name is defined
print("[error] --index <name> param is required when collecting items from raw")
sys.exit(1)
return args | [
" Get params definition from ElasticOcean and from all the backends "
]
|
Please provide a description of the function:def get_time_diff_days(start_txt, end_txt):
''' Number of days between two dates '''
if start_txt is None or end_txt is None:
return None
start = parser.parse(start_txt)
end = parser.parse(end_txt)
seconds_day = float(60 * 60 * 24)
diff_days = \
(end - start).total_seconds() / seconds_day
diff_days = float('%.2f' % diff_days)
return diff_days | []
|
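A small worked example of the calculation above, computed by hand: two ISO timestamps 36 hours apart give 1.5 days after the two-decimal rounding, and a missing date returns None.

```python
# get_time_diff_days("2018-01-01T00:00:00", "2018-01-02T12:00:00") -> 1.5
# get_time_diff_days(None, "2018-01-02T12:00:00")                  -> None
diff_days = (36 * 60 * 60) / float(60 * 60 * 24)   # 36 hours = 1.5 days
diff_days = float('%.2f' % diff_days)              # 1.5
```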
Please provide a description of the function:def get_identities(self, item):
item = item['data']
if 'owner' in item:
owner = self.get_sh_identity(item['owner'])
yield owner
if 'user' in item:
user = self.get_sh_identity(item['user'])
yield user
if 'mentor' in item:
mentor = self.get_sh_identity(item['mentor'])
yield mentor | [
"Return the identities from an item"
]
|
Please provide a description of the function:def get_item_sh(self, item, roles=None, date_field=None):
eitem_sh = {}
created = str_to_datetime(date_field)
for rol in roles:
identity = self.get_sh_identity(item, rol)
eitem_sh.update(self.get_item_sh_fields(identity, created, rol=rol))
if not eitem_sh[rol + '_org_name']:
eitem_sh[rol + '_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_name']:
eitem_sh[rol + '_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_user_name']:
eitem_sh[rol + '_user_name'] = SH_UNKNOWN_VALUE
# Add the author field common in all data sources
if rol == self.get_field_author():
identity = self.get_sh_identity(item, rol)
eitem_sh.update(self.get_item_sh_fields(identity, created, rol="author"))
if not eitem_sh['author_org_name']:
eitem_sh['author_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_name']:
eitem_sh['author_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_user_name']:
eitem_sh['author_user_name'] = SH_UNKNOWN_VALUE
return eitem_sh | [
"Add sorting hat enrichment fields"
]
|
Please provide a description of the function:def get_identities(self, item):
item = item['data']
for field in ["assignee", "reporter", "creator"]:
if field not in item["fields"]:
continue
if item["fields"][field]:
user = self.get_sh_identity(item["fields"][field])
yield user
comments = item.get('comments_data', [])
for comment in comments:
if 'author' in comment and comment['author']:
user = self.get_sh_identity(comment['author'])
yield user
if 'updateAuthor' in comment and comment['updateAuthor']:
user = self.get_sh_identity(comment['updateAuthor'])
yield user | [
"Return the identities from an item"
]
|
Please provide a description of the function:def enrich_fields(cls, fields, eitem):
for field in fields:
if field.startswith('customfield_'):
if type(fields[field]) is dict:
if 'name' in fields[field]:
if fields[field]['name'] == "Story Points":
eitem['story_points'] = fields[field]['value']
elif fields[field]['name'] == "Sprint":
value = fields[field]['value']
if value:
sprint = value[0].partition(",name=")[2].split(',')[0]
sprint_start = value[0].partition(",startDate=")[2].split(',')[0]
sprint_end = value[0].partition(",endDate=")[2].split(',')[0]
sprint_complete = value[0].partition(",completeDate=")[2].split(',')[0]
eitem['sprint'] = sprint
eitem['sprint_start'] = cls.fix_value_null(sprint_start)
eitem['sprint_end'] = cls.fix_value_null(sprint_end)
eitem['sprint_complete'] = cls.fix_value_null(sprint_complete) | [
"Enrich the fields property of an issue.\n\n Loops through al properties in issue['fields'],\n using those that are relevant to enrich eitem with new properties.\n Those properties are user defined, depending on options\n configured in Jira. For example, if SCRUM is activated,\n we have a field named \"Story Points\".\n\n :param fields: fields property of an issue\n :param eitem: enriched item, which will be modified adding more properties\n "
]
|
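The sketch below shows an illustrative value of a Jira "Sprint" custom field (a single-element list of serialized sprint descriptors; the exact serialization format is an assumption based on the parsing above) and what the partition/split logic extracts from it.

```python
value = ["com.atlassian.greenhopper.service.sprint.Sprint@12345[id=7,state=CLOSED,"
         "name=Sprint 7,startDate=2018-01-01T00:00:00.000Z,endDate=2018-01-15T00:00:00.000Z,"
         "completeDate=2018-01-15T10:00:00.000Z,sequence=7]"]
sprint = value[0].partition(",name=")[2].split(',')[0]             # 'Sprint 7'
sprint_start = value[0].partition(",startDate=")[2].split(',')[0]  # '2018-01-01T00:00:00.000Z'
```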
Please provide a description of the function:def get_identities(self, item):
identities = []
if 'data' not in item:
return identities
if 'revisions' not in item['data']:
return identities
revisions = item['data']['revisions']
for revision in revisions:
user = self.get_sh_identity(revision)
yield user | [
" Return the identities from an item "
]
|
Please provide a description of the function:def get_review_sh(self, revision, item):
identity = self.get_sh_identity(revision)
update = parser.parse(item[self.get_field_date()])
erevision = self.get_item_sh_fields(identity, update)
return erevision | [
" Add sorting hat enrichment fields for the author of the revision "
]
|
Please provide a description of the function:def get_identities(self, item):
category = item['category']
item = item['data']
if category == "issue":
identity_types = ['user', 'assignee']
elif category == "pull_request":
identity_types = ['user', 'merged_by']
else:
identity_types = []
for identity in identity_types:
if item[identity]:
# In user_data we have the full user data
user = self.get_sh_identity(item[identity + "_data"])
if user:
yield user | [
"Return the identities from an item"
]
|
Please provide a description of the function:def get_github_cache(self, kind, key_):
cache = {}
res_size = 100 # best size?
from_ = 0
index_github = "github/" + kind
url = self.elastic.url + "/" + index_github
url += "/_search" + "?" + "size=%i" % res_size
r = self.requests.get(url)
type_items = r.json()
if 'hits' not in type_items:
logger.info("No github %s data in ES" % (kind))
else:
while len(type_items['hits']['hits']) > 0:
for hit in type_items['hits']['hits']:
item = hit['_source']
cache[item[key_]] = item
from_ += res_size
r = self.requests.get(url + "&from=%i" % from_)
type_items = r.json()
if 'hits' not in type_items:
break
return cache | [
" Get cache data for items of _type using key_ as the cache dict key "
]
|
Please provide a description of the function:def get_time_to_merge_request_response(self, item):
review_dates = [str_to_datetime(review['created_at']) for review in item['review_comments_data']
if item['user']['login'] != review['user']['login']]
if review_dates:
return min(review_dates)
return None | [
"Get the first date at which a review was made on the PR by someone\n other than the user who created the PR\n "
]
|
Please provide a description of the function:def get_num_commenters(self, item):
commenters = [comment['user']['login'] for comment in item['comments_data']]
return len(set(commenters)) | [
"Get the number of unique people who commented on the issue/pr"
]
|
Please provide a description of the function:def enrich_pull_requests(self, ocean_backend, enrich_backend, raw_issues_index="github_issues_raw"):
HEADER_JSON = {"Content-Type": "application/json"}
# issues raw index from which the data will be extracted
github_issues_raw_index = ocean_backend.elastic_url + "/" + raw_issues_index
issues_index_search_url = github_issues_raw_index + "/_search"
# pull_requests index search url in which the data is to be updated
enrich_index_search_url = self.elastic.index_url + "/_search"
logger.info("Doing enrich_pull_request study for index {}"
.format(self.elastic.anonymize_url(self.elastic.index_url)))
time.sleep(1) # HACK: Wait until git enrich index has been written
def make_request(url, error_msg, data=None, req_type="GET"):
r = None
if req_type == "GET":
r = self.requests.get(url, headers=HEADER_JSON,
verify=False)
elif req_type == "POST" and data is not None:
r = self.requests.post(url, data=data, headers=HEADER_JSON,
verify=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.error(error_msg)
logger.error(ex)
return
return r
# Check if the github issues raw index exists, if not raise an error and abort
error_msg = "Invalid index provided for enrich_pull_requests study. Aborting."
make_request(issues_index_search_url, error_msg)
# get the number of pull requests in the pull_requests index
# https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-count.html
# Example:
# epoch timestamp count
# 1533454641 13:07:21 276
count_url = enrich_backend.elastic_url + "/_cat/count/" + enrich_backend.elastic.index + "?v"
error_msg = "Cannot fetch number of items in {} Aborting.".format(enrich_backend.elastic.index)
r = make_request(count_url, error_msg)
num_pull_requests = int(r.text.split()[-1])
# get all the ids that are in the enriched pull requests index which will be used later
# to pull requests data from the issue having the same id in the raw_issues_index
pull_requests_ids = []
size = 10000 # Default number of items that can be queried from elasticsearch at a time
i = 0 # counter
while num_pull_requests > 0:
fetch_id_in_repo_query = """ { "_source": ["id_in_repo"], "from": %s, "size": %s } """ % (i, size)
error_msg = "Error extracting id_in_repo from {}. Aborting.".format(self.elastic.index_url)
r = make_request(enrich_index_search_url, error_msg, fetch_id_in_repo_query, "POST")
id_in_repo_json = r.json()["hits"]["hits"]
pull_requests_ids.extend([item["_source"]["id_in_repo"] for item in id_in_repo_json])
i += size
num_pull_requests -= size
# get pull requests data from the github_issues_raw and pull_requests only
# index using specific id for each of the item
query = """ { "query": { "bool": { "must": [{ "match": { %s: %s } }] } } } """
num_enriched = 0 # counter to count the number of PRs enriched
pull_requests = []
for pr_id in pull_requests_ids:
# retrieve the data from the issues index
issue_query = query % ('"data.number"', pr_id)
error_msg = "Id {} doesnot exists in {}. Aborting.".format(pr_id, github_issues_raw_index)
r = make_request(issues_index_search_url, error_msg, issue_query, "POST")
issue = r.json()["hits"]["hits"][0]["_source"]["data"]
# retrieve the data from the pull_requests index
pr_query = query % ('"id_in_repo"', pr_id)
error_msg = "Id {} doesnot exists in {}. Aborting.".format(pr_id, self.elastic.index_url)
r = make_request(enrich_index_search_url, error_msg, pr_query, "POST")
pull_request_data = r.json()["hits"]["hits"][0]
pull_request = pull_request_data['_source']
pull_request["_item_id"] = pull_request_data['_id']
# Add the necessary fields
reaction_time = get_time_diff_days(str_to_datetime(issue['created_at']),
self.get_time_to_first_attention(issue))
if not reaction_time:
reaction_time = 0
if pull_request["time_to_merge_request_response"]:
reaction_time = min(pull_request["time_to_merge_request_response"], reaction_time)
pull_request["time_to_merge_request_response"] = reaction_time
pull_request['num_comments'] = issue['comments']
# should latest reviews be considered as well?
pull_request['pr_comment_duration'] = get_time_diff_days(str_to_datetime(issue['created_at']),
self.get_latest_comment_date(issue))
pull_request['pr_comment_diversity'] = self.get_num_commenters(issue)
pull_requests.append(pull_request)
if len(pull_requests) >= self.elastic.max_items_bulk:
self.elastic.bulk_upload(pull_requests, "_item_id")
pull_requests = []
num_enriched += 1
logger.info("pull_requests processed %i/%i", num_enriched, len(pull_requests_ids))
self.elastic.bulk_upload(pull_requests, "_item_id") | [
"\n The purpose of this Study is to add additional fields to the pull_requests only index.\n Basically to calculate some of the metrics from Code Development under GMD metrics:\n https://github.com/chaoss/wg-gmd/blob/master/2_Growth-Maturity-Decline.md#code-development\n\n When data from the pull requests category is fetched using perceval,\n some additional fields such as \"number_of_comments\" that are made on the PR\n cannot be calculated as the data related to comments is not fetched.\n When data from the issues category is fetched, then every item is considered as an issue\n and PR specific data such as \"review_comments\" are not fetched.\n\n Items (pull requests) from the raw issues index are queried and data from those items\n are used to add fields in the corresponding pull request in the pull requests only index.\n The ids are matched in both the indices.\n\n :param ocean_backend: backend from which to read the raw items\n :param enrich_backend: backend from which to read the enriched items\n :param raw_issues_index: the raw issues index from which the data for PRs is to be extracted\n :return: None\n ",
"\n Make a request to the given url. The request can be of type GET or a POST.\n If the request raises an error, display that error using the custom error msg.\n\n :param url: URL to make the GET request to\n :param error_msg: custom error message for logging purposes\n :param data: data to be sent with the POST request\n optional if type=\"GET\" else compulsory\n :param req_type: the type of request to be made: GET or POST\n default: GET\n :return r: requests object\n ",
"\n {\n \"_source\": [\"id_in_repo\"],\n \"from\": %s,\n \"size\": %s\n }\n ",
"\n {\n \"query\": {\n \"bool\": {\n \"must\": [{\n \"match\": {\n %s: %s\n }\n }]\n }\n }\n }\n "
]
|
Please provide a description of the function:def get_identities(self, item):
field = self.get_field_author()
yield self.get_sh_identity(item, field) | [
" Return the identities from an item "
]
|
Please provide a description of the function:def get_identities(self, item):
message = item['data']['message']
identity = self.get_sh_identity(message['from'])
yield identity | [
" Return the identities from an item "
]
|
Please provide a description of the function:def get_params():
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--debug', dest='debug', action='store_true')
parser.add_argument('-t', '--token', dest='token', help="GitHub token")
parser.add_argument('-o', '--owner', dest='owner', help='GitHub owner (user or org) to be analyzed')
parser.add_argument('-n', '--nrepos', dest='nrepos', type=int, default=NREPOS,
help='Number of GitHub repositories from the Organization to be analyzed (default:10)')
return parser.parse_args() | [
"Parse command line arguments"
]
|
Please provide a description of the function:def get_arthur_params_from_url(cls, url):
params = {}
owner = url.split('/')[-2]
repository = url.split('/')[-1]
# params.append('--owner')
params['owner'] = owner
# params.append('--repository')
params['repository'] = repository
return params | [
" Get the arthur params given a URL for the data source "
]
|
Please provide a description of the function:def get_identities(self, item):
identities = []
field = self.get_field_author()
identities.append(self.get_sh_identity(item, field))
return identities | [
" Return the identities from an item "
]
|
Please provide a description of the function:def get_rich_events(self, item):
if "version_downloads_data" not in item['data']:
return []
# To get values from the task
eitem = self.get_rich_item(item)
for sample in item['data']["version_downloads_data"]["version_downloads"]:
event = deepcopy(eitem)
event['download_sample_id'] = sample['id']
event['sample_date'] = sample['date']
sample_date = parser.parse(event['sample_date'])
event['sample_version'] = sample['version']
event['sample_downloads'] = sample['downloads']
event.update(self.get_grimoire_fields(sample_date.isoformat(), "downloads_event"))
yield event | [
"\n In the events there are some common fields with the crate. The name\n of the field must be the same in the create and in the downloads event\n so we can filer using it in crate and event at the same time.\n\n * Fields that don't change: the field does not change with the events\n in a create so the value is always the same in the events of a create.\n\n * Fields that change: the value of the field changes with events\n "
]
|
Please provide a description of the function:def get_params_parser_create_dash():
parser = argparse.ArgumentParser(usage="usage: e2k.py [options]",
description="Create a Kibana dashboard from a template")
ElasticOcean.add_params(parser)
parser.add_argument("-d", "--dashboard", help="dashboard to be used as template")
parser.add_argument("-i", "--index", help="enriched index to be used as data source")
parser.add_argument("--kibana", dest="kibana_index", default=".kibana",
help="Kibana index name (.kibana default)")
parser.add_argument('-g', '--debug', dest='debug', action='store_true')
return parser | [
"Parse command line arguments"
]
|
Please provide a description of the function:def get_identities(self, item):
item = item['data']
user = self.get_sh_identity(item)
yield user | [
" Return the identities from an item "
]
|
Please provide a description of the function:def get_item_project(self, eitem):
project = None
eitem_project = {}
ds_name = self.get_connector_name() # data source name in projects map
if ds_name not in self.prjs_map:
return eitem_project
for tag in eitem['hashtags_analyzed']:
# lcanas: hashtag provided in projects.json file should not be case sensitive T6876
tags2project = CaseInsensitiveDict(self.prjs_map[ds_name])
if tag in tags2project:
project = tags2project[tag]
break
if project is None:
project = DEFAULT_PROJECT
eitem_project = {"project": project}
eitem_project.update(self.add_project_levels(project))
return eitem_project | [
" Get project mapping enrichment field.\n\n Twitter mappings is pretty special so it needs a special\n implementacion.\n "
]
|
Please provide a description of the function:def set_jenkins_rename_file(self, nodes_rename_file):
self.nodes_rename_file = nodes_rename_file
self.__load_node_renames()
logger.info("Jenkis node rename file active: %s", nodes_rename_file) | [
" File with nodes renaming mapping:\n\n Node,Comment\n arm-build1,remove\n arm-build2,keep\n ericsson-build3,merge into ericsson-build1\n ....\n\n Once set in the next enrichment the rename will be done\n "
]
|
Please provide a description of the function:def get_fields_from_job_name(self, job_name):
extra_fields = {
'category': None,
'installer': None,
'scenario': None,
'testproject': None,
'pod': None,
'loop': None,
'branch': None
}
try:
components = job_name.split('-')
if len(components) < 2:
return extra_fields
kind = components[1]
if kind == 'os':
extra_fields['category'] = 'parent/main'
extra_fields['installer'] = components[0]
extra_fields['scenario'] = '-'.join(components[2:-3])
elif kind == 'deploy':
extra_fields['category'] = 'deploy'
extra_fields['installer'] = components[0]
else:
extra_fields['category'] = 'test'
extra_fields['testproject'] = components[0]
extra_fields['installer'] = components[1]
extra_fields['pod'] = components[-3]
extra_fields['loop'] = components[-2]
extra_fields['branch'] = components[-1]
except IndexError as ex:
# Just DEBUG level because it is just for OPNFV
logger.debug('Problems parsing job name %s', job_name)
logger.debug(ex)
return extra_fields | [
"Analyze a Jenkins job name, producing a dictionary\n\n The produced dictionary will include information about the category\n and subcategory of the job name, and any extra information which\n could be useful.\n\n For each deployment of a Jenkins dashboard, an implementation of\n this function should be produced, according to the needs of the users.\n\n :param job: job name to Analyze\n :returns: dictionary with categorization information\n\n "
]
|
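As a worked illustration of the split logic above, here is a hypothetical OPNFV-style job name and the fields the function would derive from it (the job name is invented for the example).

# Hypothetical job name following the pattern handled above
job_name = 'functest-apex-baremetal-daily-master'
components = job_name.split('-')    # ['functest', 'apex', 'baremetal', 'daily', 'master']

# components[1] is neither 'os' nor 'deploy', so the job falls into the 'test' branch:
expected = {
    'category': 'test',
    'testproject': components[0],   # 'functest'
    'installer': components[1],     # 'apex'
    'pod': components[-3],          # 'baremetal'
    'loop': components[-2],         # 'daily'
    'branch': components[-1],       # 'master'
    'scenario': None                # untouched in this branch
}
print(expected)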
Please provide a description of the function:def extract_builton(self, built_on, regex):
pattern = re.compile(regex, re.M | re.I)
match = pattern.search(built_on)
if match and len(match.groups()) >= 1:
node_name = match.group(1)
else:
msg = "Node name not extracted, using builtOn as it is: " + regex + ":" + built_on
logger.warning(msg)
node_name = built_on
return node_name | [
"Extracts node name using a regular expression. Node name is expected to\n be group 1.\n "
]
|
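A short, self-contained illustration of the regex-based extraction above; both the builtOn label and the regular expression are invented for the example, the only requirement being that group 1 captures the node name.

import re

built_on = 'prd-ubuntu1804-docker-8c-8g-1234'
regex = r'(.*)-\d+$'    # hypothetical pattern: node name is everything before a trailing build number

pattern = re.compile(regex, re.M | re.I)
match = pattern.search(built_on)
node_name = match.group(1) if match and len(match.groups()) >= 1 else built_on
print(node_name)        # 'prd-ubuntu1804-docker-8c-8g'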
Please provide a description of the function:def get_identities(self, item):
data = item['data']
identity = self.get_sh_identity(data)
if identity['username']:
self.add_sh_github_identity(identity['username'])
yield identity | [
" Return the identities from an item "
]
|
Please provide a description of the function:def onion_study(in_conn, out_conn, data_source):
onion = OnionStudy(in_connector=in_conn, out_connector=out_conn, data_source=data_source)
ndocs = onion.analyze()
return ndocs | [
"Build and index for onion from a given Git index.\n\n :param in_conn: ESPandasConnector to read from.\n :param out_conn: ESPandasConnector to write to.\n :param data_source: name of the date source to generate onion from.\n :return: number of documents written in ElasticSearch enriched index.\n "
]
|
Please provide a description of the function:def read_block(self, size=None, from_date=None):
# Get quarters corresponding to All items (Incremental mode NOT SUPPORTED)
quarters = self.__quarters()
for quarter in quarters:
logger.info(self.__log_prefix + " Quarter: " + str(quarter))
date_range = {self._timeframe_field: {'gte': quarter.start_time, 'lte': quarter.end_time}}
orgs = self.__list_uniques(date_range, self.AUTHOR_ORG)
projects = self.__list_uniques(date_range, self.PROJECT)
# Get global data
s = self.__build_search(date_range)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing).copy()
# Get global data by Org
for org_name in orgs:
logger.info(self.__log_prefix + " Quarter: " + str(quarter) + " Org: " + org_name)
s = self.__build_search(date_range, org_name=org_name)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing, org_name=org_name).copy()
# Get project specific data
for project in projects:
logger.info(self.__log_prefix + " Quarter: " + str(quarter) + " Project: " + project)
# Global project
s = self.__build_search(date_range, project_name=project)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing, project_name=project).copy()
# Split by Org
for org_name in orgs:
logger.info(self.__log_prefix + " Quarter: " + str(quarter) + " Project: " + project + " Org: " + org_name)
s = self.__build_search(date_range, project_name=project, org_name=org_name)
response = s.execute()
for timing in response.aggregations[self.TIMEFRAME].buckets:
yield self.__build_dataframe(timing, project_name=project, org_name=org_name).copy() | [
"Read author commits by Quarter, Org and Project.\n\n :param from_date: not used here. Incremental mode not supported yet.\n :param size: not used here.\n :return: DataFrame with commit count per author, split by quarter, org and project.\n "
]
|
Please provide a description of the function:def write(self, items):
if self._read_only:
raise IOError("Cannot write, Connector created as Read Only")
if len(items) == 0:
logger.info(self.__log_prefix + " Nothing to write")
return
# Uploading info to the new ES
rows = items.to_dict("index")
docs = []
for row_index in rows.keys():
row = rows[row_index]
item_id = row[self.AUTHOR_ORG] + '_' + row[self.PROJECT] + '_' \
+ row[self.TIMEFRAME] + '_' + row[self.AUTHOR_UUID]
item_id = item_id.replace(' ', '').lower()
doc = {
"_index": self._es_index,
"_type": "item",
"_id": item_id,
"_source": row
}
docs.append(doc)
# TODO uncomment following lines for incremental version
# # Delete old data if exists to ensure refreshing in case of deleted commits
# timeframe = docs[0]['_source']['timeframe']
# org = docs[0]['_source']['author_org_name']
# project = docs[0]['_source']['project']
# s = Search(using=self._es_conn, index=self._es_index)
# s = s.filter('term', project=project)
# s = s.filter('term', author_org_name=org)
# s = s.filter('term', timeframe=timeframe)
# response = s.execute()
#
# if response.hits.total > 0:
# response = s.delete()
# logger.info("[Onion] Deleted " + str(response.deleted) + " items for refreshing: " + timeframe + " "
# + org + " " + project)
# TODO exception and error handling
helpers.bulk(self._es_conn, docs)
logger.info(self.__log_prefix + " Written: " + str(len(docs))) | [
"Write items into ElasticSearch.\n\n :param items: Pandas DataFrame\n "
]
|
Please provide a description of the function:def __quarters(self, from_date=None):
s = Search(using=self._es_conn, index=self._es_index)
if from_date:
# Work around to solve conversion problem of '__' to '.' in field name
q = Q('range')
q.__setattr__(self._sort_on_field, {'gte': from_date})
s = s.filter(q)
# from:to parameters (=> from: 0, size: 0)
s = s[0:0]
s.aggs.bucket(self.TIMEFRAME, 'date_histogram', field=self._timeframe_field,
interval='quarter', min_doc_count=1)
response = s.execute()
quarters = []
for quarter in response.aggregations[self.TIMEFRAME].buckets:
period = pandas.Period(quarter.key_as_string, 'Q')
quarters.append(period)
return quarters | [
"Get a set of quarters with available items from a given index date.\n\n :param from_date:\n :return: list of `pandas.Period` corresponding to quarters\n "
]
|
Please provide a description of the function:def __list_uniques(self, date_range, field_name):
# Get project list
s = Search(using=self._es_conn, index=self._es_index)
s = s.filter('range', **date_range)
# from:to parameters (=> from: 0, size: 0)
s = s[0:0]
s.aggs.bucket('uniques', 'terms', field=field_name, size=1000)
response = s.execute()
uniques_list = []
for item in response.aggregations.uniques.buckets:
uniques_list.append(item.key)
return uniques_list | [
"Retrieve a list of unique values in a given field within a date range.\n\n :param date_range:\n :param field_name:\n :return: list of unique values.\n "
]
|
Please provide a description of the function:def __build_dataframe(self, timing, project_name=None, org_name=None):
date_list = []
uuid_list = []
name_list = []
contribs_list = []
latest_ts_list = []
logger.debug(self.__log_prefix + " timing: " + timing.key_as_string)
for author in timing[self.AUTHOR_UUID].buckets:
latest_ts_list.append(timing[self.LATEST_TS].value_as_string)
date_list.append(timing.key_as_string)
uuid_list.append(author.key)
if author[self.AUTHOR_NAME] and author[self.AUTHOR_NAME].buckets \
and len(author[self.AUTHOR_NAME].buckets) > 0:
name_list.append(author[self.AUTHOR_NAME].buckets[0].key)
else:
name_list.append("Unknown")
contribs_list.append(author[self.CONTRIBUTIONS].value)
df = pandas.DataFrame()
df[self.TIMEFRAME] = date_list
df[self.AUTHOR_UUID] = uuid_list
df[self.AUTHOR_NAME] = name_list
df[self.CONTRIBUTIONS] = contribs_list
df[self.TIMESTAMP] = latest_ts_list
if not project_name:
project_name = "_Global_"
df[self.PROJECT] = project_name
if not org_name:
org_name = "_Global_"
df[self.AUTHOR_ORG] = org_name
return df | [
"Build a DataFrame from a time bucket.\n\n :param timing:\n :param project_name:\n :param org_name:\n :return:\n "
]
|
Please provide a description of the function:def process(self, items_block):
logger.info(self.__log_prefix + " Authors to process: " + str(len(items_block)))
onion_enrich = Onion(items_block)
df_onion = onion_enrich.enrich(member_column=ESOnionConnector.AUTHOR_UUID,
events_column=ESOnionConnector.CONTRIBUTIONS)
# Get and store Quarter as String
df_onion['quarter'] = df_onion[ESOnionConnector.TIMEFRAME].map(lambda x: str(pandas.Period(x, 'Q')))
# Add metadata: enriched on timestamp
df_onion['metadata__enriched_on'] = datetime.utcnow().isoformat()
df_onion['data_source'] = self.data_source
df_onion['grimoire_creation_date'] = df_onion[ESOnionConnector.TIMEFRAME]
logger.info(self.__log_prefix + " Final new events: " + str(len(df_onion)))
return self.ProcessResults(processed=len(df_onion), out_items=df_onion) | [
"Process a DataFrame to compute Onion.\n\n :param items_block: items to be processed. Expects to find a pandas DataFrame.\n "
]
|
Please provide a description of the function:def get_projects(self):
repos_list = []
gerrit_projects_db = self.projects_db
db = Database(user="root", passwd="", host="localhost", port=3306,
scrdb=None, shdb=gerrit_projects_db, prjdb=None)
sql =
repos_list_raw = db.execute(sql)
# Convert from review.openstack.org_openstack/rpm-packaging-tools to
# openstack_rpm-packaging-tools
for repo in repos_list_raw:
# repo_name = repo[0].replace("review.openstack.org_","")
repo_name = repo[0].replace(self.repository + "_", "")
repos_list.append(repo_name)
return repos_list | [
" Get the projects list from database ",
"\n SELECT DISTINCT(repository_name)\n FROM project_repositories\n WHERE data_source='scr'\n "
]
|
Please provide a description of the function:def metadata(func):
@functools.wraps(func)
def decorator(self, *args, **kwargs):
eitem = func(self, *args, **kwargs)
metadata = {
'metadata__gelk_version': self.gelk_version,
'metadata__gelk_backend_name': self.__class__.__name__,
'metadata__enriched_on': datetime_utcnow().isoformat()
}
eitem.update(metadata)
return eitem
return decorator | [
"Add metadata to an item.\n\n Decorator that adds metadata to a given item such as\n the gelk revision used.\n\n "
]
|
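A minimal, self-contained sketch of how a decorator like this is applied to an enricher method. The class, its gelk_version value and the item are hypothetical, and the standard library is used here instead of the toolkit's datetime_utcnow helper.

import functools
from datetime import datetime, timezone

def metadata(func):
    # Same idea as above: wrap an enrichment method and tag its output
    @functools.wraps(func)
    def decorator(self, *args, **kwargs):
        eitem = func(self, *args, **kwargs)
        eitem.update({
            'metadata__gelk_version': self.gelk_version,
            'metadata__gelk_backend_name': self.__class__.__name__,
            'metadata__enriched_on': datetime.now(timezone.utc).isoformat()
        })
        return eitem
    return decorator

class FakeEnrich:
    gelk_version = '0.0.1'      # hypothetical version string

    @metadata
    def get_rich_item(self, item):
        return {'uuid': item['uuid']}

print(FakeEnrich().get_rich_item({'uuid': 'abc'}))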
Please provide a description of the function:def __convert_json_to_projects_map(self, json):
ds_repo_to_prj = {}
for project in json:
for ds in json[project]:
if ds == "meta":
continue # not a real data source
if ds not in ds_repo_to_prj:
ds_repo_to_prj[ds] = {}
for repo in json[project][ds]:
if repo in ds_repo_to_prj[ds]:
if project == ds_repo_to_prj[ds][repo]:
logger.debug("Duplicated repo: %s %s %s", ds, repo, project)
else:
if len(project.split(".")) > len(ds_repo_to_prj[ds][repo].split(".")):
logger.debug("Changed repo project because we found a leaf: %s leaf vs %s (%s, %s)",
project, ds_repo_to_prj[ds][repo], repo, ds)
ds_repo_to_prj[ds][repo] = project
else:
ds_repo_to_prj[ds][repo] = project
return ds_repo_to_prj | [
" Convert JSON format to the projects map format\n map[ds][repository] = project\n If a repository is in several projects assign to leaf\n Check that all JSON data is in the database\n\n :param json: data with the projects to repositories mapping\n :returns: the repositories to projects mapping per data source\n "
]
|
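To make the mapping direction concrete, here is a hypothetical projects.json fragment and the map the method would build from it; the project and repository names are invented, and the deeper (leaf) project wins when a repository appears twice.

# Hypothetical projects.json content
json_projects = {
    "grimoirelab": {
        "meta": {"title": "GrimoireLab"},                          # skipped: not a real data source
        "git": ["https://github.com/chaoss/grimoirelab-elk"]
    },
    "grimoirelab.elk": {
        "git": ["https://github.com/chaoss/grimoirelab-elk"]       # same repo, leaf project wins
    }
}

# Expected ds_repo_to_prj after conversion:
expected = {
    "git": {
        "https://github.com/chaoss/grimoirelab-elk": "grimoirelab.elk"
    }
}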
Please provide a description of the function:def enrich_items(self, ocean_backend, events=False):
max_items = self.elastic.max_items_bulk
current = 0
total = 0
bulk_json = ""
items = ocean_backend.fetch()
url = self.elastic.index_url + '/items/_bulk'
logger.debug("Adding items to %s (in %i packs)", self.elastic.anonymize_url(url), max_items)
if events:
logger.debug("Adding events items")
for item in items:
if current >= max_items:
try:
total += self.elastic.safe_put_bulk(url, bulk_json)
json_size = sys.getsizeof(bulk_json) / (1024 * 1024)
logger.debug("Added %i items to %s (%0.2f MB)", total, self.elastic.anonymize_url(url), json_size)
except UnicodeEncodeError:
# Why is requests encoding the POST data as ascii?
logger.error("Unicode error in enriched items")
logger.debug(bulk_json)
safe_json = str(bulk_json.encode('ascii', 'ignore'), 'ascii')
total += self.elastic.safe_put_bulk(url, safe_json)
bulk_json = ""
current = 0
if not events:
rich_item = self.get_rich_item(item)
data_json = json.dumps(rich_item)
bulk_json += '{"index" : {"_id" : "%s" } }\n' % \
(item[self.get_field_unique_id()])
bulk_json += data_json + "\n" # Bulk document
current += 1
else:
rich_events = self.get_rich_events(item)
for rich_event in rich_events:
data_json = json.dumps(rich_event)
bulk_json += '{"index" : {"_id" : "%s_%s" } }\n' % \
(item[self.get_field_unique_id()],
rich_event[self.get_field_event_unique_id()])
bulk_json += data_json + "\n" # Bulk document
current += 1
if current > 0:
total += self.elastic.safe_put_bulk(url, bulk_json)
return total | [
"\n Enrich the items fetched from ocean_backend generator\n generating enriched items/events which are uploaded to the Elasticsearch index for\n this Enricher (self).\n\n :param ocean_backend: Ocean backend object to fetch the items from\n :param events: enrich items or enrich events\n :return: total number of enriched items/events uploaded to Elasticsearch\n "
]
|
Please provide a description of the function:def get_grimoire_fields(self, creation_date, item_name):
grimoire_date = None
try:
grimoire_date = str_to_datetime(creation_date).isoformat()
except Exception as ex:
pass
name = "is_" + self.get_connector_name() + "_" + item_name
return {
"grimoire_creation_date": grimoire_date,
name: 1
} | [
" Return common grimoire fields for all data sources "
]
|
Please provide a description of the function:def add_project_levels(cls, project):
eitem_path = ''
eitem_project_levels = {}
if project is not None:
subprojects = project.split('.')
for i in range(0, len(subprojects)):
if i > 0:
eitem_path += "."
eitem_path += subprojects[i]
eitem_project_levels['project_' + str(i + 1)] = eitem_path
return eitem_project_levels | [
" Add project sub levels extra items "
]
|
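A worked example of the sub-level expansion, using the project name that appears as an example in the comments of this module.

project = 'eclipse.platform.releng.aggregator'

# Reproducing the loop above:
# project_1 -> 'eclipse'
# project_2 -> 'eclipse.platform'
# project_3 -> 'eclipse.platform.releng'
# project_4 -> 'eclipse.platform.releng.aggregator'
levels = {}
path = ''
for i, part in enumerate(project.split('.')):
    path = part if i == 0 else path + '.' + part
    levels['project_' + str(i + 1)] = path
print(levels)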
Please provide a description of the function:def find_item_project(self, eitem):
# get the data source name relying on the cfg section name, if null use the connector name
ds_name = self.cfg_section_name if self.cfg_section_name else self.get_connector_name()
try:
# retrieve the project which includes the repo url in the projects.json,
# the variable `projects_json_repo` is passed from mordred to ELK when
# iterating over the repos in the projects.json, (see: param
# `projects_json_repo` in the functions elk.feed_backend and
# elk.enrich_backend)
if self.projects_json_repo:
project = self.prjs_map[ds_name][self.projects_json_repo]
# if `projects_json_repo` is not set, which should never happen, use the
# method `get_project_repository` (defined in each enricher)
else:
repository = self.get_project_repository(eitem)
project = self.prjs_map[ds_name][repository]
# With the introduction of `projects_json_repo` the code in the
# except should be unreachable, and could be removed
except KeyError:
# logger.warning("Project not found for repository %s (data source: %s)", repository, ds_name)
project = None
if self.filter_raw:
fltr = eitem['origin'] + ' --filter-raw=' + self.filter_raw
if ds_name in self.prjs_map and fltr in self.prjs_map[ds_name]:
project = self.prjs_map[ds_name][fltr]
if project == UNKNOWN_PROJECT:
return None
if project:
return project
# Try to use always the origin in any case
if 'origin' in eitem:
if ds_name in self.prjs_map and eitem['origin'] in self.prjs_map[ds_name]:
project = self.prjs_map[ds_name][eitem['origin']]
elif ds_name in self.prjs_map:
# Try to find origin as part of the keys
for ds_repo in self.prjs_map[ds_name]:
ds_repo = str(ds_repo) # discourse has category_id ints
if eitem['origin'] in ds_repo:
project = self.prjs_map[ds_name][ds_repo]
break
if project == UNKNOWN_PROJECT:
project = None
return project | [
"\n Find the project for a enriched item\n :param eitem: enriched item for which to find the project\n :return: the project entry (a dictionary)\n "
]
|
Please provide a description of the function:def get_item_project(self, eitem):
eitem_project = {}
project = self.find_item_project(eitem)
if project is None:
project = DEFAULT_PROJECT
eitem_project = {"project": project}
# Time to add the project levels: eclipse.platform.releng.aggregator
eitem_project.update(self.add_project_levels(project))
# And now time to add the metadata
eitem_project.update(self.get_item_metadata(eitem))
return eitem_project | [
"\n Get the project name related to the eitem\n :param eitem: enriched item for which to find the project\n :return: a dictionary with the project data\n "
]
|
Please provide a description of the function:def get_item_metadata(self, eitem):
eitem_metadata = {}
# Get the project entry for the item, which includes the metadata
project = self.find_item_project(eitem)
if project and 'meta' in self.json_projects[project]:
meta_fields = self.json_projects[project]['meta']
if isinstance(meta_fields, dict):
eitem_metadata = {CUSTOM_META_PREFIX + "_" + field: value for field, value in meta_fields.items()}
return eitem_metadata | [
"\n In the projects.json file, inside each project, there is a field called \"meta\" which has a\n dictionary with fields to be added to the enriched items for this project.\n\n This fields must be added with the prefix cm_ (custom metadata).\n\n This method fetch the metadata fields for the project in which the eitem is included.\n\n :param eitem: enriched item to search metadata for\n :return: a dictionary with the metadata fields\n "
]
|
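A small sketch of the cm_ prefixing described above, with an invented "meta" entry; the prefix constant resolves to 'cm' according to the docstring.

CUSTOM_META_PREFIX = 'cm'

# Hypothetical "meta" block of a project in projects.json
meta_fields = {'title': 'My Project', 'state': 'incubating'}

eitem_metadata = {CUSTOM_META_PREFIX + '_' + field: value
                  for field, value in meta_fields.items()}
print(eitem_metadata)   # {'cm_title': 'My Project', 'cm_state': 'incubating'}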
Please provide a description of the function:def get_domain(self, identity):
domain = None
if identity['email']:
try:
domain = identity['email'].split("@")[1]
except IndexError:
# logger.warning("Bad email format: %s" % (identity['email']))
pass
return domain | [
" Get the domain from a SH identity "
]
|
Please provide a description of the function:def get_enrollment(self, uuid, item_date):
# item_date must be offset-naive (utc)
if item_date and item_date.tzinfo:
item_date = (item_date - item_date.utcoffset()).replace(tzinfo=None)
enrollments = self.get_enrollments(uuid)
enroll = self.unaffiliated_group
if enrollments:
for enrollment in enrollments:
if not item_date:
enroll = enrollment.organization.name
break
elif item_date >= enrollment.start and item_date <= enrollment.end:
enroll = enrollment.organization.name
break
return enroll | [
" Get the enrollment for the uuid when the item was done "
]
|
Please provide a description of the function:def __get_item_sh_fields_empty(self, rol, undefined=False):
# If empty_field is None, the fields do not appear in index patterns
empty_field = '' if not undefined else '-- UNDEFINED --'
return {
rol + "_id": empty_field,
rol + "_uuid": empty_field,
rol + "_name": empty_field,
rol + "_user_name": empty_field,
rol + "_domain": empty_field,
rol + "_gender": empty_field,
rol + "_gender_acc": None,
rol + "_org_name": empty_field,
rol + "_bot": False
} | [
" Return a SH identity with all fields to empty_field "
]
|
Please provide a description of the function:def get_item_sh_fields(self, identity=None, item_date=None, sh_id=None,
rol='author'):
eitem_sh = self.__get_item_sh_fields_empty(rol)
if identity:
# Use the identity to get the SortingHat identity
sh_ids = self.get_sh_ids(identity, self.get_connector_name())
eitem_sh[rol + "_id"] = sh_ids.get('id', '')
eitem_sh[rol + "_uuid"] = sh_ids.get('uuid', '')
eitem_sh[rol + "_name"] = identity.get('name', '')
eitem_sh[rol + "_user_name"] = identity.get('username', '')
eitem_sh[rol + "_domain"] = self.get_identity_domain(identity)
elif sh_id:
# Use the SortingHat id to get the identity
eitem_sh[rol + "_id"] = sh_id
eitem_sh[rol + "_uuid"] = self.get_uuid_from_id(sh_id)
else:
# No data to get a SH identity. Return an empty one.
return eitem_sh
# If the identity does not exist, return an empty identity
if rol + "_uuid" not in eitem_sh or not eitem_sh[rol + "_uuid"]:
return self.__get_item_sh_fields_empty(rol, undefined=True)
# Get the SH profile to use first this data
profile = self.get_profile_sh(eitem_sh[rol + "_uuid"])
if profile:
# If name not in profile, keep its old value (should be empty or identity's name field value)
eitem_sh[rol + "_name"] = profile.get('name', eitem_sh[rol + "_name"])
email = profile.get('email', None)
if email:
eitem_sh[rol + "_domain"] = self.get_email_domain(email)
eitem_sh[rol + "_gender"] = profile.get('gender', self.unknown_gender)
eitem_sh[rol + "_gender_acc"] = profile.get('gender_acc', 0)
elif not profile and sh_id:
logger.warning("Can't find SH identity profile: %s", sh_id)
# Ensure we always write gender fields
if not eitem_sh.get(rol + "_gender"):
eitem_sh[rol + "_gender"] = self.unknown_gender
eitem_sh[rol + "_gender_acc"] = 0
eitem_sh[rol + "_org_name"] = self.get_enrollment(eitem_sh[rol + "_uuid"], item_date)
eitem_sh[rol + "_bot"] = self.is_bot(eitem_sh[rol + '_uuid'])
return eitem_sh | [
" Get standard SH fields from a SH identity "
]
|
Please provide a description of the function:def get_item_sh(self, item, roles=None, date_field=None):
eitem_sh = {} # Item enriched
author_field = self.get_field_author()
if not roles:
roles = [author_field]
if not date_field:
item_date = str_to_datetime(item[self.get_field_date()])
else:
item_date = str_to_datetime(item[date_field])
users_data = self.get_users_data(item)
for rol in roles:
if rol in users_data:
identity = self.get_sh_identity(item, rol)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol))
if not eitem_sh[rol + '_org_name']:
eitem_sh[rol + '_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_name']:
eitem_sh[rol + '_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_user_name']:
eitem_sh[rol + '_user_name'] = SH_UNKNOWN_VALUE
# Add the author field common in all data sources
rol_author = 'author'
if author_field in users_data and author_field != rol_author:
identity = self.get_sh_identity(item, author_field)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol_author))
if not eitem_sh['author_org_name']:
eitem_sh['author_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_name']:
eitem_sh['author_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_user_name']:
eitem_sh['author_user_name'] = SH_UNKNOWN_VALUE
return eitem_sh | [
"\n Add sorting hat enrichment fields for different roles\n\n If there are no roles, just add the author fields.\n\n "
]
|
Please provide a description of the function:def get_sh_ids(self, identity, backend_name):
# Convert the dict to tuple so it is hashable
identity_tuple = tuple(identity.items())
sh_ids = self.__get_sh_ids_cache(identity_tuple, backend_name)
return sh_ids | [
" Return the Sorting Hat id and uuid for an identity "
]
|
Please provide a description of the function:def enrich_demography(self, ocean_backend, enrich_backend, date_field="grimoire_creation_date",
author_field="author_uuid"):
logger.info("[Demography] Starting study %s", self.elastic.anonymize_url(self.elastic.index_url))
# The first step is to find the current min and max date for all the authors
authors_min_max_data = {}
es_query = Enrich.authors_min_max_dates(date_field, author_field=author_field)
r = self.requests.post(self.elastic.index_url + "/_search",
data=es_query, headers=HEADER_JSON,
verify=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.error("Error getting authors mix and max date. Demography aborted.")
logger.error(ex)
return
for author in r.json()['aggregations']['author']['buckets']:
authors_min_max_data[author['key']] = author
# Then we update the min max dates of all authors
for author_key in authors_min_max_data:
author_min_date = authors_min_max_data[author_key]['min']['value_as_string']
author_max_date = authors_min_max_data[author_key]['max']['value_as_string']
es_update = Enrich.update_author_min_max_date(author_min_date, author_max_date,
author_key, author_field=author_field)
r = self.requests.post(self.elastic.index_url + "/_update_by_query?refresh",
data=es_update, headers=HEADER_JSON,
verify=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.error("Error updating mix and max date for author %s. Demography aborted.", author_key)
logger.error(ex)
return
if not self.elastic.alias_in_use(DEMOGRAPHICS_ALIAS):
logger.info("Creating alias: %s", DEMOGRAPHICS_ALIAS)
self.elastic.add_alias(DEMOGRAPHICS_ALIAS)
logger.info("[Demography] End %s", self.elastic.anonymize_url(self.elastic.index_url)) | [
"\n The goal of the algorithm is to add to all enriched items the first and last date\n (i.e., demography_min_date, demography_max_date) of the author activities.\n\n In order to implement the algorithm first, the min and max dates (based on the date_field attribute)\n are retrieved for all authors. Then, demography_min_date and demography_max_date attributes are\n updated in all items.\n\n :param ocean_backend: backend from which to read the raw items\n :param enrich_backend: backend from which to read the enriched items\n :param date_field: field used to find the mix and max dates for the author's activity\n :param author_field: field of the author\n\n :return: None\n "
]
|
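The helper Enrich.authors_min_max_dates is not shown here; below is a hedged reconstruction of the kind of aggregation body it presumably produces (a terms aggregation on the author field with min/max sub-aggregations), matching how the response buckets are read above. The real helper may differ in details such as the bucket size.

import json

date_field = 'grimoire_creation_date'
author_field = 'author_uuid'

# Hypothetical reconstruction of the aggregation request body
es_query = json.dumps({
    "size": 0,
    "aggs": {
        "author": {
            "terms": {"field": author_field, "size": 30000},
            "aggs": {
                "min": {"min": {"field": date_field}},
                "max": {"max": {"field": date_field}}
            }
        }
    }
})
print(es_query)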
Please provide a description of the function:def update_author_min_max_date(min_date, max_date, target_author, author_field="author_uuid"):
es_query = '''
{
"script": {
"source":
"ctx._source.demography_min_date = params.min_date;ctx._source.demography_max_date = params.max_date;",
"lang": "painless",
"params": {
"min_date": "%s",
"max_date": "%s"
}
},
"query": {
"term": {
"%s": "%s"
}
}
}
''' % (min_date, max_date, author_field, target_author)
return es_query | [
"\n Get the query to update demography_min_date and demography_max_date of a given author\n\n :param min_date: new demography_min_date\n :param max_date: new demography_max_date\n :param target_author: target author to be updated\n :param author_field: author field\n\n :return: the query to be executed to update demography data of an author\n "
]
|
Please provide a description of the function:def get_params_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--elastic_url", default="http://127.0.0.1:9200",
help="Host with elastic search (default: http://127.0.0.1:9200)")
parser.add_argument('-g', '--debug', dest='debug', action='store_true')
parser.add_argument('-t', '--token', dest='token', help="GitHub token")
parser.add_argument('-o', '--org', dest='org', nargs='*', help='GitHub Organization/s to be analyzed')
parser.add_argument('-l', '--list', dest='list', action='store_true', help='Just list the repositories')
parser.add_argument('-n', '--nrepos', dest='nrepos', type=int, default=NREPOS,
help='Number of GitHub repositories from the Organization to be analyzed (default:0, no limit)')
parser.add_argument('--db-projects-map', help="Database to include the projects Mapping DB")
return parser | [
"Parse command line arguments"
]
|
Please provide a description of the function:def get_repository_filter_raw(self, term=False):
perceval_backend_name = self.get_connector_name()
filter_ = get_repository_filter(self.perceval_backend, perceval_backend_name, term)
return filter_ | [
" Returns the filter to be used in queries in a repository items "
]
|
Please provide a description of the function:def set_filter_raw(self, filter_raw):
self.filter_raw = filter_raw
self.filter_raw_dict = []
splitted = re.compile(FILTER_SEPARATOR).split(filter_raw)
for fltr_raw in splitted:
fltr = self.__process_filter(fltr_raw)
self.filter_raw_dict.append(fltr) | [
"Filter to be used when getting items from Ocean index"
]
|
Please provide a description of the function:def set_filter_raw_should(self, filter_raw_should):
self.filter_raw_should = filter_raw_should
self.filter_raw_should_dict = []
splitted = re.compile(FILTER_SEPARATOR).split(filter_raw_should)
for fltr_raw in splitted:
fltr = self.__process_filter(fltr_raw)
self.filter_raw_should_dict.append(fltr) | [
"Bool filter should to be used when getting items from Ocean index"
]
|
Please provide a description of the function:def fetch(self, _filter=None, ignore_incremental=False):
logger.debug("Creating a elastic items generator.")
scroll_id = None
page = self.get_elastic_items(scroll_id, _filter=_filter, ignore_incremental=ignore_incremental)
if not page:
return []
scroll_id = page["_scroll_id"]
scroll_size = page['hits']['total']
if scroll_size == 0:
logger.warning("No results found from %s", self.elastic.anonymize_url(self.elastic.index_url))
return
while scroll_size > 0:
logger.debug("Fetching from %s: %d received", self.elastic.anonymize_url(self.elastic.index_url),
len(page['hits']['hits']))
for item in page['hits']['hits']:
eitem = item['_source']
yield eitem
page = self.get_elastic_items(scroll_id, _filter=_filter, ignore_incremental=ignore_incremental)
if not page:
break
scroll_size = len(page['hits']['hits'])
logger.debug("Fetching from %s: done receiving", self.elastic.anonymize_url(self.elastic.index_url)) | [
" Fetch the items from raw or enriched index. An optional _filter\n could be provided to filter the data collected "
]
|
Please provide a description of the function:def get_elastic_items(self, elastic_scroll_id=None, _filter=None, ignore_incremental=False):
headers = {"Content-Type": "application/json"}
if not self.elastic:
return None
url = self.elastic.index_url
# 1 minute to process the results of size items
# In gerrit enrich with 500 items per page we need >1 min
# In Mozilla ES in Amazon we need 10m
max_process_items_pack_time = "10m" # 10 minutes
url += "/_search?scroll=%s&size=%i" % (max_process_items_pack_time,
self.scroll_size)
if elastic_scroll_id:
url = self.elastic.url
url += "/_search/scroll"
scroll_data = {
"scroll": max_process_items_pack_time,
"scroll_id": elastic_scroll_id
}
query_data = json.dumps(scroll_data)
else:
# If using a perceval backends always filter by repository
# to support multi repository indexes
filters_dict = self.get_repository_filter_raw(term=True)
if filters_dict:
filters = json.dumps(filters_dict)
else:
filters = ''
if self.filter_raw:
for fltr in self.filter_raw_dict:
filters += '''
, {"term":
{ "%s":"%s" }
}
''' % (fltr['name'], fltr['value'])
if _filter:
filter_str = '''
, {"terms":
{ "%s": %s }
}
''' % (_filter['name'], _filter['value'])
# List to string conversion uses ' that are not allowed in JSON
filter_str = filter_str.replace("'", "\"")
filters += filter_str
# The code below performs the incremental enrichment based on the last value of `metadata__timestamp`
# in the enriched index, which is calculated in the TaskEnrich before enriching the single repos that
# belong to a given data source. The old implementation of the incremental enrichment, which consisted in
# collecting the last value of `metadata__timestamp` in the enriched index for each repo, didn't work
# for global data source (which are collected globally and only partially enriched).
if self.from_date and not ignore_incremental:
date_field = self.get_incremental_date()
from_date = self.from_date.isoformat()
filters += '''
, {"range":
{"%s": {"gte": "%s"}}
}
''' % (date_field, from_date)
elif self.offset and not ignore_incremental:
filters += '''
, {"range":
{"offset": {"gte": %i}}
}
''' % self.offset
# Order the raw items from the old ones to the new so if the
# enrich process fails, it could be resumed incrementally
order_query = ''
order_field = None
if self.perceval_backend:
order_field = self.get_incremental_date()
if order_field is not None:
order_query = ', "sort": { "%s": { "order": "asc" }} ' % order_field
filters_should = ''
if self.filter_raw_should:
for fltr in self.filter_raw_should_dict:
filters_should += '''
{"prefix":
{ "%s":"%s" }
},''' % (fltr['name'], fltr['value'])
filters_should = filters_should.rstrip(',')
query_should = '{"bool": {"should": [%s]}}' % filters_should
filters += ", " + query_should
# Fix the filters string if it starts with "," (empty first filter)
if filters.lstrip().startswith(','):
filters = filters.lstrip()[1:]
query = % (filters, order_query)
logger.debug("Raw query to %s\n%s", self.elastic.anonymize_url(url),
json.dumps(json.loads(query), indent=4))
query_data = query
rjson = None
try:
res = self.requests.post(url, data=query_data, headers=headers)
res.raise_for_status()
rjson = res.json()
except Exception:
# The index could not exists yet or it could be empty
logger.warning("No results found from %s", self.elastic.anonymize_url(url))
return rjson | [
" Get the items from the index related to the backend applying and\n optional _filter if provided",
" Just continue with the scrolling ",
"\n {\n \"query\": {\n \"bool\": {\n \"filter\": [%s]\n }\n } %s\n }\n "
]
|
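The request body assembled above is plain JSON text. Below is a self-contained sketch of what a typical first (non-scroll) body could look like, using the template listed in the summary above and invented filter values.

import json

# Invented repository filter and incremental date
filters = json.dumps({"term": {"origin": "https://github.com/chaoss/grimoirelab-elk"}})
filters += ', {"range": {"metadata__timestamp": {"gte": "2020-01-01T00:00:00"}}}'
order_query = ', "sort": { "metadata__timestamp": { "order": "asc" }} '

query = """
{
    "query": {
        "bool": {
            "filter": [%s]
        }
    } %s
}
""" % (filters, order_query)

# Valid JSON, ready to POST to <index>/_search?scroll=10m&size=<scroll_size>
print(json.dumps(json.loads(query), indent=4))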
Please provide a description of the function:def find_uuid(es_url, index):
uid_field = None
# Get the first item to detect the data source and raw/enriched type
res = requests.get('%s/%s/_search?size=1' % (es_url, index))
first_item = res.json()['hits']['hits'][0]['_source']
fields = first_item.keys()
if 'uuid' in fields:
uid_field = 'uuid'
else:
# Non perceval backend
uuid_value = res.json()['hits']['hits'][0]['_id']
logging.debug("Finding unique id for %s with value %s", index, uuid_value)
for field in fields:
if first_item[field] == uuid_value:
logging.debug("Found unique id for %s: %s", index, field)
uid_field = field
break
if not uid_field:
logging.error("Can not find uid field for %s. Can not copy the index.", index)
logging.error("Try to copy it directly with elasticdump or similar.")
sys.exit(1)
return uid_field | [
" Find the unique identifier field for a given index "
]
|
Please provide a description of the function:def find_mapping(es_url, index):
mapping = None
backend = find_perceval_backend(es_url, index)
if backend:
mapping = backend.get_elastic_mappings()
if mapping:
logging.debug("MAPPING FOUND:\n%s", json.dumps(json.loads(mapping['items']), indent=True))
return mapping | [
" Find the mapping given an index "
]
|
Please provide a description of the function:def get_elastic_items(elastic, elastic_scroll_id=None, limit=None):
scroll_size = limit
if not limit:
scroll_size = DEFAULT_LIMIT
if not elastic:
return None
url = elastic.index_url
max_process_items_pack_time = "5m" # 10 minutes
url += "/_search?scroll=%s&size=%i" % (max_process_items_pack_time,
scroll_size)
if elastic_scroll_id:
# Just continue with the scrolling
url = elastic.url
url += "/_search/scroll"
scroll_data = {
"scroll": max_process_items_pack_time,
"scroll_id": elastic_scroll_id
}
res = requests.post(url, data=json.dumps(scroll_data))
else:
query =
logging.debug("%s\n%s", url, json.dumps(json.loads(query), indent=4))
res = requests.post(url, data=query)
rjson = None
try:
rjson = res.json()
except Exception:
logging.error("No JSON found in %s", res.text)
logging.error("No results found from %s", url)
return rjson | [
" Get the items from the index ",
"\n {\n \"query\": {\n \"bool\": {\n \"must\": []\n }\n }\n }\n "
]
|
Please provide a description of the function:def get_elastic_items_search(elastic, search_after=None, size=None):
if not size:
size = DEFAULT_LIMIT
url = elastic.index_url + "/_search"
search_after_query = ''
if search_after:
logging.debug("Search after: %s", search_after)
# timestamp uuid
search_after_query = ', "search_after": [%i, "%s"] ' % (search_after[0], search_after[1])
query = % (size, search_after_query)
# logging.debug("%s\n%s", url, json.dumps(json.loads(query), indent=4))
res = requests.post(url, data=query)
rjson = None
try:
rjson = res.json()
except Exception:
logging.error("No JSON found in %s", res.text)
logging.error("No results found from %s", url)
return rjson | [
" Get the items from the index using search after scrolling ",
"\n {\n \"size\": %i,\n \"query\": {\n \"bool\": {\n \"must\": []\n }\n },\n \"sort\": [\n {\"metadata__timestamp\": \"asc\"},\n {\"uuid\": \"asc\"}\n ] %s\n\n }\n "
]
|
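A sketch of the search_after request body produced above, mirroring the template in the summary; the size, timestamp and uuid values are invented.

import json

size = 100
search_after = [1546300800000, '00a4af0c4e14e9d869c2a0e4a1c5ff964f5fbb42']   # [metadata__timestamp, uuid], invented

search_after_query = ', "search_after": [%i, "%s"] ' % (search_after[0], search_after[1])
query = """
{
    "size": %i,
    "query": {"bool": {"must": []}},
    "sort": [
        {"metadata__timestamp": "asc"},
        {"uuid": "asc"}
    ] %s
}
""" % (size, search_after_query)

print(json.dumps(json.loads(query), indent=4))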
Please provide a description of the function:def fetch(elastic, backend, limit=None, search_after_value=None, scroll=True):
logging.debug("Creating a elastic items generator.")
elastic_scroll_id = None
search_after = search_after_value
while True:
if scroll:
rjson = get_elastic_items(elastic, elastic_scroll_id, limit)
else:
rjson = get_elastic_items_search(elastic, search_after, limit)
if rjson and "_scroll_id" in rjson:
elastic_scroll_id = rjson["_scroll_id"]
if rjson and "hits" in rjson:
if not rjson["hits"]["hits"]:
break
for hit in rjson["hits"]["hits"]:
item = hit['_source']
if 'sort' in hit:
search_after = hit['sort']
try:
backend._fix_item(item)
except Exception:
pass
yield item
else:
logging.error("No results found from %s", elastic.index_url)
break
return | [
" Fetch the items from raw or enriched index "
]
|
Please provide a description of the function:def export_items(elastic_url, in_index, out_index, elastic_url_out=None,
search_after=False, search_after_value=None, limit=None,
copy=False):
if not limit:
limit = DEFAULT_LIMIT
if search_after_value:
search_after_value_timestamp = int(search_after_value[0])
search_after_value_uuid = search_after_value[1]
search_after_value = [search_after_value_timestamp, search_after_value_uuid]
logging.info("Exporting items from %s/%s to %s", elastic_url, in_index, out_index)
count_res = requests.get('%s/%s/_count' % (elastic_url, in_index))
try:
count_res.raise_for_status()
except requests.exceptions.HTTPError:
if count_res.status_code == 404:
logging.error("The index does not exists: %s", in_index)
else:
logging.error(count_res.text)
sys.exit(1)
logging.info("Total items to copy: %i", count_res.json()['count'])
# Time to upload the items with the correct mapping
elastic_in = ElasticSearch(elastic_url, in_index)
if not copy:
# Create the correct mapping for the data sources detected from in_index
ds_mapping = find_mapping(elastic_url, in_index)
else:
logging.debug('Using the input index mapping')
ds_mapping = extract_mapping(elastic_url, in_index)
if not elastic_url_out:
elastic_out = ElasticSearch(elastic_url, out_index, mappings=ds_mapping)
else:
elastic_out = ElasticSearch(elastic_url_out, out_index, mappings=ds_mapping)
# Time to just copy from in_index to our_index
uid_field = find_uuid(elastic_url, in_index)
backend = find_perceval_backend(elastic_url, in_index)
if search_after:
total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit,
search_after_value, scroll=False), uid_field)
else:
total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit), uid_field)
logging.info("Total items copied: %i", total) | [
" Export items from in_index to out_index using the correct mapping "
]
|
Please provide a description of the function:def get_identities(self, item):
item = item['data']
# Changeset owner
user = item['owner']
identity = self.get_sh_identity(user)
yield identity
# Patchset uploader and author
if 'patchSets' in item:
for patchset in item['patchSets']:
user = patchset['uploader']
identity = self.get_sh_identity(user)
yield identity
if 'author' in patchset:
user = patchset['author']
identity = self.get_sh_identity(user)
yield identity
if 'approvals' in patchset:
# Approvals by
for approval in patchset['approvals']:
user = approval['by']
identity = self.get_sh_identity(user)
yield identity
# Comments reviewers
if 'comments' in item:
for comment in item['comments']:
user = comment['reviewer']
identity = self.get_sh_identity(user)
yield identity | [
"Return the identities from an item"
]
|
Please provide a description of the function:def _fix_review_dates(self, item):
for date_field in ['timestamp', 'createdOn', 'lastUpdated']:
if date_field in item.keys():
date_ts = item[date_field]
item[date_field] = unixtime_to_datetime(date_ts).isoformat()
if 'patchSets' in item.keys():
for patch in item['patchSets']:
pdate_ts = patch['createdOn']
patch['createdOn'] = unixtime_to_datetime(pdate_ts).isoformat()
if 'approvals' in patch:
for approval in patch['approvals']:
adate_ts = approval['grantedOn']
approval['grantedOn'] = unixtime_to_datetime(adate_ts).isoformat()
if 'comments' in item.keys():
for comment in item['comments']:
cdate_ts = comment['timestamp']
comment['timestamp'] = unixtime_to_datetime(cdate_ts).isoformat() | [
"Convert dates so ES detect them"
]
|
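The conversion relies on unixtime_to_datetime from the GrimoireLab toolkit; here is an equivalent standard-library sketch for a single timestamp (the actual helper may differ slightly in its exact output format).

from datetime import datetime, timezone

date_ts = 1546300800                                    # Gerrit timestamps are Unix epoch seconds
dt = datetime.fromtimestamp(date_ts, tz=timezone.utc)
print(dt.isoformat())                                   # '2019-01-01T00:00:00+00:00'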
Please provide a description of the function:def get_sh_identity(self, item, identity_field=None):
def fill_list_identity(identity, user_list_data):
identity['username'] = user_list_data[0]['__text__']
if '@' in identity['username']:
identity['email'] = identity['username']
if 'name' in user_list_data[0]:
identity['name'] = user_list_data[0]['name']
return identity
identity = {}
for field in ['name', 'email', 'username']:
# Basic fields in Sorting Hat
identity[field] = None
user = item # by default a specific user dict is used
if 'data' in item and type(item) == dict:
user = item['data'][identity_field]
identity = fill_list_identity(identity, user)
return identity | [
" Return a Sorting Hat identity using bugzilla user data ",
" Fill identity with user data in first item in list "
]
|
Please provide a description of the function:def get_identities(self, item):
for rol in self.roles:
if rol in item['data']:
user = self.get_sh_identity(item["data"][rol])
yield user
if 'activity' in item["data"]:
for event in item["data"]['activity']:
event_user = [{"__text__": event['Who']}]
user = self.get_sh_identity(event_user)
yield user
if 'long_desc' in item["data"]:
for comment in item["data"]['long_desc']:
user = self.get_sh_identity(comment['who'])
yield user | [
"Return the identities from an item"
]
|
Please provide a description of the function:def analyze(self):
from_date = self._out.latest_date()
if from_date:
logger.info("Reading items since " + from_date)
else:
logger.info("Reading items since the beginning of times")
cont = 0
total_processed = 0
total_written = 0
for item_block in self._in.read_block(size=self._block_size, from_date=from_date):
cont = cont + len(item_block)
process_results = self.process(item_block)
total_processed += process_results.processed
if len(process_results.out_items) > 0:
self._out.write(process_results.out_items)
total_written += len(process_results.out_items)
else:
logger.info("No new items to be written this time.")
logger.info(
"Items read/to be written/total read/total processed/total written: "
"{0}/{1}/{2}/{3}/{4}".format(str(len(item_block)),
str(len(process_results.out_items)),
str(cont),
str(total_processed),
str(total_written)))
logger.info("SUMMARY: Items total read/total processed/total written: "
"{0}/{1}/{2}".format(str(cont),
str(total_processed),
str(total_written)))
logger.info("This is the end.")
return total_written | [
"Populate an enriched index by processing input items in blocks.\n\n :return: total number of out_items written.\n "
]
|
Please provide a description of the function:def read_item(self, from_date=None):
search_query = self._build_search_query(from_date)
for hit in helpers.scan(self._es_conn,
search_query,
scroll='300m',
index=self._es_index,
preserve_order=True):
yield hit | [
"Read items and return them one by one.\n\n :param from_date: start date for incremental reading.\n :return: next single item when any available.\n :raises ValueError: `metadata__timestamp` field not found in index\n :raises NotFoundError: index not found in ElasticSearch\n "
]
|
Please provide a description of the function:def read_block(self, size, from_date=None):
search_query = self._build_search_query(from_date)
hits_block = []
for hit in helpers.scan(self._es_conn,
search_query,
scroll='300m',
index=self._es_index,
preserve_order=True):
hits_block.append(hit)
if len(hits_block) % size == 0:
yield hits_block
# Reset hits block
hits_block = []
if len(hits_block) > 0:
yield hits_block | [
"Read items and return them in blocks.\n\n :param from_date: start date for incremental reading.\n :param size: block size.\n :return: next block of items when any available.\n :raises ValueError: `metadata__timestamp` field not found in index\n :raises NotFoundError: index not found in ElasticSearch\n "
]
|
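The blocking pattern above (accumulate hits, yield every size items, then flush the remainder) can be illustrated with a plain generator, independent of ElasticSearch.

def blocks(items, size):
    # Same accumulation pattern as read_block, applied to any iterable
    block = []
    for item in items:
        block.append(item)
        if len(block) % size == 0:
            yield block
            block = []
    if block:
        yield block

print(list(blocks(range(7), 3)))    # [[0, 1, 2], [3, 4, 5], [6]]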
Please provide a description of the function:def write(self, items):
if self._read_only:
raise IOError("Cannot write, Connector created as Read Only")
# Uploading info to the new ES
docs = []
for item in items:
doc = {
"_index": self._es_index,
"_type": "item",
"_id": item["_id"],
"_source": item["_source"]
}
docs.append(doc)
# TODO exception and error handling
helpers.bulk(self._es_conn, docs)
logger.info(self.__log_prefix + " Written: " + str(len(docs))) | [
"Upload items to ElasticSearch.\n\n :param items: items to be uploaded.\n "
]
|
Please provide a description of the function:def create_index(self, mappings_file, delete=True):
if self._read_only:
raise IOError("Cannot write, Connector created as Read Only")
if delete:
logger.info(self.__log_prefix + " Deleting index " + self._es_index)
self._es_conn.indices.delete(self._es_index, ignore=[400, 404])
# Read Mapping
with open(mappings_file) as f:
mapping = f.read()
self._es_conn.indices.create(self._es_index, body=mapping) | [
"Create a new index.\n\n :param mappings_file: index mappings to be used.\n :param delete: True to delete current index if exists.\n "
]
|
Please provide a description of the function:def create_alias(self, alias_name):
return self._es_conn.indices.put_alias(index=self._es_index, name=alias_name) | [
"Creates an alias pointing to the index configured in this connection"
]
|
Please provide a description of the function:def exists_alias(self, alias_name, index_name=None):
return self._es_conn.indices.exists_alias(index=index_name, name=alias_name) | [
"Check whether or not the given alias exists\n\n :return: True if alias already exist"
]
|
Please provide a description of the function:def _build_search_query(self, from_date):
sort = [{self._sort_on_field: {"order": "asc"}}]
filters = []
if self._repo:
filters.append({"term": {"origin": self._repo}})
if from_date:
filters.append({"range": {self._sort_on_field: {"gte": from_date}}})
if filters:
query = {"bool": {"filter": filters}}
else:
query = {"match_all": {}}
search_query = {
"query": query,
"sort": sort
}
return search_query | [
"Build an ElasticSearch search query to retrieve items for read methods.\n\n :param from_date: date to start retrieving items from.\n :return: JSON query in dict format\n "
]
|
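For concreteness, this is the query the method returns when both a repository and a from_date are set; the values are invented and the sort field is assumed to be metadata__timestamp (the field mentioned in the read methods above).

# Hypothetical inputs: _repo, _sort_on_field='metadata__timestamp', from_date='2020-01-01'
search_query = {
    "query": {
        "bool": {
            "filter": [
                {"term": {"origin": "https://github.com/chaoss/grimoirelab-elk"}},
                {"range": {"metadata__timestamp": {"gte": "2020-01-01"}}}
            ]
        }
    },
    "sort": [{"metadata__timestamp": {"order": "asc"}}]
}
print(search_query)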