Please provide a description of the function:def process(self, items_block): out_items = [] for hit in items_block: hit['_source']['metadata__enriched_on'] = datetime.datetime_utcnow().isoformat() out_items.append(hit) return self.ProcessResults(processed=0, out_items=out_items)
[ "Return items as they come, updating their metadata__enriched_on field.\n\n :param items_block:\n :return: hits blocks as they come, updating their metadata__enriched_on field. Namedtuple containing:\n - processed: number of processed hits\n - out_items: a list containing items ready to be written.\n " ]
Please provide a description of the function:def get_arthur_params_from_url(cls, url): params = {} args = cls.get_perceval_params_from_url(url) parser = GitLabCommand.setup_cmd_parser() parsed_args = parser.parse(*args) params['owner'] = parsed_args.owner params['repository'] = parsed_args.repository # include only blacklist ids information params['blacklist_ids'] = parsed_args.blacklist_ids return params
[ " Get the arthur params given a URL for the data source " ]
Please provide a description of the function:def get_perceval_params_from_url(cls, url): params = [] tokens = url.split(' ') repo = tokens[0] owner = repo.split('/')[-2] repository = repo.split('/')[-1] params.append(owner) params.append(repository) if len(tokens) > 1: params.extend(tokens[1:]) return params
[ " Get the perceval params given a URL for the data source " ]
Please provide a description of the function:def get_identities(self, item): ''' Return the identities from an item ''' item = item['data'] # Creators if 'event_hosts' in item: user = self.get_sh_identity(item['event_hosts'][0]) yield user # rsvps rsvps = item.get('rsvps', []) for rsvp in rsvps: user = self.get_sh_identity(rsvp['member']) yield user # Comments for comment in item['comments']: user = self.get_sh_identity(comment['member']) yield user
[]
Please provide a description of the function:def get_item_sh(self, item): sh_fields = {} # Not shared common get_item_sh because it is pretty specific if 'member' in item: # comment and rsvp identity = self.get_sh_identity(item['member']) elif 'event_hosts' in item: # meetup event identity = self.get_sh_identity(item['event_hosts'][0]) else: return sh_fields created = unixtime_to_datetime(item['created'] / 1000) sh_fields = self.get_item_sh_fields(identity, created) return sh_fields
[ " Add sorting hat enrichment fields " ]
Please provide a description of the function:def add_params(cls, cmdline_parser): parser = cmdline_parser parser.add_argument("-e", "--elastic_url", default="http://127.0.0.1:9200", help="Host with elastic search (default: http://127.0.0.1:9200)") parser.add_argument("--elastic_url-enrich", help="Host with elastic search and enriched indexes")
[ " Shared params in all backends " ]
Please provide a description of the function:def get_p2o_params_from_url(cls, url): # if the url doesn't contain a filter separator, return it if PRJ_JSON_FILTER_SEPARATOR not in url: return {"url": url} # otherwise, add the url to the params params = {'url': url.split(' ', 1)[0]} # tokenize the filter and add them to the param dict tokens = url.split(PRJ_JSON_FILTER_SEPARATOR)[1:] if len(tokens) > 1: cause = "Too many filters defined for %s, only the first one is considered" % url logger.warning(cause) token = tokens[0] filter_tokens = token.split(PRJ_JSON_FILTER_OP_ASSIGNMENT) if len(filter_tokens) != 2: cause = "Too many tokens after splitting for %s in %s" % (token, url) logger.error(cause) raise ELKError(cause=cause) fltr_name = filter_tokens[0].strip() fltr_value = filter_tokens[1].strip() params['filter-' + fltr_name] = fltr_value return params
[ " Get the p2o params given a URL for the data source " ]
Please provide a description of the function:def add_update_date(self, item): updated = unixtime_to_datetime(item['updated_on']) timestamp = unixtime_to_datetime(item['timestamp']) item['metadata__updated_on'] = updated.isoformat() # Also add timestamp used in incremental enrichment item['metadata__timestamp'] = timestamp.isoformat()
[ " All item['updated_on'] from perceval is epoch " ]
Please provide a description of the function:def feed(self, from_date=None, from_offset=None, category=None, latest_items=None, arthur_items=None, filter_classified=None): if self.fetch_archive: items = self.perceval_backend.fetch_from_archive() self.feed_items(items) return elif arthur_items: items = arthur_items self.feed_items(items) return if from_date and from_offset: raise RuntimeError("Can't not feed using from_date and from_offset.") # We need to filter by repository to support several repositories # in the same raw index filters_ = [get_repository_filter(self.perceval_backend, self.get_connector_name())] # Check if backend supports from_date signature = inspect.signature(self.perceval_backend.fetch) last_update = None if 'from_date' in signature.parameters: if from_date: last_update = from_date else: self.last_update = self.get_last_update_from_es(filters_=filters_) last_update = self.last_update logger.info("Incremental from: %s", last_update) offset = None if 'offset' in signature.parameters: if from_offset: offset = from_offset else: offset = self.elastic.get_last_offset("offset", filters_=filters_) if offset is not None: logger.info("Incremental from: %i offset", offset) else: logger.info("Not incremental") params = {} # category and filter_classified params are shared # by all Perceval backends if category is not None: params['category'] = category if filter_classified is not None: params['filter_classified'] = filter_classified # latest items, from_date and offset cannot be used together, # thus, the params dictionary is filled with the param available # and Perceval is executed if latest_items: params['latest_items'] = latest_items items = self.perceval_backend.fetch(**params) elif last_update: last_update = last_update.replace(tzinfo=None) params['from_date'] = last_update items = self.perceval_backend.fetch(**params) elif offset is not None: params['offset'] = offset items = self.perceval_backend.fetch(**params) else: items = self.perceval_backend.fetch(**params) self.feed_items(items) self.update_items()
[ " Feed data in Elastic from Perceval or Arthur " ]
Please provide a description of the function:def _items_to_es(self, json_items): if len(json_items) == 0: return logger.info("Adding items to Ocean for %s (%i items)" % (self, len(json_items))) field_id = self.get_field_unique_id() inserted = self.elastic.bulk_upload(json_items, field_id) if len(json_items) != inserted: missing = len(json_items) - inserted info = json_items[0] name = info['backend_name'] version = info['backend_version'] origin = info['origin'] logger.warning("%s/%s missing JSON items for backend %s [ver. %s], origin %s", str(missing), str(len(json_items)), name, version, origin) return inserted
[ " Append items JSON to ES (data source state) " ]
Please provide a description of the function:def get_identities(self, item): def add_sh_github_identity(user, user_field, rol): github_repo = None if GITHUB in item['origin']: github_repo = item['origin'].replace(GITHUB, '') github_repo = re.sub('.git$', '', github_repo) if not github_repo: return # Try to get the identity from SH user_data = item['data'][user_field] sh_identity = SortingHat.get_github_commit_username(self.sh_db, user, SH_GIT_COMMIT) if not sh_identity: # Get the usename from GitHub gh_username = self.get_github_login(user_data, rol, commit_hash, github_repo) # Create a new SH identity with name, email from git and username from github logger.debug("Adding new identity %s to SH %s: %s", gh_username, SH_GIT_COMMIT, user) user = self.get_sh_identity(user_data) user['username'] = gh_username SortingHat.add_identity(self.sh_db, user, SH_GIT_COMMIT) else: if user_data not in self.github_logins: self.github_logins[user_data] = sh_identity['username'] logger.debug("GitHub-commit exists. username:%s user:%s", sh_identity['username'], user_data) commit_hash = item['data']['commit'] if item['data']['Author']: # Check multi authors commits m = self.AUTHOR_P2P_REGEX.match(item['data']["Author"]) n = self.AUTHOR_P2P_NEW_REGEX.match(item['data']["Author"]) if (m or n) and self.pair_programming: authors = self.__get_authors(item['data']["Author"]) for author in authors: user = self.get_sh_identity(author) yield user else: user = self.get_sh_identity(item['data']["Author"]) yield user if self.github_token: add_sh_github_identity(user, 'Author', 'author') if item['data']['Commit']: m = self.AUTHOR_P2P_REGEX.match(item['data']["Commit"]) n = self.AUTHOR_P2P_NEW_REGEX.match(item['data']["Author"]) if (m or n) and self.pair_programming: committers = self.__get_authors(item['data']['Commit']) for committer in committers: user = self.get_sh_identity(committer) yield user else: user = self.get_sh_identity(item['data']['Commit']) yield user if self.github_token: add_sh_github_identity(user, 'Commit', 'committer') if 'Signed-off-by' in item['data'] and self.pair_programming: signers = item['data']["Signed-off-by"] for signer in signers: user = self.get_sh_identity(signer) yield user
[ " Return the identities from an item.\n If the repo is in GitHub, get the usernames from GitHub. ", " Add a new github identity to SH if it does not exists " ]
Please provide a description of the function:def get_github_login(self, user, rol, commit_hash, repo): login = None try: login = self.github_logins[user] except KeyError: # Get the login from github API GITHUB_API_URL = "https://api.github.com" commit_url = GITHUB_API_URL + "/repos/%s/commits/%s" % (repo, commit_hash) headers = {'Authorization': 'token ' + self.github_token} r = self.requests.get(commit_url, headers=headers) try: r.raise_for_status() except requests.exceptions.ConnectionError as ex: # Connection error logger.error("Can't get github login for %s in %s because a connection error ", repo, commit_hash) return login self.rate_limit = int(r.headers['X-RateLimit-Remaining']) self.rate_limit_reset_ts = int(r.headers['X-RateLimit-Reset']) logger.debug("Rate limit pending: %s", self.rate_limit) if self.rate_limit <= self.min_rate_to_sleep: seconds_to_reset = self.rate_limit_reset_ts - int(time.time()) + 1 if seconds_to_reset < 0: seconds_to_reset = 0 cause = "GitHub rate limit exhausted." logger.info("%s Waiting %i secs for rate limit reset.", cause, seconds_to_reset) time.sleep(seconds_to_reset) # Retry once we have rate limit r = self.requests.get(commit_url, headers=headers) try: r.raise_for_status() except requests.exceptions.HTTPError as ex: # commit not found probably or rate limit exhausted logger.error("Can't find commit %s %s", commit_url, ex) return login commit_json = r.json() author_login = None if 'author' in commit_json and commit_json['author']: author_login = commit_json['author']['login'] else: self.github_logins_author_not_found += 1 user_login = None if 'committer' in commit_json and commit_json['committer']: user_login = commit_json['committer']['login'] else: self.github_logins_committer_not_found += 1 if rol == "author": login = author_login elif rol == "committer": login = user_login else: logger.error("Wrong rol: %s" % (rol)) raise RuntimeError self.github_logins[user] = login logger.debug("%s is %s in github (not found %i authors %i committers )", user, login, self.github_logins_author_not_found, self.github_logins_committer_not_found) return login
[ " rol: author or committer " ]
Please provide a description of the function:def __fix_field_date(self, item, attribute): field_date = str_to_datetime(item[attribute]) try: _ = int(field_date.strftime("%z")[0:3]) except ValueError: logger.warning("%s in commit %s has a wrong format", attribute, item['commit']) item[attribute] = field_date.replace(tzinfo=None).isoformat()
[ "Fix possible errors in the field date" ]
Please provide a description of the function:def enrich_items(self, ocean_backend, events=False): headers = {"Content-Type": "application/json"} max_items = self.elastic.max_items_bulk current = 0 total = 0 bulk_json = "" total_signed_off = 0 total_multi_author = 0 url = self.elastic.index_url + '/items/_bulk' logger.debug("Adding items to %s (in %i packs)", self.elastic.anonymize_url(url), max_items) items = ocean_backend.fetch() for item in items: if self.pair_programming: # First we need to add the authors field to all commits # Check multi author m = self.AUTHOR_P2P_REGEX.match(item['data']['Author']) n = self.AUTHOR_P2P_NEW_REGEX.match(item['data']['Author']) if m or n: logger.debug("Multiauthor detected. Creating one commit " "per author: %s", item['data']['Author']) item['data']['authors'] = self.__get_authors(item['data']['Author']) item['data']['Author'] = item['data']['authors'][0] m = self.AUTHOR_P2P_REGEX.match(item['data']['Commit']) n = self.AUTHOR_P2P_NEW_REGEX.match(item['data']['Author']) if m or n: logger.debug("Multicommitter detected: using just the first committer") item['data']['committers'] = self.__get_authors(item['data']['Commit']) item['data']['Commit'] = item['data']['committers'][0] # Add the authors list using the original Author and the Signed-off list if 'Signed-off-by' in item['data']: authors_all = item['data']['Signed-off-by'] + [item['data']['Author']] item['data']['authors_signed_off'] = list(set(authors_all)) if current >= max_items: try: total += self.elastic.safe_put_bulk(url, bulk_json) json_size = sys.getsizeof(bulk_json) / (1024 * 1024) logger.debug("Added %i items to %s (%0.2f MB)", total, self.elastic.anonymize_url(url), json_size) except UnicodeEncodeError: # Why is requests encoding the POST data as ascii? 
logger.error("Unicode error in enriched items") logger.debug(bulk_json) safe_json = str(bulk_json.encode('ascii', 'ignore'), 'ascii') total += self.elastic.safe_put_bulk(url, safe_json) bulk_json = "" current = 0 rich_item = self.get_rich_item(item) data_json = json.dumps(rich_item) unique_field = self.get_field_unique_id() bulk_json += '{"index" : {"_id" : "%s" } }\n' % (rich_item[unique_field]) bulk_json += data_json + "\n" # Bulk document current += 1 if self.pair_programming: # Multi author support if 'authors' in item['data']: # First author already added in the above commit authors = item['data']['authors'] for i in range(1, len(authors)): # logger.debug('Adding a new commit for %s', authors[i]) item['data']['Author'] = authors[i] item['data']['is_git_commit_multi_author'] = 1 rich_item = self.get_rich_item(item) item['data']['is_git_commit_multi_author'] = 1 data_json = json.dumps(rich_item) commit_id = item["uuid"] + "_" + str(i - 1) rich_item['git_uuid'] = commit_id bulk_json += '{"index" : {"_id" : "%s" } }\n' % rich_item['git_uuid'] bulk_json += data_json + "\n" # Bulk document current += 1 total_multi_author += 1 if rich_item['Signed-off-by_number'] > 0: nsg = 0 # Remove duplicates and the already added Author if exists authors = list(set(item['data']['Signed-off-by'])) if item['data']['Author'] in authors: authors.remove(item['data']['Author']) for author in authors: # logger.debug('Adding a new commit for %s', author) # Change the Author in the original commit and generate # a new enriched item with it item['data']['Author'] = author item['data']['is_git_commit_signed_off'] = 1 rich_item = self.get_rich_item(item) commit_id = item["uuid"] + "_" + str(nsg) rich_item['git_uuid'] = commit_id data_json = json.dumps(rich_item) bulk_json += '{"index" : {"_id" : "%s" } }\n' % rich_item['git_uuid'] bulk_json += data_json + "\n" # Bulk document current += 1 total_signed_off += 1 nsg += 1 if current > 0: total += self.elastic.safe_put_bulk(url, bulk_json) if total == 0: # No items enriched, nothing to upload to ES return total if self.pair_programming: logger.info("Signed-off commits generated: %i", total_signed_off) logger.info("Multi author commits generated: %i", total_multi_author) return total
[ " Implementation supporting signed-off and multiauthor/committer commits." ]
Please provide a description of the function:def update_items(self, ocean_backend, enrich_backend): fltr = { 'name': 'origin', 'value': [self.perceval_backend.origin] } logger.debug("[update-items] Checking commits for %s.", self.perceval_backend.origin) git_repo = GitRepository(self.perceval_backend.uri, self.perceval_backend.gitpath) try: current_hashes = set([commit for commit in git_repo.rev_list()]) except Exception as e: logger.error("Skip updating branch info for repo %s, git rev-list command failed: %s", git_repo.uri, e) return raw_hashes = set([item['data']['commit'] for item in ocean_backend.fetch(ignore_incremental=True, _filter=fltr)]) hashes_to_delete = list(raw_hashes.difference(current_hashes)) to_process = [] for _hash in hashes_to_delete: to_process.append(_hash) if len(to_process) != MAX_BULK_UPDATE_SIZE: continue # delete documents from the raw index self.remove_commits(to_process, ocean_backend.elastic.index_url, 'data.commit', self.perceval_backend.origin) # delete documents from the enriched index self.remove_commits(to_process, enrich_backend.elastic.index_url, 'hash', self.perceval_backend.origin) to_process = [] if to_process: # delete documents from the raw index self.remove_commits(to_process, ocean_backend.elastic.index_url, 'data.commit', self.perceval_backend.origin) # delete documents from the enriched index self.remove_commits(to_process, enrich_backend.elastic.index_url, 'hash', self.perceval_backend.origin) logger.debug("[update-items] %s commits deleted from %s with origin %s.", len(hashes_to_delete), ocean_backend.elastic.anonymize_url(ocean_backend.elastic.index_url), self.perceval_backend.origin) logger.debug("[update-items] %s commits deleted from %s with origin %s.", len(hashes_to_delete), enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url), self.perceval_backend.origin) # update branch info self.delete_commit_branches(enrich_backend) self.add_commit_branches(git_repo, enrich_backend)
[ "Retrieve the commits not present in the original repository and delete\n the corresponding documents from the raw and enriched indexes" ]
Please provide a description of the function:def delete_commit_branches(self, enrich_backend): fltr = """ "filter": [ { "term": { "origin": "%s" } } ] """ % self.perceval_backend.origin # reset references in enrich index es_query = """ { "script": { "source": "ctx._source.branches = new HashSet();", "lang": "painless" }, "query": { "bool": { %s } } } """ % fltr index = enrich_backend.elastic.index_url r = self.requests.post(index + "/_update_by_query?refresh", data=es_query, headers=HEADER_JSON, verify=False) try: r.raise_for_status() except requests.exceptions.HTTPError: logger.error("Error while deleting branches on %s", self.elastic.anonymize_url(index)) logger.error(r.text) return logger.debug("Delete branches %s, index %s", r.text, self.elastic.anonymize_url(index))
[ "Delete the information about branches from the documents representing\n commits in the enriched index.\n\n :param enrich_backend: the enrich backend\n ", "\n \"filter\": [\n {\n \"term\": {\n \"origin\": \"%s\"\n }\n }\n ]\n ", "\n {\n \"script\": {\n \"source\": \"ctx._source.branches = new HashSet();\",\n \"lang\": \"painless\"\n },\n \"query\": {\n \"bool\": {\n %s\n }\n }\n }\n " ]
Please provide a description of the function:def add_commit_branches(self, git_repo, enrich_backend): to_process = [] for hash, refname in git_repo._discover_refs(remote=True): if not refname.startswith('refs/heads/'): continue commit_count = 0 branch_name = refname.replace('refs/heads/', '') try: commits = git_repo.rev_list([branch_name]) for commit in commits: to_process.append(commit) commit_count += 1 if commit_count == MAX_BULK_UPDATE_SIZE: self.__process_commits_in_branch(enrich_backend, branch_name, to_process) # reset the counter to_process = [] commit_count = 0 if commit_count: self.__process_commits_in_branch(enrich_backend, branch_name, to_process) except Exception as e: logger.error("Skip adding branch info for repo %s due to %s", git_repo.uri, e) return
[ "Add the information about branches to the documents representing commits in\n the enriched index. Branches are obtained using the command `git ls-remote`,\n then for each branch, the list of commits is retrieved via the command `git rev-list branch-name` and\n used to update the corresponding items in the enriched index.\n\n :param git_repo: GitRepository object\n :param enrich_backend: the enrich backend\n " ]
Please provide a description of the function:def remove_commits(self, items, index, attribute, origin): es_query = ''' { "query": { "bool": { "must": { "term": { "origin": "%s" } }, "filter": { "terms": { "%s": [%s] } } } } } ''' % (origin, attribute, ",".join(['"%s"' % i for i in items])) r = self.requests.post(index + "/_delete_by_query?refresh", data=es_query, headers=HEADER_JSON, verify=False) try: r.raise_for_status() except requests.exceptions.HTTPError as ex: logger.error("Error updating deleted commits for %s.", self.elastic.anonymize_url(index)) logger.error(r.text) return
[ "Delete documents that correspond to commits deleted in the Git repository\n\n :param items: target items to be deleted\n :param index: target index\n :param attribute: name of the term attribute to search items\n :param origin: name of the origin from where the items must be deleted\n " ]
Please provide a description of the function:def find_general_mappings(es_major_version): if es_major_version not in ES_SUPPORTED: print("Elasticsearch version not supported %s (supported %s)" % (es_major_version, ES_SUPPORTED)) sys.exit(1) # By default all strings are not analyzed in ES < 6 if es_major_version == '5': # Before version 6, strings were strings not_analyze_strings = """ { "dynamic_templates": [ { "notanalyzed": { "match": "*", "match_mapping_type": "string", "mapping": { "type": "string", "index": "not_analyzed" } } } ] } """ else: # After version 6, strings are keywords (not analyzed) not_analyze_strings = """ { "dynamic_templates": [ { "notanalyzed": { "match": "*", "match_mapping_type": "string", "mapping": { "type": "keyword" } } } ] } """ return json.loads(not_analyze_strings)
[ "\n Find the general mappings applied to all data sources\n :param es_major_version: string with the major version for Elasticsearch\n :return: a dict with the mappings (raw and enriched)\n ", "\n {\n \"dynamic_templates\": [\n { \"notanalyzed\": {\n \"match\": \"*\",\n \"match_mapping_type\": \"string\",\n \"mapping\": {\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n }\n }\n }\n ]\n } ", "\n {\n \"dynamic_templates\": [\n { \"notanalyzed\": {\n \"match\": \"*\",\n \"match_mapping_type\": \"string\",\n \"mapping\": {\n \"type\": \"keyword\"\n }\n }\n }\n ]\n } " ]
Please provide a description of the function:def find_ds_mapping(data_source, es_major_version): mappings = {"raw": None, "enriched": None} # Backend connectors connectors = get_connectors() try: raw_klass = connectors[data_source][1] enrich_klass = connectors[data_source][2] except KeyError: print("Data source not found", data_source) sys.exit(1) # Mapping for raw index backend = raw_klass(None) if backend: mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items']) mappings['raw'] = [mapping, find_general_mappings(es_major_version)] # Mapping for enriched index backend = enrich_klass(None) if backend: mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items']) mappings['enriched'] = [mapping, find_general_mappings(es_major_version)] return mappings
[ "\n Find the mapping given a perceval data source\n\n :param data_source: name of the perceval data source\n :param es_major_version: string with the major version for Elasticsearch\n :return: a dict with the mappings (raw and enriched)\n " ]
Please provide a description of the function:def areas_of_code(git_enrich, in_conn, out_conn, block_size=100): aoc = AreasOfCode(in_connector=in_conn, out_connector=out_conn, block_size=block_size, git_enrich=git_enrich) ndocs = aoc.analyze() return ndocs
[ "Build and index for areas of code from a given Perceval RAW index.\n\n :param block_size: size of items block.\n :param git_enrich: GitEnrich object to deal with SortingHat affiliations.\n :param in_conn: ESPandasConnector to read from.\n :param out_conn: ESPandasConnector to write to.\n :return: number of documents written in ElasticSearch enriched index.\n " ]
Please provide a description of the function:def make_hashcode(uuid, filepath, file_event): content = ':'.join([uuid, filepath, file_event]) hashcode = hashlib.sha1(content.encode('utf-8')) return hashcode.hexdigest()
[ "Generate a SHA1 based on the given arguments.\n :param uuid: perceval uuid of the item\n :param filepath: path of the corresponding file\n :param file_event: commit file event\n :returns: a SHA1 hash code\n " ]
Please provide a description of the function:def write(self, items): if self._read_only: raise IOError("Cannot write, Connector created as Read Only") # Uploading info to the new ES rows = items.to_dict("index") docs = [] for row_index in rows.keys(): row = rows[row_index] item_id = self.make_hashcode(row[Events.PERCEVAL_UUID], row[Git.FILE_PATH], row[Git.FILE_EVENT]) row['uuid'] = item_id doc = { "_index": self._es_index, "_type": "items", "_id": item_id, "_source": row } docs.append(doc) # TODO exception and error handling chunk_size = 2000 chunks = [docs[i:i + chunk_size] for i in range(0, len(docs), chunk_size)] for chunk in chunks: helpers.bulk(self._es_conn, chunk) logger.info(self.__log_prefix + "Written: " + str(len(docs)))
[ "Write items into ElasticSearch.\n\n :param items: Pandas DataFrame\n " ]
Please provide a description of the function:def process(self, items_block): logger.info(self.__log_prefix + " New commits: " + str(len(items_block))) # Create events from commits git_events = Git(items_block, self._git_enrich) events_df = git_events.eventize(2) logger.info(self.__log_prefix + " New events: " + str(len(events_df))) if len(events_df) > 0: # Filter information data_filtered = FilterRows(events_df) events_df = data_filtered.filter_(["filepath"], "-") logger.info(self.__log_prefix + " New events filtered: " + str(len(events_df))) events_df['message'] = events_df['message'].str.slice(stop=AreasOfCode.MESSAGE_MAX_SIZE) logger.info(self.__log_prefix + " Remove message content") # Add filetype info enriched_filetype = FileType(events_df) events_df = enriched_filetype.enrich('filepath') logger.info(self.__log_prefix + " New Filetype events: " + str(len(events_df))) # Split filepath info enriched_filepath = FilePath(events_df) events_df = enriched_filepath.enrich('filepath') logger.info(self.__log_prefix + " New Filepath events: " + str(len(events_df))) # Deal with surrogates convert = ToUTF8(events_df) events_df = convert.enrich(["owner"]) logger.info(self.__log_prefix + " Final new events: " + str(len(events_df))) return self.ProcessResults(processed=len(events_df), out_items=events_df)
[ "Process items to add file related information.\n\n Eventize items creating one new item per each file found in the commit (excluding\n files with no actions performed on them). For each event, file path, file name,\n path parts, file type and file extension are added as fields.\n\n :param items_block: items to be processed. Expects to find ElasticSearch hits _source part only.\n " ]
Please provide a description of the function:def get_perceval_params_from_url(cls, url): params = [] dparam = cls.get_arthur_params_from_url(url) params.append(dparam['url']) params.append(dparam['channel']) return params
[ " Get the perceval params given a URL for the data source " ]
Please provide a description of the function:def get_repository_filter(perceval_backend, perceval_backend_name, term=False): from .github import GITHUB filter_ = {} if not perceval_backend: return filter_ field = 'origin' value = perceval_backend.origin if perceval_backend_name in ["meetup", "nntp", "stackexchange", "jira"]: # Until tag is supported in all raw and enriched indexes # we should use origin. But stackexchange and meetup won't work with origin # because the tag must be included in the filter. # For nntp we have a common group server as origin, so we need to use also the tag. # And in jira we can filter by product, and the origin is the same jira server. field = 'tag' value = perceval_backend.tag if perceval_backend: if not term: filter_ = {"name": field, "value": value} else: filter_ = ''' {"term": { "%s" : "%s" } } ''' % (field, value) # Filters are always a dict filter_ = json.loads(filter_) if value in ['', GITHUB + '/', 'https://meetup.com/']: # Support for getting all items from a multiorigin index # In GitHub we receive GITHUB + '/', the site url without org and repo # In Meetup we receive https://meetup.com/ as the tag filter_ = {} return filter_
[ " Get the filter needed for get the items in a repository " ]
Please provide a description of the function:def get_time_diff_days(start, end): ''' Number of days between two dates in UTC format ''' if start is None or end is None: return None if type(start) is not datetime.datetime: start = parser.parse(start).replace(tzinfo=None) if type(end) is not datetime.datetime: end = parser.parse(end).replace(tzinfo=None) seconds_day = float(60 * 60 * 24) diff_days = (end - start).total_seconds() / seconds_day diff_days = float('%.2f' % diff_days) return diff_days
[]
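A quick usage sketch, assuming the function above is in scope:

# 36 hours apart -> 1.5 days, rounded to two decimals
print(get_time_diff_days('2019-01-01T00:00:00', '2019-01-02T12:00:00'))  # 1.5
print(get_time_diff_days(None, '2019-01-02T12:00:00'))                   # None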
Please provide a description of the function:def unixtime_to_datetime(ut): dt = datetime.datetime.utcfromtimestamp(ut) dt = dt.replace(tzinfo=tz.tzutc()) return dt
[ "Convert a unixtime timestamp to a datetime object.\n The function converts a timestamp in Unix format to a\n datetime object. UTC timezone will also be set.\n :param ut: Unix timestamp to convert\n :returns: a datetime object\n :raises InvalidDateError: when the given timestamp cannot be\n converted into a valid date\n " ]
Please provide a description of the function:def get_arthur_params_from_url(cls, url): # In the url the org and the repository are included params = url.split() params = {"owner": params[0], "repository": params[1]} return params
[ " Get the arthur params given a URL for the data source " ]
Please provide a description of the function:def get_identities(self, item): if 'authorData' in item['data']['fields']: user = self.get_sh_identity(item['data']['fields']['authorData']) yield user if 'ownerData' in item['data']['fields']: user = self.get_sh_identity(item['data']['fields']['ownerData']) yield user
[ " Return the identities from an item " ]
Please provide a description of the function:def get_rich_events(self, item): # To get values from the task eitem = self.get_rich_item(item) # Fields that don't change never task_fields_nochange = ['author_userName', 'creation_date', 'url', 'id', 'bug_id'] # Follow changes in this fields task_fields_change = ['priority_value', 'status', 'assigned_to_userName', 'tags_custom_analyzed'] task_change = {} for f in task_fields_change: task_change[f] = None task_change['status'] = TASK_OPEN_STATUS task_change['tags_custom_analyzed'] = eitem['tags_custom_analyzed'] # Events are in transactions field (changes in fields) transactions = item['data']['transactions'] if not transactions: return [] for t in transactions: event = {} # Needed for incremental updates from the item event['metadata__updated_on'] = item['metadata__updated_on'] event['origin'] = item['origin'] # Real event data event['transactionID'] = t['transactionID'] event['type'] = t['transactionType'] event['username'] = None if 'authorData' in t and 'userName' in t['authorData']: event['event_author_name'] = t['authorData']['userName'] event['update_date'] = unixtime_to_datetime(float(t['dateCreated'])).isoformat() event['oldValue'] = '' event['newValue'] = '' if event['type'] == 'core:edge': for val in t['oldValue']: if val in self.phab_ids_names: val = self.phab_ids_names[val] event['oldValue'] += "," + val event['oldValue'] = event['oldValue'][1:] # remove first comma for val in t['newValue']: if val in self.phab_ids_names: val = self.phab_ids_names[val] event['newValue'] += "," + val event['newValue'] = event['newValue'][1:] # remove first comma elif event['type'] in ['status', 'description', 'priority', 'reassign', 'title', 'space', 'core:create', 'parent']: # Convert to str so the field is always a string event['oldValue'] = str(t['oldValue']) if event['oldValue'] in self.phab_ids_names: event['oldValue'] = self.phab_ids_names[event['oldValue']] event['newValue'] = str(t['newValue']) if event['newValue'] in self.phab_ids_names: event['newValue'] = self.phab_ids_names[event['newValue']] elif event['type'] == 'core:comment': event['newValue'] = t['comments'] elif event['type'] == 'core:subscribers': event['newValue'] = ",".join(t['newValue']) else: # logger.debug("Event type %s old to new value not supported", t['transactionType']) pass for f in task_fields_nochange: # The field name must be the same than in task for filtering event[f] = eitem[f] # To track history of some fields if event['type'] in ['status']: task_change['status'] = event['newValue'] elif event['type'] == 'priority': task_change['priority'] = event['newValue'] elif event['type'] == 'core:edge': task_change['tags_custom_analyzed'] = [event['newValue']] if event['type'] in ['reassign']: # Try to get the userName and not the user id if event['newValue'] in self.phab_ids_names: task_change['assigned_to_userName'] = self.phab_ids_names[event['newValue']] event['newValue'] = task_change['assigned_to_userName'] else: task_change['assigned_to_userName'] = event['newValue'] if event['oldValue'] in self.phab_ids_names: # Try to get the userName and not the user id event['oldValue'] = self.phab_ids_names[event['oldValue']] for f in task_change: event[f] = task_change[f] yield event
[ "\n In the events there are some common fields with the task. The name\n of the field must be the same in the task and in the event\n so we can filer using it in task and event at the same time.\n\n * Fields that don't change: the field does not change with the events\n in a task so the value is always the same in the events of a task.\n\n * Fields that change: the value of teh field changes with events\n " ]
Please provide a description of the function:def __fill_phab_ids(self, item): for p in item['projects']: if p and 'name' in p and 'phid' in p: self.phab_ids_names[p['phid']] = p['name'] if 'authorData' not in item['fields'] or not item['fields']['authorData']: return self.phab_ids_names[item['fields']['authorData']['phid']] = item['fields']['authorData']['userName'] if 'ownerData' in item['fields'] and item['fields']['ownerData']: self.phab_ids_names[item['fields']['ownerData']['phid']] = item['fields']['ownerData']['userName'] if 'priority' in item['fields']: val = item['fields']['priority']['value'] self.phab_ids_names[str(val)] = item['fields']['priority']['name'] for t in item['transactions']: if 'authorData' in t and t['authorData'] and 'userName' in t['authorData']: self.phab_ids_names[t['authorData']['phid']] = t['authorData']['userName'] elif t['authorData'] and 'name' in t['authorData']: # Herald self.phab_ids_names[t['authorData']['phid']] = t['authorData']['name']
[ " Get mappings between phab ids and names " ]
Please provide a description of the function:def starting_at(self, datetime_or_str): if isinstance(datetime_or_str, str): self._starting_at = parse(datetime_or_str) elif isinstance(datetime_or_str, datetime.datetime): self._starting_at = datetime_or_str else: raise ValueError('.starting_at() method can only take strings or datetime objects') return self
[ "\n Set the starting time for the cron job. If not specified, the starting time will always\n be the beginning of the interval that is current when the cron is started.\n\n :param datetime_or_str: a datetime object or a string that dateutil.parser can understand\n :return: self\n " ]
Please provide a description of the function:def every(self, **kwargs): if len(kwargs) != 1: raise ValueError('.every() method must be called with exactly one keyword argument') self._every_kwargs = self._clean_kwargs(kwargs) return self
[ "\n Specify the interval at which you want the job run. Takes exactly one keyword argument.\n That argument must be one named one of [second, minute, hour, day, week, month, year] or\n their plural equivalents.\n\n :param kwargs: Exactly one keyword argument\n :return: self\n " ]
Please provide a description of the function:def run(self, func, *func_args, **func__kwargs): self._func = func self._func_args = func_args self._func_kwargs = func__kwargs return self
[ "\n Specify the function to run at the scheduled times\n\n :param func: a callable\n :param func_args: the args to the callable\n :param func__kwargs: the kwargs to the callable\n :return:\n " ]
Please provide a description of the function:def _get_target(self): if None in [self._func, self._func_args, self._func_kwargs, self._every_kwargs]: raise ValueError('You must call the .every() and .run() methods on every tab.') return self._loop
[ "\n returns a callable with no arguments designed\n to be the target of a Subprocess\n " ]
Please provide a description of the function:def wrapped_target(target, q_stdout, q_stderr, q_error, robust, name, *args, **kwargs): # pragma: no cover import sys sys.stdout = IOQueue(q_stdout) sys.stderr = IOQueue(q_stderr) try: target(*args, **kwargs) except: if not robust: s = 'Error in tab\n' + traceback.format_exc() logger = daiquiri.getLogger(name) logger.error(s) else: raise if not robust: q_error.put(name) raise
[ "\n Wraps a target with queues replacing stdout and stderr\n " ]
Please provide a description of the function:def loop(self, max_seconds=None): loop_started = datetime.datetime.now() self._is_running = True while self._is_running: self.process_error_queue(self.q_error) if max_seconds is not None: if (datetime.datetime.now() - loop_started).total_seconds() > max_seconds: break for subprocess in self._subprocesses: if not subprocess.is_alive(): subprocess.start() self.process_io_queue(self.q_stdout, sys.stdout) self.process_io_queue(self.q_stderr, sys.stderr)
[ "\n Main loop for the process. This will run continuously until maxiter\n " ]
Please provide a description of the function:def escape(string, escape_pattern): try: return string.translate(escape_pattern) except AttributeError: warnings.warn("Non-string-like data passed. " "Attempting to convert to 'str'.") return str(string).translate(tag_escape)
[ "Assistant function for string escaping" ]
Please provide a description of the function:def _make_serializer(meas, schema, rm_none, extra_tags, placeholder): # noqa: C901 _validate_schema(schema, placeholder) tags = [] fields = [] ts = None meas = meas for k, t in schema.items(): if t is MEASUREMENT: meas = f"{{i.{k}}}" elif t is TIMEINT: ts = f"{{i.{k}}}" elif t is TIMESTR: if pd: ts = f"{{pd.Timestamp(i.{k} or 0).value}}" else: ts = f"{{dt_to_int(str_to_dt(i.{k}))}}" elif t is TIMEDT: if pd: ts = f"{{pd.Timestamp(i.{k} or 0).value}}" else: ts = f"{{dt_to_int(i.{k})}}" elif t is TAG: tags.append(f"{k}={{str(i.{k}).translate(tag_escape)}}") elif t is TAGENUM: tags.append(f"{k}={{getattr(i.{k}, 'name', i.{k} or None)}}") elif t in (FLOAT, BOOL): fields.append(f"{k}={{i.{k}}}") elif t is INT: fields.append(f"{k}={{i.{k}}}i") elif t is STR: fields.append(f"{k}=\\\"{{str(i.{k}).translate(str_escape)}}\\\"") elif t is ENUM: fields.append(f"{k}=\\\"{{getattr(i.{k}, 'name', i.{k} or None)}}\\\"") else: raise SchemaError(f"Invalid attribute type {k!r}: {t!r}") extra_tags = extra_tags or {} for k, v in extra_tags.items(): tags.append(f"{k}={v}") if placeholder: fields.insert(0, f"_=true") sep = ',' if tags else '' ts = f' {ts}' if ts else '' fmt = f"{meas}{sep}{','.join(tags)} {','.join(fields)}{ts}" if rm_none: # Has substantial runtime impact. Best avoided if performance is critical. # First field can't be removed. pat = r',\w+="?None"?i?' f = eval('lambda i: re.sub(r\'{}\', "", f"{}").encode()'.format(pat, fmt)) else: f = eval('lambda i: f"{}".encode()'.format(fmt)) f.__doc__ = "Returns InfluxDB line protocol representation of user-defined class" f._args = dict(meas=meas, schema=schema, rm_none=rm_none, extra_tags=extra_tags, placeholder=placeholder) return f
[ "Factory of line protocol parsers" ]
Please provide a description of the function:def lineprotocol( cls=None, *, schema: Optional[Mapping[str, type]] = None, rm_none: bool = False, extra_tags: Optional[Mapping[str, str]] = None, placeholder: bool = False ): def _lineprotocol(cls): _schema = schema or getattr(cls, '__annotations__', {}) f = _make_serializer(cls.__name__, _schema, rm_none, extra_tags, placeholder) cls.to_lineprotocol = f return cls return _lineprotocol(cls) if cls else _lineprotocol
[ "Adds ``to_lineprotocol`` method to arbitrary user-defined classes\n\n :param cls: Class to monkey-patch\n :param schema: Schema dictionary (attr/type pairs).\n :param rm_none: Whether apply a regex to remove ``None`` values.\n If ``False``, passing ``None`` values to boolean, integer or float or time fields\n will result in write errors. Setting to ``True`` is \"safer\" but impacts performance.\n :param extra_tags: Hard coded tags to be added to every point generated.\n :param placeholder: If no field attributes are present, add a placeholder attribute (``_``)\n which is always equal to ``True``. This is a workaround for creating field-less points\n (which is not supported natively by InfluxDB)\n " ]
Please provide a description of the function:def serialize(point: Mapping, measurement=None, **extra_tags) -> bytes: tags = _serialize_tags(point, extra_tags) return ( f'{_serialize_measurement(point, measurement)}' f'{"," if tags else ""}{tags} ' f'{_serialize_fields(point)} ' f'{_serialize_timestamp(point)}' ).encode()
[ "Converts dictionary-like data into a single line protocol line (point)" ]
Please provide a description of the function:def _serialize_fields(point): output = [] for k, v in point['fields'].items(): k = escape(k, key_escape) if isinstance(v, bool): output.append(f'{k}={v}') elif isinstance(v, int): output.append(f'{k}={v}i') elif isinstance(v, str): output.append(f'{k}="{v.translate(str_escape)}"') elif v is None: # Empty values continue else: # Floats output.append(f'{k}={v}') return ','.join(output)
[ "Field values can be floats, integers, strings, or Booleans." ]
Please provide a description of the function:def serialize(data, measurement=None, tag_columns=None, **extra_tags): if isinstance(data, bytes): return data elif isinstance(data, str): return data.encode('utf-8') elif hasattr(data, 'to_lineprotocol'): return data.to_lineprotocol() elif pd is not None and isinstance(data, pd.DataFrame): return dataframe.serialize(data, measurement, tag_columns, **extra_tags) elif isinstance(data, dict): return mapping.serialize(data, measurement, **extra_tags) elif hasattr(data, '__iter__'): return b'\n'.join([serialize(i, measurement, tag_columns, **extra_tags) for i in data]) else: raise ValueError('Invalid input', data)
[ "Converts input data into line protocol format" ]
Please provide a description of the function:def iterpoints(resp: dict, parser: Optional[Callable] = None) -> Iterator[Any]: for statement in resp['results']: if 'series' not in statement: continue for series in statement['series']: if parser is None: return (x for x in series['values']) elif 'meta' in inspect.signature(parser).parameters: meta = {k: series[k] for k in series if k != 'values'} meta['statement_id'] = statement['statement_id'] return (parser(*x, meta=meta) for x in series['values']) else: return (parser(*x) for x in series['values']) return iter([])
[ "Iterates a response JSON yielding data point by point.\n\n Can be used with both regular and chunked responses.\n By default, returns just a plain list of values representing each point,\n without column names, or other metadata.\n\n In case a specific format is needed, an optional ``parser`` argument can be passed.\n ``parser`` is a function/callable that takes data point values\n and, optionally, a ``meta`` parameter containing which takes a\n dictionary containing all or a subset of the following:\n ``{'columns', 'name', 'tags', 'statement_id'}``.\n\n Sample parser functions:\n\n .. code:: python\n\n # Function optional meta argument\n def parser(*x, meta):\n return dict(zip(meta['columns'], x))\n\n # Namedtuple (callable)\n from collections import namedtuple\n parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])\n\n\n :param resp: Dictionary containing parsed JSON (output from InfluxDBClient.query)\n :param parser: Optional parser function/callable\n :return: Generator object\n " ]
Please provide a description of the function:def parse(resp) -> DataFrameType: statements = [] for statement in resp['results']: series = {} for s in statement.get('series', []): series[_get_name(s)] = _drop_zero_index(_serializer(s)) statements.append(series) if len(statements) == 1: series: dict = statements[0] if len(series) == 1: return list(series.values())[0] # DataFrame else: return series # dict return statements
[ "Makes a dictionary of DataFrames from a response object" ]
Please provide a description of the function:def _itertuples(df): cols = [df.iloc[:, k] for k in range(len(df.columns))] return zip(df.index, *cols)
[ "Custom implementation of ``DataFrame.itertuples`` that\n returns plain tuples instead of namedtuples. About 50% faster.\n " ]
Please provide a description of the function:def serialize(df, measurement, tag_columns=None, **extra_tags) -> bytes: # Pre-processing if measurement is None: raise ValueError("Missing 'measurement'") if not isinstance(df.index, pd.DatetimeIndex): raise ValueError('DataFrame index is not DatetimeIndex') tag_columns = set(tag_columns or []) isnull = df.isnull().any(axis=1) # Make parser function tags = [] fields = [] for k, v in extra_tags.items(): tags.append(f"{k}={escape(v, key_escape)}") for i, (k, v) in enumerate(df.dtypes.items()): k = k.translate(key_escape) if k in tag_columns: tags.append(f"{k}={{p[{i+1}]}}") elif issubclass(v.type, np.integer): fields.append(f"{k}={{p[{i+1}]}}i") elif issubclass(v.type, (np.float, np.bool_)): fields.append(f"{k}={{p[{i+1}]}}") else: # String escaping is skipped for performance reasons # Strings containing double-quotes can cause strange write errors # and should be sanitized by the user. # e.g., df[k] = df[k].astype('str').str.translate(str_escape) fields.append(f"{k}=\"{{p[{i+1}]}}\"") fmt = (f'{measurement}', f'{"," if tags else ""}', ','.join(tags), ' ', ','.join(fields), ' {p[0].value}') f = eval("lambda p: f'{}'".format(''.join(fmt))) # Map/concat if isnull.any(): lp = map(f, _itertuples(df[~isnull])) rep = _replace(df) lp_nan = (reduce(lambda a, b: re.sub(*b, a), rep, f(p)) for p in _itertuples(df[isnull])) return '\n'.join(chain(lp, lp_nan)).encode('utf-8') else: return '\n'.join(map(f, _itertuples(df))).encode('utf-8')
[ "Converts a Pandas DataFrame into line protocol format" ]
Please provide a description of the function:def runner(coro): @wraps(coro) def inner(self, *args, **kwargs): if self.mode == 'async': return coro(self, *args, **kwargs) return self._loop.run_until_complete(coro(self, *args, **kwargs)) return inner
[ "Function execution decorator." ]
Please provide a description of the function:async def create_session(self, **kwargs): self.opts.update(kwargs) self._session = aiohttp.ClientSession(**self.opts, loop=self._loop) if self.redis_opts: if aioredis: self._redis = await aioredis.create_redis(**self.redis_opts, loop=self._loop) else: warnings.warn(no_redis_warning)
[ "Creates an :class:`aiohttp.ClientSession`\n\n Override this or call it with ``kwargs`` to use other :mod:`aiohttp`\n functionality not covered by :class:`~.InfluxDBClient.__init__`\n " ]
Please provide a description of the function:async def ping(self) -> dict: if not self._session: await self.create_session() async with self._session.get(self.url.format(endpoint='ping')) as resp: logger.debug(f'{resp.status}: {resp.reason}') return dict(resp.headers.items())
[ "Pings InfluxDB\n\n Returns a dictionary containing the headers of the response from ``influxd``.\n " ]
Please provide a description of the function:async def write( self, data: Union[PointType, Iterable[PointType]], measurement: Optional[str] = None, db: Optional[str] = None, precision: Optional[str] = None, rp: Optional[str] = None, tag_columns: Optional[Iterable] = None, **extra_tags, ) -> bool: if not self._session: await self.create_session() if precision is not None: # FIXME: Implement. Related issue: aioinflux/pull/13 raise NotImplementedError("'precision' parameter is not supported yet") data = serialization.serialize(data, measurement, tag_columns, **extra_tags) params = {'db': db or self.db} if rp: params['rp'] = rp url = self.url.format(endpoint='write') async with self._session.post(url, params=params, data=data) as resp: if resp.status == 204: return True raise InfluxDBWriteError(resp)
[ "Writes data to InfluxDB.\n Input can be:\n\n 1. A mapping (e.g. ``dict``) containing the keys:\n ``measurement``, ``time``, ``tags``, ``fields``\n 2. A Pandas :class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex`\n 3. A user defined class decorated w/\n :func:`~aioinflux.serialization.usertype.lineprotocol`\n 4. A string (``str`` or ``bytes``) properly formatted in InfluxDB's line protocol\n 5. An iterable of one of the above\n\n Input data in formats 1-3 are parsed to the line protocol before being\n written to InfluxDB.\n See the `InfluxDB docs <https://docs.influxdata.com/influxdb/latest/\n write_protocols/line_protocol_reference/>`_ for more details.\n\n :param data: Input data (see description above).\n :param measurement: Measurement name. Mandatory when when writing DataFrames only.\n When writing dictionary-like data, this field is treated as the default value\n for points that do not contain a `measurement` field.\n :param db: Database to be written to. Defaults to `self.db`.\n :param precision: Sets the precision for the supplied Unix time values.\n Ignored if input timestamp data is of non-integer type.\n Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``\n :param rp: Sets the target retention policy for the write.\n If unspecified, data is written to the default retention policy.\n :param tag_columns: Columns to be treated as tags\n (used when writing DataFrames only)\n :param extra_tags: Additional tags to be added to all points passed.\n :return: Returns ``True`` if insert is successful.\n Raises :py:class:`ValueError` otherwise.\n " ]
Please provide a description of the function:async def query( self, q: AnyStr, *, epoch: str = 'ns', chunked: bool = False, chunk_size: Optional[int] = None, db: Optional[str] = None, use_cache: bool = False, ) -> Union[AsyncGenerator[ResultType, None], ResultType]: async def _chunked_generator(url, data): async with self._session.post(url, data=data) as resp: logger.debug(f'{resp.status} (CHUNKED): {q}') # Hack to avoid aiohttp raising ValueError('Line is too long') # The number 16 is arbitrary (may be too large/small). resp.content._high_water *= 16 async for chunk in resp.content: chunk = json.loads(chunk) self._check_error(chunk) yield chunk if not self._session: await self.create_session() # InfluxDB documentation is wrong regarding `/query` parameters # See https://github.com/influxdata/docs.influxdata.com/issues/1807 if not isinstance(chunked, bool): raise ValueError("'chunked' must be a boolean") data = dict(q=q, db=db or self.db, chunked=str(chunked).lower(), epoch=epoch) if chunked and chunk_size: data['chunk_size'] = chunk_size url = self.url.format(endpoint='query') if chunked: if use_cache: raise ValueError("Can't use cache w/ chunked queries") if self.mode != 'async': raise ValueError("Can't use 'chunked' with non-async mode") if self.output == 'json': return _chunked_generator(url, data) raise ValueError(f"Chunked queries are not support with {self.output!r} output") key = f'aioinflux:{q}' if use_cache and self._redis and await self._redis.exists(key): logger.debug(f'Cache HIT: {q}') data = lz4.decompress(await self._redis.get(key)) else: async with self._session.post(url, data=data) as resp: data = await resp.read() if use_cache and self._redis: logger.debug(f'Cache MISS ({resp.status}): {q}') if resp.status == 200: await self._redis.set(key, lz4.compress(data)) await self._redis.expire(key, self.cache_expiry) else: logger.debug(f'{resp.status}: {q}') data = json.loads(data) self._check_error(data) if self.output == 'json': return data elif self.output == 'dataframe': return serialization.dataframe.parse(data) else: raise ValueError('Invalid output format')
[ "Sends a query to InfluxDB.\n Please refer to the InfluxDB documentation for all the possible queries:\n https://docs.influxdata.com/influxdb/latest/query_language/\n\n :param q: Raw query string\n :param db: Database to be queried. Defaults to `self.db`.\n :param epoch: Precision level of response timestamps.\n Valid values: ``{'ns', 'u', 'µ', 'ms', 's', 'm', 'h'}``.\n :param chunked: If ``True``, makes InfluxDB return results in streamed batches\n rather than as a single response.\n Returns an AsyncGenerator which yields responses\n in the same format as non-chunked queries.\n :param chunk_size: Max number of points for each chunk. By default, InfluxDB chunks\n responses by series or by every 10,000 points, whichever occurs first.\n :param use_cache:\n :return: Response in the format specified by the combination of\n :attr:`.InfluxDBClient.output` and ``chunked``\n " ]
Please provide a description of the function:def _check_error(response): if 'error' in response: raise InfluxDBError(response['error']) elif 'results' in response: for statement in response['results']: if 'error' in statement: msg = '{d[error]} (statement {d[statement_id]})' raise InfluxDBError(msg.format(d=statement))
[ "Checks for JSON error messages and raises Python exception" ]
Please provide a description of the function:def create_magic_packet(macaddress): if len(macaddress) == 12: pass elif len(macaddress) == 17: sep = macaddress[2] macaddress = macaddress.replace(sep, '') else: raise ValueError('Incorrect MAC address format') # Pad the synchronization stream data = b'FFFFFFFFFFFF' + (macaddress * 16).encode() send_data = b'' # Split up the hex values in pack for i in range(0, len(data), 2): send_data += struct.pack(b'B', int(data[i: i + 2], 16)) return send_data
[ "\n Create a magic packet.\n\n A magic packet is a packet that can be used with the for wake on lan\n protocol to wake up a computer. The packet is constructed from the\n mac address given as a parameter.\n\n Args:\n macaddress (str): the mac address that should be parsed into a\n magic packet.\n\n " ]
Please provide a description of the function:def send_magic_packet(*macs, **kwargs): packets = [] ip = kwargs.pop('ip_address', BROADCAST_IP) port = kwargs.pop('port', DEFAULT_PORT) for k in kwargs: raise TypeError('send_magic_packet() got an unexpected keyword ' 'argument {!r}'.format(k)) for mac in macs: packet = create_magic_packet(mac) packets.append(packet) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) sock.connect((ip, port)) for packet in packets: sock.send(packet) sock.close()
[ "\n Wake up computers having any of the given mac addresses.\n\n Wake on lan must be enabled on the host device.\n\n Args:\n macs (str): One or more macaddresses of machines to wake.\n\n Keyword Args:\n ip_address (str): the ip address of the host to send the magic packet\n to (default \"255.255.255.255\")\n port (int): the port of the host to send the magic packet to\n (default 9)\n\n " ]
Please provide a description of the function:def main(argv=None): parser = argparse.ArgumentParser( description='Wake one or more computers using the wake on lan' ' protocol.') parser.add_argument( 'macs', metavar='mac address', nargs='+', help='The mac addresses or of the computers you are trying to wake.') parser.add_argument( '-i', metavar='ip', default=BROADCAST_IP, help='The ip address of the host to send the magic packet to.' ' (default {})'.format(BROADCAST_IP)) parser.add_argument( '-p', metavar='port', type=int, default=DEFAULT_PORT, help='The port of the host to send the magic packet to (default 9)') args = parser.parse_args(argv) send_magic_packet(*args.macs, ip_address=args.i, port=args.p)
[ "\n Run wake on lan as a CLI application.\n\n " ]
Please provide a description of the function:def mjml(parser, token): nodelist = parser.parse(('endmjml',)) parser.delete_first_token() tokens = token.split_contents() if len(tokens) != 1: raise template.TemplateSyntaxError("'%r' tag doesn't receive any arguments." % tokens[0]) return MJMLRenderNode(nodelist)
[ "\n Compile MJML template after render django template.\n\n Usage:\n {% mjml %}\n .. MJML template code ..\n {% endmjml %}\n " ]
Please provide a description of the function:def build_models_dict(annotated_models): logger = getLogger(__name__) logger.debug("Parsing models {0}".format(annotated_models) ) parsed_models = {} for family_annotation in annotated_models: family_id = family_annotation.split(':')[0] logger.debug("Parsing family {0}".format(family_id)) models = family_annotation.split(':')[1].split('|') parsed_models[family_id] = models logger.debug("Adding models {0}".format(models)) return parsed_models
[ "\n Take a list with annotated genetic inheritance patterns for each\n family and returns a dictionary with family_id as key and a list of\n genetic models as value.\n \n Args:\n annotated_models : A list on the form ['1:AD','2:AR_comp|AD_dn']\n \n Returns:\n parsed_models : A dictionary on the form\n {\n 1:['AD'],\n 2:['AD_dn','AR_comp']\n }\n \n " ]
Please provide a description of the function:def split_variants(variant_dict, header_parser, allele_symbol='0'): logger = getLogger(__name__) logger.info("Allele symbol {0}".format(allele_symbol)) alternatives = variant_dict['ALT'].split(',') reference = variant_dict['REF'] number_of_values = 1 # Go through each of the alternative alleles: for alternative_number, alternative in enumerate(alternatives): variant = {} info_dict = OrderedDict() # This is a dict on the form {ALT:[{vep_info_dict}]} vep_dict = {} genotype_dict = {} variant['CHROM'] = variant_dict['CHROM'] variant['POS'] = variant_dict['POS'] try: # There will not allways be one rsID for each alternative variant['ID'] = variant_dict['ID'].split(';')[alternative_number] # If only one id is present for multiple alleles they all get the same ID except IndexError: variant['ID'] = variant_dict['ID'] variant['REF'] = variant_dict['REF'] variant['ALT'] = alternative variant['QUAL'] = variant_dict['QUAL'] variant['FILTER'] = variant_dict['FILTER'] if 'FORMAT' in variant_dict: gt_format = variant_dict['FORMAT'] variant['FORMAT'] = gt_format for info in variant_dict['info_dict']: if info and info != '.': # Check if the info field have one entry per allele: number_of_values = header_parser.extra_info[info]['Number'] if info == 'CSQ': vep_dict[alternative] = variant_dict['vep_info'][alternative] if vep_dict[alternative]: info_dict['CSQ'] = [ build_vep_string( vep_dict[alternative], header_parser.vep_columns ) ] # If there is one value per allele we need to split it in # the proper way elif number_of_values == 'A': try: # When we split the alleles we only want to annotate with the correct number info_dict[info] = [variant_dict['info_dict'][info][alternative_number]] except IndexError: # If there is only one annotation we choose that one info_dict[info] = [variant_dict['info_dict'][info][0]] # Choose the right vep info from the old variant elif number_of_values == 'R': reference_value = variant_dict['info_dict'][info][0] new_info = [reference_value] try: # When we split the alleles we only want to annotate with the correct number allele_value = variant_dict['info_dict'][info][alternative_number + 1] new_info.append(allele_value) info_dict[info] = new_info except IndexError: # If annotation is missing we keep the original annotation info_dict[info] = variant_dict['info_dict'][info] else: info_dict[info] = variant_dict['info_dict'][info] else: info_dict[info] = [] variant['INFO'] = build_info_string(info_dict) for individual in variant_dict['genotypes']: new_genotype = split_genotype( variant_dict[individual], variant['FORMAT'], alternative_number, allele_symbol ) variant[individual] = new_genotype genotype_dict[individual] = Genotype(**dict(zip(gt_format.split(':'), variant[individual].split(':')))) variant['info_dict'] = info_dict variant['vep_info'] = vep_dict variant['genotypes'] = genotype_dict variant['variant_id'] = '_'.join([variant['CHROM'], variant['POS'], variant['REF'], alternative]) yield variant
[ "\n Checks if there are multiple alternative alleles and splitts the \n variant.\n If there are multiple alternatives the info fields, vep annotations \n and genotype calls will be splitted in the correct way\n \n Args:\n variant_dict: a dictionary with the variant information\n \n Yields:\n variant: A variant dictionary with the splitted information for each\n alternative\n " ]
Please provide a description of the function:def build_rank_score_dict(rank_scores): logger = getLogger(__name__) logger.debug("Checking rank scores: {0}".format(rank_scores)) scores = {} for family in rank_scores: entry = family.split(':') try: family_id = entry[0] logger.debug("Extracting rank score for family:{0}".format(family_id)) score = entry[1] logger.debug("Score:{0}".format(score)) except Exception: raise SyntaxError("Malformed rank score input") scores[family_id] = score return scores
[ "\n Take a list with annotated rank scores for each family and returns a \n dictionary with family_id as key and a list of genetic models as value.\n \n Args:\n rank_scores : A list on the form ['1:12','2:20']\n \n Returns:\n scores : A dictionary with family id:s as key and scores as value\n {\n '1':'12',\n '2':'20'\n }\n \n " ]
Please provide a description of the function:def check_info_annotation(annotation, info, extra_info, alternatives, individuals=[]): number = extra_info['Number'] if is_number(number): number_of_entrys = float(number) if number_of_entrys != 0: if len(annotation) != number_of_entrys: raise SyntaxError("Info field {0} has the wrong "\ "number of entrys according to the vcf header."\ " Vcf header specifies {1} should have {2} entry(s)".format( '='.join([info, ','.join(annotation)]), info, number )) elif number == 'A': if len(annotation) != len(alternatives): raise SyntaxError("Info field {0} has the wrong "\ "number of entrys according to the vcf header."\ "Vcf header specifies {1} should have {2} entry(s)".format( '='.join([info, ','.join(annotation)]), info, number )) elif number == 'R': if len(annotation) != (len(alternatives) + 1): raise SyntaxError("Info field {0} has the wrong "\ "number of entrys according to the vcf header."\ "Vcf header specifies {1} should have {2} entry(s)".format( '='.join([info, ','.join(annotation)]), info, number )) elif number == 'G': if len(annotation) != len(individuals): raise SyntaxError("Info field {0} has the wrong "\ "number of entrys according to the vcf header."\ "Vcf header specifies {1} should have {2} entry(s)".format( '='.join([info, ','.join(annotation)]), info, number )) return True
[ "\n Check if the info annotation corresponds to the metadata specification\n \n Arguments:\n annotation (list): The annotation from the vcf file\n info (str): Name of the info field\n extra_info (dict): The metadata specification\n alternatives (list): A list with the alternative variants\n individuals (list): a list with the individuals\n \n Returns:\n bool: If the annotation is correct or not\n " ]
Please provide a description of the function:def format_variant(line, header_parser, check_info=False): logger = getLogger(__name__) individuals = [] vcf_header = header_parser.header individuals = header_parser.individuals variant_line = line.rstrip().split('\t') logger.debug("Checking if variant line is malformed") if len(vcf_header) != len(variant_line): raise SyntaxError("One of the variant lines is malformed: {0}".format( line )) variant = dict(zip(vcf_header, variant_line)) # A dictionary with the vep information variant['vep_info'] = {} # A dictionary with the genetic models (family ids as keys) variant['genetic_models'] = {} # A dictionary with genotype objects (individual ids as keys) variant['genotypes'] = {} # A dictionary with the compounds (family ids as keys) variant['compound_variants'] = {} # A dictionary with the rank scores (family ids as keys) variant['rank_scores'] = {} variant['individual_scores'] = {} alternatives = variant['ALT'].split(',') info_dict = build_info_dict(variant.get('INFO', '')) #For testing # Check that the entry is on the proper format_ if check_info: for info in info_dict: annotation = info_dict[info] extra_info = header_parser.extra_info.get(info, None) if not extra_info: raise SyntaxError("The INFO field {0} is not specified in vcf"\ " header. {1}".format(info, line)) try: check_info_annotation(annotation, info, extra_info, alternatives, individuals) except SyntaxError as e: logger.critical(e) logger.info("Line:{0}".format(line)) raise e variant['info_dict'] = info_dict #################### Some fields require special parsing ########################### ##### VEP ANNOTATIONS ##### if 'CSQ' in info_dict: vep_columns = header_parser.vep_columns variant['vep_info'] = build_vep_annotation( info_dict['CSQ'], variant['REF'], alternatives, vep_columns ) ##### GENMOD ANNOTATIONS ##### if 'GeneticModels' in info_dict: variant['genetic_models'] = build_models_dict( info_dict['GeneticModels']) if 'Compounds' in info_dict: variant['compound_variants'] = build_compounds_dict( info_dict['Compounds']) if 'RankScore' in info_dict: variant['rank_scores'] = build_rank_score_dict( info_dict['RankScore']) if 'IndividualRankScore' in info_dict: variant['individual_scores'] = build_rank_score_dict( info_dict['IndividualRankScore']) ##### GENOTYPE ANNOTATIONS ##### gt_format = variant.get('FORMAT', '').split(':') genotype_dict = {} for individual in individuals: gt_info = variant[individual].split(':') gt_call = dict(zip(gt_format, gt_info)) #Create a genotype object for this individual genotype_dict[individual] = Genotype(**gt_call) variant['genotypes'] = genotype_dict variant['variant_id'] = '_'.join( [ variant['CHROM'], variant['POS'], variant['REF'], alternatives[0] ] ) return variant
[ "\n Yield the variant in the right format. \n \n If the variants should be splitted on alternative alles one variant \n for each alternative will be yielded.\n \n Arguments:\n line (str): A string that represents a variant line in the vcf format\n header_parser (HeaderParser): A HeaderParser object\n check_info (bool): If the info fields should be checked\n \n Yields:\n variant (dict): A dictionary with the variant information. The number\n of variants yielded depends on if split variant is used\n and how many alternatives there are\n \n " ]
Please provide a description of the function:def build_info_string(info): info_list = [] for annotation in info: if info[annotation]: info_list.append('='.join([annotation, ','.join(info[annotation])])) else: info_list.append(annotation) return ';'.join(info_list)
[ "\n Build a new vcf INFO string based on the information in the info_dict.\n \n The info is a dictionary with vcf info keys as keys and lists of vcf values\n as values. If there is no value False is value in info\n \n Args:\n info (dict): A dictionary with information from the vcf file\n \n Returns:\n String: A string that is on the proper vcf format for the INFO column\n \n " ]
Please provide a description of the function:def build_info_dict(vcf_info): logger = logging.getLogger(__name__) logger.debug("Building info dict") info_dict = OrderedDict() for info in vcf_info.split(';'): info = info.split('=') if len(info) > 1: # If the INFO entry is like key=value, we store the value as a list info[1] = '='.join(info[1:]) info_dict[info[0]] = info[1].split(',') else: info_dict[info[0]] = [] return info_dict
[ "\n Build a dictionary from the info of a vcf line\n \n The dictionary will have the info keys as keys and info values as values.\n Values will allways be lists that are splitted on ','\n \n Arguments:\n vcf_info (str): A string with vcf info\n \n Returns:\n info_dict (OrderedDict): A ordered dictionary with the vcf info keys as \n keys and lists of values as values\n " ]
Please provide a description of the function:def split_genotype(genotype, gt_format, alternative_number, allele_symbol = '0'): logger = getLogger(__name__) logger.info("Allele symbol {0}".format(allele_symbol)) splitted_genotype = genotype.split(':') logger.debug("Parsing genotype {0}".format(splitted_genotype)) splitted_gt_format = gt_format.split(':') logger.debug("Parsing gt format {0}".format(splitted_gt_format)) new_genotype = [] phased = False for number, genotype_info in enumerate(splitted_genotype): gt_info = splitted_gt_format[number] if gt_info == 'GT': if '/' in genotype_info: gt = genotype_info.split('/') else: gt = genotype_info.split('|') phased = True ref_allele = '.' alt_allele = '.' try: # Check the ref Allele if len(gt) == 2 and gt[0] != '.' and gt[1] != '.': ref_allele = allele_symbol alt_allele = allele_symbol if gt[0] == gt[1]: # In this case we have a homozygous call: if int(gt[0]) == alternative_number + 1: ref_allele = '1' alt_allele = '1' else: if (int(gt[0]) == alternative_number + 1 or int(gt[1]) == alternative_number + 1): alt_allele = '1' else: # We now know that at least one of the alleles are uncalled if gt[0] != '.': if int(gt[0]) == alternative_number + 1: ref_allele = '1' else: ref_allele = '0' elif len(gt) == 2 and gt[1] != '.': if int(gt[1]) == alternative_number + 1: alt_allele = '1' else: alt_allele = '0' except (ValueError, KeyError): pass if len(gt) == 2: if phased: new_genotype.append('|'.join([ref_allele,alt_allele])) else: new_genotype.append('/'.join([ref_allele,alt_allele])) else: new_genotype.append(ref_allele) elif gt_info == 'AD': ad = [] # The reference depth will allways be the original depth now ad.append(genotype_info.split(',')[0]) try: ad.append(genotype_info.split(',')[alternative_number+1]) except IndexError: ad.append('0') new_genotype.append(','.join(ad)) elif gt_info == 'DP': new_genotype.append(genotype_info) elif gt_info == 'PL': new_genotype.append(genotype_info) else: # There are several cases that we do not know how to handle yet so we just add the information new_genotype.append(genotype_info) return ':'.join(new_genotype)
[ "\n Take a genotype call and make a new one that is working for the new\n splitted variant\n \n Arguments:\n genotype (str): The original genotype call\n gt_format (str): The format of the gt call\n alternative_number (int): What genotype call should we return\n allele_symbol (str): How should the unobserved allele be represented\n when genotype is splitted\n \n Returns:\n new_genotype (str): A string that represents the new genotype\n " ]
Please provide a description of the function:def parse_header_line(self, line): self.header = line[1:].rstrip().split('\t') if len(self.header) < 9: self.header = line[1:].rstrip().split() self.individuals = self.header[9:]
[ "docstring for parse_header_line" ]
Please provide a description of the function:def print_header(self): lines_to_print = [] lines_to_print.append('##fileformat='+self.fileformat) if self.filedate: lines_to_print.append('##fileDate='+self.filedate) for filt in self.filter_dict: lines_to_print.append(self.filter_dict[filt]) for form in self.format_dict: lines_to_print.append(self.format_dict[form]) for info in self.info_dict: lines_to_print.append(self.info_dict[info]) for contig in self.contig_dict: lines_to_print.append(self.contig_dict[contig]) for alt in self.alt_dict: lines_to_print.append(self.alt_dict[alt]) for other in self.other_dict: lines_to_print.append(self.other_dict[other]) lines_to_print.append('#'+ '\t'.join(self.header)) return lines_to_print
[ "Returns a list with the header lines if proper format" ]
Please provide a description of the function:def add_info(self, info_id, number, entry_type, description): info_line = '##INFO=<ID={0},Number={1},Type={2},Description="{3}">'.format( info_id, number, entry_type, description ) self.logger.info("Adding info line to vcf: {0}".format(info_line)) self.parse_meta_data(info_line) return
[ "\n Add an info line to the header.\n \n Arguments:\n info_id (str): The id of the info line\n number (str): Integer or any of [A,R,G,.]\n entry_type (str): Any of [Integer,Float,Flag,Character,String]\n description (str): A description of the info line\n \n " ]
Please provide a description of the function:def add_version_tracking(self, info_id, version, date, command_line=''): other_line = '##Software=<ID={0},Version={1},Date="{2}",CommandLineOptions="{3}">'.format( info_id, version, date, command_line) self.other_dict[info_id] = other_line return
[ "\n Add a line with information about which software that was run and when \n to the header.\n \n Arguments:\n info_id (str): The id of the info line\n version (str): The version of the software used\n date (str): Date when software was run\n command_line (str): The command line that was used for run\n \n " ]
Please provide a description of the function:def build_compounds_dict(compounds): logger = getLogger(__name__) logger.debug("Parsing compounds: {0}".format(compounds)) parsed_compounds = {} for family_info in compounds: logger.debug("Parsing entry {0}".format(family_info)) splitted_family_info = family_info.split(':') family_id = splitted_family_info[0] logger.debug("Found family {0}".format(family_id)) parsed_compounds[family_id] = [] compound_list = splitted_family_info[1].split('|') for compound in compound_list: compound_id = compound.split('>')[0] try: compound_score = compound.split('>')[1] except IndexError: compound_score = None parsed_compounds[family_id].append( { 'variant_id': compound_id, 'compound_score': compound_score } ) return parsed_compounds
[ "\n Take a list with annotated compound variants for each family and \n returns a dictionary with family_id as key and a list of dictionarys\n that holds the information about the compounds.\n \n Args:\n compounds : A list that can be either on the form \n [\n '1:1_23_A_C|1_24_T_A',\n '2:1_24_T_A'\n ]\n or if the compounds are scored:\n [\n '1:1_23_A_C>24|1_24_T_A>19',\n '2:1_24_T_A>17'\n ]\n \n Returns:\n parsed_compounds : A dictionary on the form\n {\n 1:[\n {\n 'variant_id':'1_23_A_C',\n 'compound_score':24\n },\n {\n 'variant_id':'1_24_T_A',\n 'compound_score:'19\n },\n ],\n 2:[\n {'variant_id':'1_24_T_A',\n 'compound_score':17\n }\n ]\n }\n \n " ]
Please provide a description of the function:def cli(variant_file, vep, split, outfile, verbose, silent, check_info, allele_symbol, logfile, loglevel): from vcf_parser import logger, init_log if not loglevel: if verbose: loglevel = 'INFO' init_log(logger, logfile, loglevel) nr_of_variants = 0 start = datetime.now() # with open(variant_file, 'r', encoding="utf-8") as f: # for line in f: # if not line.startswith('#'): # nr_of_variants += 1 if variant_file == '-': logger.info("Start parsing variants from stdin") my_parser = VCFParser( fsock=sys.stdin, split_variants=split, check_info=check_info, allele_symbol=allele_symbol ) else: logger.info("Start parsing variants from file {0}".format(variant_file)) my_parser = VCFParser( infile = variant_file, split_variants=split, check_info=check_info, allele_symbol=allele_symbol ) if outfile: f = open(outfile, 'w', encoding='utf-8') logger.info("Printing vcf to file {0}".format(outfile)) if not silent: logger.info("Printing vcf to stdout") else: logger.info("Skip printing since silent is active") for line in my_parser.metadata.print_header(): if outfile: f.write(line+'\n') else: if not silent: print(line) try: for variant in my_parser: variant_line = '\t'.join([variant[head] for head in my_parser.header]) if outfile: f.write(variant_line + '\n') else: if not silent: print(variant_line) nr_of_variants += 1 except SyntaxError as e: print(e) logger.info('Number of variants: {0}'.format(nr_of_variants)) logger.info('Time to parse file: {0}'.format(str(datetime.now() - start)))
[ "\n Tool for parsing vcf files.\n \n Prints the vcf file to output. \n If --split/-s is used all multiallelic calls will be splitted and printed \n as single variant calls.\n For more information, please see github.com/moonso/vcf_parser.\n " ]
Please provide a description of the function:def cli(variant_file, vep, split): from datetime import datetime from pprint import pprint as pp if variant_file == '-': my_parser = VCFParser(fsock=sys.stdin, split_variants=split) else: my_parser = VCFParser(infile = variant_file, split_variants=split) start = datetime.now() nr_of_variants = 0 for line in my_parser.metadata.print_header(): print(line) for variant in my_parser: pp(variant) nr_of_variants += 1 print('Number of variants: %s' % nr_of_variants)
[ "Parses a vcf file.\\n\n \\n\n Usage:\\n\n parser infile.vcf\\n\n If pipe:\\n\n parser - \n " ]
Please provide a description of the function:def add_variant(self, chrom, pos, rs_id, ref, alt, qual, filt, info, form=None, genotypes=[]): variant_info = [chrom, pos, rs_id, ref, alt, qual, filt, info] if form: variant_info.append(form) for individual in genotypes: variant_info.append(individual) variant_line = '\t'.join(variant_info) variant = format_variant( line = variant_line, header_parser = self.metadata, check_info = self.check_info ) if not (self.split_variants and len(variant['ALT'].split(',')) > 1): self.variants.append(variant) # If multiple alternative and split_variants we must split the variant else: for splitted_variant in split_variants( variant_dict=variant, header_parser=self.metadata, allele_symbol=self.allele_symbol): self.variants.append(splitted_variant)
[ "\n Add a variant to the parser.\n \n This function is for building a vcf. It takes the relevant parameters \n and make a vcf variant in the proper format.\n " ]
Please provide a description of the function:def build_vep_string(vep_info, vep_columns): logger = getLogger(__name__) logger.debug("Building vep string from {0}".format(vep_info)) logger.debug("Found vep headers {0}".format(vep_columns)) vep_strings = [] for vep_annotation in vep_info: try: vep_info_list = [ vep_annotation[vep_key] for vep_key in vep_columns ] except KeyError: raise SyntaxError("Vep entry does not correspond to vep headers") vep_strings.append('|'.join(vep_info_list)) return ','.join(vep_strings)
[ "\n Build a vep string formatted string.\n \n Take a list with vep annotations and build a new vep string\n \n Args:\n vep_info (list): A list with vep annotation dictionaries\n vep_columns (list): A list with the vep column names found in the\n header of the vcf\n \n Returns:\n string: A string with the proper vep annotations\n \n " ]
Please provide a description of the function:def build_vep_annotation(csq_info, reference, alternatives, vep_columns): logger = getLogger(__name__) # The keys in the vep dict are the vcf formatted alternatives, values are the # dictionaries with vep annotations vep_dict = {} # If we have several alternatives we need to check what types of # alternatives we have vep_to_vcf = {} number_of_deletions = 0 for alternative in alternatives: if len(alternative) < len(reference): number_of_deletions += 1 logger.debug("Number of deletions found: {0}".format(number_of_deletions)) for alternative in alternatives: # We store the annotations with keys from the vcf alternatives vep_dict[alternative] = [] # If substitutuion reference and alternative have the same length if len(alternative) == len(reference): vep_to_vcf[alternative] = alternative # If deletion alternative is shorter that the reference else: # If there is a deletion then the alternative will be '-' in vep entry if len(alternative) == 1: vep_to_vcf['-'] = alternative else: vep_to_vcf[alternative[1:]] = alternative for vep_annotation in csq_info: logger.debug("Parsing vep annotation: {0}".format(vep_annotation)) splitted_vep = vep_annotation.split('|') if len(splitted_vep) != len(vep_columns): raise SyntaxError("Csq info for variant does not match csq info in "\ "header. {0}, {1}".format( '|'.join(splitted_vep), '|'.join(vep_columns))) # Build the vep dict: vep_info = dict(zip(vep_columns, splitted_vep)) # If no allele is found we can not determine what allele if vep_info.get('Allele', None): vep_allele = vep_info['Allele'] try: vcf_allele = vep_to_vcf[vep_allele] except KeyError as e: vcf_allele = vep_allele if vcf_allele in vep_dict: vep_dict[vcf_allele].append(vep_info) else: vep_dict[vcf_allele] = [vep_info] else: logger.warning("No allele found in vep annotation! Skipping annotation") return vep_dict
[ "\n Build a dictionary with the vep information from the vep annotation.\n \n Indels are handled different by vep depending on the number of \n alternative alleles there is for a variant.\n \n If only one alternative:\n \n Insertion: vep represents the alternative by removing the first \n base from the vcf alternative.\n \n Deletion: vep represents the alternative with '-'\n \n If there are several alternatives:\n \n Insertion: \n vep represents the alternative by removing the first \n base from the vcf alternative(Like above).\n \n Deletion: \n If there are multiple alternative deletions vep represents them by \n removing the first base from the vcf alternative.\n If the vcf line looks like:\n 1 970549 . TGGG TG,TGG\n vep annotation for alternatives will be: G,GG\n \n Args:\n csq_info (list): A list with the raw vep annotations from the vcf line.\n reference (str): A string that represents the vcf reference\n alternatives (list): A list of strings that represents the vcf formated\n alternatives\n vep_columns (list): A list of strings that represents the vep comluns\n defined in the vcf header.\n \n Returns:\n vep_dict (dict): A dictionary with the alternative alleles (in vcf form)\n as keys and a list of annotations for each alternative \n alleles. \n One key named 'gene_ids', \n value is a set with the genes found. \n " ]
Please provide a description of the function:def user_login(self, email=None, password=None): email = six.moves.input("Email: ") if email is None else email password = getpass.getpass() if password is None else password login_data = { "method": "user.login", "params": {"email": email, "pass": password} } # If the user/password match, the server respond will contain a # session cookie that you can use to authenticate future requests. r = self.session.post( self.base_api_urls["logic"], data=json.dumps(login_data), ) if r.json()["result"] not in ["OK"]: raise AuthenticationError("Could not authenticate.\n{}" .format(r.json()))
[ "Login with email, password and get back a session cookie\n\n :type email: str\n :param email: The email used for authentication\n :type password: str\n :param password: The password used for authentication\n " ]
Please provide a description of the function:def demo_login(self, auth=None, url=None): assert all([ auth or url, # Must provide at least one not (auth and url) # Cannot provide more than one ]) if url is None: url = "https://piazza.com/demo_login" params = dict(nid=self._nid, auth=auth) res = self.session.get(url, params=params) else: res = self.session.get(url)
[ "Authenticate with a \"Share Your Class\" URL using a demo user.\n\n You may provide either the entire ``url`` or simply the ``auth``\n parameter.\n\n :param url: Example - \"https://piazza.com/demo_login?nid=hbj11a1gcvl1s6&auth=06c111b\"\n :param auth: Example - \"06c111b\"\n " ]
Please provide a description of the function:def content_get(self, cid, nid=None): r = self.request( method="content.get", data={"cid": cid}, nid=nid ) return self._handle_error(r, "Could not get post {}.".format(cid))
[ "Get data from post `cid` in network `nid`\n\n :type nid: str\n :param nid: This is the ID of the network (or class) from which\n to query posts. This is optional and only to override the existing\n `network_id` entered when created the class\n :type cid: str|int\n :param cid: This is the post ID which we grab\n :returns: Python object containing returned data\n " ]
Please provide a description of the function:def content_create(self, params): r = self.request( method="content.create", data=params ) return self._handle_error( r, "Could not create object {}.".format(repr(params)) )
[ "Create a post or followup.\n\n :type params: dict\n :param params: A dict of options to pass to the endpoint. Depends on\n the specific type of content being created.\n :returns: Python object containing returned data\n " ]
Please provide a description of the function:def add_students(self, student_emails, nid=None): r = self.request( method="network.update", data={ "from": "ClassSettingsPage", "add_students": student_emails }, nid=nid, nid_key="id" ) return self._handle_error(r, "Could not add users.")
[ "Enroll students in a network `nid`.\n\n Piazza will email these students with instructions to\n activate their account.\n\n :type student_emails: list of str\n :param student_emails: A listing of email addresses to enroll\n in the network (or class). This can be a list of length one.\n :type nid: str\n :param nid: This is the ID of the network to add students\n to. This is optional and only to override the existing\n `network_id` entered when created the class\n :returns: Python object containing returned data, a list\n of dicts of user data of all of the users in the network\n including the ones that were just added.\n " ]
Please provide a description of the function:def get_all_users(self, nid=None): r = self.request( method="network.get_all_users", nid=nid ) return self._handle_error(r, "Could not get users.")
[ "Get a listing of data for each user in a network `nid`\n\n :type nid: str\n :param nid: This is the ID of the network to get users\n from. This is optional and only to override the existing\n `network_id` entered when created the class\n :returns: Python object containing returned data, a list\n of dicts containing user data.\n " ]
Please provide a description of the function:def get_users(self, user_ids, nid=None): r = self.request( method="network.get_users", data={"ids": user_ids}, nid=nid ) return self._handle_error(r, "Could not get users.")
[ "Get a listing of data for specific users `user_ids` in\n a network `nid`\n\n :type user_ids: list of str\n :param user_ids: a list of user ids. These are the same\n ids that are returned by get_all_users.\n :type nid: str\n :param nid: This is the ID of the network to get students\n from. This is optional and only to override the existing\n `network_id` entered when created the class\n :returns: Python object containing returned data, a list\n of dicts containing user data.\n " ]
Please provide a description of the function:def remove_users(self, user_ids, nid=None): r = self.request( method="network.update", data={"remove_users": user_ids}, nid=nid, nid_key="id" ) return self._handle_error(r, "Could not remove users.")
[ "Remove users from a network `nid`\n\n :type user_ids: list of str\n :param user_ids: a list of user ids. These are the same\n ids that are returned by get_all_users.\n :type nid: str\n :param nid: This is the ID of the network to remove students\n from. This is optional and only to override the existing\n `network_id` entered when created the class\n :returns: Python object containing returned data, a list\n of dicts of user data of all of the users remaining in\n the network after users are removed.\n " ]
Please provide a description of the function:def get_my_feed(self, limit=150, offset=20, sort="updated", nid=None): r = self.request( method="network.get_my_feed", nid=nid, data=dict( limit=limit, offset=offset, sort=sort ) ) return self._handle_error(r, "Could not retrieve your feed.")
[ "Get my feed\n\n :type limit: int\n :param limit: Number of posts from feed to get, starting from ``offset``\n :type offset: int\n :param offset: Offset starting from bottom of feed\n :type sort: str\n :param sort: How to sort feed that will be retrieved; only current\n known value is \"updated\"\n :type nid: str\n :param nid: This is the ID of the network to get the feed\n from. This is optional and only to override the existing\n `network_id` entered when created the class\n " ]
Please provide a description of the function:def filter_feed(self, updated=False, following=False, folder=False, filter_folder="", sort="updated", nid=None): assert sum([updated, following, folder]) == 1 if folder: assert filter_folder if updated: filter_type = dict(updated=1) elif following: filter_type = dict(following=1) else: filter_type = dict(folder=1, filter_folder=filter_folder) r = self.request( nid=nid, method="network.filter_feed", data=dict( sort=sort, **filter_type ) ) return self._handle_error(r, "Could not retrieve filtered feed.")
[ "Get filtered feed\n\n Only one filter type (updated, following, folder) is possible.\n\n :type nid: str\n :param nid: This is the ID of the network to get the feed\n from. This is optional and only to override the existing\n `network_id` entered when created the class\n :type sort: str\n :param sort: How to sort feed that will be retrieved; only current\n known value is \"updated\"\n :type updated: bool\n :param updated: Set to filter through only posts which have been updated\n since you last read them\n :type following: bool\n :param following: Set to filter through only posts which you are\n following\n :type folder: bool\n :param folder: Set to filter through only posts which are in the\n provided ``filter_folder``\n :type filter_folder: str\n :param filter_folder: Name of folder to show posts from; required\n only if ``folder`` is set\n " ]
Please provide a description of the function:def search(self, query, nid=None): r = self.request( method="network.search", nid=nid, data=dict(query=query) ) return self._handle_error(r, "Search with query '{}' failed." .format(query))
[ "Search for posts with ``query``\n\n :type nid: str\n :param nid: This is the ID of the network to get the feed\n from. This is optional and only to override the existing\n `network_id` entered when created the class\n :type query: str\n :param query: The search query; should just be keywords for posts\n that you are looking for\n " ]
Please provide a description of the function:def get_stats(self, nid=None): r = self.request( api_type="main", method="network.get_stats", nid=nid, ) return self._handle_error(r, "Could not retrieve stats for class.")
[ "Get statistics for class\n\n :type nid: str\n :param nid: This is the ID of the network to get stats\n from. This is optional and only to override the existing\n `network_id` entered when created the class\n " ]
Please provide a description of the function:def request(self, method, data=None, nid=None, nid_key='nid', api_type="logic", return_response=False): self._check_authenticated() nid = nid if nid else self._nid if data is None: data = {} headers = {} if "session_id" in self.session.cookies: headers["CSRF-Token"] = self.session.cookies["session_id"] # Adding a nonce to the request endpoint = self.base_api_urls[api_type] if api_type == "logic": endpoint += "?method={}&aid={}".format( method, _piazza_nonce() ) response = self.session.post( endpoint, data=json.dumps({ "method": method, "params": dict({nid_key: nid}, **data) }), headers=headers ) return response if return_response else response.json()
[ "Get data from arbitrary Piazza API endpoint `method` in network `nid`\n\n :type method: str\n :param method: An internal Piazza API method name like `content.get`\n or `network.get_users`\n :type data: dict\n :param data: Key-value data to pass to Piazza in the request\n :type nid: str\n :param nid: This is the ID of the network to which the request\n should be made. This is optional and only to override the\n existing `network_id` entered when creating the class\n :type nid_key: str\n :param nid_key: Name expected by Piazza for `nid` when making request.\n (Usually and by default \"nid\", but sometimes \"id\" is expected)\n :returns: Python object containing returned data\n :type return_response: bool\n :param return_response: If set, returns whole :class:`requests.Response`\n object rather than just the response body\n " ]
Please provide a description of the function:def _handle_error(self, result, err_msg): if result.get(u'error'): raise RequestError("{}\nResponse: {}".format( err_msg, json.dumps(result, indent=2) )) else: return result.get(u'result')
[ "Check result for error\n\n :type result: dict\n :param result: response body\n :type err_msg: str\n :param err_msg: The message given to the :class:`RequestError` instance\n raised\n :returns: Actual result from result\n :raises RequestError: If result has error\n " ]
Please provide a description of the function:def user_login(self, email=None, password=None): self._rpc_api = PiazzaRPC() self._rpc_api.user_login(email=email, password=password)
[ "Login with email, password and get back a session cookie\n\n :type email: str\n :param email: The email used for authentication\n :type password: str\n :param password: The password used for authentication\n " ]
Please provide a description of the function:def demo_login(self, auth=None, url=None): self._rpc_api = PiazzaRPC() self._rpc_api.demo_login(auth=auth, url=url)
[ "Authenticate with a \"Share Your Class\" URL using a demo user.\n\n You may provide either the entire ``url`` or simply the ``auth``\n parameter.\n\n :param url: Example - \"https://piazza.com/demo_login?nid=hbj11a1gcvl1s6&auth=06c111b\"\n :param auth: Example - \"06c111b\"\n " ]
Please provide a description of the function:def network(self, network_id): self._ensure_authenticated() return Network(network_id, self._rpc_api.session)
[ "Returns :class:`Network` instance for ``network_id``\n\n :type network_id: str\n :param network_id: This is the ID of the network.\n This can be found by visiting your class page\n on Piazza's web UI and grabbing it from\n https://piazza.com/class/{network_id}\n " ]
Please provide a description of the function:def get_user_classes(self): # Previously getting classes from profile (such a list is incomplete) # raw_classes = self.get_user_profile().get('all_classes').values() # Get classes from the user status (includes all classes) status = self.get_user_status() uid = status['id'] raw_classes = status.get('networks', []) classes = [] for rawc in raw_classes: c = {k: rawc[k] for k in ['name', 'term']} c['num'] = rawc.get('course_number', '') c['nid'] = rawc['id'] c['is_ta'] = uid in rawc['prof_hash'] classes.append(c) return classes
[ "Get list of the current user's classes. This is a subset of the\n information returned by the call to ``get_user_status``.\n\n :returns: Classes of currently authenticated user\n :rtype: list\n " ]
Please provide a description of the function:def nonce(): nonce_part1 = _int2base(int(_time()*1000), 36) nonce_part2 = _int2base(round(_random()*1679616), 36) return "{}{}".format(nonce_part1, nonce_part2)
[ "\n Returns a new nonce to be used with the Piazza API.\n " ]
Please provide a description of the function:def _int2base(x, base): if base > len(_exradix_digits): raise ValueError( "Base is too large: The defined digit set only allows for " "bases smaller than " + str(len(_exradix_digits)) + "." ) if x > 0: sign = 1 elif x == 0: return _exradix_digits[0] else: sign = -1 x *= sign digits = [] while x: digits.append( _exradix_digits[int(x % base)]) x = int(x / base) if sign < 0: digits.append('-') digits.reverse() return ''.join(digits)
[ "\n Converts an integer from base 10 to some arbitrary numerical base,\n and return a string representing the number in the new base (using\n letters to extend the numerical digits).\n\n :type x: int\n :param x: The integer to convert\n :type base: int\n :param base: The base to convert the integer to\n :rtype: str\n :returns: Dictionary with all data on the post\n " ]
Please provide a description of the function:def iter_all_posts(self, limit=None): feed = self.get_feed(limit=999999, offset=0) cids = [post['id'] for post in feed["feed"]] if limit is not None: cids = cids[:limit] for cid in cids: yield self.get_post(cid)
[ "Get all posts visible to the current user\n\n This grabs you current feed and ids of all posts from it; each post\n is then individually fetched. This method does not go against\n a bulk endpoint; it retrieves each post individually, so a\n caution to the user when using this.\n\n :type limit: int|None\n :param limit: If given, will limit the number of posts to fetch\n before the generator is exhausted and raises StopIteration.\n No special consideration is given to `0`; provide `None` to\n retrieve all posts.\n :returns: An iterator which yields all posts which the current user\n can view\n :rtype: generator\n " ]
Please provide a description of the function:def create_post(self, post_type, post_folders, post_subject, post_content, is_announcement=0, bypass_email=0, anonymous=False): params = { "anonymous": "yes" if anonymous else "no", "subject": post_subject, "content": post_content, "folders": post_folders, "type": post_type, "config": { "bypass_email": bypass_email, "is_announcement": is_announcement } } return self._rpc.content_create(params)
[ "Create a post\n\n It seems like if the post has `<p>` tags, then it's treated as HTML,\n but is treated as text otherwise. You'll want to provide `content`\n accordingly.\n\n :type post_type: str\n :param post_type: 'note', 'question'\n :type post_folders: str\n :param post_folders: Folder to put post into\n :type post_subject: str\n :param post_subject: Subject string\n :type post_content: str\n :param post_content: Content string\n :type is_announcement: bool\n :param is_announcement:\n :type bypass_email: bool\n :param bypass_email:\n :type anonymous: bool\n :param anonymous:\n :rtype: dict\n :returns: Dictionary with information about the created post.\n " ]
Please provide a description of the function:def create_followup(self, post, content, anonymous=False): try: cid = post["id"] except KeyError: cid = post params = { "cid": cid, "type": "followup", # For followups, the content is actually put into the subject. "subject": content, "content": "", "anonymous": "yes" if anonymous else "no", } return self._rpc.content_create(params)
[ "Create a follow-up on a post `post`.\n\n It seems like if the post has `<p>` tags, then it's treated as HTML,\n but is treated as text otherwise. You'll want to provide `content`\n accordingly.\n\n :type post: dict|str|int\n :param post: Either the post dict returned by another API method, or\n the `cid` field of that post.\n :type content: str\n :param content: The content of the followup.\n :type anonymous: bool\n :param anonymous: Whether or not to post anonymously.\n :rtype: dict\n :returns: Dictionary with information about the created follow-up.\n " ]
Please provide a description of the function:def create_instructor_answer(self, post, content, revision, anonymous=False): try: cid = post["id"] except KeyError: cid = post params = { "cid": cid, "type": "i_answer", "content": content, "revision": revision, "anonymous": "yes" if anonymous else "no", } return self._rpc.content_instructor_answer(params)
[ "Create an instructor's answer to a post `post`.\n\n It seems like if the post has `<p>` tags, then it's treated as HTML,\n but is treated as text otherwise. You'll want to provide `content`\n accordingly.\n\n :type post: dict|str|int\n :param post: Either the post dict returned by another API method, or\n the `cid` field of that post.\n :type content: str\n :param content: The content of the answer.\n :type revision: int\n :param revision: The number of revisions the answer has gone through.\n The first responder should out 0, the first editor 1, etc.\n :type anonymous: bool\n :param anonymous: Whether or not to post anonymously.\n :rtype: dict\n :returns: Dictionary with information about the created answer.\n " ]