Python
def fetchChannelList(self, types=["public_channel", "private_channel", "mpim", "im"]):
    """
    Method returns a list of all channel-like conversations in a workspace.

    Args:
        types:list<str>: Mix and match channel types by providing a
            comma-separated list of any combination of public_channel,
            private_channel, mpim, im
    """
    channels = []
    logger.debug("fetch channel list for types " + str(types))
    rsp = self.client.api_call(
        "conversations.list",
        types=", ".join(types)
    )
    if rsp["ok"]:
        for cha in rsp["channels"]:
            cha_id = cha["id"]
            channels.append(cha_id)
    else:
        logger.error(json.dumps(rsp, indent=2))
    return channels
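Note that the default value of types above is a mutable list. The function never mutates it, so it is harmless here, but the general pitfall is worth keeping in mind; a minimal, self-contained sketch of what goes wrong when such a default is mutated:

def fetch(types=["public_channel"]):   # the default list is created once, at function definition time
    types.append("im")                 # mutating it leaks state into every later call
    return types

print(fetch())  # ['public_channel', 'im']
print(fetch())  # ['public_channel', 'im', 'im']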
Python
def fetchChannelHistory(self, channel):
    """
    This method returns a list of all messages from the specified
    conversation, latest to oldest.

    Args:
        channel:str: channel id
    Returns:
        msgs:list<str>: list of messages in the form of "date user: text"
    """
    hasMore = True
    cur_cursor = ""
    msgs = []
    sleep(0.5)  # don't spam the server if too much history is fetched
    while hasMore:
        logger.debug("fetch conversation history from " + channel)
        rsp = self.client.api_call(
            "conversations.history",
            channel=channel,
            cursor=cur_cursor
        )
        if rsp["ok"]:
            logger.debug("has more: " + str(rsp["has_more"]))
            hasMore = rsp["has_more"]
            for msg in rsp["messages"]:
                user = self.userIdDict[msg["user"]]  # user real_name
                text = msg["text"]
                ts = int(msg["ts"].split('.')[0])  # unix timestamp
                date = datetime.utcfromtimestamp(
                    ts).strftime('%Y-%m-%d %H:%M:%S')
                msgs.append("{} {}: {}".format(date, user, text))
            logger.debug("added {} messages to history from {}".format(
                len(msgs), channel))
            if hasMore:  # get next cursor
                cur_cursor = rsp["response_metadata"]["next_cursor"]
        else:
            hasMore = False
            logger.error(json.dumps(rsp, indent=2))
    return msgs
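The timestamp handling above relies on datetime.utcfromtimestamp, which is deprecated since Python 3.12; a minimal, self-contained sketch of the same conversion using a timezone-aware datetime (the ts value is a made-up example):

from datetime import datetime, timezone

ts = "1609459200.000200"                       # hypothetical Slack "ts" field
seconds = int(ts.split('.')[0])                # strip the sub-second suffix
date = datetime.fromtimestamp(seconds, tz=timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
print(date)                                    # 2021-01-01 00:00:00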
Python
def enter_rtm_loop(self, retry=1):
    """
    Starts the real time messaging loop
    """
    try:
        if self.client.rtm_connect(with_team_state=False):
            logger.info("Connected to rtm api...")
            online = True
            while online:
                event = self.client.rtm_read()
                self._parse_rtm_event(event)
                sleep(1)
        else:
            logger.error("Connection Failed")
    except TimeoutError:
        logger.error("Connection timeout!")
        if retry > 0:
            logger.info("try to reconnect: " + str(retry))
            self.enter_rtm_loop(retry=(retry - 1))
Python
def _parse_rtm_event(self, event):
    """
    Try to parse a JSON response and handle it.
    A list of possible events and response formats can be found at
    https://api.slack.com/rtm

    Args:
        event:json: JSON response from the rtm websocket
    """
    if len(event) == 0:
        return  # got nothing, pass on
    rsp = event[0]  # rtm event comes as a list with one or no elements
    try:
        if rsp["type"] == "message":  # got a message
            if "subtype" in rsp:  # has a subtype
                if rsp["subtype"] == "message_deleted":  # message deleted
                    msg = rsp["previous_message"]["text"]
                    logger.info("\"{}\" got deleted!".format(msg))
                elif rsp["subtype"] == "message_changed":  # message changed
                    old = rsp["previous_message"]["text"]
                    new = rsp["message"]["text"]
                    logger.info(
                        "\"{}\" got changed to \"{}\"".format(old, new))
                else:  # unexpected rsp
                    logger.warning(json.dumps(event, indent=2))
            else:  # regular message
                msg = rsp["text"]
                userId = rsp["user"]
                logger.info("msg: \"{}\" from \"{}\"".format(
                    msg, self.userIdDict[userId]))
                if msg.startswith("."):  # msg is a command
                    self._parse_command(msg, userId)
        elif rsp["type"] == "hello":  # server hello
            logger.debug("got hello from server")
        elif rsp["type"] == "user_typing":  # user typing
            logger.info("{} is typing".format(
                self.userIdDict[rsp["user"]]))
        elif rsp["type"] == "user_change":  # user changed
            name = rsp["user"]["real_name"]
            status = rsp["user"]["profile"]["status_text"]
            # on the assumption that this user entered or left a call
            logger.info("{} changed. Status: {}".format(name, status))
            logger.debug(json.dumps(event, indent=2))
        elif rsp["type"] == "desktop_notification":  # notification
            logger.info("desktop_notification")
        else:
            logger.warning(json.dumps(event, indent=2))  # unexpected rsp
    except KeyError as ke:
        logger.error("KeyError: " + str(ke))
        logger.error(json.dumps(event, indent=2))
Python
def _system_call(self, cmd, timeout=5):
    """
    Calls a given command on the system. DANGEROUS!
    Don't let user input run arbitrary commands.

    Args:
        cmd:list<str>: First element of the list is the command,
            following elements are its arguments.
        timeout:int: Seconds before the execution is aborted.
    Returns:
        (out, err): tuple of stdout and stderr from the command
    """
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    return proc.communicate(timeout=timeout)
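communicate(timeout=...) raises subprocess.TimeoutExpired when the deadline is hit, which the snippet above leaves to the caller; a minimal sketch of the handling pattern recommended in the subprocess documentation (the command shown is only an example):

import subprocess

proc = subprocess.Popen(["sleep", "10"],            # example long-running command
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
try:
    out, err = proc.communicate(timeout=2)
except subprocess.TimeoutExpired:
    proc.kill()                                     # terminate the runaway child
    out, err = proc.communicate()                   # collect whatever output was produced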
Python
def handle_pull_request(job: PullRequestJob):
    """Analyse and handle a pull request that has just been updated."""
    if job.pull_request.author == job.settings.robot:
        return handle_parent_pull_request(job, job.pull_request)
    try:
        _handle_pull_request(job)
    except messages.TemplateException as err:
        send_comment(job.settings, job.pull_request, err)
        raise
Python
def handle_commit(job: CommitJob):
    """Handle a job triggered by an updated build status."""
    candidates = [
        branch_factory(job.git.repo, b)
        for b in job.git.repo.get_branches_from_commit(job.commit)
    ]
    if not candidates:
        raise messages.NothingToDo(
            'Could not find any branch for commit {}'
            .format(job.commit)
        )

    if job.settings.use_queue:
        if any(isinstance(b, QueueBranch) for b in candidates):
            return queueing.handle_merge_queues(QueuesJob(bert_e=job.bert_e))

    def get_parent_branch(branch):
        if isinstance(branch, IntegrationBranch):
            return branch.feature_branch
        else:
            return branch.name

    candidates = list(map(get_parent_branch, candidates))

    prs = list(
        job.project_repo.get_pull_requests(src_branch=candidates)
    )
    if not prs:
        raise messages.NothingToDo(
            'Could not find the main pull request for commit {}'.format(
                job.commit)
        )

    pr = min(prs, key=lambda pr: pr.id)
    return handle_pull_request(
        PullRequestJob(
            bert_e=job.bert_e,
            pull_request=job.project_repo.get_pull_request(int(pr.id))
        )
    )
Python
def handle_parent_pull_request(job, child_pr, is_child=True):
    """Handle the parent of an integration pull request."""
    if is_child:
        ids = re.findall(r'\d+', child_pr.description)
        if not ids:
            raise messages.ParentPullRequestNotFound(child_pr.id)
        parent_id, *_ = ids
    else:
        parent_id = child_pr.id

    return handle_pull_request(
        PullRequestJob(
            bert_e=job.bert_e,
            pull_request=job.project_repo.get_pull_request(int(parent_id))
        )
    )
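The parent id is recovered from the integration pull request's description with re.findall(r'\d+', ...), keeping the first number found; a small illustration with a hypothetical description:

import re

description = "Integration branch for pull request #123, targeting development/4.3"  # hypothetical text
ids = re.findall(r'\d+', description)
# ids == ['123', '4', '3']
parent_id, *_ = ids
# parent_id == '123'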
Python
def early_checks(job):
    """Early checks to filter out pull requests where no action is needed."""
    status = job.pull_request.status
    if status not in ('OPEN', 'DECLINED'):
        raise messages.NothingToDo("The pull request is '{}'".format(status))

    src, dst = job.pull_request.src_branch, job.pull_request.dst_branch
    if not is_cascade_producer(src) or not is_cascade_consumer(dst):
        raise messages.NotMyJob(src, dst)

    if not job.git.repo.remote_branch_exists(dst):
        raise messages.WrongDestination(dst_branch=dst,
                                        active_options=job.active_options)
Python
def send_greetings(job):
    """Send welcome message to the pull request's author and set default
    tasks.
    """
    username = job.settings.robot
    if find_comment(job.pull_request, username=username):
        return

    tasks = list(reversed(job.settings.tasks))

    comment = send_comment(
        job.settings, job.pull_request, messages.InitMessage(
            bert_e=username, author=job.pull_request.author_display_name,
            status={}, active_options=job.active_options, tasks=tasks,
            frontend_url=job.bert_e.settings.frontend_url
        )
    )

    for task in tasks:
        create_task(job.settings, task, comment)
Python
def handle_comments(job):
    """Handle options and commands in the pull request's comments.

    Raises:
        UnknownCommand: if an unrecognized command is sent to BertE.
        NotEnoughCredentials: if the author of a message is trying to set
            an option or call a command they are not allowed to.
    """
    reactor = Reactor()
    admins = job.settings.admins
    pr_author = job.pull_request.author

    reactor.init_settings(job)

    prefix = '@{}'.format(job.settings.robot)
    LOG.debug('looking for prefix: %s', prefix)

    # Handle options
    # Look for options in all of the pull request's comments.
    for comment in job.pull_request.comments:
        author = comment.author
        privileged = author in admins and author != pr_author
        authored = author == pr_author
        text = comment.text
        try:
            reactor.handle_options(job, text, prefix, privileged, authored)
        except NotFound as err:
            raise messages.UnknownCommand(
                active_options=job.active_options, command=err.keyword,
                author=author, comment=text
            ) from err
        except NotPrivileged as err:
            raise messages.NotEnoughCredentials(
                active_options=job.active_options, command=err.keyword,
                author=author, self_pr=(author == pr_author), comment=text
            ) from err
        except NotAuthored as err:
            raise messages.NotAuthor(
                active_options=job.active_options, command=err.keyword,
                author=author, pr_author=pr_author, authored=authored
            ) from err
        except TypeError as err:
            raise messages.IncorrectCommandSyntax(
                extra_message=str(err), active_options=job.active_options
            ) from err

    # Handle commands
    # Look for commands in comments posted after BertE's last message.
    for comment in reversed(job.pull_request.comments):
        author = comment.author
        if author == job.settings.robot:
            return
        privileged = author in admins and author != pr_author
        text = comment.text
        try:
            reactor.handle_commands(job, text, prefix, privileged)
        except NotFound as err:
            raise messages.UnknownCommand(
                active_options=job.active_options, command=err.keyword,
                author=author, comment=text
            ) from err
        except NotPrivileged as err:
            raise messages.NotEnoughCredentials(
                active_options=job.active_options, command=err.keyword,
                author=author, self_pr=(author == pr_author), comment=text
            ) from err
Python
def check_commit_diff(job):
    """Check for divergence between a PR's source and destination branches.

    Raises:
        SourceBranchTooOld: if the branches have diverged.
    """
    threshold = job.settings.max_commit_diff
    LOG.debug('max_commit_diff: %d', job.settings.max_commit_diff)
    if threshold < 1:
        # Feature is deactivated (default)
        return

    commits = list(job.git.dst_branch.get_commit_diff(job.git.src_branch))
    LOG.debug('commit_diff: %d', len(commits))
    if len(commits) > threshold:
        raise messages.SourceBranchTooOld(
            src_branch=job.git.src_branch.name,
            dst_branch=job.git.dst_branch.name,
            threshold=threshold,
            active_options=job.active_options
        )
Python
def check_branch_compatibility(job):
    """Check that the pull request's source and destination branches are
    compatible with one another.

    For example, check that the user is not trying to merge a new feature
    into any older development/* branch.

    Raises:
        IncompatibleSourceBranchPrefix: if the prefix of the source branch
            is incorrect.
    """
    if bypass_incompatible_branch(job):
        return

    src_branch = job.git.src_branch
    for dst_branch in job.git.cascade.dst_branches:
        if src_branch.prefix not in dst_branch.allow_prefixes:
            raise messages.IncompatibleSourceBranchPrefix(
                source=src_branch,
                destination=job.git.dst_branch,
                active_options=job.active_options
            )
Python
def check_dependencies(job):
    """Check the pull request's dependencies, if any.

    Raises:
        AfterPullRequest: if the current pull request depends on other open
            pull requests to be merged.
        NothingToDo: if the wait option is set, then nothing will be checked.
    """
    if job.settings.wait:
        raise messages.NothingToDo('wait option is set')

    after_prs = job.settings.after_pull_request
    if not after_prs:
        return

    prs = []
    for pr_id in after_prs:
        try:
            prs.append(job.project_repo.get_pull_request(int(pr_id)))
        except Exception as err:
            raise messages.IncorrectPullRequestNumber(
                pr_id=pr_id, active_options=job.active_options
            ) from err

    opened = [p for p in prs if p.status == 'OPEN']
    merged = [p for p in prs if p.status == 'MERGED']
    declined = [p for p in prs if p.status == 'DECLINED']

    if len(after_prs) != len(merged):
        raise messages.AfterPullRequest(
            opened_prs=opened, declined_prs=declined,
            active_options=job.active_options
        )
Python
def handle_declined_pull_request(job):
    """The pull request was declined.

    Decline integration pull requests and clean up integration branches.

    Raises:
        PullRequestDeclined: if some cleanup was done.
        NothingToDo: if everything was already clean.
    """
    build_branch_cascade(job)
    changed = False
    src_branch = job.pull_request.src_branch
    dst_branches = job.git.cascade.dst_branches

    wbranch_names = ['w/{}/{}'.format(b.version, src_branch)
                     for b in dst_branches]

    open_prs = list(
        job.project_repo.get_pull_requests(src_branch=wbranch_names)
    )

    for name, dst_branch in zip(wbranch_names, dst_branches):
        for pr in open_prs:
            if (pr.status == 'OPEN'
                    and pr.src_branch == name
                    and pr.dst_branch == dst_branch.name):
                pr.decline()
                changed = True
                break
        wbranch = branch_factory(job.git.repo, name)
        wbranch.src_branch = src_branch
        wbranch.dst_branch = dst_branch
        if wbranch.exists():
            wbranch.remove()
            changed = True

    if changed:
        push(job.git.repo, prune=True)
        raise messages.PullRequestDeclined()
    else:
        raise messages.NothingToDo()
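The integration (w/) branch names are derived from the destination versions and the source branch name; a short illustration with hypothetical values:

src_branch = "feature/TEST-01"                 # hypothetical source branch
versions = ["4.3", "5.1", "10.0"]              # hypothetical cascade versions
wbranch_names = ['w/{}/{}'.format(v, src_branch) for v in versions]
# ['w/4.3/feature/TEST-01', 'w/5.1/feature/TEST-01', 'w/10.0/feature/TEST-01']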
Python
def check_in_sync(job, wbranches) -> bool:
    """Validate that each integration branch contains the last commit from
    its predecessor.

    Returns:
        True: if integration branches are in sync.
        False: otherwise.
    """
    prev = job.git.src_branch
    for branch in wbranches:
        if not branch.includes_commit(prev.get_latest_commit()):
            return False
        prev = branch
    return True
Python
def check_pull_request_skew(job, wbranches, child_prs):
    """Check potential skew between local commit and commit in PR.

    Three cases are possible:
    - the local commit and the commit we obtained in the PR object are
      identical; nothing to do.
    - the local commit, which has just been pushed by Bert-E, is not yet
      reflected in the PR object we obtained from Bitbucket (its cache
      mechanism means the PR is still pointing to a previous commit); the
      solution is to update the PR object with the latest commit we know of.
    - the local commit is outdated; someone else has pushed new commits on
      the integration branch, and this is reflected in the PR object; in this
      case we abort the process, and Bert-E will be called again on the new
      commits.

    Raises:
        PullRequestSkewDetected: if a skew is detected.
    """
    for branch, pull_request in zip(wbranches, child_prs):
        branch_sha1 = branch.get_latest_commit()
        pr_sha1 = pull_request.src_commit  # 12 hex hash
        if branch_sha1.startswith(pr_sha1):
            continue

        if branch.includes_commit(pr_sha1):
            LOG.warning('Skew detected (expected commit: %s, '
                        'got PR commit: %s).', branch_sha1, pr_sha1)
            LOG.warning('Updating the integration PR locally.')
            pull_request.src_commit = branch_sha1
            continue

        raise messages.PullRequestSkewDetected(pull_request.id, branch_sha1,
                                               pr_sha1)
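The skew check compares the locally known commit with the shortened hash exposed by the pull request object via a simple prefix test; a tiny, self-contained illustration (the hashes are made up):

branch_sha1 = "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"   # hypothetical full sha1 known locally
pr_sha1 = "a94a8fe5ccb1"                                   # 12-character sha1 reported by the PR object
print(branch_sha1.startswith(pr_sha1))                     # True -> no skew for this branch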
Python
def check_approvals(job):
    """Check approval of a pull request by author, peers, and leaders.

    Raises:
        - ApprovalRequired
    """
    required_peer_approvals = job.settings.required_peer_approvals
    current_peer_approvals = 0
    if bypass_peer_approval(job):
        current_peer_approvals = required_peer_approvals

    required_leader_approvals = job.settings.required_leader_approvals
    current_leader_approvals = 0
    if bypass_leader_approval(job):
        current_leader_approvals = required_leader_approvals

    approved_by_author = (
        not job.settings.need_author_approval or
        bypass_author_approval(job) or
        job.settings.approve
    )
    requires_unanimity = job.settings.unanimity
    is_unanimous = True

    if (approved_by_author and
            (current_peer_approvals >= required_peer_approvals) and
            (current_leader_approvals >= required_leader_approvals) and
            not requires_unanimity):
        return

    # NB: when author hasn't approved the PR, author isn't listed in
    # 'participants'
    username = job.settings.robot
    participants = set(job.pull_request.get_participants())
    approvals = set(job.pull_request.get_approvals())
    if job.settings.approve:
        approvals.add(job.pull_request.author)

    # Exclude Bert-E from consideration
    participants -= {username}

    leaders = set(job.settings.project_leaders)

    is_unanimous = approvals - {username} == participants
    approved_by_author |= job.pull_request.author in approvals
    current_leader_approvals += len(approvals.intersection(leaders))
    if (job.pull_request.author in leaders and
            job.pull_request.author not in approvals):
        # if a project leader creates a PR and has not approved it
        # (which is not possible on Github for example), always count
        # one additional mandatory approval
        current_leader_approvals += 1
    missing_leader_approvals = (
        required_leader_approvals - current_leader_approvals)
    peer_approvals = approvals - {job.pull_request.author}
    current_peer_approvals += len(peer_approvals)
    missing_peer_approvals = (
        required_peer_approvals - current_peer_approvals)

    change_requests = set(job.pull_request.get_change_requests())

    LOG.info('approvals: %s' % locals())

    if not approved_by_author or \
            missing_leader_approvals > 0 or \
            missing_peer_approvals > 0 or \
            (requires_unanimity and not is_unanimous) or \
            len(change_requests) > 0:
        raise messages.ApprovalRequired(
            pr=job.pull_request,
            required_leader_approvals=required_leader_approvals,
            leaders=list(leaders),
            required_peer_approvals=required_peer_approvals,
            requires_unanimity=requires_unanimity,
            requires_author_approval=job.settings.need_author_approval,
            pr_author_options=job.settings.pr_author_options,
            active_options=job.active_options,
            change_requesters=list(change_requests)
        )
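The approval counting above is mostly set arithmetic on participant and approver names; a minimal, self-contained sketch with hypothetical users showing how the individual quantities fall out:

robot = 'bert-e'                                    # hypothetical robot account
author = 'alice'                                    # hypothetical PR author
leaders = {'carol'}                                 # hypothetical project leaders

participants = {'alice', 'bob', 'carol', 'bert-e'}
approvals = {'alice', 'bob'}

participants -= {robot}                             # exclude the robot, as in the code above
is_unanimous = approvals - {robot} == participants  # False: carol has not approved
approved_by_author = author in approvals            # True
leader_approvals = len(approvals & leaders)         # 0
peer_approvals = len(approvals - {author})          # 1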
Python
def check_build_status(job, wbranches):
    """Check the build statuses of the integration pull requests.

    Raises:
        BuildFailed: if a build failed or was stopped.
        BuildNotStarted: if a build hasn't started yet.
        BuildInProgress: if a build is still in progress.
    """
    if bypass_build_status(job):
        return

    key = job.settings.build_key
    if not key:
        return

    ordered_state = {
        status: idx for idx, status in enumerate(
            ('SUCCESSFUL', 'INPROGRESS', 'NOTSTARTED', 'STOPPED', 'FAILED'))
    }

    def status(branch):
        return job.project_repo.get_build_status(
            branch.get_latest_commit(), key)

    statuses = {b.name: status(b) for b in wbranches}
    worst = max(wbranches, key=lambda b: ordered_state[statuses[b.name]])
    worst_status = statuses[worst.name]
    if worst_status in ('FAILED', 'STOPPED'):
        raise messages.BuildFailed(
            active_options=job.active_options,
            branch=worst.name,
            build_url=job.project_repo.get_build_url(
                worst.get_latest_commit(), key),
            commit_url=job.project_repo.get_commit_url(
                worst.get_latest_commit()),
        )
    elif worst_status == 'NOTSTARTED':
        raise messages.BuildNotStarted()
    elif worst_status == 'INPROGRESS':
        raise messages.BuildInProgress()
    assert worst_status == 'SUCCESSFUL'
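Picking the "worst" build status works by ranking the status strings and taking the branch with the highest rank; a small, self-contained illustration with hypothetical branch names:

ordered_state = {
    status: idx for idx, status in enumerate(
        ('SUCCESSFUL', 'INPROGRESS', 'NOTSTARTED', 'STOPPED', 'FAILED'))
}
statuses = {'w/4.3/feature/x': 'SUCCESSFUL',       # hypothetical branch -> status mapping
            'w/5.1/feature/x': 'INPROGRESS',
            'w/10.0/feature/x': 'FAILED'}
worst = max(statuses, key=lambda name: ordered_state[statuses[name]])
print(worst, statuses[worst])                      # w/10.0/feature/x FAILED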
Python
def _patch_url(self, url):
    """Patch a URL if it is relative to the API root.

    Returns:
        an absolute url corresponding to client.base_url / url
    """
    if not url.startswith('http'):
        url = '/'.join((self.base_url, url.lstrip('/')))
    return url
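The effect of the helper is simply to prepend the API root to relative paths while leaving absolute URLs untouched; a short illustration with a hypothetical base URL and path:

base_url = "https://api.github.com"                          # hypothetical API root
url = "/repos/octocat/hello-world/pulls"                     # hypothetical relative path
if not url.startswith('http'):
    url = '/'.join((base_url, url.lstrip('/')))
print(url)   # https://api.github.com/repos/octocat/hello-world/pulls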
Python
def _cache_value(self, method, url, params, res):
    """Put a request result in the query cache.

    If the response's headers contain an ETag or a Last-Modified field,
    the response can be used in subsequent calls to avoid hitting github's
    rate limit.

    Args:
        - method (str): request method (e.g. GET)
        - url (str): request url
        - params (dict): request parameter dict as per requests library's
          params argument
        - res (requests.Response): the request's response

    Returns:
        The response that was put in cache.
    """
    key = self._mk_key(url, params)
    headers = res.headers
    etag = headers.get('ETag', None)
    date = headers.get('Last-Modified', None)
    if etag or date:
        self.query_cache[method].set(key, CacheEntry(res, etag, date))
    return res
Python
def _get_cached_value(self, method, url, params):
    """Get a value from the cache if any.

    This method is intended to be called before performing an HTTP request,
    in order to define the special headers used by GitHub's rate-limit
    system.

    If the request that follows returns a HTTP 304 code, this means that:
        - the cached value returned by this method can be returned as a
          valid result
        - the request wasn't decremented from GitHub's rate limit counter

    Args:
        - method (str): request method (e.g. GET)
        - url (str): request url
        - params (dict): request parameter dict as per requests library's
          params argument

    Returns:
        A (response, headers) tuple.
        - response is the last response we've received for this request
          (possibly None).
        - headers is a dictionary defining 'If-None-Match' and
          'If-Modified-Since' headers to add to the request.

    See: the _get() method to understand how it is used.
    """
    key = self._mk_key(url, params)
    entry = self.query_cache[method].get(key, None)
    headers = {
        'If-None-Match': None,
        'If-Modified-Since': None
    }
    if entry is None:
        return None, headers
    if entry.etag:
        headers['If-None-Match'] = entry.etag
    elif entry.date:
        headers['If-Modified-Since'] = entry.date

    return entry.obj, headers
Python
def _get(self, url, **kwargs):
    """Perform a GET request using the rate-limit cache system.

    This method is not supposed to be called by other objects. Instead,
    it is wrapped by the get() and iter_get() methods.

    Returns:
        A requests.Response object
    """
    params = kwargs.get('params', {})
    url = self._patch_url(url)
    res, headers = self._get_cached_value('GET', url, params)
    if headers:
        kwargs.setdefault('headers', {}).update(headers)
    response = self.session.get(url, **kwargs)
    if response.status_code == 304:
        LOG.debug('Not Modified. Returning cached result')
        return res
    response.raise_for_status()
    return self._cache_value('GET', url, params, response)
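The two cache helpers plus _get() implement GitHub's conditional-request mechanism: replaying a request with If-None-Match set to the previously seen ETag yields a 304 without consuming the rate limit. A minimal, standalone sketch of the same idea using the requests library directly (the endpoint is chosen only as an example):

import requests

session = requests.Session()
url = "https://api.github.com/repos/octocat/hello-world"     # example endpoint

first = session.get(url)
first.raise_for_status()
etag = first.headers.get("ETag")

second = session.get(url, headers={"If-None-Match": etag})
if second.status_code == 304:
    payload = first.json()       # not modified: reuse the cached body, no rate-limit cost
else:
    second.raise_for_status()
    payload = second.json()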
Python
def post(self, url, data, **kwargs):
    """Perform a POST request to the github API.

    Args:
        same as requests.post()

    Returns:
        a deserialized json structure

    Raises:
        requests.HTTPError
    """
    url = self._patch_url(url)
    response = self.session.post(url, data=data, **kwargs)
    response.raise_for_status()
    return json.loads(response.text)
Python
def patch(self, url, data, **kwargs):
    """Perform a PATCH request to the github API.

    Args:
        same as requests.patch()

    Returns:
        a deserialized json structure

    Raises:
        requests.HTTPError
    """
    url = self._patch_url(url)
    # use the PATCH verb, as documented
    response = self.session.patch(url, data=data, **kwargs)
    response.raise_for_status()
    return json.loads(response.text)
Python
def delete(self, url, **kwargs):
    """Perform a DELETE request on the github API.

    Args:
        same as requests.delete()

    Raises:
        requests.HTTPError
    """
    url = self._patch_url(url)
    response = self.session.delete(url, **kwargs)
    response.raise_for_status()
Python
def put(self, url, **kwargs):
    """Perform a PUT request to the Github API.

    Args:
        same as requests.put()

    Raises:
        requests.HTTPError
    """
    url = self._patch_url(url)
    response = self.session.put(url, **kwargs)
    response.raise_for_status()
Python
def iter_get(self, url, per_page=100, **kwargs):
    """Perform a paginated GET request to the Github API.

    This method handles cache verification and uses conditional requests +
    a local cache to avoid consuming API calls as counted by Github's
    rate limit system.

    Args:
        - per_page: number of objects to get per page (max & default: 100)
        - same as requests.get()

    Yields:
        deserialized json structures

    Raises:
        requests.HTTPError
    """
    params = kwargs.setdefault('params', {})
    params.setdefault('per_page', per_page)
    next_page = url

    while next_page:
        response = self._get(next_page, **kwargs)
        yield from json.loads(response.text)
        next_page = None
        if 'link' not in response.headers:
            break
        for link_rel in response.headers['link'].split(','):
            link, rel = link_rel.split(';')
            if 'rel="next"' in rel.strip():
                next_page = link.strip(' <>')
                break
        # Params are already contained in the next page's url
        params.clear()
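Pagination follows the Link response header, which lists the next and last page URLs; a small, self-contained sketch of the same parsing applied to a hypothetical header value:

link_header = ('<https://api.github.com/repositories/123/pulls?page=2>; rel="next", '
               '<https://api.github.com/repositories/123/pulls?page=5>; rel="last"')  # hypothetical header

next_page = None
for link_rel in link_header.split(','):
    link, rel = link_rel.split(';')
    if 'rel="next"' in rel.strip():
        next_page = link.strip(' <>')
        break
print(next_page)   # https://api.github.com/repositories/123/pulls?page=2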
Python
def pull_request(self) -> PullRequest:
    """Get the PullRequest associated with this issue comment event."""
    pr_dict = self.data['issue'].get('pull_request')
    if pr_dict:
        try:
            return PullRequest.get(client=self.client, url=pr_dict['url'])
        except HTTPError:
            LOG.error("No pull request at url %s", pr_dict['url'])
    LOG.debug("Issue #%d is not a pull request",
              self.data['issue']['number'])
Python
def build(self, repo):
    """Collect q branches from repository, add them to the collection."""
    cmd = 'git branch -r --list origin/q/*'
    for branch in repo.cmd(cmd).split('\n')[:-1]:
        match_ = re.match(r'\s*origin/(?P<name>.*)', branch)
        if not match_:
            continue
        try:
            branch = branch_factory(repo, match_.group('name'))
        except errors.UnrecognizedBranchPattern:
            continue
        self._add_branch(branch)

    self.finalize()
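The output of git branch -r is indented and prefixed with the remote name, hence the regex; a quick illustration on a hypothetical output line:

import re

line = "  origin/q/4.3"                               # hypothetical `git branch -r` output line
match_ = re.match(r'\s*origin/(?P<name>.*)', line)
print(match_.group('name'))                           # q/4.3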
Python
def mergeable_queues(self):
    """Return a collection of queues suitable for merge.

    This only works after the collection is validated.
    """
    if self._mergeable_queues is None:
        self._process()
    return self._mergeable_queues
Python
def mergeable_prs(self):
    """Return the list of pull requests suitable for merge.

    This only works after the collection is validated.
    """
    if self._mergeable_queues is None:
        self._process()
    return self._mergeable_prs
Python
def _add_branch(self, branch):
    """Add a single branch to the queue collection."""
    if not isinstance(branch, (QueueBranch, QueueIntegrationBranch)):
        raise errors.InvalidQueueBranch(branch)
    self._validated = False
    # make sure we have a local copy of the branch
    # (enables get_latest_commit)
    branch.checkout()
    version = branch.version_t
    if version not in self._queues.keys():
        self._queues[version] = {
            QueueBranch: None,
            QueueIntegrationBranch: []
        }
        # Sort the top dict again
        self._queues = OrderedDict(sorted(self._queues.items(),
                                          key=cmp_to_key(compare_queues)))
    if isinstance(branch, QueueBranch):
        self._queues[version][QueueBranch] = branch
    else:
        self._queues[version][QueueIntegrationBranch].append(branch)
Python
def _horizontal_validation(self, version):
    """Validation of the queue collection on one given version.

    Called by validate().
    """
    masterq = self._queues[version][QueueBranch]
    # check master queue state
    if not masterq:
        yield errors.MasterQueueMissing(version)
    else:
        if not masterq.includes_commit(masterq.dst_branch):
            yield errors.MasterQueueLateVsDev(masterq, masterq.dst_branch)

        if not self._queues[version][QueueIntegrationBranch]:
            # check master queue points to dev
            if (masterq.get_latest_commit() !=
                    masterq.dst_branch.get_latest_commit()):
                yield errors.MasterQueueNotInSync(masterq,
                                                  masterq.dst_branch)
        else:
            # check state of master queue wrt the greatest integration queue
            greatest_intq = (
                self._queues[version][QueueIntegrationBranch][0]
            )
            if (greatest_intq.get_latest_commit() !=
                    masterq.get_latest_commit()):
                if greatest_intq.includes_commit(masterq):
                    yield errors.MasterQueueLateVsInt(masterq, greatest_intq)
                elif masterq.includes_commit(greatest_intq):
                    yield errors.MasterQueueYoungerThanInt(masterq,
                                                           greatest_intq)
                else:
                    yield errors.MasterQueueDiverged(masterq, greatest_intq)

        # check each integration queue contains the previous one
        nextq = masterq
        for intq in self._queues[version][QueueIntegrationBranch]:
            if not nextq.includes_commit(intq):
                yield errors.QueueInclusionIssue(nextq, intq)
            nextq = intq
        if not nextq.includes_commit(masterq.dst_branch):
            yield errors.QueueInclusionIssue(nextq, masterq.dst_branch)
Python
def _vertical_validation(self, stack, versions):
    """Validation of the queue collection on one given merge path.

    Called by validate().
    """
    prs = self._extract_pr_ids(stack)
    last_version = versions[-1]
    hf_detected = False
    if len(list(stack.keys())) == 1:
        if len(list(stack.keys())[0]) == 4:
            hf_detected = True

    # check all subsequent versions have a master queue
    has_queues = False
    for version in versions:
        if version not in stack:
            if has_queues and not hf_detected:
                yield errors.MasterQueueMissing(version)
            continue
        has_queues = True
        if not stack[version][QueueBranch]:
            yield errors.MasterQueueMissing(version)

    # check queues are sync'ed vertically and included in each other
    # (last one corresponds to same PR on all versions..., and so on)
    # other way to say it: each version has all the PR ids of the
    # previous version
    if last_version in stack:
        while stack[last_version][QueueIntegrationBranch]:
            next_vqint = stack[last_version][QueueIntegrationBranch].pop(0)
            pr = next_vqint.pr_id
            if pr not in prs:
                # early fail
                break
            for version in reversed(versions[:-1]):
                if version not in stack:
                    # supposedly finished
                    break
                if len(version) == 4:
                    # skip hf from check loop
                    continue
                if (stack[version][QueueIntegrationBranch] and
                        stack[version][QueueIntegrationBranch][0].pr_id == pr):
                    vqint = stack[version][QueueIntegrationBranch].pop(0)
                    # take this opportunity to check vertical inclusion
                    if not next_vqint.includes_commit(vqint):
                        yield errors.QueueInclusionIssue(next_vqint, vqint)
                    next_vqint = vqint
                else:
                    # this pr is supposedly entirely removed from the stack;
                    # if it comes back again, it's an error
                    break
            prs.remove(pr)

    # skip hf from stack and prs before final checks
    for version in versions:
        if len(version) == 4:
            if version not in stack:
                continue
            while stack[version][QueueIntegrationBranch]:
                pr_id = stack[version][QueueIntegrationBranch][0].pr_id
                stack[version][QueueIntegrationBranch].pop(0)
                if pr_id in prs:
                    prs.remove(pr_id)

    if prs:
        # after this algorithm prs should be empty
        yield errors.QueueInconsistentPullRequestsOrder()
    else:
        # and stack should be empty too
        for version in versions:
            if (version in stack and
                    stack[version][QueueIntegrationBranch]):
                yield errors.QueueInconsistentPullRequestsOrder()
Python
def validate(self):
        """Check the state of queues declared via add_branch.

        The following checks are performed:

        - horizontal checks: on a given branch version, each integration
          queue must include the previous one; the master queue must
          point to the last integration queue; in case there is no
          integration queue (nothing queued for this version), the
          master queue must point to the corresponding development
          branch.

        - vertical checks: across versions, on each merge path, the
          queues must be in the correct order (pr1 queued first, pr2
          then, ...); when a pr is queued in a version, it must be
          present in all the following integration queues; for a given
          pr, the queues must be included in each other.

        - completeness: in order to detect missing integration queues
          (manual deletion for example), deconstruct the master queues
          by reverting merge commits; each result should not differ in
          content from the previous integration queue; the last diff
          is checked vs the corresponding development branch. TODO

        """
        errs = []
        versions = self._queues.keys()
        if not versions:
            # no queues, cool stuff
            self._validated = True
            return

        for version in versions:
            errs.extend(self._horizontal_validation(version))

        for merge_path in self.merge_paths:
            versions = [branch.version_t for branch in merge_path]
            stack = deepcopy(self._queues)
            # remove versions not on this merge_path from consideration
            for version in list(stack.keys()):
                if version not in versions:
                    stack.pop(version)
            errs.extend(self._vertical_validation(stack, versions))

        if errs:
            raise errors.IncoherentQueues(errs)

        self._validated = True
Python
def _recursive_lookup(self, queues):
        """Given a set of queues, remove all queues that can't be merged,
        based on the build status obtained from the repository manager.

        A pull request must be removed from the list if the build on
        at least one version is FAILED, and if this failure is not
        recovered by a later pull request.

        Return once a mergeable set is identified or the set is empty.

        """
        first_failed_pr = 0
        for version in queues.keys():
            qints = queues[version][QueueIntegrationBranch]
            if qints:
                qint = qints[0]
                status = self.bbrepo.get_build_status(
                    qint.get_latest_commit(),
                    self.build_key
                )
                if status != 'SUCCESSFUL':
                    first_failed_pr = qint.pr_id
                    break

        if first_failed_pr == 0:
            # all tip queues pass, merge as is
            return

        # remove all queues that don't pass globally,
        # up to the identified failed pr, and retry
        for version in queues.keys():
            intqs = queues[version][QueueIntegrationBranch]
            if all([inq.pr_id != first_failed_pr for inq in intqs]):
                # do not pop anything if failed pr is not on the current path
                continue
            while intqs:
                intq = intqs.pop(0)
                if intq.pr_id == first_failed_pr:
                    break

        self._recursive_lookup(queues)
Python
def _extract_pr_ids(self, queues):
        """Return list of pull requests present in a set of queues.

        This is obtained by reading pr ids from the greatest development
        queue branch, so assumes that this branch contains a reference
        to all pull requests in queues (this is normally the case if
        everything was queued by Bert-E).

        Return (list): pull request ids in provided queue set
            (in the order of addition to the queue, from oldest to newest)

        """
        prs_hf = []
        prs = []
        # identify version corresponding to last dev queue
        # (i.e. ignore stab queues)
        greatest_dev = None
        for version in reversed(queues.keys()):
            if len(version) == 2 and greatest_dev is None:
                greatest_dev = version
            if len(version) == 4:
                # we may not catch the hf pr_id later from greatest_dev
                # so insert them now
                for qint in queues[version][QueueIntegrationBranch]:
                    if qint.pr_id not in prs_hf:
                        prs_hf.insert(0, qint.pr_id)
        if greatest_dev:
            for qint in queues[greatest_dev][QueueIntegrationBranch]:
                if qint.pr_id not in prs_hf + prs:
                    prs.insert(0, qint.pr_id)
        return prs_hf + prs
Python
def _remove_unmergeable(self, prs, queues): """Given a set of queues, remove all queues that are not in the provided list of pull request ids. """ for version in queues.keys(): while (queues[version][QueueIntegrationBranch] and queues[version][QueueIntegrationBranch][0].pr_id not in prs): queues[version][QueueIntegrationBranch].pop(0)
Python
def _process(self): """Given a sorted list of queues, identify most buildable series. We need to look at mergeable PRs from the point of view of all the possible merge_paths individually, then merge the results in a super-mergeable status. Populates: - _mergeable_queues (list): queues corresponding to the mergeable PRs - _mergeable_prs (list): pull requests affected by the merge """ if not self._validated: raise errors.QueuesNotValidated() mergeable_prs = self._extract_pr_ids(self._queues) if not self.force_merge: for merge_path in self.merge_paths: versions = [branch.version_t for branch in merge_path] stack = deepcopy(self._queues) # remove versions not on this merge_path from consideration for version in list(stack.keys()): # exclude hf version from this pop process if version not in versions and len(version) < 4: stack.pop(version) # obtain list of mergeable prs on this merge_path self._recursive_lookup(stack) path_mergeable_prs = self._extract_pr_ids(stack) # smallest table is the common denominator if len(path_mergeable_prs) < len(mergeable_prs): mergeable_prs = path_mergeable_prs self._mergeable_prs = mergeable_prs mergeable_queues = deepcopy(self._queues) self._remove_unmergeable(mergeable_prs, mergeable_queues) self._mergeable_queues = mergeable_queues
Python
def finalize(self): """Finalize the collection of queues. Assumes _queues has been populated by calls to add_branch. """ # order integration queues by content for version in self._queues.keys(): self._queues[version][ QueueIntegrationBranch].sort(reverse=True)
Python
def queued_prs(self):
        """Ordered list of queued PR IDs (oldest first)."""
        if not self._queues:
            return []
        # Find the most recent queue entry whose version is not a hotfix
        last_entry = None
        pr_ids = []
        for key in list(reversed(self._queues.keys())):
            if len(key) < 4:
                last_entry = self._queues[key]
                break
        if last_entry is not None:
            pr_ids = list(reversed([branch.pr_id for branch in
                                    last_entry[QueueIntegrationBranch]]))

        # Add hotfix PRs that are not visible from that top non-hotfix key
        pr_hf_ids = []
        for key in list(reversed(self._queues.keys())):
            if len(key) == 4:
                entry = self._queues[key]
                new_pr_ids = list([branch.pr_id for branch in
                                   entry[QueueIntegrationBranch]])
                for pr_hf_id in new_pr_ids:
                    if pr_hf_id not in pr_hf_ids:
                        pr_hf_ids = [pr_hf_id] + pr_hf_ids

        # Remove hotfix PRs from the first set
        pr_non_hf_ids = []
        for pr_id in pr_ids:
            if pr_id not in pr_hf_ids:
                pr_non_hf_ids = pr_non_hf_ids + [pr_id]

        return pr_hf_ids + pr_non_hf_ids
Python
def update_micro(self, tag): """Update development branch latest micro based on tag.""" pattern = r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<micro>\d+)" \ r"(\.(?P<hfrev>\d+)|)$" match = re.match(pattern, tag) if not match: LOG.debug("Ignore tag: %s", tag) return LOG.debug("Consider tag: %s", tag) major = int(match.groupdict()['major']) minor = int(match.groupdict()['minor']) micro = int(match.groupdict()['micro']) hfrev = 0 # default hfrev if match.groupdict()['hfrev'] is not None: hfrev = int(match.groupdict()['hfrev']) try: branches = self._cascade[(major, minor)] except KeyError: LOG.debug("Ignore tag: %s", tag) return hf_branch = branches[HotfixBranch] stb_branch = branches[StabilizationBranch] dev_branch = branches[DevelopmentBranch] if hf_branch: if hf_branch.micro == micro: hf_branch.hfrev = max(hfrev + 1, hf_branch.hfrev) hf_branch.version = '%d.%d.%d.%d' % (hf_branch.major, hf_branch.minor, hf_branch.micro, hf_branch.hfrev) if stb_branch is not None and \ stb_branch.micro == hf_branch.micro: # We have a hotfix branch but we did not remove the # stabilization branch. raise errors.DeprecatedStabilizationBranch(stb_branch.name, hf_branch.name) if stb_branch is not None and stb_branch.micro <= micro: # We have a tag but we did not remove the stabilization branch. raise errors.DeprecatedStabilizationBranch(stb_branch.name, tag) if dev_branch: dev_branch.micro = max(micro, dev_branch.micro)
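A standalone sketch of how the tag pattern used above behaves; the example tags are hypothetical and the cascade/branch bookkeeping is omitted:

import re

# Same pattern as in update_micro: X.Y.Z with an optional hotfix revision X.Y.Z.H.
PATTERN = (r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<micro>\d+)"
           r"(\.(?P<hfrev>\d+)|)$")

for tag in ["5.1.4", "5.1.4.2", "release-5.1.4"]:
    match = re.match(PATTERN, tag)
    if not match:
        print(tag, "-> ignored")  # non-matching tags are skipped, as in update_micro
        continue
    parts = match.groupdict()
    hfrev = int(parts["hfrev"]) if parts["hfrev"] is not None else 0
    print(tag, "->", int(parts["major"]), int(parts["minor"]),
          int(parts["micro"]), "hfrev", hfrev)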
Python
def _set_target_versions(self, dst_branch): """Compute list of expected Jira FixVersion/s. Must be called after the cascade has been finalised. """ for (major, minor), branch_set in self._cascade.items(): dev_branch = branch_set[DevelopmentBranch] stb_branch = branch_set[StabilizationBranch] hf_branch = branch_set[HotfixBranch] if hf_branch and dst_branch.name.startswith('hotfix/'): self.target_versions.append('%d.%d.%d.%d' % ( hf_branch.major, hf_branch.minor, hf_branch.micro, hf_branch.hfrev)) if stb_branch: self.target_versions.append('%d.%d.%d' % ( major, minor, stb_branch.micro)) elif dev_branch: offset = 2 if dev_branch.has_stabilization else 1 self.target_versions.append('%d.%d.%d' % ( major, minor, dev_branch.micro + offset))
Python
def finalize(self, dst_branch):
        """Finalize cascade considering given destination.

        Assumes the cascade has been populated by calls to add_branch
        and update_micro. Populates the local lists keeping track of
        destination and ignored branches (dst_branches and
        ignored_branches).

        Args:
            dst_branch: where the pull request wants to merge

        Raises:
            DevBranchDoesNotExist: if a version in the cascade has neither
                a development nor a hotfix branch.
            NotASingleDevBranch: if the cascade contains no development
                branch (hotfix destinations excepted).

        """
        self.get_merge_paths()  # populate merge paths before removing data
        ignore_stb_branches = False
        include_dev_branches = False
        dev_branch = None
        dst_hf = dst_branch.name.startswith('hotfix/')

        for (major, minor), branch_set in list(self._cascade.items()):
            dev_branch = branch_set[DevelopmentBranch]
            stb_branch = branch_set[StabilizationBranch]
            hf_branch = branch_set[HotfixBranch]

            # we have to target at least a hf or a dev branch
            if dev_branch is None and hf_branch is None:
                raise errors.DevBranchDoesNotExist(
                    'development/%d.%d' % (major, minor))

            # remember if a stab is attached before it is removed
            # from path, for the correct target_version computation
            if stb_branch:
                dev_branch.has_stabilization = True

            # remove untargeted branches from cascade
            if dst_branch == dev_branch:
                include_dev_branches = True
                ignore_stb_branches = True

            if stb_branch and (ignore_stb_branches or dst_hf):
                branch_set[StabilizationBranch] = None
                self.ignored_branches.append(stb_branch.name)

            if dst_branch == stb_branch:
                include_dev_branches = True
                ignore_stb_branches = True

            if not include_dev_branches or dst_hf:
                if branch_set[DevelopmentBranch]:
                    branch_set[DevelopmentBranch] = None
                    self.ignored_branches.append(dev_branch.name)

                if branch_set[StabilizationBranch]:
                    branch_set[StabilizationBranch] = None
                    self.ignored_branches.append(stb_branch.name)

                if not dst_hf:
                    del self._cascade[(major, minor)]
                    continue

            if not hf_branch or hf_branch.name != dst_branch.name:
                if branch_set[HotfixBranch]:
                    branch_set[HotfixBranch] = None
                    self.ignored_branches.append(hf_branch.name)
                del self._cascade[(major, minor)]

            # add to dst_branches in the correct order
            if not dst_hf:
                if branch_set[StabilizationBranch]:
                    self.dst_branches.append(stb_branch)
                if branch_set[DevelopmentBranch]:
                    self.dst_branches.append(dev_branch)
            else:
                if branch_set[HotfixBranch]:
                    if dst_branch.name == hf_branch.name:
                        self.dst_branches.append(hf_branch)

        if not dev_branch and not dst_hf:
            raise errors.NotASingleDevBranch()

        self._set_target_versions(dst_branch)
        self.ignored_branches.sort()
Python
def branch_factory(repo: git.Repository, branch_name: str) -> GWFBranch: """Construct a GWFBranch object corresponding to the branch_name. Args: repo: corresponding git repository. branch_name: name of the branch to construct. Returns: The constructed GWFBranch. Raises: UnrecognizedBranchPattern if the branch name is invalid. """ for cls in [StabilizationBranch, DevelopmentBranch, ReleaseBranch, QueueBranch, QueueIntegrationBranch, FeatureBranch, HotfixBranch, LegacyHotfixBranch, IntegrationBranch, UserBranch]: try: branch = cls(repo, branch_name) return branch except errors.BranchNameInvalid: pass raise errors.UnrecognizedBranchPattern(branch_name)
Python
def build_branch_cascade(job): """Initialize the job's branch cascade.""" cascade = job.git.cascade if cascade.dst_branches: # Do not rebuild cascade return cascade cascade.build(job.git.repo, job.git.dst_branch) LOG.debug(cascade.dst_branches) return cascade
Python
def ignore_files(dir: str, files: list[str]): """ Returns a list of files to ignore. To be used by shutil.copytree() """ return [f for f in files if Path(dir, f).is_file()]
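As the docstring says, this callable matches the ignore hook of shutil.copytree, which passes it a directory and the names inside it and expects back the names to skip. A small usage sketch with hypothetical paths, assuming ignore_files is importable; it reproduces the folder structure while skipping every regular file:

import shutil

# Hypothetical source and destination; only directories are copied because
# ignore_files reports every regular file as "to be ignored".
shutil.copytree("photos", "photos_structure_only", ignore=ignore_files)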
Python
def resize_image(
    image: Image.Image, max_width: int = 0, max_height: int = 0
) -> Image.Image:
    """
    Resize an image and return it.

    :param image: A PIL.Image.Image object.
    :param max_width: The max width the processed image can have.
    :param max_height: The max height the processed image can have.
    """
    old_width, old_height = image.size[0], image.size[1]

    @cache
    def get_proper_sizes(
        max_width: int, max_height: int, old_width: int, old_height: int
    ) -> tuple[int, int]:
        new_width, new_height = max_width, max_height
        if max_width == 0:
            """If width is not set."""
            new_width = round(old_width * (max_height / old_height))
        elif max_height == 0:
            """If height is not set."""
            new_height = round(old_height * (max_width / old_width))
        else:
            if old_width > old_height:
                """If image's original width is bigger than original height."""
                new_height = round(old_height * (new_width / old_width))
            elif old_height > old_width:
                """If image's original height is bigger than original width."""
                new_width = round(old_width * (new_height / old_height))
            elif old_width == old_height:
                """If image's original width and height are the same."""
                if max_width > max_height:
                    """If new width is bigger than new height."""
                    new_width = max_height
                elif max_height > max_width:
                    """If new height is bigger than new width."""
                    # keep the square image square: both sides are capped
                    # by the smaller maximum, which is max_width here
                    new_height = max_width
        if new_width > max_width and max_width != 0:
            new_width = max_width
            new_height = round(old_height * (new_width / old_width))
        if new_height > max_height and max_height != 0:
            new_height = max_height
            new_width = round(old_width * (new_height / old_height))
        return new_width, new_height

    new_width, new_height = get_proper_sizes(
        max_width, max_height, old_width, old_height
    )
    image = image.resize((new_width, new_height), Image.LANCZOS)
    return image
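A quick usage sketch, assuming Pillow is installed and resize_image is in scope; the source image is generated in memory so no file path is needed:

from PIL import Image

src = Image.new("RGB", (1200, 800))       # synthetic 1200x800 source
thumb = resize_image(src, max_width=600)  # constrain only the width
print(thumb.size)                         # (600, 400) -- aspect ratio preserved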
Python
def dir_generator() -> tuple[Path, Path, Path]: """Generates files and folders for the test.""" parent_dir = Path(tmp_path, "parent") first_child = Path(parent_dir, "first_child") second_child = Path(first_child, "second_child") parent_dir.mkdir() first_child.mkdir() second_child.mkdir() tmp_img = Image.new("RGB", (original_width, original_height)) for index, dir in enumerate((parent_dir, first_child, second_child)): for i in range(index + 1): img_path = Path(dir, f"image-{i}.jpg") tmp_img.save(img_path) return parent_dir, first_child, second_child
Python
def SanityCheckName(self,name):
        """The way to check that the name is good.
        For now we only allow letters, numbers and underscores.
        The name is used directly as part of a file-name and should therefore be nice."""
        import re
        # anchor the pattern so the whole name must consist of allowed
        # characters, not just its first character
        if not re.match(r"[a-zA-Z0-9_]+$", name):
            raise self.NameError(name)
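A standalone illustration of why the pattern must be anchored: re.match only anchors at the start of the string, so without a trailing $ (or re.fullmatch) a name like "bad/name" would slip through:

import re

for name in ["build_42", "bad/name", "also bad"]:
    unanchored = bool(re.match(r"[a-zA-Z0-9_]+", name))
    anchored = bool(re.match(r"[a-zA-Z0-9_]+$", name))
    print(name, "unanchored:", unanchored, "anchored:", anchored)
# build_42 passes both checks; the other two only pass the unanchored one.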
Python
def SessionName(self,name, nocheck=False):
        """The only way anyone would ever construct the session name"""
        if not nocheck:
            self.SanityCheckName(name)
        return self.session_dir + os.sep + self.session_pattern % name
Python
def StateName(self,name, nocheck=False): """The only way anyone should ever construct the state name""" if not nocheck: self.SanityCheckName(name) return self.session_dir + os.sep + self.state_pattern % name
Python
def ConstructDifference(self, old_env, new_env ):
        """Construct the dictionary of EWDiffObjects that is called a 'state'."""
        from EnvironmentObjects import EWDiffObject
        diff = {}
        # consider all keys that are in either environment, old or new
        for key in set(new_env.keys() + old_env.keys()):
            if key not in new_env: # it must be in old_env
                diff[key] = EWDiffObject(old=old_env[key]) # new is none, this corresponds to a removal
            elif key not in old_env: # it must be in new_env
                diff[key] = EWDiffObject(new=new_env[key]) # old is none, this is an addition
            else: # maybe nothing changed? This is most likely.
                if old_env[key] != new_env[key]: # something changed
                    diff[key] = EWDiffObject(old=old_env[key], new=new_env[key])
        return diff
Python
def GetStateOrSessionDiff(self,name):
        """If an existing saved state by this name is found, return that;
        if an open session by this name is found, construct the difference
        to the present."""
        import os
        log("Session Dir",os.listdir(self.session_dir))
        session_file_name = self.SessionName(name)
        state_file_name = self.StateName(name)
        try:
            diff = self.GetState(name)
        except:
            log()
            log("session wasn't available. making it.")
            try:
                diff = self.ConstructDifference(self.GetSession(name), self.shell.environment)
            except:
                log()
                log("that didn't work either")
                raise self.UnknownName(name)
        return diff
Python
def CloseSession(self,name): """Close an existing open recording session""" oldsession = self.GetSession(name) diff = self.ConstructDifference(oldsession, self.shell.environment) self.WriteState(name, diff) # Remove the session file, thereby closing the session import os os.remove(self.SessionName(name))
Python
def undo(self,name):
        """remove the changes associated with a state."""
        import os
        if not os.path.exists(self.StateName(name)) and os.path.exists(self.SessionName(name)):
            force = ("-f" in sys.argv or "--force" in sys.argv)
            if force:
                self.ClearState(name)
                self.CloseSession(name)
            else:
                print >>sys.stderr, "Session %s is open. Do you want to close it? " % name,
                user = raw_input()
                log("Question:","Session %s is open. Do you want to close it? " % name,"answer:",user)
                if re.match("""y(es?)?""",user):
                    print >>sys.stderr," --> Closing."
                    self.UserClearState(name)
                    self.CloseSession(name)
                else:
                    print >>sys.stderr," --> Using open session."
        return self.redo(name,Reverse=True)
Python
def GetShellParser(shell_var): """Decides which shell environment reader and diff writer to use.""" if shell_var == "/bin/bash": from Bash import BashInteractor return BashInteractor else: raise Exception("Unknown shell.")
Python
def _crop_center(image): """Returns a cropped square image.""" shape = image.shape new_shape = min(shape[1], shape[2]) offset_y = max(shape[1] - shape[2], 0) // 2 offset_x = max(shape[2] - shape[1], 0) // 2 return tf.image.crop_to_bounding_box(image, offset_y, offset_x, new_shape, new_shape)
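A small shape check, assuming TensorFlow 2.x is available and _crop_center from the snippet above is in scope; the batched 640x480 input is synthetic:

import tensorflow as tf

batch = tf.zeros([1, 480, 640, 3])  # [batch, height, width, channels]
square = _crop_center(batch)
print(square.shape)                 # (1, 480, 480, 3)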
Python
def fseq(options): ''' Call peaks using F-seq ''' # parse options if not which('fseq'): sys.exit('Error: No F-seq installed!') folder = check_dir(options['<rampagedir>']) flength = options['-l'] wig_flag = options['--wig'] percent = float(options['-p']) with open(os.path.join(folder, 'total_counts.txt'), 'r') as f: total = int(f.read().rstrip()) # run F-seq flist = {'+': 'rampage_plus_5end.bed', '-': 'rampage_minus_5end.bed'} all_peak_f = os.path.join(folder, 'rampage_peaks.txt') with open(all_peak_f, 'w') as out: for strand in flist: peak_f = run_fseq(folder, flist[strand], strand, flength, wig_flag, percent) with open(peak_f, 'r') as f: for line in f: if total: # calculate RPM reads = int(line.rstrip().split()[9]) rpm = reads * 1000000.0 / total out.write(line.rstrip() + '\t%f\n' % rpm) else: out.write(line)
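The options argument looks like a docopt-style dictionary; a hedged sketch of a call, with hypothetical flag values, assuming fseq and its helpers (which, check_dir, run_fseq) are importable and the directory already contains total_counts.txt and the rampage_*_5end.bed files read above:

# Hypothetical docopt-style options; the keys match the lookups in fseq() above.
options = {
    '<rampagedir>': 'rampage_out',  # directory with the inputs listed above
    '-l': '200',                    # forwarded to run_fseq() as flength (hypothetical value)
    '--wig': False,                 # wig_flag forwarded to run_fseq()
    '-p': '0.05',                   # parsed as float(percent) and forwarded to run_fseq()
}
fseq(options)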
Python
def forward(self, support, support_mask, query, query_mask): """Att-Induction Networks forward. Args: support: torch.Tensor, [-1, N, K, max_length] support_mask: torch.Tensor, [-1, N, K, max_length] query: torch.Tensor, [-1, totalQ, max_length] query_mask: torch.Tensor, [-1, totalQ, max_length] Returns: relation_score: torch.Tensor, [B, totalQ, N] predict_label: torch.Tensor, [B, totalQ]""" B, N, K = support.size()[:3] totalQ = query.size()[1] # Number of query instances for each batch # 1. Encoder support = support.view(-1, self.max_length) # [B * N * K, max_length] support_mask = support_mask.view(-1, self.max_length) query = query.view(-1, self.max_length) # [B * totalQ, max_length] query_mask = query_mask.view(-1, self.max_length) support = self.encoder(support, support_mask) # [B * N * K, D] query = self.encoder(query, query_mask) # [B * totalQ, D] support = support.view(-1, N, K, self.hidden_size) # [B, N, K, D] query = query.view(-1, totalQ, self.hidden_size) # [B, totalQ, D] # 2. Induction # 2.1 Attention score support_att = support.unsqueeze(1).expand(-1, totalQ, -1, -1, -1) # [B, totalQ, N, K, D] query_att = query.unsqueeze(2).unsqueeze(3).expand(-1, -1, N, -1, -1) # [B, totalQ, N, 1, D] support_query_att = torch.cat((query_att, support_att), dim=3) # [B, totalQ, N, 1 + K, D] support_query_att = support_query_att.view(-1, 1 + K, self.hidden_size).transpose(0, 1) # [1 + K, B * totalQ * N , D] att_score = self.self_att(support_query_att, support_query_att, support_query_att)[0] # [1 + K, B * totalQ * N , D] att_score = support_query_att + self.dropout(att_score) att_score = self.layer_norm(att_score) att_score = att_score[0].view(-1, totalQ, N, self.hidden_size) # [B, totalQ, N, D] att_score = att_score.unsqueeze(3) # [B, totalQ, N, 1, D] # 2.2 Attention-based dynamic routing support_hat = self.__squash(self.fc_induction(support).unsqueeze(1).expand(-1, totalQ, -1, -1, -1)) # [B, totalQ, N, K, D] b = torch.zeros(B, totalQ, N, K, 1, device=self.current_device, requires_grad=False) # [B, totalQ, N, K, 1] for _ in range(self.induction_iters): d = F.softmax(b, dim=3) # [B, totalQ, N, K, 1] c_hat = torch.mul(d, support_hat).sum(3, keepdims=True) # [B, totalQ, N, 1, D] c = self.__squash(c_hat) # [B, totalQ, N, 1, D] b = b + torch.mul(att_score, torch.tanh(torch.mul(support_hat, c))).sum(-1, keepdims=True) # [B, totalQ, N, K, 1] # 3. Relation c = c.squeeze(3) # [B, totalQ, N, D] query = query.unsqueeze(2).expand(-1, -1, N, -1) # [B, totalQ, N, D] query = query.contiguous() relation_score = self.relation(c, query) # [B, totalQ, N] predict_label = relation_score.argmax(dim=-1, keepdims=False) # [B, totalQ] return relation_score, predict_label
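A self-contained shape check of the dynamic-routing loop in step 2.2 above, on random tensors; the squash non-linearity is omitted and the sizes are arbitrary:

import torch
import torch.nn.functional as F

B, totalQ, N, K, D = 2, 3, 5, 4, 8
support_hat = torch.randn(B, totalQ, N, K, D)  # stand-in for the squashed support
att_score = torch.randn(B, totalQ, N, 1, D)    # stand-in for the attention scores
b = torch.zeros(B, totalQ, N, K, 1)

for _ in range(3):  # induction_iters
    d = F.softmax(b, dim=3)                             # routing weights over the K shots
    c = torch.mul(d, support_hat).sum(3, keepdim=True)  # [B, totalQ, N, 1, D]
    b = b + torch.mul(att_score, torch.tanh(torch.mul(support_hat, c))).sum(-1, keepdim=True)

print(c.shape)  # torch.Size([2, 3, 5, 1, 8]) -- one class vector per (query, class) pair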
Python
def forward(self, tokens, lengths): """Self-Attention Bi-LSTM encoder forward. Args: tokens: torch.Tensor, [-1, max_length] lengths: torch.Tensor, [-1, max_length] Returns: sentence_embedding: torch.Tensor, [-1, 2 * hidden_size]""" embedding = self.word_embedding(tokens) # [-1, max_length, word_vec_dim] lengths = lengths[:, 0] # [-1] embedding = nn.utils.rnn.pack_padded_sequence( embedding, lengths, batch_first=True, enforce_sorted=False ) output, _ = self.bilstm(embedding) # packed sequence. Throw away (h_n, c_n) output, _ = nn.utils.rnn.pad_packed_sequence( output, batch_first=True, padding_value=0.0, total_length=self.max_length ) # [-1, max_length, 2 * hidden_size]. Throw away lengths tensor att_score = torch.tanh(self.att1(output)) # [-1, max_length, att_dim] att_score = F.softmax(self.att2(att_score), dim=1) # [-1, max_length, 1] output = torch.mul(att_score, output).sum(dim=1, keepdim=False) # [-1, 2 * hidden_size] return output
Python
def glove_preprocessing(word_vec_file, output_path):
    """Transform an English word-embedding txt file into a .npy embedding
    matrix and a JSON index file."""
    token2idx = {}
    word_vec = []
    with open(word_vec_file, "r") as f:
        line = f.readline()
        index = 0
        while line:
            line = line.strip().split()
            token, vec = line[0], line[1:]
            vec = list(map(float, vec))
            if token in token2idx:
                raise ValueError("{} already exists!".format(token))
            else:
                token2idx[token] = index
                word_vec.append(vec)
                index += 1
            line = f.readline()
    word_vec = np.array(word_vec)
    assert len(token2idx) == np.shape(word_vec)[0], "Lengths do not match!"
    json.dump(token2idx, open(os.path.join(output_path, "token2idx.json"), "w"))
    np.save(os.path.join(output_path, "word_vec.npy"), word_vec)
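A usage sketch with hypothetical paths, assuming glove_preprocessing is importable and the GloVe txt file exists; after the call, the JSON index and the matrix can be reloaded and stay aligned row-for-row:

import json
import os

import numpy as np

glove_preprocessing("glove.6B.300d.txt", "embeddings")  # hypothetical input file and output dir

with open(os.path.join("embeddings", "token2idx.json")) as f:
    token2idx = json.load(f)
word_vec = np.load(os.path.join("embeddings", "word_vec.npy"))
assert word_vec.shape[0] == len(token2idx)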
Python
def chinese_word_vec_preprocessing(word_vec_file, output_path):
    """Transform a Chinese word-embedding txt file into a .npy embedding
    matrix and a JSON index file."""
    token2idx = {}
    word_vec = []
    with open(word_vec_file, "r") as f:
        line = f.readline()  # 1292607 300
        line = f.readline()
        index = 0
        while line:
            line = line.rstrip().split(" ")
            # print(line)
            token, vec = line[0], line[1:]
            vec = list(map(float, vec))
            if token in token2idx:
                print("{} already exists!".format(token))
                line = f.readline()
                continue
            else:
                token2idx[token] = index
                word_vec.append(vec)
                index += 1
            line = f.readline()
            if index % 100000 == 0:
                print("{:d} done!".format(index))
    word_vec = np.array(word_vec)
    assert len(token2idx) == np.shape(word_vec)[0], "Lengths do not match!"
    json.dump(token2idx, open(os.path.join(output_path, "token2idx.json"), "w"))
    np.save(os.path.join(output_path, "word_vec.npy"), word_vec)
Python
def make_train_indices(test_indices, n_examples, downsample=False, labels=None): """Makes a set of train indices from a set of test indices. test_indices: List of test indices. n_examples: Number of total (testing + training) examples. downsample: Whether to downsample. Default False. labels: Binary labels. Only needed if downsample is True. -> sorted list of training indices. """ if downsample and labels is None: raise ValueError('labels must be specified if downsample is specified') training = sorted(set(range(n_examples)) - set(test_indices)) if not downsample: logging.debug('Not downsampling...') return training logging.debug('Downsampling...') training = numpy.array(training) positives = training[labels[training] == 1] negatives = training[labels[training] == 0] if positives.shape[0] > negatives.shape[0]: numpy.random.shuffle(positives) positives = positives[:negatives.shape[0]] training = numpy.concatenate([positives, negatives]) else: numpy.random.shuffle(negatives) negatives = negatives[:positives.shape[0]] training = numpy.concatenate([negatives, positives]) training.sort() logging.debug('Positive examples: {}'.format(positives.shape[0])) logging.debug('Negative examples: {}'.format(negatives.shape[0])) logging.debug('Total examples: {}'.format(training.shape[0])) assert training.shape[0] == positives.shape[0] + negatives.shape[0] return training
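A small usage sketch of the downsampling path, assuming make_train_indices is in scope, with synthetic labels (two positives and six negatives left in the training pool) and a hypothetical held-out split; the returned indices are balanced between the classes:

import numpy

labels = numpy.array([1, 0, 0, 0, 1, 0, 0, 0, 0, 0])
test_indices = [8, 9]  # held out
train = make_train_indices(test_indices, len(labels), downsample=True, labels=labels)
print(train)                          # e.g. [0 3 4 6] -- two positives, two random negatives
print(sum(labels[i] for i in train))  # 2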
Python
def lr(results, method_name, split_id, features, targets, test_indices, C=1.0, overwrite=False, n_train=None, selection='passive'): """Run logistic regression and store results. results: Results object. features: (n_examples, n_features) array of features. targets: (n_examples,) array of binary targets. test_indices: List of integer testing indices. method_name: Name of this method in the results. split_id: ID of the split in the results. C: Regularisation parameter. Default 1.0. Higher values mean less regularisation. overwrite: Whether to overwrite existing results (default False). n_train: Number of training examples. Default as many as possible. selection: How to select training examples. Default 'passive'. """ assert max(test_indices) < features.shape[0] assert min(test_indices) >= 0 if results.has_run(method_name, split_id) and not overwrite: logging.info('Skipping trial {}:{}.'.format(method_name, split_id)) return train_indices = make_train_indices(test_indices, features.shape[0]) if n_train is not None and n_train != len(train_indices): assert n_train <= len(train_indices) train_indices, _ = sklearn.cross_validation.train_test_split( train_indices, train_size=n_train, stratify=targets[train_indices]) train_indices.sort() lr = sklearn.linear_model.LogisticRegression(class_weight='balanced', C=C, n_jobs=-1) logging.debug('Fitting {} instances.'.format(len(features))) lr.fit(features[train_indices], targets[train_indices]) logging.debug('Fit complete. Storing...'.format(len(features))) results.store_trial( method_name, split_id, lr.predict_proba(features[test_indices])[:, 1], indices=test_indices, params=lr_to_params(lr))
Python
def rf(results, method_name, split_id, features, targets, test_indices, overwrite=False): """Run random forest and store results. Does not store the model as random forests are difficult to serialise. results: Results object. features: (n_examples, n_features) array of features. targets: (n_examples,) array of binary targets. test_indices: List of integer testing indices. method_name: Name of this method in the results. split_id: ID of the split in the results. overwrite: Whether to overwrite existing results (default False). """ assert max(test_indices) < features.shape[0] assert min(test_indices) >= 0 train_indices = make_train_indices(test_indices, features.shape[0]) if results.has_run(method_name, split_id) and not overwrite: logging.info('Skipping trial {}:{}.'.format(method_name, split_id)) return rf = sklearn.ensemble.RandomForestClassifier(class_weight='balanced') rf.fit(features[train_indices], targets[train_indices]) results.store_trial( method_name, split_id, rf.predict_proba(features[test_indices])[:, 1], indices=test_indices, params=numpy.zeros((results.n_params,)))
Python
def raykar(results, method_name, split_id, features, targets, test_indices, overwrite=False, n_restarts=5, downsample=False): """Run the Raykar algorithm and store results. results: Results object. features: (n_examples, n_features) array of features. targets: (n_labellers, n_examples) masked array of binary targets. test_indices: List of integer testing indices. method_name: Name of this method in the results. split_id: ID of the split in the results. overwrite: Whether to overwrite existing results (default False). n_restarts: Number of random restarts. Default 5. downsample: Whether to downsample. Default False. """ assert max(test_indices) < features.shape[0] assert min(test_indices) >= 0 train_indices = make_train_indices(test_indices, features.shape[0], downsample=downsample, labels=majority_vote(targets)) if results.has_run(method_name, split_id) and not overwrite: logging.info('Skipping trial {}:{}.'.format(method_name, split_id)) return rc = RaykarClassifier(n_restarts=n_restarts) rc.fit(features[train_indices], targets[:, train_indices]) results.store_trial( method_name, split_id, rc.predict_proba(features[test_indices]), indices=test_indices, params=rc.serialise())
Python
def yan(results, method_name, split_id, features, targets, test_indices,
        overwrite=False, n_restarts=5):
    """Run the Yan algorithm and store results.

    results: Results object.
    features: (n_examples, n_features) array of features.
    targets: (n_labellers, n_examples) masked array of binary targets.
    test_indices: List of integer testing indices.
    method_name: Name of this method in the results.
    split_id: ID of the split in the results.
    overwrite: Whether to overwrite existing results (default False).
    n_restarts: Number of random restarts. Default 5.
    """
    assert max(test_indices) < features.shape[0]
    assert min(test_indices) >= 0
    train_indices = make_train_indices(test_indices, features.shape[0])

    if results.has_run(method_name, split_id) and not overwrite:
        logging.info('Skipping trial {}:{}.'.format(method_name, split_id))
        return

    yc = YanClassifier(n_restarts=n_restarts)
    yc.fit(features[train_indices], targets[:, train_indices])
    results.store_trial(method_name, split_id,
                        yc.predict_proba(features[test_indices]),
                        indices=test_indices, params=yc.serialise())
Python
def generate(f_h5, out_f_h5, field='cdfs'):
    """Generates potential hosts and their astronomical features.

    f_h5: crowdastro input HDF5 file.
    out_f_h5: Training data output HDF5 file.
    """
    ir_survey = f_h5.attrs['ir_survey']
    if ir_survey == 'swire':
        swire = f_h5['/swire/{}/numeric'.format(field)]
        fluxes = swire[:, 2:7]
        # Skip stellarities.
        distances = swire[:, 8].reshape((-1, 1))
        images = swire[:, 9:]
        coords = swire[:, :2]
        s1_s2 = fluxes[:, 0] / fluxes[:, 1]
        s2_s3 = fluxes[:, 1] / fluxes[:, 2]
        s3_s4 = fluxes[:, 2] / fluxes[:, 3]
        astro_features = numpy.concatenate(
            [fluxes, s1_s2.reshape((-1, 1)), s2_s3.reshape((-1, 1)),
             s3_s4.reshape((-1, 1))], axis=1)
    elif ir_survey == 'wise':
        wise = f_h5['/wise/{}/numeric'.format(field)]
        magnitudes = wise[:, 2:6]
        distances = wise[:, 6].reshape((-1, 1))
        images = wise[:, 7:]
        coords = wise[:, :2]
        # Magnitude differences are probably useful features.
        w1_w2 = magnitudes[:, 0] - magnitudes[:, 1]
        w2_w3 = magnitudes[:, 1] - magnitudes[:, 2]
        # Converting the magnitudes to a linear scale seems to improve
        # performance.
        linearised_magnitudes = numpy.power(10, -0.4 * magnitudes)
        w1_w2 = numpy.power(10, -0.4 * w1_w2)
        w2_w3 = numpy.power(10, -0.4 * w2_w3)
        astro_features = numpy.concatenate(
            [linearised_magnitudes, w1_w2.reshape((-1, 1)),
             w2_w3.reshape((-1, 1))], axis=1)

    n_features = config['surveys'][ir_survey]['n_features']
    assert astro_features.shape[1] + distances.shape[1] == n_features

    # We now need to find the labels for each.
    if field == 'cdfs':
        truths = set(f_h5['/atlas/cdfs/consensus_objects'][:, 1])
        labels = numpy.array([o in truths
                              for o in range(len(astro_features))])
        assert len(labels) == len(astro_features)
        out_f_h5.create_dataset('labels', data=labels)

    assert len(astro_features) == len(distances)
    assert len(distances) == len(images)

    features = numpy.hstack([astro_features, distances, images])
    n_astro = features.shape[1] - images.shape[1]

    # Save to HDF5.
    out_f_h5.create_dataset('raw_features', data=features)
    out_f_h5.create_dataset('positions', data=coords)
    out_f_h5.attrs['ir_survey'] = ir_survey
    out_f_h5.attrs['field'] = field
Python
def compute_disagreement(self):
    """Finds disagreement for all objects in the pool."""
    labels = numpy.array([c.predict(self.pool) for c in self.classifiers])
    # Each column is the classifications of one data point.
    n_agree = labels.sum(axis=0)
    self.disagreement = numpy.ma.masked_array(
        numpy.abs(n_agree - self.n_classifiers // 2),
        mask=~self.labels.mask)
Python
def score(self, test_xs, test_ts):
    """Finds cross-entropy error on test data."""
    return numpy.mean([
        sklearn.metrics.log_loss(
            test_ts, c.predict(test_xs))
        for c in self.classifiers])
Python
def ba(self, test_xs, test_ts):
    """Finds balanced accuracy on test data."""
    # labels = numpy.zeros((self.n_classifiers, len(test_xs)))
    # for c in range(self.n_classifiers):
    #     preds = self.classifiers[c].predict(test_xs)
    #     labels[c, :] = preds
    # labels = numpy.ma.MaskedArray(labels, mask=numpy.zeros(labels.shape))
    # return balanced_accuracy(test_ts, majority_vote(labels))
    return balanced_accuracy(
        test_ts, self.reference_classifier.predict(test_xs))
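The balanced_accuracy helper used above is not shown in this snippet; a minimal sketch consistent with the (tp / p + tn / n) / 2 expression used elsewhere in this file (not the project's own implementation) might be:

import sklearn.metrics

def balanced_accuracy_sketch(y_true, y_pred):
    cm = sklearn.metrics.confusion_matrix(y_true, y_pred)
    if cm.shape != (2, 2):
        return float('nan')  # degenerate case: only one class observed
    tn, fp, fn, tp = cm.ravel()
    # Mean of sensitivity and specificity.
    return (tp / (tp + fn) + tn / (tn + fp)) / 2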
Python
def unpack(params, n_dim, n_annotators):
    """Unpacks an array of parameters into a, b, and h."""
    a = params[:n_dim]
    b = params[n_dim]
    h = params[n_dim + 1:]
    assert h.shape[0] == n_annotators
    return a, b, h
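Usage sketch for the packing convention that unpack assumes: a flat vector laid out as [a (n_dim values), b (scalar), h (n_annotators values)].

import numpy

n_dim, n_annotators = 3, 4
params = numpy.concatenate([numpy.zeros(n_dim), [0.5],
                            numpy.ones(n_annotators)])
a, b, h = unpack(params, n_dim, n_annotators)
assert a.shape == (n_dim,) and h.shape == (n_annotators,)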
Python
def sample_index(self):
    """Finds index of a random unlabelled point."""
    unlabelled = self.labels.mask.nonzero()[0]
    if len(unlabelled):
        index = numpy.random.choice(unlabelled)
        return index
    return 0
Python
def sample_indices(self, n):
    """Finds indices of n random unlabelled points."""
    indices = set()
    unlabelled = self.labels.mask.nonzero()[0]
    if len(unlabelled) < n:
        return unlabelled

    while len(indices) < n:
        index = numpy.random.choice(unlabelled)
        indices.add(index)

    return sorted(indices)
Python
def sample_index(self):
    """Finds index of a random unlabelled point."""
    unlabelled = self.labels.mask.nonzero()[0]
    unlabelled_groundtruth = self.labels.data[unlabelled]
    if len(unlabelled):
        if numpy.random.random() < 0.5:
            index = numpy.random.choice(
                unlabelled[unlabelled_groundtruth == 1])
        else:
            index = numpy.random.choice(
                unlabelled[unlabelled_groundtruth == 0])
        return index
    return 0
Python
def sample_indices(self, n):
    """Finds indices of n random unlabelled points."""
    indices = set()
    unlabelled = self.labels.mask.nonzero()[0]
    if len(unlabelled) < n:
        return unlabelled

    unlabelled_groundtruth = self.labels.data[unlabelled]
    while len(indices) < n:
        if ((numpy.random.random() < 0.5 and
                len(unlabelled[unlabelled_groundtruth == 1]) > 0) or
                len(unlabelled[unlabelled_groundtruth == 0]) == 0):
            index = numpy.random.choice(
                unlabelled[unlabelled_groundtruth == 1])
        else:
            index = numpy.random.choice(
                unlabelled[unlabelled_groundtruth == 0])
        indices.add(index)

    return sorted(indices)
Python
def compute_uncertainties(self):
    """Finds uncertainties for all objects in the pool."""
    # To keep things simple, I'll use the (negative) proximity to the
    # decision boundary as the uncertainty. Note that the uncertainties
    # array is masked such that labelled points have no uncertainty.
    probs = self.classifier.predict_proba(self.pool)[:, 1]
    self.uncertainties = numpy.ma.masked_array(-numpy.abs(probs - 0.5),
                                               mask=~self.labels.mask)
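A sketch of how these masked uncertainties would typically be consumed, assuming the masking convention above (already-labelled points are masked out): pick the unlabelled point closest to the decision boundary.

import numpy

def most_uncertain_index(uncertainties):
    # uncertainties: numpy.ma.MaskedArray; labelled points are masked and
    # therefore ignored by the masked argmax.
    return int(numpy.ma.argmax(uncertainties))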
Python
def _top_n_accurate_targets(crowdastro_h5, y_true, n_annotators=5,
                            threshold=700):
    """Get the labels of the top n most accurate annotators assessed against
    y_true, above a threshold number of annotations."""
    ir_survey = crowdastro_h5.attrs['ir_survey']
    labels = crowdastro_h5['/{}/cdfs/rgz_raw_labels'.format(ir_survey)].value
    labels_mask = crowdastro_h5[
        '/{}/cdfs/rgz_raw_labels_mask'.format(ir_survey)].value
    labels = numpy.ma.MaskedArray(labels, mask=labels_mask)
    # Compare each annotator to the Norris labels. Get their balanced
    # accuracy.
    annotator_accuracies = []
    for t in range(labels.shape[0]):
        cm = sklearn.metrics.confusion_matrix(y_true[~labels[t].mask],
                                              labels[t][~labels[t].mask])
        if cm.shape[0] == 1:
            continue

        tp = cm[1, 1]
        n, p = cm.sum(axis=1)
        tn = cm[0, 0]
        if not n or not p or p + n < threshold:
            annotator_accuracies.append(0)
            continue

        ba = (tp / p + tn / n) / 2
        annotator_accuracies.append(ba)

    ranked_annotators = numpy.argsort(annotator_accuracies)
    top_n_annotators = ranked_annotators[-n_annotators:]

    return labels[top_n_annotators]
Python
def top_n_accurate_targets(crowdastro_h5, n_annotators=5, threshold=700):
    """Get the labels of the top n most accurate annotators, assessed against
    the groundtruth, above a threshold number of annotations.
    """
    ir_survey = crowdastro_h5.attrs['ir_survey']
    norris = crowdastro_h5['/{}/cdfs/norris_labels'.format(ir_survey)].value
    return _top_n_accurate_targets(crowdastro_h5, norris,
                                   n_annotators=n_annotators,
                                   threshold=threshold)
Python
def top_n_mv_accurate_targets(crowdastro_h5, n_annotators=5, threshold=700):
    """Get the labels of the top n most accurate annotators, assessed against
    the majority vote, above a threshold number of annotations.
    """
    ir_survey = crowdastro_h5.attrs['ir_survey']
    labels = crowdastro_h5['/{}/cdfs/rgz_raw_labels'.format(ir_survey)].value
    labels_mask = crowdastro_h5[
        '/{}/cdfs/rgz_raw_labels_mask'.format(ir_survey)].value
    labels = numpy.ma.MaskedArray(labels, mask=labels_mask)
    mv = majority_vote(labels)
    return _top_n_accurate_targets(crowdastro_h5, mv,
                                   n_annotators=n_annotators,
                                   threshold=threshold)
Python
def top_n_prolific_targets(crowdastro_h5, n_annotators=5):
    """Get the labels of the top n most prolific annotators."""
    labels = crowdastro_h5['/wise/cdfs/rgz_raw_labels'].value
    labels_mask = crowdastro_h5['/wise/cdfs/rgz_raw_labels_mask'].value
    labels = numpy.ma.MaskedArray(labels, mask=labels_mask)
    n_seen = labels_mask.sum(axis=1)
    top_seen = numpy.argsort(n_seen)[-n_annotators:]
    return labels[top_seen]
Python
def _unpack(self, params, n_dim, n_annotators):
    """Unpacks an array of parameters into a and w."""
    a = params[:n_dim]
    w = params[n_dim:].reshape((n_annotators, n_dim))
    return a, w
Python
def predict(self, x):
    """Classify data points using logistic regression.

    x: Data points. (n_samples, n_dim) NumPy array.
    """
    return numpy.round(self.predict_proba(x))
Python
def predict_proba(self, x):
    """Predict probabilities of data points using logistic regression.

    x: Data points. (n_samples, n_dim) NumPy array.
    """
    x = numpy.hstack([x, numpy.ones((x.shape[0], 1))])
    return logistic_regression(self.a_, x)
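The logistic_regression helper is not included in this snippet; a minimal version consistent with its use here (bias-augmented features dotted with the weights, squashed through a sigmoid) might look like the following sketch.

import numpy

def logistic_regression(a, x):
    # a: (n_dim + 1,) weights including the bias term; x: (..., n_dim + 1).
    return 1 / (1 + numpy.exp(-numpy.dot(x, a)))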
Python
def score(self, X, Y):
    """Computes the likelihood of labels and data under the model.

    X: (n_samples, n_features) NumPy array of data.
    Y: (n_labellers, n_samples) NumPy masked array of crowd labels.
    """
    X = numpy.hstack([X, numpy.ones((X.shape[0], 1))])
    return self._likelihood(self.a_, self.w_, X, Y)
Python
def _likelihood(self, a, w, X, Y):
    """Computes the likelihood of labels and data under a model.

    X: (n_samples, n_features) NumPy array of data.
    Y: (n_labellers, n_samples) NumPy masked array of crowd labels.
    """
    lh = 1
    for i in range(X.shape[0]):
        for t in range(Y.shape[0]):
            if Y.mask[t, i]:
                continue

            lr = logistic_regression(a, X[i])
            p1 = self._annotator_model(w[t], X[i], Y[t, i], 1) * lr
            p0 = self._annotator_model(w[t], X[i], Y[t, i], 0) * (1 - lr)
            lh *= p1 + p0

    return lh
Python
def require_atlas(f):
    """Decorator that ensures a subject (the first argument) is from the
    ATLAS survey.
    """
    def g(subject, *args, **kwargs):
        if subject['metadata']['survey'] != 'atlas':
            raise ValueError('Subject not from ATLAS survey.')

        return f(subject, *args, **kwargs)

    return g
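Usage sketch for the decorator; the subject dict and Zooniverse ID below are illustrative only.

@require_atlas
def describe(subject):
    return subject['zooniverse_id']

describe({'metadata': {'survey': 'atlas'}, 'zooniverse_id': 'ARG0000001'})
# A subject whose survey is not 'atlas' would raise ValueError instead.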
Python
def open_fits(subject, field, wavelength, size='2x2'):
    """Opens a FITS image of a subject.

    Can be used as a context handler.

    subject: RGZ subject dict, from the ATLAS survey.
    field: 'elais-s1' or 'cdfs'
    wavelength: 'ir' or 'radio'
    size: Optional. '2x2' or '5x5'.
    -> FITS image file.
    """
    if field not in {'elais-s1', 'cdfs'}:
        raise ValueError('field must be either "elais-s1" or "cdfs".')

    if wavelength not in {'ir', 'radio'}:
        raise ValueError('wavelength must be either "ir" or "radio".')

    cid = subject['metadata']['source']
    filename = '{}_{}.fits'.format(cid, wavelength)
    path = os.path.join(config['data_sources']['{}_fits'.format(field)],
                        size, filename)

    return astropy.io.fits.open(path, ignore_blank=True)
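Usage sketch, assuming subject is an ATLAS RGZ subject dict and the paths in config point at the FITS cutout directories.

with open_fits(subject, 'cdfs', 'radio', size='2x2') as fits_file:
    radio_image = fits_file[0].data  # 2D pixel array of the radio cutout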
Python
def train(x, y, epsilon=1e-5, lr_init=False, skip_zeros=False):
    """Expectation-maximisation algorithm from Yan et al. (2010).

    x: Data. (n_samples, n_dim) NumPy array
    y: Labels. (n_annotators, n_samples) NumPy array
    epsilon: Convergence threshold. Default 1e-5. float
    lr_init: Initialised with logistic regression. Default False.
    skip_zeros: Whether to detect and skip zero probabilities. Default False.
    """
    # TODO(MatthewJA): Restore skip_zeros functionality.
    n_samples, n_dim = x.shape
    n_annotators, n_samples_ = y.shape
    assert n_samples == n_samples_, 'Label array has wrong number of labels.'

    # Compute majority vote labels (for debugging + logistic regression init).
    majority_y = numpy.zeros((n_samples,))
    for i in range(n_samples):
        labels = y[:, i]
        if labels.mask is False:
            counter = collections.Counter(labels)
        else:
            counter = collections.Counter(labels[~labels.mask])

        if counter:
            majority_y[i] = max(counter, key=counter.get)
        else:
            # No labels for this data point.
            majority_y[i] = numpy.random.randint(2)  # ¯\_(ツ)_/¯

    logging.info('Initialising...')
    if lr_init:
        # For our initial guess, we'll fit logistic regression to the majority
        # vote.
        lr_ab = sklearn.linear_model.LogisticRegression()
        lr_ab.fit(x, majority_y)
        a = lr_ab.coef_.ravel()
        b = lr_ab.intercept_[0]
    else:
        a = numpy.random.normal(size=(n_dim,))
        b = numpy.random.normal()

    w = numpy.random.normal(size=(n_annotators, n_dim))
    g = numpy.ones((n_annotators,))

    logging.debug('Initial a: %s', a)
    logging.debug('Initial b: %s', b)
    logging.debug('Initial w: %s', w)
    logging.debug('Initial g: %s', g)

    assert x.shape == (n_samples, n_dim)
    assert y.shape == (n_annotators, n_samples)

    logging.info('Iterating until convergence...')

    iters = 0
    while True:  # Until convergence (checked later).
        iters += 1
        logging.info('Iteration %d', iters)

        a_, b_, w_, g_ = em_step(
            n_samples, n_annotators, n_dim, a, b, w, g, x, y)

        # Check convergence.
        dist = numpy.linalg.norm(a - a_) ** 2 + (b - b_) ** 2
        logging.debug('Distance: {:.02f}'.format(dist))
        if dist <= epsilon:
            return a_, b_, w_, g_

        a, b, w, g = a_, b_, w_, g_
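A sketch of the expected inputs (shapes follow the docstring; the values are synthetic): y is a masked array with mask=True wherever an annotator did not label a point.

import numpy

x = numpy.random.normal(size=(10, 3))            # 10 samples, 3 features
raw = numpy.random.randint(0, 2, size=(4, 10))   # 4 annotators
missing = numpy.random.random(size=(4, 10)) > 0.7
y = numpy.ma.MaskedArray(raw, mask=missing)
a, b, w, g = train(x, y, epsilon=1e-3)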
Python
def run_sample(data_path, catalogue_path, limit=0):
    """Runs the consensus algorithm described in Banfield et al., 2015."""
    # TODO(MatthewJA): Reimplement the 'update' argument.
    # TODO(MatthewJA): Reimplement the 'do_plot' argument.
    # TODO(MatthewJA): This only works for ATLAS subjects.
    # TODO(MatthewJA): The original code only worked on FIRST subjects. Have I
    # broken anything by running ATLAS subjects through it?
    paths = load_contours.make_pathdict(data_path, catalogue_path)

    sample_subjects = [cz for cz in subjects.find({
        'metadata.survey': 'atlas'
    }).limit(limit)]
    logging.debug('Found {} subjects.'.format(len(sample_subjects)))
    zooniverse_ids = [cz['zooniverse_id'] for cz in sample_subjects]

    with open('%s/csv/%s.csv' % (rgz_dir, PATH_STEM), 'w') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow([
            'zooniverse_id',
            'first_id',
            'n_users',
            'n_total',
            'consensus_level',
            'n_radio',
            'label',
            'bbox',
            'ir_peak',
        ])

        for idx, zid in enumerate(zooniverse_ids):
            logging.debug('Zooniverse ID: {}'.format(zid))
            if not idx % 100:
                # Progress.
                now = datetime.datetime.now().strftime('%H:%M:%S.%f')
                progress = idx / len(zooniverse_ids)
                logging.info('{:.02%} {}'.format(progress, now))

            cons = consensus(zid)
            cons['consensus_level'] = cons['n_users'] / cons['n_total']

            # CSV.
            for ans in cons['answer'].values():
                try:
                    ir_peak = ans['ir_peak']
                except KeyError:
                    ir_peak = ans.get('ir', None)

                writer.writerow([
                    cons['zid'],
                    cons['source'],
                    cons['n_users'],
                    cons['n_total'],
                    cons['consensus_level'],
                    len(ans['xmax']),
                    alpha(ans['ind']),
                    bbox_unravel(ans['bbox']),
                    ir_peak,
                ])

    # CSV
    cmaster = pandas.read_csv('%s/csv/%s.csv' % (rgz_dir, PATH_STEM))
    cmaster75 = cmaster[cmaster['consensus_level'] >= 0.75]
    cmaster75.to_csv('%s/csv/%s_75.csv' % (rgz_dir, PATH_STEM), index=False)

    logging.info('Completed consensus.')
Python
def hash_file(f):
    """Finds the MD5 hash of a file.

    File must be opened in bytes mode.
    """
    h = hashlib.md5()
    chunk_size = 65536  # 64 KiB
    for chunk in iter(lambda: f.read(chunk_size), b''):
        h.update(chunk)
    return h.hexdigest()
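Usage sketch (the filename is illustrative only); the file must be opened in binary mode so that read() yields bytes.

with open('catalogue.csv', 'rb') as f:
    digest = hash_file(f)  # hex MD5 string, e.g. for change detection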
Python
def prep_h5(f_h5, ir_survey):
    """Creates hierarchy in HDF5 file."""
    f_h5.create_group('/atlas/cdfs')
    f_h5.create_group('/atlas/elais')
    f_h5.create_group('/{}/cdfs'.format(ir_survey))
    f_h5.create_group('/{}/elais'.format(ir_survey))
    f_h5.attrs['version'] = VERSION
    f_h5.attrs['ir_survey'] = ir_survey
Python
def import_atlas(f_h5, test=False, field='cdfs'):
    """Imports the ATLAS dataset into crowdastro, as well as associated SWIRE.

    f_h5: An HDF5 file.
    test: Flag to run on only 10 subjects. Default False.
    """
    from . import rgz_data as data

    # Fetch groups from HDF5.
    cdfs = f_h5['/atlas/{}'.format(field)]

    # First pass, I'll find coords, names, and Zooniverse IDs, as well as how
    # many data points there are.
    coords = []
    names = []
    zooniverse_ids = []

    if field == 'cdfs':
        # We need the ATLAS name, but we can only get it by going through the
        # ATLAS catalogue and finding the nearest component.
        # https://github.com/chengsoonong/crowdastro/issues/63
        # Fortunately, @jbanfield has already done this, so we can just load
        # that CSV and match the names.
        # TODO(MatthewJA): This matches the ATLAS component ID, but maybe we
        # should be using the name instead.
        rgz_to_atlas = {}
        with open(config['data_sources']['rgz_to_atlas']) as f:
            reader = csv.DictReader(f)
            for row in reader:
                rgz_to_atlas[row['ID_RGZ']] = row['ID']

        all_subjects = data.get_all_subjects(survey='atlas', field=field)
        if test:
            all_subjects = all_subjects.limit(10)

        for subject in all_subjects:
            ra, dec = subject['coords']
            zooniverse_id = subject['zooniverse_id']
            rgz_source_id = subject['metadata']['source']
            if rgz_source_id not in rgz_to_atlas:
                logging.debug('Skipping %s; no matching ATLAS component.',
                              zooniverse_id)
                continue
            name = rgz_to_atlas[rgz_source_id]

            # Store the results.
            coords.append((ra, dec))
            names.append(name)
            zooniverse_ids.append(zooniverse_id)
    elif field == 'elais':
        atlascatalogue = ascii.read(config['data_sources']['atlas_catalogue'])
        ras, decs = atlascatalogue['RA_deg'], atlascatalogue['Dec_deg']
        e_ids = atlascatalogue['ID']
        fields = atlascatalogue['field']

        # Store the results.
        for ra, dec, e_id, field_ in zip(ras, decs, e_ids, fields):
            if field_ == 'ELAIS-S1':
                coords.append((ra, dec))
                names.append(e_id)
                zooniverse_ids.append(e_id)

    n_cdfs = len(names)

    # Sort the data by Zooniverse ID.
    coords_to_zooniverse_ids = dict(zip(coords, zooniverse_ids))
    names_to_zooniverse_ids = dict(zip(names, zooniverse_ids))
    coords.sort(key=coords_to_zooniverse_ids.get)
    names.sort(key=names_to_zooniverse_ids.get)
    zooniverse_ids.sort()

    # Begin to store the data. We will have two tables: one for numeric data,
    # and one for strings. We will have to preallocate the numeric table so
    # that we aren't storing huge amounts of image data in memory.

    # Strings.
    dtype = [('zooniverse_id', '<S{}'.format(MAX_ZOONIVERSE_ID_LENGTH)),
             ('name', '<S{}'.format(MAX_NAME_LENGTH))]
    string_data = numpy.array(list(zip(zooniverse_ids, names)), dtype=dtype)
    cdfs.create_dataset('string', data=string_data, dtype=dtype)

    # Numeric.
    image_size = (config['surveys']['atlas']['fits_width'] *
                  config['surveys']['atlas']['fits_height'])
    # RA, DEC, radio, (distance to SWIRE object added later)
    dim = (n_cdfs, 1 + 1 + image_size)
    numeric = cdfs.create_dataset('_numeric', shape=dim, dtype='float32')

    # Load image patches and store numeric data.
    with astropy.io.fits.open(
            config['data_sources']['atlas_{}_image'.format(field)],
            ignore_blank=True) as atlas_image:
        wcs = astropy.wcs.WCS(atlas_image[0].header).dropaxis(3).dropaxis(2)
        pix_coords = wcs.all_world2pix(coords, FITS_CONVENTION)
        assert pix_coords.shape[1] == 2
        logging.debug('Fetching %d ATLAS images.', len(pix_coords))
        for index, (x, y) in enumerate(pix_coords):
            radio = atlas_image[0].data[
                0, 0,  # stokes, freq
                int(y) - config['surveys']['atlas']['fits_height'] // 2:
                int(y) + config['surveys']['atlas']['fits_height'] // 2,
                int(x) - config['surveys']['atlas']['fits_width'] // 2:
                int(x) + config['surveys']['atlas']['fits_width'] // 2]
            numeric[index, 0] = coords[index][0]
            numeric[index, 1] = coords[index][1]
            numeric[index, 2:2 + image_size] = radio.reshape(-1)

    logging.debug('ATLAS imported.')
Python
def import_norris(f_h5):
    """Imports the Norris et al. (2006) labels.

    f_h5: crowdastro HDF5 file with WISE or SWIRE already imported.
    """
    ir_survey = f_h5.attrs['ir_survey']
    ir_positions = f_h5['/{}/cdfs/numeric'.format(ir_survey)][:, :2]
    ir_tree = sklearn.neighbors.KDTree(ir_positions)
    norris_dat = astropy.io.ascii.read(
        config['data_sources']['norris_coords'])
    norris_swire = norris_dat['SWIRE']
    norris_coords = []
    for s in norris_swire:
        s = s.strip()
        if len(s) < 19:
            continue

        # e.g. J032931.44-281722.0
        ra_hr = s[1:3]
        ra_min = s[3:5]
        ra_sec = s[5:10]
        dec_sgn = s[10]
        dec_deg = s[11:13]
        dec_min = s[13:15]
        dec_sec = s[15:19]
        ra = '{} {} {}'.format(ra_hr, ra_min, ra_sec)
        dec = '{}{} {} {}'.format(dec_sgn, dec_deg, dec_min, dec_sec)
        logging.debug('Reading Norris coordinate: {}; {}'.format(ra, dec))
        coord = SkyCoord(ra=ra, dec=dec, unit=('hourangle', 'deg'))
        norris_coords.append(coord)

    norris_labels = numpy.zeros((ir_positions.shape[0],))
    for skycoord in norris_coords:
        # Find a neighbour.
        ra = skycoord.ra.degree
        dec = skycoord.dec.degree
        ((dist,),), ((ir,),) = ir_tree.query([(ra, dec)])
        if dist < config['surveys'][ir_survey]['distance_cutoff']:
            norris_labels[ir] = 1

    f_h5.create_dataset('/{}/cdfs/norris_labels'.format(ir_survey),
                        data=norris_labels)
Python
def import_fan(f_h5):
    """Imports the Fan et al. (2015) labels.

    f_h5: crowdastro HDF5 file with WISE or SWIRE already imported.
    """
    ir_survey = f_h5.attrs['ir_survey']
    ir_names = f_h5['/{}/cdfs/string'.format(ir_survey)]
    ir_positions = f_h5['/{}/cdfs/numeric'.format(ir_survey)][:, :2]
    ir_tree = sklearn.neighbors.KDTree(ir_positions)
    fan_coords = []
    with open(config['data_sources']['fan_swire'], 'r') as fan_dat:
        for row in csv.DictReader(fan_dat):
            ra_hr = row['swire'][8:10]
            ra_min = row['swire'][10:12]
            ra_sec = row['swire'][12:17]
            dec_sgn = row['swire'][17]
            dec_deg = row['swire'][18:20]
            dec_min = row['swire'][20:22]
            dec_sec = row['swire'][22:26]
            ra = '{} {} {}'.format(ra_hr, ra_min, ra_sec)
            dec = '{}{} {} {}'.format(dec_sgn, dec_deg, dec_min, dec_sec)
            fan_coords.append((ra, dec))

    fan_labels = numpy.zeros((ir_positions.shape[0],))
    for ra, dec in fan_coords:
        # Find a neighbour.
        skycoord = SkyCoord(ra=ra, dec=dec, unit=('hourangle', 'deg'))
        ra = skycoord.ra.degree
        dec = skycoord.dec.degree
        ((dist,),), ((ir,),) = ir_tree.query([(ra, dec)])
        if dist < config['surveys'][ir_survey]['distance_cutoff']:
            fan_labels[ir] = 1

    f_h5.create_dataset('/{}/cdfs/fan_labels'.format(ir_survey),
                        data=fan_labels)
Python
def contains(bbox, point):
    """Checks if point is within bbox.

    bbox: [[x0, x1], [y0, y1]]
    point: [x, y]
    -> bool
    """
    return (bbox[0][0] <= point[0] <= bbox[0][1] and
            bbox[1][0] <= point[1] <= bbox[1][1])
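Quick usage example for the bounding-box convention above.

bbox = [[0, 2], [0, 3]]      # x in [0, 2], y in [0, 3]
contains(bbox, [1, 1])       # True
contains(bbox, [5, 1])       # False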
Python
def make_radio_combination_signature(radio_annotation, wcs, atlas_positions,
                                     subject, pix_offset):
    """Generates a unique signature for a radio annotation.

    radio_annotation: 'radio' dictionary from a classification.
    wcs: World coordinate system associated with the ATLAS image.
    atlas_positions: [[RA, DEC]] NumPy array.
    subject: RGZ subject dict.
    pix_offset: (x, y) pixel position of this radio subject on the ATLAS
        image.
    -> Something immutable
    """
    from . import rgz_data as data
    # TODO(MatthewJA): This only works on ATLAS. Generalise.
    # My choice of immutable object will be stringified crowdastro ATLAS
    # indices.
    zooniverse_id = subject['zooniverse_id']
    subject_fits = data.get_radio_fits(subject)
    subject_wcs = astropy.wcs.WCS(subject_fits.header)

    atlas_ids = []
    x_offset, y_offset = pix_offset
    for c in radio_annotation.values():
        # Note that the x scale is not the same as the IR scale, but the
        # scale factor is included in the annotation, so I have multiplied
        # this out here for consistency.
        scale_width = c.get('scale_width', '')
        scale_height = c.get('scale_height', '')
        if scale_width:
            scale_width = float(scale_width)
        else:
            # Sometimes, there's no scale, so I've included a default scale.
            scale_width = config['surveys']['atlas']['scale_width']

        if scale_height:
            scale_height = float(scale_height)
        else:
            scale_height = config['surveys']['atlas']['scale_height']

        # These numbers are in terms of the PNG images, so I need to multiply
        # by the click-to-fits ratio.
        scale_width *= config['surveys']['atlas']['click_to_fits_x']
        scale_height *= config['surveys']['atlas']['click_to_fits_y']

        subject_bbox = [
            [
                float(c['xmin']) * scale_width,
                float(c['xmax']) * scale_width,
            ],
            [
                float(c['ymin']) * scale_height,
                float(c['ymax']) * scale_height,
            ],
        ]

        # ...and by the mosaic ratio. There's probably double-up here, but
        # this makes more sense.
        scale_width *= config['surveys']['atlas']['mosaic_scale_x']
        scale_height *= config['surveys']['atlas']['mosaic_scale_y']

        # Get the bounding box of the radio source in pixels.
        # Format: [xs, ys]
        bbox = [
            [
                float(c['xmin']) * scale_width,
                float(c['xmax']) * scale_width,
            ],
            [
                float(c['ymin']) * scale_height,
                float(c['ymax']) * scale_height,
            ],
        ]
        assert bbox[0][0] < bbox[0][1]
        assert bbox[1][0] < bbox[1][1]

        # Convert the bounding box into RA/DEC.
        bbox = wcs.all_pix2world(bbox[0] + x_offset, bbox[1] + y_offset,
                                 FITS_CONVENTION)
        subject_bbox = subject_wcs.all_pix2world(subject_bbox[0],
                                                 subject_bbox[1],
                                                 FITS_CONVENTION)
        # TODO(MatthewJA): Remove (or disable) this sanity check.
        # The bbox is backwards along the x-axis for some reason.
        bbox[0] = bbox[0][::-1]
        assert bbox[0][0] < bbox[0][1]
        assert bbox[1][0] < bbox[1][1]

        bbox = numpy.array(bbox)

        # What is this radio source called? Check if we have an object in the
        # bounding box. We'll cache these results because there is a lot of
        # overlap.
        cache_key = tuple(tuple(b) for b in bbox)
        if cache_key in bbox_cache_:
            index = bbox_cache_[cache_key]
        else:
            x_gt_min = atlas_positions[:, 0] >= bbox[0, 0]
            x_lt_max = atlas_positions[:, 0] <= bbox[0, 1]
            y_gt_min = atlas_positions[:, 1] >= bbox[1, 0]
            y_lt_max = atlas_positions[:, 1] <= bbox[1, 1]
            within = numpy.all([x_gt_min, x_lt_max, y_gt_min, y_lt_max],
                               axis=0)
            indices = numpy.where(within)[0]
            if len(indices) == 0:
                logging.debug('Skipping radio source not in catalogue for '
                              '%s', zooniverse_id)
                continue
            else:
                if len(indices) > 1:
                    logging.debug('Found multiple (%d) ATLAS matches '
                                  'for %s', len(indices), zooniverse_id)

                index = indices[0]
                bbox_cache_[cache_key] = index

        atlas_ids.append(str(index))

    atlas_ids.sort()

    if not atlas_ids:
        raise CatalogueError('No catalogued radio sources.')

    return ';'.join(atlas_ids)
Python
def parse_classification(classification, subject, atlas_positions, wcs,
                         pix_offset):
    """Converts a raw RGZ classification into a classification dict.

    Scales all positions and flips y axis of clicks.

    classification: RGZ classification dict.
    subject: Associated RGZ subject dict.
    atlas_positions: [[RA, DEC]] NumPy array.
    wcs: World coordinate system of the ATLAS image.
    pix_offset: (x, y) pixel position of this radio subject on the ATLAS
        image.
    -> dict mapping radio signature to list of corresponding IR host pixel
        locations.
    """
    result = {}

    n_invalid = 0

    for annotation in classification['annotations']:
        if 'radio' not in annotation:
            # This is a metadata annotation and we can ignore it.
            continue

        if annotation['radio'] == 'No Contours':
            # I'm not sure how this occurs. I'm going to ignore it.
            continue

        try:
            radio_signature = make_radio_combination_signature(
                annotation['radio'], wcs, atlas_positions, subject,
                pix_offset)
        except CatalogueError:
            # Ignore invalid annotations.
            n_invalid += 1
            logging.debug('Ignoring invalid annotation for %s.',
                          subject['zooniverse_id'])
            continue

        ir_locations = []
        if annotation['ir'] != 'No Sources':
            for ir_click in annotation['ir']:
                ir_x = float(annotation['ir'][ir_click]['x'])
                ir_y = float(annotation['ir'][ir_click]['y'])

                # Rescale to a consistent size.
                ir_x *= config['surveys']['atlas']['click_to_fits_x']
                ir_y *= config['surveys']['atlas']['click_to_fits_y']

                # Ignore out-of-range data.
                if not 0 <= ir_x <= config['surveys']['atlas']['fits_width']:
                    n_invalid += 1
                    continue

                if not 0 <= ir_y <= config['surveys']['atlas']['fits_height']:
                    n_invalid += 1
                    continue

                # Flip the y axis to match other data conventions.
                ir_y = config['surveys']['atlas']['fits_height'] - ir_y

                # Rescale to match the mosaic WCS.
                ir_x *= config['surveys']['atlas']['mosaic_scale_x']
                ir_y *= config['surveys']['atlas']['mosaic_scale_y']

                # Move to the reference location of the radio subject.
                ir_x += pix_offset[0]
                ir_y += pix_offset[1]

                # Convert the location into RA/DEC.
                (ir_x,), (ir_y,) = wcs.wcs_pix2world([ir_x], [ir_y], 1)

                ir_location = (ir_x, ir_y)
                ir_locations.append(ir_location)

        result[radio_signature] = ir_locations

    if n_invalid:
        logging.debug('%d invalid annotations for %s.', n_invalid,
                      subject['zooniverse_id'])

    return result