Columns: Code (string lengths 103 to 85.9k), Summary (list lengths 0 to 94)
Please provide a description of the function:def save_favorite_query(arg, **_): usage = 'Syntax: \\fs name query.\n\n' + favoritequeries.usage if not arg: return [(None, None, None, usage)] name, _, query = arg.partition(' ') # If either name or query is missing then print the usage and complain. if (not name) or (not query): return [(None, None, None, usage + 'Err: Both name and query are required.')] favoritequeries.save(name, query) return [(None, None, None, "Saved.")]
[ "Save a new favorite query.\n Returns (title, rows, headers, status)" ]
Please provide a description of the function:def delete_favorite_query(arg, **_): usage = 'Syntax: \\fd name.\n\n' + favoritequeries.usage if not arg: return [(None, None, None, usage)] status = favoritequeries.delete(arg) return [(None, None, None, status)]
[ "Delete an existing favorite query.\n " ]
Please provide a description of the function:def execute_system_command(arg, **_): usage = "Syntax: system [command].\n" if not arg: return [(None, None, None, usage)] try: command = arg.strip() if command.startswith('cd'): ok, error_message = handle_cd_command(arg) if not ok: return [(None, None, None, error_message)] return [(None, None, None, '')] args = arg.split(' ') process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, error = process.communicate() response = output if not error else error # Python 3 returns bytes. This needs to be decoded to a string. if isinstance(response, bytes): encoding = locale.getpreferredencoding(False) response = response.decode(encoding) return [(None, None, None, response)] except OSError as e: return [(None, None, None, 'OSError: %s' % e.strerror)]
[ "Execute a system shell command." ]
Please provide a description of the function:def need_completion_refresh(queries): tokens = { 'use', '\\u', 'create', 'drop' } for query in sqlparse.split(queries): try: first_token = query.split()[0] if first_token.lower() in tokens: return True except Exception: return False
[ "Determines if the completion needs a refresh by checking if the sql\n statement is an alter, create, drop or change db." ]
Please provide a description of the function:def is_mutating(status): if not status: return False mutating = set(['insert', 'update', 'delete', 'alter', 'create', 'drop', 'replace', 'truncate', 'load']) return status.split(None, 1)[0].lower() in mutating
[ "Determines if the statement is mutating based on the status." ]
Please provide a description of the function:def cli(execute, region, aws_access_key_id, aws_secret_access_key, s3_staging_dir, athenaclirc, profile, database): '''A Athena terminal client with auto-completion and syntax highlighting. \b Examples: - athenacli - athenacli my_database ''' if (athenaclirc == ATHENACLIRC) and (not os.path.exists(os.path.expanduser(ATHENACLIRC))): err_msg = ''' Welcome to athenacli! It seems this is your first time to run athenacli, we generated a default config file for you %s Please change it accordingly, and run athenacli again. ''' % ATHENACLIRC print(err_msg) write_default_config(DEFAULT_CONFIG_FILE, ATHENACLIRC) sys.exit(1) if profile != 'default': os.environ['AWS_PROFILE'] = profile athenacli = AthenaCli( region=region, aws_access_key_id=aws_access_key_id, aws_secret_access_key= aws_secret_access_key, s3_staging_dir=s3_staging_dir, athenaclirc=athenaclirc, profile=profile, database=database ) # --execute argument if execute: if os.path.exists(execute): with open(execute) as f: query = f.read() else: query = execute try: athenacli.formatter.format_name = 'csv' athenacli.run_query(query) exit(0) except Exception as e: click.secho(str(e), err=True, fg='red') exit(1) athenacli.run_cli()
[]
Please provide a description of the function:def change_prompt_format(self, arg, **_): if not arg: message = 'Missing required argument, format.' return [(None, None, None, message)] self.prompt = self.get_prompt(arg) return [(None, None, None, "Changed prompt format to %s" % arg)]
[ "\n Change the prompt format.\n " ]
Please provide a description of the function:def handle_editor_command(self, cli, document): # FIXME: using application.pre_run_callables like this here is not the best solution. # It's internal api of prompt_toolkit that may change. This was added to fix # https://github.com/dbcli/pgcli/issues/668. We may find a better way to do it in the future. saved_callables = cli.application.pre_run_callables while special.editor_command(document.text): filename = special.get_filename(document.text) query = (special.get_editor_query(document.text) or self.get_last_query()) sql, message = special.open_external_editor(filename, sql=query) if message: # Something went wrong. Raise an exception and bail. raise RuntimeError(message) cli.current_buffer.document = Document(sql, cursor_position=len(sql)) cli.application.pre_run_callables = [] document = cli.run() continue cli.application.pre_run_callables = saved_callables return document
[ "\n Editor command is any query that is prefixed or suffixed\n by a '\\e'. The reason for a while loop is because a user\n might edit a query multiple times.\n For eg:\n \"select * from \\e\"<enter> to edit it in vim, then come\n back to the prompt with the edited query \"select * from\n blah where q = 'abc'\\e\" to edit it again.\n :param cli: CommandLineInterface\n :param document: Document\n :return: Document\n " ]
Please provide a description of the function:def run_query(self, query, new_line=True): if (self.destructive_warning and confirm_destructive_query(query) is False): message = 'Wise choice. Command execution stopped.' click.echo(message) return results = self.sqlexecute.run(query) for result in results: title, rows, headers, _ = result self.formatter.query = query output = self.format_output(title, rows, headers) for line in output: click.echo(line, nl=new_line)
[ "Runs *query*." ]
Please provide a description of the function:def get_output_margin(self, status=None): margin = self.get_reserved_space() + self.get_prompt(self.prompt).count('\n') + 1 if special.is_timing_enabled(): margin += 1 if status: margin += 1 + status.count('\n') return margin
[ "Get the output margin (number of rows for the prompt, footer and\n timing message." ]
Please provide a description of the function:def output(self, output, status=None): if output: size = self.cli.output.get_size() margin = self.get_output_margin(status) fits = True buf = [] output_via_pager = self.explicit_pager and special.is_pager_enabled() for i, line in enumerate(output, 1): special.write_tee(line) special.write_once(line) if fits or output_via_pager: # buffering buf.append(line) if len(line) > size.columns or i > (size.rows - margin): fits = False if not self.explicit_pager and special.is_pager_enabled(): # doesn't fit, use pager output_via_pager = True if not output_via_pager: # doesn't fit, flush buffer for line in buf: click.secho(line) buf = [] else: click.secho(line) if buf: if output_via_pager: # sadly click.echo_via_pager doesn't accept generators click.echo_via_pager("\n".join(buf)) else: for line in buf: click.secho(line) if status: click.secho(status)
[ "Output text to stdout or a pager command.\n The status text is not outputted to pager or files.\n The message will be logged in the audit log, if enabled. The\n message will be written to the tee file, if enabled. The\n message will be written to the output file, if enabled.\n " ]
Please provide a description of the function:def _on_completions_refreshed(self, new_completer): with self._completer_lock: self.completer = new_completer # When cli is first launched we call refresh_completions before # instantiating the cli object. So it is necessary to check if cli # exists before trying the replace the completer object in cli. if self.cli: self.cli.current_buffer.completer = new_completer if self.cli: # After refreshing, redraw the CLI to clear the statusbar # "Refreshing completions..." indicator self.cli.request_redraw()
[ "Swap the completer object in cli with the newly created completer.\n " ]
Please provide a description of the function:def get_reserved_space(self): reserved_space_ratio = .45 max_reserved_space = 8 _, height = click.get_terminal_size() return min(int(round(height * reserved_space_ratio)), max_reserved_space)
[ "Get the number of lines to reserve for the completion menu." ]
Please provide a description of the function:def list_path(root_dir): res = [] if os.path.isdir(root_dir): for name in os.listdir(root_dir): res.append(name) return res
[ "List directory if exists.\n :param dir: str\n :return: list\n " ]
Please provide a description of the function:def complete_path(curr_dir, last_dir): if not last_dir or curr_dir.startswith(last_dir): return curr_dir elif last_dir == '~': return os.path.join(last_dir, curr_dir)
[ "Return the path to complete that matches the last entered component.\n If the last entered component is ~, expanded path would not\n match, so return all of the available paths.\n :param curr_dir: str\n :param last_dir: str\n :return: str\n " ]
Please provide a description of the function:def parse_path(root_dir): base_dir, last_dir, position = '', '', 0 if root_dir: base_dir, last_dir = os.path.split(root_dir) position = -len(last_dir) if last_dir else 0 return base_dir, last_dir, position
[ "Split path into head and last component for the completer.\n Also return position where last component starts.\n :param root_dir: str path\n :return: tuple of (string, string, int)\n " ]
Please provide a description of the function:def suggest_path(root_dir): if not root_dir: return [os.path.abspath(os.sep), '~', os.curdir, os.pardir] if '~' in root_dir: root_dir = os.path.expanduser(root_dir) if not os.path.exists(root_dir): root_dir, _ = os.path.split(root_dir) return list_path(root_dir)
[ "List all files and subdirectories in a directory.\n If the directory is not specified, suggest root directory,\n user directory, current and parent directory.\n :param root_dir: string: directory to list\n :return: list\n " ]
Please provide a description of the function:def extend_relations(self, data, kind): # 'data' is a generator object. It can throw an exception while being # consumed. This could happen if the user has launched the app without # specifying a database name. This exception must be handled to prevent # crashing. try: data = [self.escaped_names(d) for d in data] except Exception: data = [] # dbmetadata['tables'][$schema_name][$table_name] should be a list of # column names. Default to an asterisk metadata = self.dbmetadata[kind] for relname in data: try: metadata[self.dbname][relname[0]] = ['*'] except KeyError: _logger.error('%r %r listed in unrecognized schema %r', kind, relname[0], self.dbname) self.all_completions.add(relname[0])
[ "Extend metadata for tables or views\n :param data: list of (rel_name, ) tuples\n :param kind: either 'tables' or 'views'\n :return:\n " ]
Please provide a description of the function:def extend_columns(self, column_data, kind): # 'column_data' is a generator object. It can throw an exception while # being consumed. This could happen if the user has launched the app # without specifying a database name. This exception must be handled to # prevent crashing. try: column_data = [self.escaped_names(d, '"') for d in column_data] except Exception: column_data = [] metadata = self.dbmetadata[kind] for relname, column in column_data: metadata[self.dbname][relname].append(column) self.all_completions.add(column)
[ "Extend column metadata\n :param column_data: list of (rel_name, column_name) tuples\n :param kind: either 'tables' or 'views'\n :return:\n " ]
Please provide a description of the function:def find_matches(text, collection, start_only=False, fuzzy=True, casing=None): last = last_word(text, include='most_punctuations') text = last.lower() completions = [] if fuzzy: regex = '.*?'.join(map(escape, text)) pat = compile('(%s)' % regex) for item in sorted(collection): r = pat.search(item.lower()) if r: completions.append((len(r.group()), r.start(), item)) else: match_end_limit = len(text) if start_only else None for item in sorted(collection): match_point = item.lower().find(text, 0, match_end_limit) if match_point >= 0: completions.append((len(text), match_point, item)) if casing == 'auto': casing = 'lower' if last and last[-1].islower() else 'upper' def apply_case(kw): if casing == 'upper': return kw.upper() return kw.lower() return (Completion(z if casing is None else apply_case(z), -len(text)) for x, y, z in sorted(completions))
[ "Find completion matches for the given text.\n Given the user's input text and a collection of available\n completions, find completions matching the last word of the\n text.\n If `start_only` is True, the text will match an available\n completion only at the beginning. Otherwise, a completion is\n considered a match if the text appears anywhere within it.\n yields prompt_toolkit Completion instances for any matches found\n in the collection of available completions.\n " ]
Please provide a description of the function:def find_files(self, word): base_path, last_path, position = parse_path(word) paths = suggest_path(word) for name in sorted(paths): suggestion = complete_path(name, last_path) if suggestion: yield Completion(suggestion, position)
[ "Yield matching directory or file names.\n :param word:\n :return: iterable\n " ]
Please provide a description of the function:def populate_scoped_cols(self, scoped_tbls): columns = [] meta = self.dbmetadata for tbl in scoped_tbls: # A fully qualified schema.relname reference or default_schema # DO NOT escape schema names. schema = tbl[0] or self.dbname relname = tbl[1] escaped_relname = self.escape_name(tbl[1]) # We don't know if schema.relname is a table or view. Since # tables and views cannot share the same name, we can check one # at a time try: columns.extend(meta['tables'][schema][relname]) # Table exists, so don't bother checking for a view continue except KeyError: try: columns.extend(meta['tables'][schema][escaped_relname]) # Table exists, so don't bother checking for a view continue except KeyError: pass try: columns.extend(meta['views'][schema][relname]) except KeyError: pass return columns
[ "Find all columns in a set of scoped_tables\n :param scoped_tbls: list of (schema, table, alias) tuples\n :return: list of column names\n " ]
Please provide a description of the function:def populate_schema_objects(self, schema, obj_type): metadata = self.dbmetadata[obj_type] schema = schema or self.dbname try: objects = metadata[schema].keys() except KeyError: # schema doesn't exist objects = [] return objects
[ "Returns list of tables or functions for a (optional) schema" ]
Please provide a description of the function:def log(logger, level, message): if logger.parent.name != 'root': logger.log(level, message) else: print(message, file=sys.stderr)
[ "Logs message to stderr if logging isn't initialized." ]
Please provide a description of the function:def read_config_file(f): if isinstance(f, basestring): f = os.path.expanduser(f) try: config = ConfigObj(f, interpolation=False, encoding='utf8') except ConfigObjError as e: log(LOGGER, logging.ERROR, "Unable to parse line {0} of config file " "'{1}'.".format(e.line_number, f)) log(LOGGER, logging.ERROR, "Using successfully parsed config values.") return e.config except (IOError, OSError) as e: log(LOGGER, logging.WARNING, "You don't have permission to read " "config file '{0}'.".format(e.filename)) return None return config
[ "Read a config file." ]
Please provide a description of the function:def read_config_files(files): config = ConfigObj() for _file in files: _config = read_config_file(_file) if bool(_config) is True: config.merge(_config) config.filename = _config.filename return config
[ "Read and merge a list of config files." ]
Please provide a description of the function:def cli_bindings(): key_binding_manager = KeyBindingManager( enable_open_in_editor=True, enable_system_bindings=True, enable_auto_suggest_bindings=True, enable_search=True, enable_abort_and_exit_bindings=True) @key_binding_manager.registry.add_binding(Keys.F2) def _(event): _logger.debug('Detected F2 key.') buf = event.cli.current_buffer buf.completer.smart_completion = not buf.completer.smart_completion @key_binding_manager.registry.add_binding(Keys.F3) def _(event): _logger.debug('Detected F3 key.') buf = event.cli.current_buffer buf.always_multiline = not buf.always_multiline @key_binding_manager.registry.add_binding(Keys.F4) def _(event): _logger.debug('Detected F4 key.') if event.cli.editing_mode == EditingMode.VI: event.cli.editing_mode = EditingMode.EMACS else: event.cli.editing_mode = EditingMode.VI @key_binding_manager.registry.add_binding(Keys.Tab) def _(event): _logger.debug('Detected <Tab> key.') b = event.cli.current_buffer if b.complete_state: b.complete_next() else: event.cli.start_completion(select_first=True) @key_binding_manager.registry.add_binding(Keys.ControlSpace) def _(event): _logger.debug('Detected <C-Space> key.') b = event.cli.current_buffer if b.complete_state: b.complete_next() else: event.cli.start_completion(select_first=False) @key_binding_manager.registry.add_binding(Keys.ControlJ, filter=HasSelectedCompletion()) def _(event): _logger.debug('Detected <C-J> key.') event.current_buffer.complete_state = None b = event.cli.current_buffer b.complete_state = None return key_binding_manager
[ "\n Custom key bindings for cli.\n ", "\n Enable/Disable SmartCompletion Mode.\n ", "\n Enable/Disable Multiline Mode.\n ", "\n Toggle between Vi and Emacs mode.\n ", "\n Force autocompletion at cursor.\n ", "\n Initialize autocompletion at cursor.\n If the autocompletion menu is not showing, display it with the\n appropriate completions for the context.\n If the menu is showing, select the next completion.\n ", "\n Makes the enter key work as the tab key only when showing the menu.\n " ]
Please provide a description of the function:def confirm_destructive_query(queries): prompt_text = ("You're about to run a destructive command.\n" "Do you want to proceed? (y/n)") if is_destructive(queries) and sys.stdin.isatty(): return prompt(prompt_text, type=bool)
[ "Check if the query is destructive and prompts the user to confirm.\n Returns:\n * None if the query is non-destructive or we can't prompt the user.\n * True if the query is destructive and the user wants to proceed.\n * False if the query is destructive and the user doesn't want to proceed.\n " ]
Please provide a description of the function:def confirm(*args, **kwargs): try: return click.confirm(*args, **kwargs) except click.Abort: return False
[ "Prompt for confirmation (yes/no) and handle any abort exceptions." ]
Please provide a description of the function:def prompt(*args, **kwargs): try: return click.prompt(*args, **kwargs) except click.Abort: return False
[ "Prompt the user for input and handle any abort exceptions." ]
Please provide a description of the function:def style_factory(name, cli_style): try: style = pygments.styles.get_style_by_name(name) except ClassNotFound: style = pygments.styles.get_style_by_name('native') style_tokens = {} style_tokens.update(style.styles) custom_styles = {string_to_tokentype(x): y for x, y in cli_style.items()} style_tokens.update(custom_styles) class CliStyle(pygments.style.Style): default_styles = '' styles = style_tokens return CliStyle
[ "Create a Pygments Style class based on the user's preferences.\n :param str name: The name of a built-in Pygments style.\n :param dict cli_style: The user's token-type style preferences.\n " ]
Please provide a description of the function:def run(self, statement): '''Execute the sql in the database and return the results. The results are a list of tuples. Each tuple has 4 values (title, rows, headers, status). ''' # Remove spaces and EOL statement = statement.strip() if not statement: # Empty string yield (None, None, None, None) # Split the sql into separate queries and run each one. components = sqlparse.split(statement) for sql in components: # Remove spaces, eol and semi-colons. sql = sql.rstrip(';') # \G is treated specially since we have to set the expanded output. if sql.endswith('\\G'): special.set_expanded_output(True) sql = sql[:-2].strip() cur = self.conn.cursor() try: for result in special.execute(cur, sql): yield result except special.CommandNotFound: # Regular SQL cur.execute(sql) yield self.get_result(cur)
[]
Please provide a description of the function:def get_result(self, cursor): '''Get the current result's data from the cursor.''' title = headers = None # cursor.description is not None for queries that return result sets, # e.g. SELECT or SHOW. if cursor.description is not None: headers = [x[0] for x in cursor.description] rows = cursor.fetchall() status = '%d row%s in set' % (len(rows), '' if len(rows) == 1 else 's') else: logger.debug('No rows in result.') rows = None status = 'Query OK' return (title, rows, headers, status)
[]
Please provide a description of the function:def tables(self): '''Yields table names.''' with self.conn.cursor() as cur: cur.execute(self.TABLES_QUERY) for row in cur: yield row
[]
Please provide a description of the function:def table_columns(self): '''Yields column names.''' with self.conn.cursor() as cur: cur.execute(self.TABLE_COLUMNS_QUERY % self.database) for row in cur: yield row
[]
Please provide a description of the function:def create_toolbar_tokens_func(get_is_refreshing, show_fish_help): token = Token.Toolbar def get_toolbar_tokens(cli): result = [] result.append((token, ' ')) if cli.buffers[DEFAULT_BUFFER].always_multiline: result.append((token.On, '[F3] Multiline: ON ')) else: result.append((token.Off, '[F3] Multiline: OFF ')) if cli.buffers[DEFAULT_BUFFER].always_multiline: result.append((token, ' (Semi-colon [;] will end the line)')) if cli.editing_mode == EditingMode.VI: result.append(( token.On, 'Vi-mode ({})'.format(_get_vi_mode(cli)) )) if show_fish_help(): result.append((token, ' Right-arrow to complete suggestion')) if get_is_refreshing(): result.append((token, ' Refreshing completions...')) return result return get_toolbar_tokens
[ "\n Return a function that generates the toolbar tokens.\n " ]
Please provide a description of the function:def _get_vi_mode(cli): return { InputMode.INSERT: 'I', InputMode.NAVIGATION: 'N', InputMode.REPLACE: 'R', InputMode.INSERT_MULTIPLE: 'M' }[cli.vi_state.input_mode]
[ "Get the current vi mode for display." ]
Please provide a description of the function:def suggest_type(full_text, text_before_cursor): word_before_cursor = last_word(text_before_cursor, include='many_punctuations') identifier = None # here should be removed once sqlparse has been fixed try: # If we've partially typed a word then word_before_cursor won't be an empty # string. In that case we want to remove the partially typed string before # sending it to the sqlparser. Otherwise the last token will always be the # partially typed string which renders the smart completion useless because # it will always return the list of keywords as completion. if word_before_cursor: if word_before_cursor.endswith( '(') or word_before_cursor.startswith('\\'): parsed = sqlparse.parse(text_before_cursor) else: parsed = sqlparse.parse( text_before_cursor[:-len(word_before_cursor)]) # word_before_cursor may include a schema qualification, like # "schema_name.partial_name" or "schema_name.", so parse it # separately p = sqlparse.parse(word_before_cursor)[0] if p.tokens and isinstance(p.tokens[0], Identifier): identifier = p.tokens[0] else: parsed = sqlparse.parse(text_before_cursor) except (TypeError, AttributeError): return (Keyword(),) if len(parsed) > 1: # Multiple statements being edited -- isolate the current one by # cumulatively summing statement lengths to find the one that bounds the # current position current_pos = len(text_before_cursor) stmt_start, stmt_end = 0, 0 for statement in parsed: stmt_len = len(text_type(statement)) stmt_start, stmt_end = stmt_end, stmt_end + stmt_len if stmt_end >= current_pos: text_before_cursor = full_text[stmt_start:current_pos] full_text = full_text[stmt_start:] break elif parsed: # A single statement statement = parsed[0] else: # The empty string statement = None # Check for special commands and handle those separately if statement: # Be careful here because trivial whitespace is parsed as a statement, # but the statement won't have a first token tok1 = statement.token_first() if tok1 and tok1.value in ['\\', 'source']: return suggest_special(text_before_cursor) last_token = statement and statement.token_prev(len(statement.tokens))[1] or '' return suggest_based_on_last_token(last_token, text_before_cursor, full_text, identifier)
[ "Takes the full_text that is typed so far and also the text before the\n cursor to suggest completion type and scope.\n Returns a tuple with a type of entity ('table', 'column' etc) and a scope.\n A scope for a column category will be a list of tables.\n " ]
Please provide a description of the function:def export(defn): globals()[defn.__name__] = defn __all__.append(defn.__name__) return defn
[ "Decorator to explicitly mark functions that are exposed in a lib." ]
Please provide a description of the function:def run_step(*args, prompt=None): global DRY_RUN cmd = args print(' '.join(cmd)) if skip_step(): print('--- Skipping...') elif DRY_RUN: print('--- Pretending to run...') else: if prompt: print(prompt) subprocess.check_output(cmd)
[ "\n Prints out the command and asks if it should be run.\n If yes (default), runs it.\n :param args: list of strings (command and args)\n " ]
Please provide a description of the function:def execute(cur, sql): command, verbose, arg = parse_special_command(sql) if (command not in COMMANDS) and (command.lower() not in COMMANDS): raise CommandNotFound try: special_cmd = COMMANDS[command] except KeyError: special_cmd = COMMANDS[command.lower()] if special_cmd.case_sensitive: raise CommandNotFound('Command not found: %s' % command) # "help <SQL KEYWORD> is a special case. if command == 'help' and arg: return show_keyword_help(cur=cur, arg=arg) if special_cmd.arg_type == NO_QUERY: return special_cmd.handler() elif special_cmd.arg_type == PARSED_QUERY: return special_cmd.handler(cur=cur, arg=arg, verbose=verbose) elif special_cmd.arg_type == RAW_QUERY: return special_cmd.handler(cur=cur, query=sql)
[ "Execute a special command and return the results. If the special command\n is not supported a KeyError will be raised.\n " ]
Please provide a description of the function:def show_keyword_help(cur, arg): keyword = arg.strip('"').strip("'") query = "help '{0}'".format(keyword) log.debug(query) cur.execute(query) if cur.description and cur.rowcount > 0: headers = [x[0] for x in cur.description] return [(None, cur.fetchall(), headers, '')] else: return [(None, None, None, 'No help found for {0}.'.format(keyword))]
[ "\n Call the built-in \"show <command>\", to display help for an SQL keyword.\n :param cur: cursor\n :param arg: string\n :return: list\n " ]
Please provide a description of the function:def last_word(text, include='alphanum_underscore'): if not text: # Empty string return '' if text[-1].isspace(): return '' else: regex = cleanup_regex[include] matches = regex.search(text) if matches: return matches.group(0) else: return ''
[ "\n Find the last word in a sentence.\n >>> last_word('abc')\n 'abc'\n >>> last_word(' abc')\n 'abc'\n >>> last_word('')\n ''\n >>> last_word(' ')\n ''\n >>> last_word('abc ')\n ''\n >>> last_word('abc def')\n 'def'\n >>> last_word('abc def ')\n ''\n >>> last_word('abc def;')\n ''\n >>> last_word('bac $def')\n 'def'\n >>> last_word('bac $def', include='most_punctuations')\n '$def'\n >>> last_word('bac \\def', include='most_punctuations')\n '\\\\\\\\def'\n >>> last_word('bac \\def;', include='most_punctuations')\n '\\\\\\\\def;'\n >>> last_word('bac::def', include='most_punctuations')\n 'def'\n " ]
Please provide a description of the function:def extract_table_identifiers(token_stream): for item in token_stream: if isinstance(item, IdentifierList): for identifier in item.get_identifiers(): # Sometimes Keywords (such as FROM ) are classified as # identifiers which don't have the get_real_name() method. try: schema_name = identifier.get_parent_name() real_name = identifier.get_real_name() except AttributeError: continue if real_name: yield (schema_name, real_name, identifier.get_alias()) elif isinstance(item, Identifier): real_name = item.get_real_name() schema_name = item.get_parent_name() if real_name: yield (schema_name, real_name, item.get_alias()) else: name = item.get_name() yield (None, name, item.get_alias() or name) elif isinstance(item, Function): yield (None, item.get_name(), item.get_name())
[ "yields tuples of (schema_name, table_name, table_alias)" ]
Please provide a description of the function:def extract_tables(sql): parsed = sqlparse.parse(sql) if not parsed: return [] # INSERT statements must stop looking for tables at the sign of first # Punctuation. eg: INSERT INTO abc (col1, col2) VALUES (1, 2) # abc is the table name, but if we don't stop at the first lparen, then # we'll identify abc, col1 and col2 as table names. insert_stmt = parsed[0].token_first().value.lower() == 'insert' stream = extract_from_part(parsed[0], stop_at_punctuation=insert_stmt) return list(extract_table_identifiers(stream))
[ "Extract the table names from an SQL statment.\n Returns a list of (schema, table, alias) tuples\n " ]
Please provide a description of the function:def find_prev_keyword(sql): if not sql.strip(): return None, '' parsed = sqlparse.parse(sql)[0] flattened = list(parsed.flatten()) logical_operators = ('AND', 'OR', 'NOT', 'BETWEEN') for t in reversed(flattened): if t.value == '(' or (t.is_keyword and ( t.value.upper() not in logical_operators)): # Find the location of token t in the original parsed statement # We can't use parsed.token_index(t) because t may be a child token # inside a TokenList, in which case token_index thows an error # Minimal example: # p = sqlparse.parse('select * from foo where bar') # t = list(p.flatten())[-3] # The "Where" token # p.token_index(t) # Throws ValueError: not in list idx = flattened.index(t) # Combine the string values of all tokens in the original list # up to and including the target keyword token t, to produce a # query string with everything after the keyword token removed text = ''.join(tok.value for tok in flattened[:idx+1]) return t, text return None, ''
[ " Find the last sql keyword in an SQL statement\n Returns the value of the last keyword, and the text of the query with\n everything after the last keyword stripped\n " ]
Please provide a description of the function:def query_starts_with(query, prefixes): prefixes = [prefix.lower() for prefix in prefixes] formatted_sql = sqlparse.format(query.lower(), strip_comments=True) return bool(formatted_sql) and formatted_sql.split()[0] in prefixes
[ "Check if the query starts with any item from *prefixes*." ]
Please provide a description of the function:def queries_start_with(queries, prefixes): for query in sqlparse.split(queries): if query and query_starts_with(query, prefixes) is True: return True return False
[ "Check if any queries start with any item from *prefixes*." ]
Please provide a description of the function:def _get_thumbnail_options(self, context, instance): width, height = None, None subject_location = False placeholder_width = context.get('width', None) placeholder_height = context.get('height', None) if instance.use_autoscale and placeholder_width: # use the placeholder width as a hint for sizing width = int(placeholder_width) if instance.use_autoscale and placeholder_height: height = int(placeholder_height) elif instance.width: width = instance.width if instance.height: height = instance.height if instance.image: if instance.image.subject_location: subject_location = instance.image.subject_location if not height and width: # height was not externally defined: use ratio to scale it by the width height = int(float(width) * float(instance.image.height) / float(instance.image.width)) if not width and height: # width was not externally defined: use ratio to scale it by the height width = int(float(height) * float(instance.image.width) / float(instance.image.height)) if not width: # width is still not defined. fallback the actual image width width = instance.image.width if not height: # height is still not defined. fallback the actual image height height = instance.image.height return {'size': (width, height), 'subject_location': subject_location}
[ "\n Return the size and options of the thumbnail that should be inserted\n " ]
Please provide a description of the function:def create_image_plugin(filename, image, parent_plugin, **kwargs): from cmsplugin_filer_image.models import FilerImage from filer.models import Image image_plugin = FilerImage() image_plugin.placeholder = parent_plugin.placeholder image_plugin.parent = CMSPlugin.objects.get(pk=parent_plugin.id) image_plugin.position = CMSPlugin.objects.filter(parent=parent_plugin).count() image_plugin.language = parent_plugin.language image_plugin.plugin_type = 'FilerImagePlugin' image.seek(0) image_model = Image.objects.create(file=SimpleUploadedFile(name=filename, content=image.read())) image_plugin.image = image_model image_plugin.save() return image_plugin
[ "\n Used for drag-n-drop image insertion with djangocms-text-ckeditor.\n Set TEXT_SAVE_IMAGE_FUNCTION='cmsplugin_filer_image.integrations.ckeditor.create_image_plugin' to enable.\n " ]
Please provide a description of the function:def rename_tables(db, table_mapping, reverse=False): from django.db import connection if reverse: table_mapping = [(dst, src) for src, dst in table_mapping] table_names = connection.introspection.table_names() for source, destination in table_mapping: if source in table_names and destination in table_names: print(u" WARNING: not renaming {0} to {1}, because both tables already exist.".format(source, destination)) elif source in table_names and destination not in table_names: print(u" - renaming {0} to {1}".format(source, destination)) db.rename_table(source, destination)
[ "\n renames tables from source to destination name, if the source exists and the destination does\n not exist yet.\n " ]
Please provide a description of the function:def group_and_sort_statements(stmt_list, ev_totals=None): def _count(stmt): if ev_totals is None: return len(stmt.evidence) else: return ev_totals[stmt.get_hash()] stmt_rows = defaultdict(list) stmt_counts = defaultdict(lambda: 0) arg_counts = defaultdict(lambda: 0) for key, s in _get_keyed_stmts(stmt_list): # Update the counts, and add key if needed. stmt_rows[key].append(s) # Keep track of the total evidence counts for this statement and the # arguments. stmt_counts[key] += _count(s) # Add up the counts for the arguments, pairwise for Complexes and # Conversions. This allows, for example, a complex between MEK, ERK, # and something else to lend weight to the interactions between MEK # and ERK. if key[0] == 'Conversion': subj = key[1] for obj in key[2] + key[3]: arg_counts[(subj, obj)] += _count(s) else: arg_counts[key[1:]] += _count(s) # Sort the rows by count and agent names. def process_rows(stmt_rows): for key, stmts in stmt_rows.items(): verb = key[0] inps = key[1:] sub_count = stmt_counts[key] arg_count = arg_counts[inps] if verb == 'Complex' and sub_count == arg_count and len(inps) <= 2: if all([len(set(ag.name for ag in s.agent_list())) > 2 for s in stmts]): continue new_key = (arg_count, inps, sub_count, verb) stmts = sorted(stmts, key=lambda s: _count(s) + 1/(1+len(s.agent_list())), reverse=True) yield new_key, verb, stmts sorted_groups = sorted(process_rows(stmt_rows), key=lambda tpl: tpl[0], reverse=True) return sorted_groups
[ "Group statements by type and arguments, and sort by prevalence.\n\n Parameters\n ----------\n stmt_list : list[Statement]\n A list of INDRA statements.\n ev_totals : dict{int: int}\n A dictionary, keyed by statement hash (shallow) with counts of total\n evidence as the values. Including this will allow statements to be\n better sorted.\n\n Returns\n -------\n sorted_groups : list[tuple]\n A list of tuples containing a sort key, the statement type, and a list\n of statements, also sorted by evidence count, for that key and type.\n The sort key contains a count of statements with those argument, the\n arguments (normalized strings), the count of statements with those\n arguements and type, and then the statement type.\n " ]
Please provide a description of the function:def make_stmt_from_sort_key(key, verb): def make_agent(name): if name == 'None' or name is None: return None return Agent(name) StmtClass = get_statement_by_name(verb) inps = list(key[1]) if verb == 'Complex': stmt = StmtClass([make_agent(name) for name in inps]) elif verb == 'Conversion': stmt = StmtClass(make_agent(inps[0]), [make_agent(name) for name in inps[1]], [make_agent(name) for name in inps[2]]) elif verb == 'ActiveForm' or verb == 'HasActivity': stmt = StmtClass(make_agent(inps[0]), inps[1], inps[2]) else: stmt = StmtClass(*[make_agent(name) for name in inps]) return stmt
[ "Make a Statement from the sort key.\n\n Specifically, the sort key used by `group_and_sort_statements`.\n " ]
Please provide a description of the function:def wait_for_complete(queue_name, job_list=None, job_name_prefix=None, poll_interval=10, idle_log_timeout=None, kill_on_log_timeout=False, stash_log_method=None, tag_instances=False, result_record=None): if stash_log_method == 's3' and job_name_prefix is None: raise Exception('A job_name_prefix is required to post logs on s3.') start_time = datetime.now() if job_list is None: job_id_list = [] else: job_id_list = [job['jobId'] for job in job_list] if result_record is None: result_record = {} def get_jobs_by_status(status, job_id_filter=None, job_name_prefix=None): res = batch_client.list_jobs(jobQueue=queue_name, jobStatus=status, maxResults=10000) jobs = res['jobSummaryList'] if job_name_prefix: jobs = [job for job in jobs if job['jobName'].startswith(job_name_prefix)] if job_id_filter: jobs = [job_def for job_def in jobs if job_def['jobId'] in job_id_filter] return jobs job_log_dict = {} def check_logs(job_defs): stalled_jobs = set() # Check the status of all the jobs we're tracking. for job_def in job_defs: try: # Get the logs for this job. log_lines = get_job_log(job_def, write_file=False) # Get the job id. jid = job_def['jobId'] now = datetime.now() if jid not in job_log_dict.keys(): # If the job is new... logger.info("Adding job %s to the log tracker at %s." % (jid, now)) job_log_dict[jid] = {'log': log_lines, 'last change time': now} elif len(job_log_dict[jid]['log']) == len(log_lines): # If the job log hasn't changed, announce as such, and check # to see if it has been the same for longer than stall time. check_dt = now - job_log_dict[jid]['last change time'] logger.warning(('Job \'%s\' has not produced output for ' '%d seconds.') % (job_def['jobName'], check_dt.seconds)) if check_dt.seconds > idle_log_timeout: logger.warning("Job \'%s\' has stalled." % job_def['jobName']) stalled_jobs.add(jid) else: # If the job is known, and the logs have changed, update the # "last change time". old_log = job_log_dict[jid]['log'] old_log += log_lines[len(old_log):] job_log_dict[jid]['last change time'] = now except Exception as e: # Sometimes due to sync et al. issues, a part of this will fail. # Such things are usually transitory issues so we keep trying. logger.error("Failed to check log for: %s" % str(job_def)) logger.exception(e) # Pass up the set of job id's for stalled jobs. return stalled_jobs # Don't start watching jobs added after this command was initialized. observed_job_def_dict = {} def get_dict_of_job_tuples(job_defs): return {jdef['jobId']: [(k, jdef[k]) for k in ['jobName', 'jobId']] for jdef in job_defs} batch_client = boto3.client('batch') if tag_instances: ecs_cluster_name = get_ecs_cluster_for_queue(queue_name, batch_client) terminate_msg = 'Job log has stalled for at least %f minutes.' terminated_jobs = set() stashed_id_set = set() while True: pre_run = [] for status in ('SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING'): pre_run += get_jobs_by_status(status, job_id_list, job_name_prefix) running = get_jobs_by_status('RUNNING', job_id_list, job_name_prefix) failed = get_jobs_by_status('FAILED', job_id_list, job_name_prefix) done = get_jobs_by_status('SUCCEEDED', job_id_list, job_name_prefix) observed_job_def_dict.update(get_dict_of_job_tuples(pre_run + running)) logger.info('(%d s)=(pre: %d, running: %d, failed: %d, done: %d)' % ((datetime.now() - start_time).seconds, len(pre_run), len(running), len(failed), len(done))) # Check the logs for new output, and possibly terminate some jobs. 
stalled_jobs = check_logs(running) if idle_log_timeout is not None: if kill_on_log_timeout: # Keep track of terminated jobs so we don't send a terminate # message twice. for jid in stalled_jobs - terminated_jobs: batch_client.terminate_job( jobId=jid, reason=terminate_msg % (idle_log_timeout/60.0) ) logger.info('Terminating %s.' % jid) terminated_jobs.add(jid) if job_id_list: if (len(failed) + len(done)) == len(job_id_list): ret = 0 break else: if (len(failed) + len(done) > 0) and \ (len(pre_run) + len(running) == 0): ret = 0 break if tag_instances: tag_instances_on_cluster(ecs_cluster_name) # Stash the logs of things that have finished so far. Note that jobs # terminated in this round will not be picked up until the next round. if stash_log_method: stash_logs(observed_job_def_dict, done, failed, queue_name, stash_log_method, job_name_prefix, start_time.strftime('%Y%m%d_%H%M%S'), ids_stashed=stashed_id_set) sleep(poll_interval) # Pick up any stragglers if stash_log_method: stash_logs(observed_job_def_dict, done, failed, queue_name, stash_log_method, job_name_prefix, start_time.strftime('%Y%m%d_%H%M%S'), ids_stashed=stashed_id_set) result_record['terminated'] = terminated_jobs result_record['failed'] = failed result_record['succeeded'] = done return ret
[ "Return when all jobs in the given list finished.\n\n If not job list is given, return when all jobs in queue finished.\n\n Parameters\n ----------\n queue_name : str\n The name of the queue to wait for completion.\n job_list : Optional[list(dict)]\n A list of jobID-s in a dict, as returned by the submit function.\n Example: [{'jobId': 'e6b00f24-a466-4a72-b735-d205e29117b4'}, ...]\n If not given, this function will return if all jobs completed.\n job_name_prefix : Optional[str]\n A prefix for the name of the jobs to wait for. This is useful if the\n explicit job list is not available but filtering is needed.\n poll_interval : Optional[int]\n The time delay between API calls to check the job statuses.\n idle_log_timeout : Optional[int] or None\n If not None, then track the logs of the active jobs, and if new output\n is not produced after `idle_log_timeout` seconds, a warning is printed.\n If `kill_on_log_timeout` is set to True, the job will also be\n terminated.\n kill_on_log_timeout : Optional[bool]\n If True, and if `idle_log_timeout` is set, jobs will be terminated\n after timeout. This has no effect if `idle_log_timeout` is None.\n Default is False.\n stash_log_method : Optional[str]\n Select a method to store the job logs, either 's3' or 'local'. If no\n method is specified, the logs will not be loaded off of AWS. If 's3' is\n specified, then `job_name_prefix` must also be given, as this will\n indicate where on s3 to store the logs.\n tag_instances : bool\n Default is False. If True, apply tags to the instances. This is toady\n typically done by each job, so in most cases this should not be needed.\n result_record : dict\n A dict which will be modified in place to record the results of the job.\n ", "Updates teh job_log_dict." ]
Please provide a description of the function:def get_ecs_cluster_for_queue(queue_name, batch_client=None): if batch_client is None: batch_client = boto3.client('batch') queue_resp = batch_client.describe_job_queues(jobQueues=[queue_name]) if len(queue_resp['jobQueues']) == 1: queue = queue_resp['jobQueues'][0] else: raise BatchReadingError('Error finding queue with name %s.' % queue_name) compute_env_names = queue['computeEnvironmentOrder'] if len(compute_env_names) == 1: compute_env_name = compute_env_names[0]['computeEnvironment'] else: raise BatchReadingError('Error finding the compute environment name ' 'for %s.' % queue_name) compute_envs = batch_client.describe_compute_environments( computeEnvironments=[compute_env_name] )['computeEnvironments'] if len(compute_envs) == 1: compute_env = compute_envs[0] else: raise BatchReadingError("Error getting compute environment %s for %s. " "Got %d environments instead of 1." % (compute_env_name, queue_name, len(compute_envs))) ecs_cluster_name = os.path.basename(compute_env['ecsClusterArn']) return ecs_cluster_name
[ "Get the name of the ecs cluster using the batch client." ]
Please provide a description of the function:def tag_instances_on_cluster(cluster_name, project='cwc'): # Get the relevant instance ids from the ecs cluster ecs = boto3.client('ecs') task_arns = ecs.list_tasks(cluster=cluster_name)['taskArns'] if not task_arns: return tasks = ecs.describe_tasks(cluster=cluster_name, tasks=task_arns)['tasks'] container_instances = ecs.describe_container_instances( cluster=cluster_name, containerInstances=[task['containerInstanceArn'] for task in tasks] )['containerInstances'] ec2_instance_ids = [ci['ec2InstanceId'] for ci in container_instances] # Instantiate each instance to tag as a resource and create project tag for instance_id in ec2_instance_ids: tag_instance(instance_id, project=project) return
[ "Adds project tag to untagged instances in a given cluster.\n\n Parameters\n ----------\n cluster_name : str\n The name of the AWS ECS cluster in which running instances\n should be tagged.\n project : str\n The name of the project to tag instances with.\n " ]
Please provide a description of the function:def submit_reading(basename, pmid_list_filename, readers, start_ix=None, end_ix=None, pmids_per_job=3000, num_tries=2, force_read=False, force_fulltext=False, project_name=None): sub = PmidSubmitter(basename, readers, project_name) sub.set_options(force_read, force_fulltext) sub.submit_reading(pmid_list_filename, start_ix, end_ix, pmids_per_job, num_tries) return sub.job_list
[ "Submit an old-style pmid-centered no-database s3 only reading job.\n\n This function is provided for the sake of backward compatibility. It is\n preferred that you use the object-oriented PmidSubmitter and the\n submit_reading job going forward.\n " ]
Please provide a description of the function:def submit_combine(basename, readers, job_ids=None, project_name=None): sub = PmidSubmitter(basename, readers, project_name) sub.job_list = job_ids sub.submit_combine() return sub
[ "Submit a batch job to combine the outputs of a reading job.\n\n This function is provided for backwards compatibility. You should use the\n PmidSubmitter and submit_combine methods.\n " ]
Please provide a description of the function:def create_read_parser(): import argparse parent_read_parser = argparse.ArgumentParser(add_help=False) parent_read_parser.add_argument( 'input_file', help=('Path to file containing input ids of content to read. For the ' 'no-db options, this is simply a file with each line being a ' 'pmid. For the with-db options, this is a file where each line ' 'is of the form \'<id type>:<id>\', for example \'pmid:12345\'') ) parent_read_parser.add_argument( '--start_ix', type=int, help='Start index of ids to read.' ) parent_read_parser.add_argument( '--end_ix', type=int, help='End index of ids to read. If `None`, read content from all ids.' ) parent_read_parser.add_argument( '--force_read', action='store_true', help='Read papers even if previously read by current REACH.' ) parent_read_parser.add_argument( '--force_fulltext', action='store_true', help='Get full text content even if content already on S3.' ) parent_read_parser.add_argument( '--ids_per_job', default=3000, type=int, help='Number of PMIDs to read for each AWS Batch job.' ) ''' Not currently supported. parent_read_parser.add_argument( '--num_tries', default=2, type=int, help='Maximum number of times to try running job.' ) ''' return parent_read_parser
[]
Please provide a description of the function:def submit_reading(self, input_fname, start_ix, end_ix, ids_per_job, num_tries=1, stagger=0): # stash this for later. self.ids_per_job = ids_per_job # Upload the pmid_list to Amazon S3 id_list_key = 'reading_results/%s/%s' % (self.basename, self._s3_input_name) s3_client = boto3.client('s3') s3_client.upload_file(input_fname, bucket_name, id_list_key) # If no end index is specified, read all the PMIDs if end_ix is None: with open(input_fname, 'rt') as f: lines = f.readlines() end_ix = len(lines) if start_ix is None: start_ix = 0 # Get environment variables environment_vars = get_environment() # Iterate over the list of PMIDs and submit the job in chunks batch_client = boto3.client('batch', region_name='us-east-1') job_list = [] for job_start_ix in range(start_ix, end_ix, ids_per_job): sleep(stagger) job_end_ix = job_start_ix + ids_per_job if job_end_ix > end_ix: job_end_ix = end_ix job_name, cmd = self._make_command(job_start_ix, job_end_ix) command_list = get_batch_command(cmd, purpose=self._purpose, project=self.project_name) logger.info('Command list: %s' % str(command_list)) job_info = batch_client.submit_job( jobName=job_name, jobQueue=self._job_queue, jobDefinition=self._job_def, containerOverrides={ 'environment': environment_vars, 'command': command_list}, retryStrategy={'attempts': num_tries} ) logger.info("submitted...") job_list.append({'jobId': job_info['jobId']}) self.job_list = job_list return job_list
[ "Submit a batch of reading jobs\n\n Parameters\n ----------\n input_fname : str\n The name of the file containing the ids to be read.\n start_ix : int\n The line index of the first item in the list to read.\n end_ix : int\n The line index of the last item in the list to be read.\n ids_per_job : int\n The number of ids to be given to each job.\n num_tries : int\n The number of times a job may be attempted.\n stagger : float\n The number of seconds to wait between job submissions.\n\n Returns\n -------\n job_list : list[str]\n A list of job id strings.\n " ]
Please provide a description of the function:def watch_and_wait(self, poll_interval=10, idle_log_timeout=None, kill_on_timeout=False, stash_log_method=None, tag_instances=False, **kwargs): return wait_for_complete(self._job_queue, job_list=self.job_list, job_name_prefix=self.basename, poll_interval=poll_interval, idle_log_timeout=idle_log_timeout, kill_on_log_timeout=kill_on_timeout, stash_log_method=stash_log_method, tag_instances=tag_instances, **kwargs)
[ "This provides shortcut access to the wait_for_complete_function." ]
Please provide a description of the function:def run(self, input_fname, ids_per_job, stagger=0, **wait_params): submit_thread = Thread(target=self.submit_reading, args=(input_fname, 0, None, ids_per_job), kwargs={'stagger': stagger}, daemon=True) submit_thread.start() self.watch_and_wait(**wait_params) submit_thread.join(0) if submit_thread.is_alive(): logger.warning("Submit thread is still running even after job" "completion.") return
[ "Run this submission all the way.\n\n This method will run both `submit_reading` and `watch_and_wait`,\n blocking on the latter.\n " ]
Please provide a description of the function:def set_options(self, force_read=False, force_fulltext=False): self.options['force_read'] = force_read self.options['force_fulltext'] = force_fulltext return
[ "Set the options for this run." ]
Please provide a description of the function:def get_chebi_name_from_id(chebi_id, offline=False): chebi_name = chebi_id_to_name.get(chebi_id) if chebi_name is None and not offline: chebi_name = get_chebi_name_from_id_web(chebi_id) return chebi_name
[ "Return a ChEBI name corresponding to the given ChEBI ID.\n\n Parameters\n ----------\n chebi_id : str\n The ChEBI ID whose name is to be returned.\n offline : Optional[bool]\n Choose whether to allow an online lookup if the local lookup fails. If\n True, the online lookup is not attempted. Default: False.\n\n Returns\n -------\n chebi_name : str\n The name corresponding to the given ChEBI ID. If the lookup\n fails, None is returned.\n " ]
Please provide a description of the function:def get_chebi_name_from_id_web(chebi_id): url_base = 'http://www.ebi.ac.uk/webservices/chebi/2.0/test/' url_fmt = url_base + 'getCompleteEntity?chebiId=%s' resp = requests.get(url_fmt % chebi_id) if resp.status_code != 200: logger.warning("Got bad code form CHEBI client: %s" % resp.status_code) return None tree = etree.fromstring(resp.content) # Get rid of the namespaces. # Credit: https://stackoverflow.com/questions/18159221/remove-namespace-and-prefix-from-xml-in-python-using-lxml for elem in tree.getiterator(): if not hasattr(elem.tag, 'find'): continue # (1) i = elem.tag.find('}') if i >= 0: elem.tag = elem.tag[i+1:] objectify.deannotate(tree, cleanup_namespaces=True) elem = tree.find('Body/getCompleteEntityResponse/return/chebiAsciiName') if elem is not None: return elem.text return None
[ "Return a ChEBI mame corresponding to a given ChEBI ID using a REST API.\n\n Parameters\n ----------\n chebi_id : str\n The ChEBI ID whose name is to be returned.\n\n Returns\n -------\n chebi_name : str\n The name corresponding to the given ChEBI ID. If the lookup\n fails, None is returned.\n " ]
Please provide a description of the function:def get_subnetwork(statements, nodes, relevance_network=None, relevance_node_lim=10): if relevance_network is not None: relevant_nodes = _find_relevant_nodes(nodes, relevance_network, relevance_node_lim) all_nodes = nodes + relevant_nodes else: all_nodes = nodes filtered_statements = _filter_statements(statements, all_nodes) pa = PysbAssembler() pa.add_statements(filtered_statements) model = pa.make_model() return model
[ "Return a PySB model based on a subset of given INDRA Statements.\n\n Statements are first filtered for nodes in the given list and other nodes\n are optionally added based on relevance in a given network. The filtered\n statements are then assembled into an executable model using INDRA's\n PySB Assembler.\n\n Parameters\n ----------\n statements : list[indra.statements.Statement]\n A list of INDRA Statements to extract a subnetwork from.\n nodes : list[str]\n The names of the nodes to extract the subnetwork for.\n relevance_network : Optional[str]\n The UUID of the NDEx network in which nodes relevant to the given\n nodes are found.\n relevance_node_lim : Optional[int]\n The maximal number of additional nodes to add to the subnetwork\n based on relevance.\n\n Returns\n -------\n model : pysb.Model\n A PySB model object assembled using INDRA's PySB Assembler from\n the INDRA Statements corresponding to the subnetwork.\n " ]
Please provide a description of the function:def _filter_statements(statements, agents): filtered_statements = [] for s in statements: if all([a is not None for a in s.agent_list()]) and \ all([a.name in agents for a in s.agent_list()]): filtered_statements.append(s) return filtered_statements
[ "Return INDRA Statements which have Agents in the given list.\n\n Only statements are returned in which all appearing Agents as in the\n agents list.\n\n Parameters\n ----------\n statements : list[indra.statements.Statement]\n A list of INDRA Statements to filter.\n agents : list[str]\n A list of agent names that need to appear in filtered statements.\n\n Returns\n -------\n filtered_statements : list[indra.statements.Statement]\n The list of filtered INDRA Statements.\n " ]
Please provide a description of the function:def _find_relevant_nodes(query_nodes, relevance_network, relevance_node_lim): all_nodes = relevance_client.get_relevant_nodes(relevance_network, query_nodes) nodes = [n[0] for n in all_nodes[:relevance_node_lim]] return nodes
[ "Return a list of nodes that are relevant for the query.\n\n Parameters\n ----------\n query_nodes : list[str]\n A list of node names to query for.\n relevance_network : str\n The UUID of the NDEx network to query relevance in.\n relevance_node_lim : int\n The number of top relevant nodes to return.\n\n Returns\n -------\n nodes : list[str]\n A list of node names that are relevant for the query.\n " ]
Please provide a description of the function:def process_jsonld_file(fname): with open(fname, 'r') as fh: json_dict = json.load(fh) return process_jsonld(json_dict)
[ "Process a JSON-LD file in the new format to extract Statements.\n\n Parameters\n ----------\n fname : str\n The path to the JSON-LD file to be processed.\n\n Returns\n -------\n indra.sources.hume.HumeProcessor\n A HumeProcessor instance, which contains a list of INDRA Statements\n as its statements attribute.\n " ]
Please provide a description of the function:def kill_all(job_queue, reason='None given', states=None): if states is None: states = ['STARTING', 'RUNNABLE', 'RUNNING'] batch = boto3.client('batch') runnable = batch.list_jobs(jobQueue=job_queue, jobStatus='RUNNABLE') job_info = runnable.get('jobSummaryList') if job_info: job_ids = [job['jobId'] for job in job_info] # Cancel jobs for job_id in job_ids: batch.cancel_job(jobId=job_id, reason=reason) res_list = [] for status in states: running = batch.list_jobs(jobQueue=job_queue, jobStatus=status) job_info = running.get('jobSummaryList') if job_info: job_ids = [job['jobId'] for job in job_info] for job_id in job_ids: logger.info('Killing %s' % job_id) res = batch.terminate_job(jobId=job_id, reason=reason) res_list.append(res) return res_list
[ "Terminates/cancels all RUNNING, RUNNABLE, and STARTING jobs." ]
Please provide a description of the function:def tag_instance(instance_id, **tags): logger.debug("Got request to add tags %s to instance %s." % (str(tags), instance_id)) ec2 = boto3.resource('ec2') instance = ec2.Instance(instance_id) # Remove None's from `tags` filtered_tags = {k: v for k, v in tags.items() if v and k} # Check for existing tags if instance.tags is not None: existing_tags = {tag.get('Key'): tag.get('Value') for tag in instance.tags} logger.debug("Ignoring existing tags; %s" % str(existing_tags)) for tag_key in existing_tags.keys(): filtered_tags.pop(tag_key, None) # If we have new tags to add, add them. tag_list = [{'Key': k, 'Value': v} for k, v in filtered_tags.items()] if len(tag_list): logger.info('Adding project tags "%s" to instance %s' % (filtered_tags, instance_id)) instance.create_tags(Tags=tag_list) else: logger.info('No new tags from: %s' % str(tags)) return
[ "Tag a single ec2 instance." ]
Please provide a description of the function:def tag_myself(project='cwc', **other_tags):
    base_url = "http://169.254.169.254"
    try:
        resp = requests.get(base_url + "/latest/meta-data/instance-id")
    except requests.exceptions.ConnectionError:
        logger.warning("Could not connect to service. Note this should only "
                       "be run from within a batch job.")
        return
    instance_id = resp.text
    tag_instance(instance_id, project=project, **other_tags)
    return
[ "Function run when indra is used in an EC2 instance to apply tags." ]
Please provide a description of the function:def get_batch_command(command_list, project=None, purpose=None):
    command_str = ' '.join(command_list)
    ret = ['python', '-m', 'indra.util.aws', 'run_in_batch', command_str]
    if not project and has_config('DEFAULT_AWS_PROJECT'):
        project = get_config('DEFAULT_AWS_PROJECT')
    if project:
        ret += ['--project', project]
    if purpose:
        ret += ['--purpose', purpose]
    return ret
[ "Get the command appropriate for running something on batch." ]
Please provide a description of the function:def get_jobs(job_queue='run_reach_queue', job_status='RUNNING'):
    batch = boto3.client('batch')
    jobs = batch.list_jobs(jobQueue=job_queue, jobStatus=job_status)
    return jobs.get('jobSummaryList')
[ "Returns a list of dicts with jobName and jobId for each job with the\n given status." ]
Please provide a description of the function:def get_job_log(job_info, log_group_name='/aws/batch/job', write_file=True, verbose=False): job_name = job_info['jobName'] job_id = job_info['jobId'] logs = boto3.client('logs') batch = boto3.client('batch') resp = batch.describe_jobs(jobs=[job_id]) job_desc = resp['jobs'][0] job_def_name = job_desc['jobDefinition'].split('/')[-1].split(':')[0] task_arn_id = job_desc['container']['taskArn'].split('/')[-1] log_stream_name = '%s/default/%s' % (job_def_name, task_arn_id) stream_resp = logs.describe_log_streams( logGroupName=log_group_name, logStreamNamePrefix=log_stream_name) streams = stream_resp.get('logStreams') if not streams: logger.warning('No streams for job') return None elif len(streams) > 1: logger.warning('More than 1 stream for job, returning first') log_stream_name = streams[0]['logStreamName'] if verbose: logger.info("Getting log for %s/%s" % (job_name, job_id)) out_file = ('%s_%s.log' % (job_name, job_id)) if write_file else None lines = get_log_by_name(log_group_name, log_stream_name, out_file, verbose) return lines
[ "Gets the Cloudwatch log associated with the given job.\n\n Parameters\n ----------\n job_info : dict\n dict containing entries for 'jobName' and 'jobId', e.g., as returned\n by get_jobs()\n log_group_name : string\n Name of the log group; defaults to '/aws/batch/job'\n write_file : boolean\n If True, writes the downloaded log to a text file with the filename\n '%s_%s.log' % (job_name, job_id)\n\n\n Returns\n -------\n list of strings\n The event messages in the log, with the earliest events listed first.\n " ]
Please provide a description of the function:def get_log_by_name(log_group_name, log_stream_name, out_file=None, verbose=True): logs = boto3.client('logs') kwargs = {'logGroupName': log_group_name, 'logStreamName': log_stream_name, 'startFromHead': True} lines = [] while True: response = logs.get_log_events(**kwargs) # If we've gotten all the events already, the nextForwardToken for # this call will be the same as the last one if response.get('nextForwardToken') == kwargs.get('nextToken'): break else: events = response.get('events') if events: lines += ['%s: %s\n' % (evt['timestamp'], evt['message']) for evt in events] kwargs['nextToken'] = response.get('nextForwardToken') if verbose: logger.info('%d %s' % (len(lines), lines[-1])) if out_file: with open(out_file, 'wt') as f: for line in lines: f.write(line) return lines
[ "Download a log given the log's group and stream name.\n\n Parameters\n ----------\n log_group_name : str\n The name of the log group, e.g. /aws/batch/job.\n\n log_stream_name : str\n The name of the log stream, e.g. run_reach_jobdef/default/<UUID>\n\n Returns\n -------\n lines : list[str]\n The lines of the log as a list.\n " ]
Please provide a description of the function:def dump_logs(job_queue='run_reach_queue', job_status='RUNNING'):
    jobs = get_jobs(job_queue, job_status)
    for job in jobs:
        get_job_log(job, write_file=True)
[ "Write logs for all jobs with given the status to files." ]
Please provide a description of the function:def get_s3_file_tree(s3, bucket, prefix): def get_some_keys(keys, marker=None): if marker: relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix, Marker=marker) else: relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix) keys.extend([entry['Key'] for entry in relevant_files['Contents'] if entry['Key'] != marker]) return relevant_files['IsTruncated'] file_keys = [] marker = None while get_some_keys(file_keys, marker): marker = file_keys[-1] file_tree = NestedDict() pref_path = prefix.split('/')[:-1] # avoid the trailing empty str. for key in file_keys: full_path = key.split('/') relevant_path = full_path[len(pref_path):] curr = file_tree for step in relevant_path: curr = curr[step] curr['key'] = key return file_tree
[ "Overcome s3 response limit and return NestedDict tree of paths.\n\n The NestedDict object also allows the user to search by the ends of a path.\n\n The tree mimics a file directory structure, with the leave nodes being the\n full unbroken key. For example, 'path/to/file.txt' would be retrieved by\n\n ret['path']['to']['file.txt']['key']\n\n The NestedDict object returned also has the capability to get paths that\n lead to a certain value. So if you wanted all paths that lead to something\n called 'file.txt', you could use\n\n ret.get_paths('file.txt')\n\n For more details, see the NestedDict docs.\n " ]
Please provide a description of the function:def make_model(self, use_name_as_key=False, include_mods=False, include_complexes=False): self.graph = nx.DiGraph() self._use_name_as_key = use_name_as_key for st in self.stmts: support_all = len(st.evidence) support_pmid = len(set([ev.pmid for ev in st.evidence if ev.pmid is not None])) attr = {'polarity': 'unknown', 'support_all': support_all, 'support_pmid': support_pmid} if isinstance(st, RegulateActivity): attr['polarity'] = ('positive' if st.is_activation else 'negative') self._add_node_edge(st.subj, st.obj, attr) elif include_mods and isinstance(st, Modification): self._add_node_edge(st.agent_list()[0], st.agent_list()[1], attr) elif include_mods and \ (isinstance(st, Gap) or isinstance(st, DecreaseAmount)): attr['polarity'] = 'negative' self._add_node_edge(st.agent_list()[0], st.agent_list()[1], attr) elif include_mods and \ (isinstance(st, Gef) or isinstance(st, IncreaseAmount)): attr['polarity'] = 'positive' self._add_node_edge(st.agent_list()[0], st.agent_list()[1], attr) elif include_complexes and isinstance(st, Complex): # Create s->t edges between all possible pairs of complex # members for node1, node2 in itertools.permutations(st.members, 2): self._add_node_edge(node1, node2, attr)
[ "Assemble the graph from the assembler's list of INDRA Statements.\n\n Parameters\n ----------\n use_name_as_key : boolean\n If True, uses the name of the agent as the key to the nodes in\n the network. If False (default) uses the matches_key() of the\n agent.\n include_mods : boolean\n If True, adds Modification statements into the graph as directed\n edges. Default is False.\n include_complexes : boolean\n If True, creates two edges (in both directions) between all pairs\n of nodes in Complex statements. Default is False.\n " ]
Please provide a description of the function:def print_model(self, include_unsigned_edges=False): sif_str = '' for edge in self.graph.edges(data=True): n1 = edge[0] n2 = edge[1] data = edge[2] polarity = data.get('polarity') if polarity == 'negative': rel = '-1' elif polarity == 'positive': rel = '1' elif include_unsigned_edges: rel = '0' else: continue sif_str += '%s %s %s\n' % (n1, rel, n2) return sif_str
[ "Return a SIF string of the assembled model.\n\n Parameters\n ----------\n include_unsigned_edges : bool\n If True, includes edges with an unknown activating/inactivating\n relationship (e.g., most PTMs). Default is False.\n " ]
Please provide a description of the function:def save_model(self, fname, include_unsigned_edges=False):
    sif_str = self.print_model(include_unsigned_edges)
    with open(fname, 'wb') as fh:
        fh.write(sif_str.encode('utf-8'))
[ "Save the assembled model's SIF string into a file.\n\n Parameters\n ----------\n fname : str\n The name of the file to save the SIF into.\n include_unsigned_edges : bool\n If True, includes edges with an unknown activating/inactivating\n relationship (e.g., most PTMs). Default is False.\n " ]
Please provide a description of the function:def print_loopy(self, as_url=True): init_str = '' node_id = 1 node_list = {} for node, data in self.graph.nodes(data=True): node_name = data['name'] nodex = int(500*numpy.random.rand()) nodey = int(500*numpy.random.rand()) hue = int(5*numpy.random.rand()) node_attr = [node_id, nodex, nodey, 1, node_name, hue] node_list[node] = node_attr node_id += 1 nodes = list(node_list.values()) edges = [] for s, t, data in self.graph.edges(data=True): s_id = node_list[s][0] t_id = node_list[t][0] if data['polarity'] == 'positive': pol = 1 else: pol = -1 edge = [s_id, t_id, 89, pol, 0] edges.append(edge) labels = [] components = [nodes, edges, labels] model = json.dumps(components, separators=(',', ':')) if as_url: model = 'http://ncase.me/loopy/v1/?data=' + model return model
[ "Return \n\n Parameters\n ----------\n out_file : Optional[str]\n A file name in which the Loopy network is saved.\n\n Returns\n -------\n full_str : str\n The string representing the Loopy network.\n " ]
Please provide a description of the function:def print_boolean_net(self, out_file=None): init_str = '' for node_key in self.graph.nodes(): node_name = self.graph.node[node_key]['name'] init_str += '%s = False\n' % node_name rule_str = '' for node_key in self.graph.nodes(): node_name = self.graph.node[node_key]['name'] in_edges = self.graph.in_edges(node_key) if not in_edges: continue parents = [e[0] for e in in_edges] polarities = [self.graph.edge[e[0]][node_key]['polarity'] for e in in_edges] pos_parents = [par for par, pol in zip(parents, polarities) if pol == 'positive'] neg_parents = [par for par, pol in zip(parents, polarities) if pol == 'negative'] rhs_pos_parts = [] for par in pos_parents: rhs_pos_parts.append(self.graph.node[par]['name']) rhs_pos_str = ' or '.join(rhs_pos_parts) rhs_neg_parts = [] for par in neg_parents: rhs_neg_parts.append(self.graph.node[par]['name']) rhs_neg_str = ' or '.join(rhs_neg_parts) if rhs_pos_str: if rhs_neg_str: rhs_str = '(' + rhs_pos_str + \ ') and not (' + rhs_neg_str + ')' else: rhs_str = rhs_pos_str else: rhs_str = 'not (' + rhs_neg_str + ')' node_eq = '%s* = %s\n' % (node_name, rhs_str) rule_str += node_eq full_str = init_str + '\n' + rule_str if out_file is not None: with open(out_file, 'wt') as fh: fh.write(full_str) return full_str
[ "Return a Boolean network from the assembled graph.\n\n See https://github.com/ialbert/booleannet for details about\n the format used to encode the Boolean rules.\n\n Parameters\n ----------\n out_file : Optional[str]\n A file name in which the Boolean network is saved.\n\n Returns\n -------\n full_str : str\n The string representing the Boolean network.\n " ]
Please provide a description of the function:def _ensure_api_keys(task_desc, failure_ret=None): def check_func_wrapper(func): @wraps(func) def check_api_keys(*args, **kwargs): global ELSEVIER_KEYS if ELSEVIER_KEYS is None: ELSEVIER_KEYS = {} # Try to read in Elsevier API keys. For each key, first check # the environment variables, then check the INDRA config file. if not has_config(INST_KEY_ENV_NAME): logger.warning('Institution API key %s not found in config ' 'file or environment variable: this will ' 'limit access for %s' % (INST_KEY_ENV_NAME, task_desc)) ELSEVIER_KEYS['X-ELS-Insttoken'] = get_config(INST_KEY_ENV_NAME) if not has_config(API_KEY_ENV_NAME): logger.error('API key %s not found in configuration file ' 'or environment variable: cannot %s' % (API_KEY_ENV_NAME, task_desc)) return failure_ret ELSEVIER_KEYS['X-ELS-APIKey'] = get_config(API_KEY_ENV_NAME) elif 'X-ELS-APIKey' not in ELSEVIER_KEYS.keys(): logger.error('No Elsevier API key %s found: cannot %s' % (API_KEY_ENV_NAME, task_desc)) return failure_ret return func(*args, **kwargs) return check_api_keys return check_func_wrapper
[ "Wrap Elsevier methods which directly use the API keys.\n\n Ensure that the keys are retrieved from the environment or config file when\n first called, and store global scope. Subsequently use globally stashed\n results and check for required ids.\n " ]
Please provide a description of the function:def check_entitlement(doi):
    if doi.lower().startswith('doi:'):
        doi = doi[4:]
    url = '%s/%s' % (elsevier_entitlement_url, doi)
    params = {'httpAccept': 'text/xml'}
    res = requests.get(url, params, headers=ELSEVIER_KEYS)
    if not res.status_code == 200:
        logger.error('Could not check entitlements for article %s: '
                     'status code %d' % (doi, res.status_code))
        logger.error('Response content: %s' % res.text)
        return False
    return True
[ "Check whether IP and credentials enable access to content for a doi.\n\n This function uses the entitlement endpoint of the Elsevier API to check\n whether an article is available to a given institution. Note that this\n feature of the API is itself not available for all institution keys.\n " ]
Please provide a description of the function:def download_article(id_val, id_type='doi', on_retry=False): if id_type == 'pmid': id_type = 'pubmed_id' url = '%s/%s' % (elsevier_article_url_fmt % id_type, id_val) params = {'httpAccept': 'text/xml'} res = requests.get(url, params, headers=ELSEVIER_KEYS) if res.status_code == 404: logger.info("Resource for %s not available on elsevier." % url) return None elif res.status_code == 429: if not on_retry: logger.warning("Broke the speed limit. Waiting half a second then " "trying again...") sleep(0.5) return download_article(id_val, id_type, True) else: logger.error("Still breaking speed limit after waiting.") logger.error("Elsevier response: %s" % res.text) return None elif res.status_code != 200: logger.error('Could not download article %s: status code %d' % (url, res.status_code)) logger.error('Elsevier response: %s' % res.text) return None else: content_str = res.content.decode('utf-8') if content_str.startswith('<service-error>'): logger.error('Got a service error with 200 status: %s' % content_str) return None # Return the XML content as a unicode string, assuming UTF-8 encoding return content_str
[ "Low level function to get an XML article for a particular id.\n\n Parameters\n ----------\n id_val : str\n The value of the id.\n id_type : str\n The type of id, such as pmid (a.k.a. pubmed_id), doi, or eid.\n on_retry : bool\n This function has a recursive retry feature, and this is the only time\n this parameter should be used.\n\n Returns\n -------\n content : str or None\n If found, the content string is returned, otherwise, None is returned.\n " ]
Please provide a description of the function:def download_article_from_ids(**id_dict):
    valid_id_types = ['eid', 'doi', 'pmid', 'pii']
    assert all([k in valid_id_types for k in id_dict.keys()]),\
        ("One of these id keys is invalid: %s Valid keys are: %s."
         % (list(id_dict.keys()), valid_id_types))
    if 'doi' in id_dict.keys() and id_dict['doi'].lower().startswith('doi:'):
        id_dict['doi'] = id_dict['doi'][4:]
    content = None
    for id_type in valid_id_types:
        if id_type in id_dict.keys():
            content = download_article(id_dict[id_type], id_type)
            if content is not None:
                break
    else:
        logger.error("Could not download article with any of the ids: %s."
                     % str(id_dict))
    return content
[ "Download an article in XML format from Elsevier matching the set of ids.\n\n Parameters\n ----------\n <id_type> : str\n You can enter any combination of eid, doi, pmid, and/or pii. Ids will be\n checked in that order, until either content has been found or all ids\n have been checked.\n\n Returns\n -------\n content : str or None\n If found, the content is returned as a string, otherwise None is\n returned.\n " ]
Please provide a description of the function:def get_abstract(doi):
    xml_string = download_article(doi)
    if xml_string is None:
        return None
    assert isinstance(xml_string, str)
    xml_tree = ET.XML(xml_string.encode('utf-8'), parser=UTB())
    if xml_tree is None:
        return None
    coredata = xml_tree.find('article:coredata', elsevier_ns)
    abstract = coredata.find('dc:description', elsevier_ns)
    abs_text = abstract.text
    return abs_text
[ "Get the abstract text of an article from Elsevier given a doi." ]
Please provide a description of the function:def get_article(doi, output_format='txt'):
    xml_string = download_article(doi)
    if output_format == 'txt' and xml_string is not None:
        text = extract_text(xml_string)
        return text
    return xml_string
[ "Get the full body of an article from Elsevier.\n\n Parameters\n ----------\n doi : str\n The doi for the desired article.\n output_format : 'txt' or 'xml'\n The desired format for the output. Selecting 'txt' (default) strips all\n xml tags and joins the pieces of text in the main text, while 'xml'\n simply takes the tag containing the body of the article and returns it\n as is . In the latter case, downstream code needs to be able to\n interpret Elsever's XML format.\n\n Returns\n -------\n content : str\n Either text content or xml, as described above, for the given doi.\n " ]
Please provide a description of the function:def extract_paragraphs(xml_string):
    assert isinstance(xml_string, str)
    xml_tree = ET.XML(xml_string.encode('utf-8'), parser=UTB())
    full_text = xml_tree.find('article:originalText', elsevier_ns)
    if full_text is None:
        logger.info('Could not find full text element article:originalText')
        return None
    article_body = _get_article_body(full_text)
    if article_body:
        return article_body
    raw_text = _get_raw_text(full_text)
    if raw_text:
        return [raw_text]
    return None
[ "Get paragraphs from the body of the given Elsevier xml." ]
Please provide a description of the function:def get_dois(query_str, count=100):
    url = '%s/%s' % (elsevier_search_url, query_str)
    params = {'query': query_str,
              'count': count,
              'httpAccept': 'application/xml',
              'sort': '-coverdate',
              'field': 'doi'}
    res = requests.get(url, params)
    if not res.status_code == 200:
        return None
    tree = ET.XML(res.content, parser=UTB())
    doi_tags = tree.findall('atom:entry/prism:doi', elsevier_ns)
    dois = [dt.text for dt in doi_tags]
    return dois
[ "Search ScienceDirect through the API for articles.\n\n See http://api.elsevier.com/content/search/fields/scidir for constructing a\n query string to pass here. Example: 'abstract(BRAF) AND all(\"colorectal\n cancer\")'\n " ]
Please provide a description of the function:def get_piis(query_str):
    dates = range(1960, datetime.datetime.now().year)
    all_piis = flatten([get_piis_for_date(query_str, date) for date in dates])
    return all_piis
[ "Search ScienceDirect through the API for articles and return PIIs.\n\n Note that ScienceDirect has a limitation in which a maximum of 6,000\n PIIs can be retrieved for a given search and therefore this call is\n internally broken up into multiple queries by a range of years and the\n results are combined.\n\n Parameters\n ----------\n query_str : str\n The query string to search with\n\n Returns\n -------\n piis : list[str]\n The list of PIIs identifying the papers returned by the search\n " ]
Please provide a description of the function:def get_piis_for_date(query_str, date): count = 200 params = {'query': query_str, 'count': count, 'start': 0, 'sort': '-coverdate', 'date': date, 'field': 'pii'} all_piis = [] while True: res = requests.get(elsevier_search_url, params, headers=ELSEVIER_KEYS) if not res.status_code == 200: logger.info('Got status code: %d' % res.status_code) break res_json = res.json() entries = res_json['search-results']['entry'] logger.info(res_json['search-results']['opensearch:totalResults']) if entries == [{'@_fa': 'true', 'error': 'Result set was empty'}]: logger.info('Search result was empty') return [] piis = [entry['pii'] for entry in entries] all_piis += piis # Get next batch links = res_json['search-results'].get('link', []) cont = False for link in links: if link.get('@ref') == 'next': logger.info('Found link to next batch of results.') params['start'] += count cont = True break if not cont: break return all_piis
[ "Search ScienceDirect with a query string constrained to a given year.\n\n Parameters\n ----------\n query_str : str\n The query string to search with\n date : str\n The year to constrain the search to\n\n Returns\n -------\n piis : list[str]\n The list of PIIs identifying the papers returned by the search\n " ]
Please provide a description of the function:def download_from_search(query_str, folder, do_extract_text=True, max_results=None): piis = get_piis(query_str) for pii in piis[:max_results]: if os.path.exists(os.path.join(folder, '%s.txt' % pii)): continue logger.info('Downloading %s' % pii) xml = download_article(pii, 'pii') sleep(1) if do_extract_text: txt = extract_text(xml) if not txt: continue with open(os.path.join(folder, '%s.txt' % pii), 'wb') as fh: fh.write(txt.encode('utf-8')) else: with open(os.path.join(folder, '%s.xml' % pii), 'wb') as fh: fh.write(xml.encode('utf-8')) return
[ "Save raw text files based on a search for papers on ScienceDirect.\n\n This performs a search to get PIIs, downloads the XML corresponding to\n the PII, extracts the raw text and then saves the text into a file\n in the designated folder.\n\n Parameters\n ----------\n query_str : str\n The query string to search with\n folder : str\n The local path to an existing folder in which the text files\n will be dumped\n do_extract_text : bool\n Choose whether to extract text from the xml, or simply save the raw xml\n files. Default is True, so text is extracted.\n max_results : int or None\n Default is None. If specified, limit the number of results to the given\n maximum.\n " ]
Please provide a description of the function:def extract_statement_from_query_result(self, res):
    agent_start, agent_end, affected_start, affected_end = res
    # Convert from rdflib literals to python integers so we can use
    # them to index strings
    agent_start = int(agent_start)
    agent_end = int(agent_end)
    affected_start = int(affected_start)
    affected_end = int(affected_end)

    # Find the text corresponding to these indices
    agent = self.text[agent_start:agent_end]
    affected = self.text[affected_start:affected_end]

    # Strip off surrounding whitespace
    agent = agent.lstrip().rstrip()
    affected = affected.lstrip().rstrip()

    # Make an Agent object for both the subject and the object
    subj = Agent(agent, db_refs={'TEXT': agent})
    obj = Agent(affected, db_refs={'TEXT': affected})
    statement = Influence(subj=subj, obj=obj)

    # Add the statement to the list of statements
    self.statements.append(statement)
[ "Adds a statement based on one element of a rdflib SPARQL query.\n\n Parameters\n ----------\n res: rdflib.query.ResultRow\n Element of rdflib SPARQL query result\n " ]
Please provide a description of the function:def extract_statements(self):
    # Look for events that have an AGENT and an AFFECTED, and get the
    # start and ending text indices for each. The SPARQL query literal,
    # listed in the Summary entry below, is restored inline here.
    query = prefixes + """
        SELECT
        ?agent_start
        ?agent_end
        ?affected_start
        ?affected_end
        WHERE {
            ?rel role:AGENT ?agent .
            ?rel role:AFFECTED ?affected .
            ?agent lf:start ?agent_start .
            ?agent lf:end ?agent_end .
            ?affected lf:start ?affected_start .
            ?affected lf:end ?affected_end .
            }
        """
    results = self.graph.query(query)
    for res in results:
        # Make a statement for each query match
        self.extract_statement_from_query_result(res)

    # Look for events that have an AGENT and a RESULT, and get the start
    # and ending text indices for each.
    query = query.replace('role:AFFECTED', 'role:RESULT')
    results = self.graph.query(query)
    for res in results:
        # Make a statement for each query match
        self.extract_statement_from_query_result(res)
[ "Extracts INDRA statements from the RDF graph via SPARQL queries.\n ", "\n SELECT\n ?agent_start\n ?agent_end\n ?affected_start\n ?affected_end\n WHERE {\n ?rel role:AGENT ?agent .\n ?rel role:AFFECTED ?affected .\n ?agent lf:start ?agent_start .\n ?agent lf:end ?agent_end .\n ?affected lf:start ?affected_start .\n ?affected lf:end ?affected_end .\n }\n " ]
Please provide a description of the function:def _recursively_lookup_complex(self, complex_id):
    assert complex_id in self.complex_map

    expanded_agent_strings = []
    expand_these_next = [complex_id]
    while len(expand_these_next) > 0:
        # Pop next element
        c = expand_these_next[0]
        expand_these_next = expand_these_next[1:]

        # If a complex, add expanding it to the end of the queue
        # If an agent string, add it to the agent string list immediately
        assert c in self.complex_map
        for s in self.complex_map[c]:
            if s in self.complex_map:
                expand_these_next.append(s)
            else:
                expanded_agent_strings.append(s)
    return expanded_agent_strings
[ "Looks up the constitutents of a complex. If any constituent is\n itself a complex, recursively expands until all constituents are\n not complexes." ]
Please provide a description of the function:def _get_complex_agents(self, complex_id): agents = [] components = self._recursively_lookup_complex(complex_id) for c in components: db_refs = {} name = uniprot_client.get_gene_name(c) if name is None: db_refs['SIGNOR'] = c else: db_refs['UP'] = c hgnc_id = hgnc_client.get_hgnc_id(name) if hgnc_id: db_refs['HGNC'] = hgnc_id famplex_key = ('SIGNOR', c) if famplex_key in famplex_map: db_refs['FPLX'] = famplex_map[famplex_key] if not name: name = db_refs['FPLX'] # Set agent name to Famplex name if # the Uniprot name is not available elif not name: # We neither have a Uniprot nor Famplex grounding logger.info('Have neither a Uniprot nor Famplex grounding ' + \ 'for ' + c) if not name: name = db_refs['SIGNOR'] # Set the agent name to the # Signor name if neither the # Uniprot nor Famplex names are # available assert(name is not None) agents.append(Agent(name, db_refs=db_refs)) return agents
[ "Returns a list of agents corresponding to each of the constituents\n in a SIGNOR complex." ]
Please provide a description of the function:def stmts_from_json(json_in, on_missing_support='handle'):
    stmts = []
    uuid_dict = {}
    for json_stmt in json_in:
        try:
            st = Statement._from_json(json_stmt)
        except Exception as e:
            logger.warning("Error creating statement: %s" % e)
            continue
        stmts.append(st)
        uuid_dict[st.uuid] = st
    for st in stmts:
        _promote_support(st.supports, uuid_dict, on_missing_support)
        _promote_support(st.supported_by, uuid_dict, on_missing_support)
    return stmts
[ "Get a list of Statements from Statement jsons.\n\n In the case of pre-assembled Statements which have `supports` and\n `supported_by` lists, the uuids will be replaced with references to\n Statement objects from the json, where possible. The method of handling\n missing support is controled by the `on_missing_support` key-word argument.\n\n Parameters\n ----------\n json_in : iterable[dict]\n A json list containing json dict representations of INDRA Statements,\n as produced by the `to_json` methods of subclasses of Statement, or\n equivalently by `stmts_to_json`.\n on_missing_support : Optional[str]\n Handles the behavior when a uuid reference in `supports` or\n `supported_by` attribute cannot be resolved. This happens because uuids\n can only be linked to Statements contained in the `json_in` list, and\n some may be missing if only some of all the Statements from pre-\n assembly are contained in the list.\n\n Options:\n\n - *'handle'* : (default) convert unresolved uuids into `Unresolved`\n Statement objects.\n - *'ignore'* : Simply omit any uuids that cannot be linked to any\n Statements in the list.\n - *'error'* : Raise an error upon hitting an un-linkable uuid.\n\n Returns\n -------\n stmts : list[:py:class:`Statement`]\n A list of INDRA Statements.\n " ]
Please provide a description of the function:def stmts_to_json_file(stmts, fname):
    with open(fname, 'w') as fh:
        json.dump(stmts_to_json(stmts), fh, indent=1)
[ "Serialize a list of INDRA Statements into a JSON file.\n\n Parameters\n ----------\n stmts : list[indra.statement.Statements]\n The list of INDRA Statements to serialize into the JSON file.\n fname : str\n Path to the JSON file to serialize Statements into.\n " ]