repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_documentation_string
stringlengths
1
47.2k
func_code_url
stringlengths
85
339
sanoma/django-arctic
arctic/templatetags/arctic_pagination_tags.py
str_to_bool
def str_to_bool(val):
    """
    Convert a string representation of "true" into the boolean ``True``.

    Strings are compared case-insensitively against "true", "on" and
    "yes".  The boolean ``True`` itself also passes (and, because
    ``1 == True``, so does the integer 1).  Everything else yields
    ``False``.
    """
    normalized = val.lower() if isinstance(val, str) else val
    return normalized in ("true", "on", "yes", True)
python
def str_to_bool(val): """ Helper function to turn a string representation of "true" into boolean True. """ if isinstance(val, str): val = val.lower() return val in ["true", "on", "yes", True]
Helper function to turn a string representation of "true" into boolean True.
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/templatetags/arctic_pagination_tags.py#L44-L52
sanoma/django-arctic
arctic/templatetags/arctic_pagination_tags.py
arctic_paginate
def arctic_paginate(parser, token):
    """
    Renders a Page object with pagination bar.

    Example::

        {% arctic_paginate page_obj paginator=page_obj.paginator range=10 %}

    Named Parameters::

        range - The size of the pagination bar (ie, if set to 10 then, at
            most, 10 page numbers will display at any given time).
            Defaults to None, which shows all pages.

        show_first_last - Accepts "true" or "false". Determines whether or
            not to show the first and last page links. Defaults to "false".

    Raises:
        TemplateSyntaxError: when no Page object is given or a keyword
            argument is not of the form ``name=value``.
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError(
            "'%s' takes at least one argument"
            " (Page object reference)" % bits[0]
        )
    page = parser.compile_filter(bits[1])
    kwargs = {}
    # Remaining bits are name=value pairs; values are compiled as
    # template filter expressions so variables and filters work.
    kwarg_re = re.compile(r"(\w+)=(.+)")
    for bit in bits[2:]:
        match = kwarg_re.match(bit)
        if not match:
            # Fixed copy-paste leftover: the message used to reference
            # the "bootstrap_pagination paginate" tag this code was
            # derived from, which misled users of this tag.
            raise TemplateSyntaxError(
                "Malformed arguments to arctic_paginate tag"
            )
        name, value = match.groups()
        kwargs[name] = parser.compile_filter(value)
    return PaginationNode(page, kwargs)
python
def arctic_paginate(parser, token):
    """
    Renders a Page object with pagination bar.

    Example::

        {% arctic_paginate page_obj paginator=page_obj.paginator range=10 %}

    Named Parameters::

        range - The size of the pagination bar (ie, if set to 10 then, at
            most, 10 page numbers will display at any given time).
            Defaults to None, which shows all pages.

        show_first_last - Accepts "true" or "false". Determines whether or
            not to show the first and last page links. Defaults to "false".

    Raises:
        TemplateSyntaxError: when no Page object is given or a keyword
            argument is not of the form ``name=value``.
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError(
            "'%s' takes at least one argument"
            " (Page object reference)" % bits[0]
        )
    page = parser.compile_filter(bits[1])
    kwargs = {}
    # Remaining bits are name=value pairs; values are compiled as
    # template filter expressions so variables and filters work.
    kwarg_re = re.compile(r"(\w+)=(.+)")
    for bit in bits[2:]:
        match = kwarg_re.match(bit)
        if not match:
            # Fixed copy-paste leftover: the message used to reference
            # the "bootstrap_pagination paginate" tag this code was
            # derived from, which misled users of this tag.
            raise TemplateSyntaxError(
                "Malformed arguments to arctic_paginate tag"
            )
        name, value = match.groups()
        kwargs[name] = parser.compile_filter(value)
    return PaginationNode(page, kwargs)
Renders a Page object with pagination bar. Example:: {% arctic_paginate page_obj paginator=page_obj.paginator range=10 %} Named Parameters:: range - The size of the pagination bar (ie, if set to 10 then, at most, 10 page numbers will display at any given time) Defaults to None, which shows all pages. show_first_last - Accepts "true" or "false". Determines whether or not to show the first and last page links. Defaults to "false"
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/templatetags/arctic_pagination_tags.py#L153-L188
sanoma/django-arctic
arctic/utils.py
menu
def menu(menu_config=None, **kwargs):
    """
    Transforms a menu definition into a dictionary which is a friendlier
    format to parse in a template.

    Expects ``request`` and ``user`` in kwargs.  Falls back to
    ``settings.ARCTIC_MENU`` when no ``menu_config`` is given.  Entries
    whose named url fails the user's permission check are dropped.
    """
    request = kwargs.pop("request", None)
    user = kwargs.pop("user", None)
    # Current view identifier, used to decide which entries are active.
    url_full_name = ":".join(
        [request.resolver_match.namespace, request.resolver_match.url_name]
    )
    if not menu_config:
        menu_config = settings.ARCTIC_MENU
    menu_dict = OrderedDict()
    for menu_entry in menu_config:
        if type(menu_entry) in (list, tuple):
            # check permission based on named_url
            path = None
            if menu_entry[1]:
                if not view_from_url(menu_entry[1]).has_permission(user):
                    continue
                path = reverse(menu_entry[1])
            # icons and collapse are optional
            icon = None
            if (len(menu_entry) >= 3) and (
                not type(menu_entry[2]) in (list, tuple)
            ):
                icon = menu_entry[2]
            # Longer paths win when several entries match the current
            # url; local-url ("#") entries have no path and weight 0.
            active_weight = len(path) if path else 0
            menu_dict[menu_entry[0]] = {
                "url": menu_entry[1],
                "icon": icon,
                "submenu": None,
                "active": is_active(menu_entry, url_full_name),
                "active_weight": active_weight,
            }
            # check if the last item in a menu entry is a submenu;
            # submenus are transformed recursively with the same
            # request/user context.
            submenu = _get_submenu(menu_entry)
            if submenu:
                menu_dict[menu_entry[0]]["submenu"] = menu(
                    submenu, user=user, request=request
                )
    return menu_clean(menu_dict)
python
def menu(menu_config=None, **kwargs): """ Tranforms a menu definition into a dictionary which is a frendlier format to parse in a template. """ request = kwargs.pop("request", None) user = kwargs.pop("user", None) url_full_name = ":".join( [request.resolver_match.namespace, request.resolver_match.url_name] ) if not menu_config: menu_config = settings.ARCTIC_MENU menu_dict = OrderedDict() for menu_entry in menu_config: if type(menu_entry) in (list, tuple): # check permission based on named_url path = None if menu_entry[1]: if not view_from_url(menu_entry[1]).has_permission(user): continue path = reverse(menu_entry[1]) # icons and collapse are optional icon = None if (len(menu_entry) >= 3) and ( not type(menu_entry[2]) in (list, tuple) ): icon = menu_entry[2] active_weight = len(path) if path else 0 menu_dict[menu_entry[0]] = { "url": menu_entry[1], "icon": icon, "submenu": None, "active": is_active(menu_entry, url_full_name), "active_weight": active_weight, } # check if the last item in a menu entry is a submenu submenu = _get_submenu(menu_entry) if submenu: menu_dict[menu_entry[0]]["submenu"] = menu( submenu, user=user, request=request ) return menu_clean(menu_dict)
Transforms a menu definition into a dictionary which is a friendlier format to parse in a template.
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L49-L95
sanoma/django-arctic
arctic/utils.py
menu_clean
def menu_clean(menu_config):
    """
    Make sure that only the menu item with the largest weight is active.
    If a child of a menu item is active, the parent should be active too.

    :param menu_config: OrderedDict produced by ``menu()``
    :return: the same dict, with "active" flags normalized
    """
    max_weight = -1
    for _, value in list(menu_config.items()):
        if value["submenu"]:
            for _, v in list(value["submenu"].items()):
                if v["active"]:
                    # parent inherits the weight of the active child
                    value["active"] = True
                    value["active_weight"] = v["active_weight"]
        if value["active"]:
            max_weight = max(value["active_weight"], max_weight)
    if max_weight > 0:
        # one of the items is active: make items with lesser weight inactive
        for _, value in list(menu_config.items()):
            if value["active"] and value["active_weight"] < max_weight:
                value["active"] = False
    return menu_config
python
def menu_clean(menu_config): """ Make sure that only the menu item with the largest weight is active. If a child of a menu item is active, the parent should be active too. :param menu: :return: """ max_weight = -1 for _, value in list(menu_config.items()): if value["submenu"]: for _, v in list(value["submenu"].items()): if v["active"]: # parent inherits the weight of the axctive child value["active"] = True value["active_weight"] = v["active_weight"] if value["active"]: max_weight = max(value["active_weight"], max_weight) if max_weight > 0: # one of the items is active: make items with lesser weight inactive for _, value in list(menu_config.items()): if value["active"] and value["active_weight"] < max_weight: value["active"] = False return menu_config
Make sure that only the menu item with the largest weight is active. If a child of a menu item is active, the parent should be active too. :param menu_config: :return:
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L104-L128
sanoma/django-arctic
arctic/utils.py
view_from_url
def view_from_url(named_url):  # noqa
    """
    Finds and returns the view class from a named url.

    Accepts either a plain ``"namespace:name"`` string or a
    list/tuple whose first element is that string.  Local urls
    (starting with ``#``) get a dummy view whose permission check
    always passes.

    Raises:
        NoReverseMatch: when a namespace in the name is not registered.
    """
    # code below is `stolen` from django's reverse method.
    resolver = get_resolver(get_urlconf())
    if type(named_url) in (list, tuple):
        named_url = named_url[0]
    parts = named_url.split(":")
    parts.reverse()
    view = parts[0]
    path = parts[1:]
    current_path = None
    resolved_path = []
    ns_pattern = ""
    ns_converters = {}
    # if it's a local url permission already given, so we just return true
    if named_url.startswith("#"):

        class LocalUrlDummyView:
            @staticmethod
            def has_permission(user):
                return True

        return LocalUrlDummyView

    while path:
        ns = path.pop()
        current_ns = current_path.pop() if current_path else None
        # Lookup the name to see if it could be an app identifier
        try:
            app_list = resolver.app_dict[ns]
            # Yes! Path part matches an app in the current Resolver
            if current_ns and current_ns in app_list:
                # If we are reversing for a particular app,
                # use that namespace
                ns = current_ns
            elif ns not in app_list:
                # The name isn't shared by one of the instances
                # (i.e., the default) so just pick the first instance
                # as the default.
                ns = app_list[0]
        except KeyError:
            pass
        if ns != current_ns:
            current_path = None
        try:
            extra, resolver = resolver.namespace_dict[ns]
            resolved_path.append(ns)
            ns_pattern = ns_pattern + extra
            # NOTE(review): `resolver.pattern.converters` only exists on
            # Django 2.0+ path() patterns -- swallowing the failure keeps
            # older regex urlconfs working.
            try:
                ns_converters.update(resolver.pattern.converters)
            except Exception:
                pass
        except KeyError as key:
            if resolved_path:
                raise NoReverseMatch(
                    "%s is not a registered namespace inside '%s'"
                    % (key, ":".join(resolved_path))
                )
            else:
                raise NoReverseMatch("%s is not a registered namespace" % key)
    if ns_pattern:
        # Older get_ns_resolver signatures take no converters argument.
        try:
            resolver = get_ns_resolver(
                ns_pattern, resolver, tuple(ns_converters.items())
            )
        except Exception:
            resolver = get_ns_resolver(ns_pattern, resolver)
    # custom code, get view from reverse_dict: find another key that
    # maps to the same url entry as the view name -- that key is the
    # view callable itself, from which we import the class.
    reverse_dict = resolver.reverse_dict.dict()
    for key, url_obj in reverse_dict.items():
        if url_obj == reverse_dict[view] and key != view:
            module = importlib.import_module(key.__module__)
            return getattr(module, key.__name__)
python
def view_from_url(named_url): # noqa """ Finds and returns the view class from a named url """ # code below is `stolen` from django's reverse method. resolver = get_resolver(get_urlconf()) if type(named_url) in (list, tuple): named_url = named_url[0] parts = named_url.split(":") parts.reverse() view = parts[0] path = parts[1:] current_path = None resolved_path = [] ns_pattern = "" ns_converters = {} # if it's a local url permission already given, so we just return true if named_url.startswith("#"): class LocalUrlDummyView: @staticmethod def has_permission(user): return True return LocalUrlDummyView while path: ns = path.pop() current_ns = current_path.pop() if current_path else None # Lookup the name to see if it could be an app identifier try: app_list = resolver.app_dict[ns] # Yes! Path part matches an app in the current Resolver if current_ns and current_ns in app_list: # If we are reversing for a particular app, # use that namespace ns = current_ns elif ns not in app_list: # The name isn't shared by one of the instances # (i.e., the default) so just pick the first instance # as the default. 
ns = app_list[0] except KeyError: pass if ns != current_ns: current_path = None try: extra, resolver = resolver.namespace_dict[ns] resolved_path.append(ns) ns_pattern = ns_pattern + extra try: ns_converters.update(resolver.pattern.converters) except Exception: pass except KeyError as key: if resolved_path: raise NoReverseMatch( "%s is not a registered namespace inside '%s'" % (key, ":".join(resolved_path)) ) else: raise NoReverseMatch("%s is not a registered namespace" % key) if ns_pattern: try: resolver = get_ns_resolver( ns_pattern, resolver, tuple(ns_converters.items()) ) except Exception: resolver = get_ns_resolver(ns_pattern, resolver) # custom code, get view from reverse_dict reverse_dict = resolver.reverse_dict.dict() for key, url_obj in reverse_dict.items(): if url_obj == reverse_dict[view] and key != view: module = importlib.import_module(key.__module__) return getattr(module, key.__name__)
Finds and returns the view class from a named url
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L131-L211
sanoma/django-arctic
arctic/utils.py
find_attribute
def find_attribute(obj, value):
    """
    Walk a double-underscore separated chain of related objects and
    return the attribute at the end of the chain.

    For example, when model x has a foreign key to model y and model y
    has attribute a, ``find_attribute(x, 'y__a')`` returns the ``a``
    attribute of the y instance reachable from x.
    """
    head, sep, tail = value.partition("__")
    if sep:
        # Resolve the first hop, then recurse on the remainder.
        return find_attribute(get_attribute(obj, head), tail)
    return get_attribute(obj, value)
python
def find_attribute(obj, value): """ Finds the attribute connected to the last object when a chain of connected objects is given in a string separated with double underscores. For example when a model x has a foreign key to model y and model y has attribute a, findattr(x, 'y__a') will return the a attribute from the y model that exists in x. """ if "__" in value: value_list = value.split("__") attr = get_attribute(obj, value_list[0]) return find_attribute(attr, "__".join(value_list[1:])) return get_attribute(obj, value)
Finds the attribute connected to the last object when a chain of connected objects is given in a string separated with double underscores. For example when a model x has a foreign key to model y and model y has attribute a, findattr(x, 'y__a') will return the a attribute from the y model that exists in x.
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L214-L226
sanoma/django-arctic
arctic/utils.py
get_attribute
def get_attribute(obj, value):
    """
    Return ``obj[value]`` for dictionaries or ``getattr(obj, value)``
    otherwise.

    Normally the result of list_items for listviews is a set of model
    objects, but a GROUP_BY query (with the 'values' method) yields
    dicts instead; this helper handles both shapes.  Missing dict keys
    yield ``None``.
    """
    # isinstance (rather than the previous exact type() check) also
    # covers dict subclasses such as OrderedDict; obj.get keeps the
    # original "missing key -> None" behaviour.
    if isinstance(obj, dict):
        return obj.get(value)
    return getattr(obj, value)
python
def get_attribute(obj, value):
    """
    Return ``obj[value]`` for dictionaries or ``getattr(obj, value)``
    otherwise.

    Normally the result of list_items for listviews is a set of model
    objects, but a GROUP_BY query (with the 'values' method) yields
    dicts instead; this helper handles both shapes.  Missing dict keys
    yield ``None``.
    """
    # isinstance (rather than the previous exact type() check) also
    # covers dict subclasses such as OrderedDict; obj.get keeps the
    # original "missing key -> None" behaviour.
    if isinstance(obj, dict):
        return obj.get(value)
    return getattr(obj, value)
Normally the result of list_items for listviews are a set of model objects. But when you want a GROUP_BY query (with 'values' method), than the result will be a dict. This method will help you find an item for either objects or dictionaries.
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L229-L239
sanoma/django-arctic
arctic/utils.py
find_field_meta
def find_field_meta(obj, value):
    """
    In a model, finds the attribute meta connected to the last object
    when a chain of connected objects is given in a string separated
    with double underscores.
    """
    if "__" in value:
        value_list = value.split("__")
        # NOTE(review): `.rel.to` is the pre-Django-2.0 spelling of
        # `.remote_field.model` -- confirm the supported Django versions
        # before touching this line.
        child_obj = obj._meta.get_field(value_list[0]).rel.to
        return find_field_meta(child_obj, "__".join(value_list[1:]))
    return obj._meta.get_field(value)
python
def find_field_meta(obj, value): """ In a model, finds the attribute meta connected to the last object when a chain of connected objects is given in a string separated with double underscores. """ if "__" in value: value_list = value.split("__") child_obj = obj._meta.get_field(value_list[0]).rel.to return find_field_meta(child_obj, "__".join(value_list[1:])) return obj._meta.get_field(value)
In a model, finds the attribute meta connected to the last object when a chain of connected objects is given in a string separated with double underscores.
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L242-L252
sanoma/django-arctic
arctic/utils.py
get_field_class
def get_field_class(qs, field_name):
    """
    Given a queryset and a field name, return the name of the field's
    class (e.g. ``"CharField"``).

    Returns ``None`` when the field does not exist on the model, which
    can legitimately happen for annotated fields.
    """
    try:
        field = qs.model._meta.get_field(field_name)
    except FieldDoesNotExist:
        return None
    return type(field).__name__
python
def get_field_class(qs, field_name): """ Given a queryset and a field name, it will return the field's class """ try: return qs.model._meta.get_field(field_name).__class__.__name__ # while annotating, it's possible that field does not exists. except FieldDoesNotExist: return None
Given a queryset and a field name, it will return the field's class
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L255-L263
sanoma/django-arctic
arctic/utils.py
reverse_url
def reverse_url(url, obj, fallback_field=None):
    """
    Reverses a named url, in addition to the standard django reverse,
    it also accepts a list of ('named url', 'field1', 'field2', ...)
    and will use the value of the supplied fields as arguments.
    When a fallback field is given it will use it as an argument if
    none other are given.
    """
    args = []
    if type(url) in (list, tuple):
        # ('named url', 'field1', ...): resolve each field on obj and
        # use the values as reverse() arguments.
        named_url = url[0]
        for arg in url[1:]:
            if type(obj) is dict:
                args.append(obj[arg])
            else:
                args.append(find_attribute(obj, arg))
    else:
        if url.startswith("#"):  # local url, returned verbatim
            return url
        named_url = url
        if obj and fallback_field:
            if type(obj) is dict:
                args = [obj[fallback_field]]
            else:
                args = [get_attribute(obj, fallback_field)]
    # Instead of giving NoReverseMatch exception it's more desirable,
    # for field_links in listviews to just ignore the link.
    if fallback_field and not args:
        return ""
    return reverse(named_url, args=args)
python
def reverse_url(url, obj, fallback_field=None): """ Reverses a named url, in addition to the standard django reverse, it also accepts a list of ('named url', 'field1', 'field2', ...) and will use the value of the supplied fields as arguments. When a fallback field is given it will use it as an argument if none other are given. """ args = [] if type(url) in (list, tuple): named_url = url[0] for arg in url[1:]: if type(obj) is dict: args.append(obj[arg]) else: args.append(find_attribute(obj, arg)) else: if url.startswith("#"): # local url return url named_url = url if obj and fallback_field: if type(obj) is dict: args = [obj[fallback_field]] else: args = [get_attribute(obj, fallback_field)] # Instead of giving NoReverseMatch exception it's more desirable, # for field_links in listviews to just ignore the link. if fallback_field and not args: return "" return reverse(named_url, args=args)
Reverses a named url, in addition to the standard django reverse, it also accepts a list of ('named url', 'field1', 'field2', ...) and will use the value of the supplied fields as arguments. When a fallback field is given it will use it as an argument if none other are given.
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L266-L297
sanoma/django-arctic
arctic/utils.py
arctic_setting
def arctic_setting(setting_name, valid_options=None):
    """
    Tries to get a setting from the django settings, if not available
    defaults to the one defined in defaults.py.

    Raises:
        ImproperlyConfigured: when the configured value is not one of
            ``valid_options``.
    """
    try:
        value = getattr(settings, setting_name)
    except AttributeError:
        # Setting not configured by the project: fall back to the
        # packaged default.  (The previous implementation evaluated
        # this fallback unconditionally in its final return, so a
        # setting present in django settings but absent from
        # defaults.py raised AttributeError; it also read the setting
        # twice.)
        return getattr(defaults, setting_name)
    if valid_options and value not in valid_options:
        error_message = "Invalid value for {}, must be one of: {}".format(
            setting_name, str(valid_options)
        )
        raise ImproperlyConfigured(error_message)
    return value
python
def arctic_setting(setting_name, valid_options=None): """ Tries to get a setting from the django settings, if not available defaults to the one defined in defaults.py """ try: value = getattr(settings, setting_name) if valid_options and value not in valid_options: error_message = "Invalid value for {}, must be one of: {}".format( setting_name, str(valid_options) ) raise ImproperlyConfigured(error_message) except AttributeError: pass return getattr(settings, setting_name, getattr(defaults, setting_name))
Tries to get a setting from the django settings, if not available defaults to the one defined in defaults.py
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L300-L314
sanoma/django-arctic
arctic/utils.py
offset_limit
def offset_limit(func):
    """
    Decorator that converts python slicing (start/stop) into
    offset/limit arguments for the wrapped method.
    """
    from functools import wraps  # local import keeps the block self-contained

    @wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def func_wrapper(self, start, stop):
        offset = start
        limit = stop - start
        return func(self, offset, limit)

    return func_wrapper
python
def offset_limit(func):
    """
    Decorator that converts python slicing (start/stop) into
    offset/limit arguments for the wrapped method.
    """
    from functools import wraps  # local import keeps the block self-contained

    @wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def func_wrapper(self, start, stop):
        offset = start
        limit = stop - start
        return func(self, offset, limit)

    return func_wrapper
Decorator that converts python slicing to offset and limit
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L367-L377
sanoma/django-arctic
arctic/utils.py
is_list_of_list
def is_list_of_list(item):
    """
    Check whether ``item`` is a list (or tuple) whose first element is
    itself a list or tuple.

    An empty list/tuple, or anything that is not exactly a list or
    tuple, yields ``False``.
    """
    # Guard clauses instead of one compound boolean expression.
    if type(item) not in (list, tuple):
        return False
    if not len(item):
        return False
    return isinstance(item[0], (list, tuple))
python
def is_list_of_list(item): """ check whether the item is list (tuple) and consist of list (tuple) elements """ if ( type(item) in (list, tuple) and len(item) and isinstance(item[0], (list, tuple)) ): return True return False
check whether the item is list (tuple) and consist of list (tuple) elements
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L380-L391
sanoma/django-arctic
arctic/utils.py
generate_id
def generate_id(*s):
    """
    Generates an id from one or more given strings.

    It uses english as the base language: in case some strings are
    translated, this ensures consistent ids.
    """
    # Force the English locale while slugifying so lazily-translated
    # strings do not produce locale-dependent ids.
    with translation.override("en"):
        generated_id = slugify("-".join([str(i) for i in s]))
    return generated_id
python
def generate_id(*s): """ generates an id from one or more given strings it uses english as the base language in case some strings are translated, this ensures consistent ids """ with translation.override("en"): generated_id = slugify("-".join([str(i) for i in s])) return generated_id
generates an id from one or more given strings it uses english as the base language in case some strings are translated, this ensures consistent ids
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L394-L402
sanoma/django-arctic
arctic/utils.py
append_query_parameter
def append_query_parameter(url, parameters, ignore_if_exists=True):
    """
    Quick and dirty appending of query parameters to a url.

    ``parameters`` maps parameter names to (string) values.  When
    ``ignore_if_exists`` is true, parameters whose name already appears
    in the url are skipped.  Note: ``parameters`` is mutated in place
    (pre-existing behaviour, kept for compatibility).
    """
    if ignore_if_exists:
        # Iterate over a snapshot of the keys: deleting from a dict
        # while iterating its live view raises RuntimeError on
        # Python 3, which is what the previous code did.
        for key in list(parameters):
            if key + "=" in url:
                del parameters[key]
    parameters_str = "&".join(k + "=" + v for k, v in parameters.items())
    append_token = "&" if "?" in url else "?"
    return url + append_token + parameters_str
python
def append_query_parameter(url, parameters, ignore_if_exists=True):
    """
    Quick and dirty appending of query parameters to a url.

    ``parameters`` maps parameter names to (string) values.  When
    ``ignore_if_exists`` is true, parameters whose name already appears
    in the url are skipped.  Note: ``parameters`` is mutated in place
    (pre-existing behaviour, kept for compatibility).
    """
    if ignore_if_exists:
        # Iterate over a snapshot of the keys: deleting from a dict
        # while iterating its live view raises RuntimeError on
        # Python 3, which is what the previous code did.
        for key in list(parameters):
            if key + "=" in url:
                del parameters[key]
    parameters_str = "&".join(k + "=" + v for k, v in parameters.items())
    append_token = "&" if "?" in url else "?"
    return url + append_token + parameters_str
quick and dirty appending of query parameters to a url
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/utils.py#L405-L413
sanoma/django-arctic
arctic/widgets.py
BetterFileInput.render
def render(self, name, value, attrs=None, renderer=None):
    """For django 1.10 compatibility.

    On Django >= 1.11 the built-in template-based widget rendering is
    used; on 1.10 the widget template is rendered manually.
    """
    if django.VERSION >= (1, 11):
        return super(BetterFileInput, self).render(name, value, attrs)
    # Django 1.10 fallback: render the widget template ourselves.
    t = render_to_string(
        template_name=self.template_name,
        context=self.get_context(name, value, attrs),
    )
    return mark_safe(t)
python
def render(self, name, value, attrs=None, renderer=None): """For django 1.10 compatibility""" if django.VERSION >= (1, 11): return super(BetterFileInput, self).render(name, value, attrs) t = render_to_string( template_name=self.template_name, context=self.get_context(name, value, attrs), ) return mark_safe(t)
For django 1.10 compatibility
https://github.com/sanoma/django-arctic/blob/c81b092c2643ca220708bf3c586017d9175161f5/arctic/widgets.py#L186-L195
ncrocfer/whatportis
whatportis/__main__.py
get_table
def get_table(ports):
    """
    This function returns a pretty table used to display the port
    results.

    :param ports: list of found ports (each a row of
        name/port/protocol/description values)
    :return: the PrettyTable to display
    """
    table = PrettyTable(["Name", "Port", "Protocol", "Description"])
    # Left-align the free-text columns for readability.
    table.align["Name"] = "l"
    table.align["Description"] = "l"
    table.padding_width = 1
    for port in ports:
        table.add_row(port)
    return table
python
def get_table(ports): """ This function returns a pretty table used to display the port results. :param ports: list of found ports :return: the table to display """ table = PrettyTable(["Name", "Port", "Protocol", "Description"]) table.align["Name"] = "l" table.align["Description"] = "l" table.padding_width = 1 for port in ports: table.add_row(port) return table
This function returns a pretty table used to display the port results. :param ports: list of found ports :return: the table to display
https://github.com/ncrocfer/whatportis/blob/66a04b249dc9edf23dadd7eb91473b7f125fb27f/whatportis/__main__.py#L17-L32
ncrocfer/whatportis
whatportis/__main__.py
run
def run(port, like, use_json, server):
    """Search port names and numbers.

    Either queries the local database for ``port`` (printing a table or
    JSON) or, when a server host is given, serves the REST API instead.
    """
    if not port and not server[0]:
        raise click.UsageError("Please specify a port")
    if server[0]:
        # Server mode: run the web app and skip the one-shot lookup.
        app.run(host=server[0], port=server[1])
        return
    ports = get_ports(port, like)
    if not ports:
        # Not an error exit: just report on stderr and stop.
        sys.stderr.write("No ports found for '{0}'\n".format(port))
        return
    if use_json:
        print(json.dumps(ports, indent=4))
    else:
        table = get_table(ports)
        print(table)
python
def run(port, like, use_json, server): """Search port names and numbers.""" if not port and not server[0]: raise click.UsageError("Please specify a port") if server[0]: app.run(host=server[0], port=server[1]) return ports = get_ports(port, like) if not ports: sys.stderr.write("No ports found for '{0}'\n".format(port)) return if use_json: print(json.dumps(ports, indent=4)) else: table = get_table(ports) print(table)
Search port names and numbers.
https://github.com/ncrocfer/whatportis/blob/66a04b249dc9edf23dadd7eb91473b7f125fb27f/whatportis/__main__.py#L44-L62
ncrocfer/whatportis
whatportis/core.py
get_ports
def get_ports(port, like=False):
    """
    This function creates the database query depending on the specified
    port and the --like option.

    NOTE(review): despite the original wording ("SQL query"), the
    backing store is TinyDB (``where``), not SQL.

    :param port: the specified port (digits search the "port" field,
        anything else the "name" field)
    :param like: the --like option (substring/regex search instead of
        exact match)
    :return: all ports matching the given ``port``
    :rtype: list
    """
    where_field = "port" if port.isdigit() else "name"
    if like:
        ports = __DB__.search(where(where_field).search(port))
    else:
        ports = __DB__.search(where(where_field) == port)
    return [Port(**port) for port in ports]
python
def get_ports(port, like=False): """ This function creates the SQL query depending on the specified port and the --like option. :param port: the specified port :param like: the --like option :return: all ports matching the given ``port`` :rtype: list """ where_field = "port" if port.isdigit() else "name" if like: ports = __DB__.search(where(where_field).search(port)) else: ports = __DB__.search(where(where_field) == port) return [Port(**port) for port in ports]
This function creates the SQL query depending on the specified port and the --like option. :param port: the specified port :param like: the --like option :return: all ports matching the given ``port`` :rtype: list
https://github.com/ncrocfer/whatportis/blob/66a04b249dc9edf23dadd7eb91473b7f125fb27f/whatportis/core.py#L22-L38
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_account
def get_account(self, address, id=None, endpoint=None):
    """
    Look up an account on the blockchain.

    Args:
        address: (str) address to lookup
            (in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK')
        id: (int, optional) id to use for response tracking
        endpoint: (RPCEndpoint, optional) endpoint to specify to use

    Returns:
        json object of the result or the error encountered in the
        RPC call
    """
    # Thin wrapper around the 'getaccountstate' RPC method.
    return self._call_endpoint(GET_ACCOUNT_STATE, params=[address], id=id, endpoint=endpoint)
python
def get_account(self, address, id=None, endpoint=None): """ Look up an account on the blockchain. Sample output: Args: address: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK') id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_ACCOUNT_STATE, params=[address], id=id, endpoint=endpoint)
Look up an account on the blockchain. Sample output: Args: address: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK') id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L26-L39
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_height
def get_height(self, id=None, endpoint=None):
    """
    Get the current height of the blockchain.

    Args:
        id: (int, optional) id to use for response tracking
        endpoint: (RPCEndpoint, optional) endpoint to specify to use

    Returns:
        json object of the result or the error encountered in the
        RPC call
    """
    # Thin wrapper around the 'getblockcount' RPC method.
    return self._call_endpoint(GET_BLOCK_COUNT, id=id, endpoint=endpoint)
python
def get_height(self, id=None, endpoint=None): """ Get the current height of the blockchain Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_BLOCK_COUNT, id=id, endpoint=endpoint)
Get the current height of the blockchain Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L41-L51
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_asset
def get_asset(self, asset_hash, id=None, endpoint=None):
    """
    Get an asset by its hash.

    Args:
        asset_hash: (str) asset to lookup, example would be
            'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b'
        id: (int, optional) id to use for response tracking
        endpoint: (RPCEndpoint, optional) endpoint to specify to use

    Returns:
        json object of the result or the error encountered in the
        RPC call
    """
    # Thin wrapper around the 'getassetstate' RPC method.
    return self._call_endpoint(GET_ASSET_STATE, params=[asset_hash], id=id, endpoint=endpoint)
python
def get_asset(self, asset_hash, id=None, endpoint=None): """ Get an asset by its hash Args: asset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_ASSET_STATE, params=[asset_hash], id=id, endpoint=endpoint)
Get an asset by its hash Args: asset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L53-L64
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_balance
def get_balance(self, asset_hash, id=None, endpoint=None): """ Get balance by asset hash Args: asset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_BALANCE, params=[asset_hash], id=id, endpoint=endpoint)
python
def get_balance(self, asset_hash, id=None, endpoint=None): """ Get balance by asset hash Args: asset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_BALANCE, params=[asset_hash], id=id, endpoint=endpoint)
Get balance by asset hash Args: asset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L66-L77
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_best_blockhash
def get_best_blockhash(self, id=None, endpoint=None): """ Get the hash of the highest block Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_BEST_BLOCK_HASH, id=id, endpoint=endpoint)
python
def get_best_blockhash(self, id=None, endpoint=None): """ Get the hash of the highest block Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_BEST_BLOCK_HASH, id=id, endpoint=endpoint)
Get the hash of the highest block Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L79-L88
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_block
def get_block(self, height_or_hash, id=None, endpoint=None): """ Look up a block by the height or hash of the block. Args: height_or_hash: (int or str) either the height of the desired block or its hash in the form '1e67372c158a4cfbb17b9ad3aaae77001a4247a00318e354c62e53b56af4006f' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: block: a json object or the ``neorpc.Core.Block.Block`` object """ return self._call_endpoint(GET_BLOCK, params=[height_or_hash, 1], id=id, endpoint=endpoint)
python
def get_block(self, height_or_hash, id=None, endpoint=None): """ Look up a block by the height or hash of the block. Args: height_or_hash: (int or str) either the height of the desired block or its hash in the form '1e67372c158a4cfbb17b9ad3aaae77001a4247a00318e354c62e53b56af4006f' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: block: a json object or the ``neorpc.Core.Block.Block`` object """ return self._call_endpoint(GET_BLOCK, params=[height_or_hash, 1], id=id, endpoint=endpoint)
Look up a block by the height or hash of the block. Args: height_or_hash: (int or str) either the height of the desired block or its hash in the form '1e67372c158a4cfbb17b9ad3aaae77001a4247a00318e354c62e53b56af4006f' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: block: a json object or the ``neorpc.Core.Block.Block`` object
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L90-L101
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_block_hash
def get_block_hash(self, height, id=None, endpoint=None): """ Get hash of a block by its height Args: height: (int) height of the block to lookup id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_BLOCK_HASH, params=[height], id=id, endpoint=endpoint)
python
def get_block_hash(self, height, id=None, endpoint=None): """ Get hash of a block by its height Args: height: (int) height of the block to lookup id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_BLOCK_HASH, params=[height], id=id, endpoint=endpoint)
Get hash of a block by its height Args: height: (int) height of the block to lookup id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L103-L114
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_block_header
def get_block_header(self, block_hash, id=None, endpoint=None): """ Get the corresponding block header information according to the specified script hash. Args: block_hash: (str) the block scripthash (e.g. 'a5508c9b6ed0fc09a531a62bc0b3efcb6b8a9250abaf72ab8e9591294c1f6957') id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_BLOCK_HEADER, params=[block_hash, 1], id=id, endpoint=endpoint)
python
def get_block_header(self, block_hash, id=None, endpoint=None): """ Get the corresponding block header information according to the specified script hash. Args: block_hash: (str) the block scripthash (e.g. 'a5508c9b6ed0fc09a531a62bc0b3efcb6b8a9250abaf72ab8e9591294c1f6957') id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_BLOCK_HEADER, params=[block_hash, 1], id=id, endpoint=endpoint)
Get the corresponding block header information according to the specified script hash. Args: block_hash: (str) the block scripthash (e.g. 'a5508c9b6ed0fc09a531a62bc0b3efcb6b8a9250abaf72ab8e9591294c1f6957') id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L116-L127
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_block_sysfee
def get_block_sysfee(self, height, id=None, endpoint=None): """ Get the system fee of a block by height. This is used in calculating gas claims Args: height: (int) height of the block to lookup id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_BLOCK_SYS_FEE, params=[height], id=id, endpoint=endpoint)
python
def get_block_sysfee(self, height, id=None, endpoint=None): """ Get the system fee of a block by height. This is used in calculating gas claims Args: height: (int) height of the block to lookup id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_BLOCK_SYS_FEE, params=[height], id=id, endpoint=endpoint)
Get the system fee of a block by height. This is used in calculating gas claims Args: height: (int) height of the block to lookup id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L129-L140
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_connection_count
def get_connection_count(self, id=None, endpoint=None): """ Gets the number of nodes connected to the endpoint Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_CONNECTION_COUNT, id=id, endpoint=endpoint)
python
def get_connection_count(self, id=None, endpoint=None): """ Gets the number of nodes connected to the endpoint Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_CONNECTION_COUNT, id=id, endpoint=endpoint)
Gets the number of nodes connected to the endpoint Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L142-L151
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_contract_state
def get_contract_state(self, contract_hash, id=None, endpoint=None): """ Get a contract state object by its hash Args: contract_hash: (str) the hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_CONTRACT_STATE, params=[contract_hash], id=id, endpoint=endpoint)
python
def get_contract_state(self, contract_hash, id=None, endpoint=None): """ Get a contract state object by its hash Args: contract_hash: (str) the hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_CONTRACT_STATE, params=[contract_hash], id=id, endpoint=endpoint)
Get a contract state object by its hash Args: contract_hash: (str) the hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L153-L163
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_raw_mempool
def get_raw_mempool(self, id=None, endpoint=None): """ Returns the tx that are in the memorypool of the endpoint Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_RAW_MEMPOOL, id=id, endpoint=endpoint)
python
def get_raw_mempool(self, id=None, endpoint=None): """ Returns the tx that are in the memorypool of the endpoint Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_RAW_MEMPOOL, id=id, endpoint=endpoint)
Returns the tx that are in the memorypool of the endpoint Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L165-L174
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_transaction
def get_transaction(self, tx_hash, id=None, endpoint=None): """ Look up a transaction by hash. Args: tx_hash: (str) hash in the form '58c634f81fbd4ae2733d7e3930a9849021840fc19dc6af064d6f2812a333f91d' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json: the transaction as a json object """ return self._call_endpoint(GET_RAW_TRANSACTION, params=[tx_hash, 1], id=id, endpoint=endpoint)
python
def get_transaction(self, tx_hash, id=None, endpoint=None): """ Look up a transaction by hash. Args: tx_hash: (str) hash in the form '58c634f81fbd4ae2733d7e3930a9849021840fc19dc6af064d6f2812a333f91d' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json: the transaction as a json object """ return self._call_endpoint(GET_RAW_TRANSACTION, params=[tx_hash, 1], id=id, endpoint=endpoint)
Look up a transaction by hash. Args: tx_hash: (str) hash in the form '58c634f81fbd4ae2733d7e3930a9849021840fc19dc6af064d6f2812a333f91d' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json: the transaction as a json object
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L176-L187
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_storage
def get_storage(self, contract_hash, storage_key, id=None, endpoint=None): """ Returns a storage item of a specified contract Args: contract_hash: (str) hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' storage_key: (str) storage key to lookup, for example 'totalSupply' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: bytearray: bytearray value of the storage item """ result = self._call_endpoint(GET_STORAGE, params=[contract_hash, binascii.hexlify(storage_key.encode('utf-8')).decode('utf-8')], id=id, endpoint=endpoint) try: return bytearray(binascii.unhexlify(result.encode('utf-8'))) except Exception as e: raise NEORPCException("could not decode result %s " % e)
python
def get_storage(self, contract_hash, storage_key, id=None, endpoint=None): """ Returns a storage item of a specified contract Args: contract_hash: (str) hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' storage_key: (str) storage key to lookup, for example 'totalSupply' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: bytearray: bytearray value of the storage item """ result = self._call_endpoint(GET_STORAGE, params=[contract_hash, binascii.hexlify(storage_key.encode('utf-8')).decode('utf-8')], id=id, endpoint=endpoint) try: return bytearray(binascii.unhexlify(result.encode('utf-8'))) except Exception as e: raise NEORPCException("could not decode result %s " % e)
Returns a storage item of a specified contract Args: contract_hash: (str) hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' storage_key: (str) storage key to lookup, for example 'totalSupply' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: bytearray: bytearray value of the storage item
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L189-L205
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_tx_out
def get_tx_out(self, tx_hash, vout_id, id=None, endpoint=None): """ Gets a transaction output by specified transaction hash and output index Args: tx_hash: (str) hash in the form '58c634f81fbd4ae2733d7e3930a9849021840fc19dc6af064d6f2812a333f91d' vout_id: (int) index of the transaction output in the transaction id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_TX_OUT, params=[tx_hash, vout_id], id=id, endpoint=endpoint)
python
def get_tx_out(self, tx_hash, vout_id, id=None, endpoint=None): """ Gets a transaction output by specified transaction hash and output index Args: tx_hash: (str) hash in the form '58c634f81fbd4ae2733d7e3930a9849021840fc19dc6af064d6f2812a333f91d' vout_id: (int) index of the transaction output in the transaction id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_TX_OUT, params=[tx_hash, vout_id], id=id, endpoint=endpoint)
Gets a transaction output by specified transaction hash and output index Args: tx_hash: (str) hash in the form '58c634f81fbd4ae2733d7e3930a9849021840fc19dc6af064d6f2812a333f91d' vout_id: (int) index of the transaction output in the transaction id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L207-L218
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.invoke_contract
def invoke_contract(self, contract_hash, params, id=None, endpoint=None): """ Invokes a contract Args: contract_hash: (str) hash of the contract, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' params: (list) a list of json ContractParameters to pass along with the invocation, example [{'type':7,'value':'symbol'},{'type':16, 'value':[]}] id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(INVOKE, params=[contract_hash, params], id=id, endpoint=endpoint)
python
def invoke_contract(self, contract_hash, params, id=None, endpoint=None): """ Invokes a contract Args: contract_hash: (str) hash of the contract, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' params: (list) a list of json ContractParameters to pass along with the invocation, example [{'type':7,'value':'symbol'},{'type':16, 'value':[]}] id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(INVOKE, params=[contract_hash, params], id=id, endpoint=endpoint)
Invokes a contract Args: contract_hash: (str) hash of the contract, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' params: (list) a list of json ContractParameters to pass along with the invocation, example [{'type':7,'value':'symbol'},{'type':16, 'value':[]}] id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L220-L231
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.invoke_contract_fn
def invoke_contract_fn(self, contract_hash, operation, params=None, id=None, endpoint=None): """ Invokes a contract Args: contract_hash: (str) hash of the contract, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' operation: (str) the operation to call on the contract params: (list) a list of json ContractParameters to pass along with the invocation, example [{'type':7,'value':'symbol'},{'type':16, 'value':[]}] id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(INVOKE_FUNCTION, params=[contract_hash, operation, params if params else []], id=id, endpoint=endpoint)
python
def invoke_contract_fn(self, contract_hash, operation, params=None, id=None, endpoint=None): """ Invokes a contract Args: contract_hash: (str) hash of the contract, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' operation: (str) the operation to call on the contract params: (list) a list of json ContractParameters to pass along with the invocation, example [{'type':7,'value':'symbol'},{'type':16, 'value':[]}] id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(INVOKE_FUNCTION, params=[contract_hash, operation, params if params else []], id=id, endpoint=endpoint)
Invokes a contract Args: contract_hash: (str) hash of the contract, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' operation: (str) the operation to call on the contract params: (list) a list of json ContractParameters to pass along with the invocation, example [{'type':7,'value':'symbol'},{'type':16, 'value':[]}] id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L233-L245
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.invoke_script
def invoke_script(self, script, id=None, endpoint=None): """ Invokes a script that has been assembled Args: script: (str) a hexlified string of a contract invocation script, example '00c10b746f74616c537570706c796754a64cac1b1073e662933ef3e30b007cd98d67d7' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(INVOKE_SCRIPT, params=[script], id=id, endpoint=endpoint)
python
def invoke_script(self, script, id=None, endpoint=None): """ Invokes a script that has been assembled Args: script: (str) a hexlified string of a contract invocation script, example '00c10b746f74616c537570706c796754a64cac1b1073e662933ef3e30b007cd98d67d7' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(INVOKE_SCRIPT, params=[script], id=id, endpoint=endpoint)
Invokes a script that has been assembled Args: script: (str) a hexlified string of a contract invocation script, example '00c10b746f74616c537570706c796754a64cac1b1073e662933ef3e30b007cd98d67d7' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L247-L257
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.send_raw_tx
def send_raw_tx(self, serialized_tx, id=None, endpoint=None): """ Submits a serialized tx to the network Args: serialized_tx: (str) a hexlified string of a transaction id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: bool: whether the tx was accepted or not """ return self._call_endpoint(SEND_TX, params=[serialized_tx], id=id, endpoint=endpoint)
python
def send_raw_tx(self, serialized_tx, id=None, endpoint=None): """ Submits a serialized tx to the network Args: serialized_tx: (str) a hexlified string of a transaction id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: bool: whether the tx was accepted or not """ return self._call_endpoint(SEND_TX, params=[serialized_tx], id=id, endpoint=endpoint)
Submits a serialized tx to the network Args: serialized_tx: (str) a hexlified string of a transaction id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: bool: whether the tx was accepted or not
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L259-L269
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.validate_addr
def validate_addr(self, address, id=None, endpoint=None): """ returns whether or not addr string is valid Args: address: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK') id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(VALIDATE_ADDR, params=[address], id=id, endpoint=endpoint)
python
def validate_addr(self, address, id=None, endpoint=None): """ returns whether or not addr string is valid Args: address: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK') id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(VALIDATE_ADDR, params=[address], id=id, endpoint=endpoint)
returns whether or not addr string is valid Args: address: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK') id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L271-L284
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_peers
def get_peers(self, id=None, endpoint=None): """ Get the current peers of a remote node Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_PEERS, id=id, endpoint=endpoint)
python
def get_peers(self, id=None, endpoint=None): """ Get the current peers of a remote node Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_PEERS, id=id, endpoint=endpoint)
Get the current peers of a remote node Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L286-L296
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_validators
def get_validators(self, id=None, endpoint=None): """ Returns the current NEO consensus nodes information and voting status. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_VALIDATORS, id=id, endpoint=endpoint)
python
def get_validators(self, id=None, endpoint=None): """ Returns the current NEO consensus nodes information and voting status. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_VALIDATORS, id=id, endpoint=endpoint)
Returns the current NEO consensus nodes information and voting status. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L298-L308
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_version
def get_version(self, id=None, endpoint=None): """ Get the current version of the endpoint. Note: Not all endpoints currently implement this method Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_VERSION, id=id, endpoint=endpoint)
python
def get_version(self, id=None, endpoint=None): """ Get the current version of the endpoint. Note: Not all endpoints currently implement this method Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_VERSION, id=id, endpoint=endpoint)
Get the current version of the endpoint. Note: Not all endpoints currently implement this method Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L310-L321
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_new_address
def get_new_address(self, id=None, endpoint=None): """ Create new address Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_NEW_ADDRESS, id=id, endpoint=endpoint)
python
def get_new_address(self, id=None, endpoint=None): """ Create new address Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_NEW_ADDRESS, id=id, endpoint=endpoint)
Create new address Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L323-L332
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.get_wallet_height
def get_wallet_height(self, id=None, endpoint=None): """ Get the current wallet index height. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_WALLET_HEIGHT, id=id, endpoint=endpoint)
python
def get_wallet_height(self, id=None, endpoint=None): """ Get the current wallet index height. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(GET_WALLET_HEIGHT, id=id, endpoint=endpoint)
Get the current wallet index height. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L334-L343
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.list_address
def list_address(self, id=None, endpoint=None): """ Lists all the addresses in the current wallet. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(LIST_ADDRESS, id=id, endpoint=endpoint)
python
def list_address(self, id=None, endpoint=None): """ Lists all the addresses in the current wallet. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ return self._call_endpoint(LIST_ADDRESS, id=id, endpoint=endpoint)
Lists all the addresses in the current wallet. Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L345-L354
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.send_from
def send_from(self, asset_id, addr_from, to_addr, value, fee=None, change_addr=None, id=None, endpoint=None): """ Transfer from the specified address to the destination address. Args: asset_id: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') addr_from: (str) transfering address to_addr: (str) destination address value: (int/decimal) transfer amount fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ params = [asset_id, addr_from, to_addr, value] if fee: params.append(fee) if fee and change_addr: params.append(change_addr) elif not fee and change_addr: params.append(0) params.append(change_addr) return self._call_endpoint(SEND_FROM, params=params, id=id, endpoint=endpoint)
python
def send_from(self, asset_id, addr_from, to_addr, value, fee=None, change_addr=None, id=None, endpoint=None): """ Transfer from the specified address to the destination address. Args: asset_id: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') addr_from: (str) transfering address to_addr: (str) destination address value: (int/decimal) transfer amount fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ params = [asset_id, addr_from, to_addr, value] if fee: params.append(fee) if fee and change_addr: params.append(change_addr) elif not fee and change_addr: params.append(0) params.append(change_addr) return self._call_endpoint(SEND_FROM, params=params, id=id, endpoint=endpoint)
Transfer from the specified address to the destination address. Args: asset_id: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') addr_from: (str) transfering address to_addr: (str) destination address value: (int/decimal) transfer amount fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L356-L379
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.send_to_address
def send_to_address(self, asset_id, to_addr, value, fee=None, change_addr=None, id=None, endpoint=None): """ Args: asset_id: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') to_addr: (str) destination address value: (int/decimal) transfer amount fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ params = [asset_id, to_addr, value] if fee: params.append(fee) if fee and change_addr: params.append(change_addr) elif not fee and change_addr: params.append(0) params.append(change_addr) return self._call_endpoint(SEND_TO_ADDRESS, params=params, id=id, endpoint=endpoint)
python
def send_to_address(self, asset_id, to_addr, value, fee=None, change_addr=None, id=None, endpoint=None): """ Args: asset_id: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') to_addr: (str) destination address value: (int/decimal) transfer amount fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call """ params = [asset_id, to_addr, value] if fee: params.append(fee) if fee and change_addr: params.append(change_addr) elif not fee and change_addr: params.append(0) params.append(change_addr) return self._call_endpoint(SEND_TO_ADDRESS, params=params, id=id, endpoint=endpoint)
Args: asset_id: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') to_addr: (str) destination address value: (int/decimal) transfer amount fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L381-L402
CityOfZion/neo-python-rpc
neorpc/Client.py
RPCClient.send_many
def send_many(self, outputs_array, fee=None, change_addr=None, id=None, endpoint=None): """ Args: outputs_array: (dict) array, the data structure of each element in the array is as follows: {"asset": <asset>,"value": <value>,"address": <address>} asset: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') value: (int/decimal) transfer amount address: (str) destination address fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use """ params = [outputs_array] if fee: params.append(fee) if fee and change_addr: params.append(change_addr) elif not fee and change_addr: params.append(0) params.append(change_addr) return self._call_endpoint(SEND_MANY, params=params, id=id, endpoint=endpoint)
python
def send_many(self, outputs_array, fee=None, change_addr=None, id=None, endpoint=None): """ Args: outputs_array: (dict) array, the data structure of each element in the array is as follows: {"asset": <asset>,"value": <value>,"address": <address>} asset: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') value: (int/decimal) transfer amount address: (str) destination address fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use """ params = [outputs_array] if fee: params.append(fee) if fee and change_addr: params.append(change_addr) elif not fee and change_addr: params.append(0) params.append(change_addr) return self._call_endpoint(SEND_MANY, params=params, id=id, endpoint=endpoint)
Args: outputs_array: (dict) array, the data structure of each element in the array is as follows: {"asset": <asset>,"value": <value>,"address": <address>} asset: (str) asset identifier (for NEO: 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b', for GAS: '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7') value: (int/decimal) transfer amount address: (str) destination address fee: (decimal, optional) Paying the handling fee helps elevate the priority of the network to process the transfer. It defaults to 0, and can be set to a minimum of 0.00000001. The low priority threshold is 0.001. change_addr: (str, optional) Change address, default is the first standard address in the wallet. id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use
https://github.com/CityOfZion/neo-python-rpc/blob/89d22c4043654b2941bf26b15a1c09082901d9ef/neorpc/Client.py#L404-L425
adrn/schwimmbad
schwimmbad/__init__.py
choose_pool
def choose_pool(mpi=False, processes=1, **kwargs): """ Choose between the different pools given options from, e.g., argparse. Parameters ---------- mpi : bool, optional Use the MPI processing pool, :class:`~schwimmbad.mpi.MPIPool`. By default, ``False``, will use the :class:`~schwimmbad.serial.SerialPool`. processes : int, optional Use the multiprocessing pool, :class:`~schwimmbad.multiprocessing.MultiPool`, with this number of processes. By default, ``processes=1``, will use the :class:`~schwimmbad.serial.SerialPool`. **kwargs Any additional kwargs are passed in to the pool class initializer selected by the arguments. """ if mpi: if not MPIPool.enabled(): raise SystemError("Tried to run with MPI but MPIPool not enabled.") pool = MPIPool(**kwargs) if not pool.is_master(): pool.wait() sys.exit(0) log.info("Running with MPI on {0} cores".format(pool.size)) return pool elif processes != 1 and MultiPool.enabled(): log.info("Running with MultiPool on {0} cores".format(processes)) return MultiPool(processes=processes, **kwargs) else: log.info("Running with SerialPool") return SerialPool(**kwargs)
python
def choose_pool(mpi=False, processes=1, **kwargs): """ Choose between the different pools given options from, e.g., argparse. Parameters ---------- mpi : bool, optional Use the MPI processing pool, :class:`~schwimmbad.mpi.MPIPool`. By default, ``False``, will use the :class:`~schwimmbad.serial.SerialPool`. processes : int, optional Use the multiprocessing pool, :class:`~schwimmbad.multiprocessing.MultiPool`, with this number of processes. By default, ``processes=1``, will use the :class:`~schwimmbad.serial.SerialPool`. **kwargs Any additional kwargs are passed in to the pool class initializer selected by the arguments. """ if mpi: if not MPIPool.enabled(): raise SystemError("Tried to run with MPI but MPIPool not enabled.") pool = MPIPool(**kwargs) if not pool.is_master(): pool.wait() sys.exit(0) log.info("Running with MPI on {0} cores".format(pool.size)) return pool elif processes != 1 and MultiPool.enabled(): log.info("Running with MultiPool on {0} cores".format(processes)) return MultiPool(processes=processes, **kwargs) else: log.info("Running with SerialPool") return SerialPool(**kwargs)
Choose between the different pools given options from, e.g., argparse. Parameters ---------- mpi : bool, optional Use the MPI processing pool, :class:`~schwimmbad.mpi.MPIPool`. By default, ``False``, will use the :class:`~schwimmbad.serial.SerialPool`. processes : int, optional Use the multiprocessing pool, :class:`~schwimmbad.multiprocessing.MultiPool`, with this number of processes. By default, ``processes=1``, will use the :class:`~schwimmbad.serial.SerialPool`. **kwargs Any additional kwargs are passed in to the pool class initializer selected by the arguments.
https://github.com/adrn/schwimmbad/blob/d2538b77c821a56096f92eafecd1c08dd02f1f58/schwimmbad/__init__.py#L30-L67
adrn/schwimmbad
schwimmbad/mpi.py
MPIPool.wait
def wait(self, callback=None): """Tell the workers to wait and listen for the master process. This is called automatically when using :meth:`MPIPool.map` and doesn't need to be called by the user. """ if self.is_master(): return worker = self.comm.rank status = MPI.Status() while True: log.log(_VERBOSE, "Worker {0} waiting for task".format(worker)) task = self.comm.recv(source=self.master, tag=MPI.ANY_TAG, status=status) if task is None: log.log(_VERBOSE, "Worker {0} told to quit work".format(worker)) break func, arg = task log.log(_VERBOSE, "Worker {0} got task {1} with tag {2}" .format(worker, arg, status.tag)) result = func(arg) log.log(_VERBOSE, "Worker {0} sending answer {1} with tag {2}" .format(worker, result, status.tag)) self.comm.ssend(result, self.master, status.tag) if callback is not None: callback()
python
def wait(self, callback=None): """Tell the workers to wait and listen for the master process. This is called automatically when using :meth:`MPIPool.map` and doesn't need to be called by the user. """ if self.is_master(): return worker = self.comm.rank status = MPI.Status() while True: log.log(_VERBOSE, "Worker {0} waiting for task".format(worker)) task = self.comm.recv(source=self.master, tag=MPI.ANY_TAG, status=status) if task is None: log.log(_VERBOSE, "Worker {0} told to quit work".format(worker)) break func, arg = task log.log(_VERBOSE, "Worker {0} got task {1} with tag {2}" .format(worker, arg, status.tag)) result = func(arg) log.log(_VERBOSE, "Worker {0} sending answer {1} with tag {2}" .format(worker, result, status.tag)) self.comm.ssend(result, self.master, status.tag) if callback is not None: callback()
Tell the workers to wait and listen for the master process. This is called automatically when using :meth:`MPIPool.map` and doesn't need to be called by the user.
https://github.com/adrn/schwimmbad/blob/d2538b77c821a56096f92eafecd1c08dd02f1f58/schwimmbad/mpi.py#L74-L106
adrn/schwimmbad
schwimmbad/mpi.py
MPIPool.map
def map(self, worker, tasks, callback=None): """Evaluate a function or callable on each task in parallel using MPI. The callable, ``worker``, is called on each element of the ``tasks`` iterable. The results are returned in the expected order (symmetric with ``tasks``). Parameters ---------- worker : callable A function or callable object that is executed on each element of the specified ``tasks`` iterable. This object must be picklable (i.e. it can't be a function scoped within a function or a ``lambda`` function). This should accept a single positional argument and return a single object. tasks : iterable A list or iterable of tasks. Each task can be itself an iterable (e.g., tuple) of values or data to pass in to the worker function. callback : callable, optional An optional callback function (or callable) that is called with the result from each worker run and is executed on the master process. This is useful for, e.g., saving results to a file, since the callback is only called on the master thread. Returns ------- results : list A list of results from the output of each ``worker()`` call. """ # If not the master just wait for instructions. 
if not self.is_master(): self.wait() return if callback is None: callback = _dummy_callback workerset = self.workers.copy() tasklist = [(tid, (worker, arg)) for tid, arg in enumerate(tasks)] resultlist = [None] * len(tasklist) pending = len(tasklist) while pending: if workerset and tasklist: worker = workerset.pop() taskid, task = tasklist.pop() log.log(_VERBOSE, "Sent task %s to worker %s with tag %s", task[1], worker, taskid) self.comm.send(task, dest=worker, tag=taskid) if tasklist: flag = self.comm.Iprobe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG) if not flag: continue else: self.comm.Probe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG) status = MPI.Status() result = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status) worker = status.source taskid = status.tag log.log(_VERBOSE, "Master received from worker %s with tag %s", worker, taskid) callback(result) workerset.add(worker) resultlist[taskid] = result pending -= 1 return resultlist
python
def map(self, worker, tasks, callback=None): """Evaluate a function or callable on each task in parallel using MPI. The callable, ``worker``, is called on each element of the ``tasks`` iterable. The results are returned in the expected order (symmetric with ``tasks``). Parameters ---------- worker : callable A function or callable object that is executed on each element of the specified ``tasks`` iterable. This object must be picklable (i.e. it can't be a function scoped within a function or a ``lambda`` function). This should accept a single positional argument and return a single object. tasks : iterable A list or iterable of tasks. Each task can be itself an iterable (e.g., tuple) of values or data to pass in to the worker function. callback : callable, optional An optional callback function (or callable) that is called with the result from each worker run and is executed on the master process. This is useful for, e.g., saving results to a file, since the callback is only called on the master thread. Returns ------- results : list A list of results from the output of each ``worker()`` call. """ # If not the master just wait for instructions. 
if not self.is_master(): self.wait() return if callback is None: callback = _dummy_callback workerset = self.workers.copy() tasklist = [(tid, (worker, arg)) for tid, arg in enumerate(tasks)] resultlist = [None] * len(tasklist) pending = len(tasklist) while pending: if workerset and tasklist: worker = workerset.pop() taskid, task = tasklist.pop() log.log(_VERBOSE, "Sent task %s to worker %s with tag %s", task[1], worker, taskid) self.comm.send(task, dest=worker, tag=taskid) if tasklist: flag = self.comm.Iprobe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG) if not flag: continue else: self.comm.Probe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG) status = MPI.Status() result = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status) worker = status.source taskid = status.tag log.log(_VERBOSE, "Master received from worker %s with tag %s", worker, taskid) callback(result) workerset.add(worker) resultlist[taskid] = result pending -= 1 return resultlist
Evaluate a function or callable on each task in parallel using MPI. The callable, ``worker``, is called on each element of the ``tasks`` iterable. The results are returned in the expected order (symmetric with ``tasks``). Parameters ---------- worker : callable A function or callable object that is executed on each element of the specified ``tasks`` iterable. This object must be picklable (i.e. it can't be a function scoped within a function or a ``lambda`` function). This should accept a single positional argument and return a single object. tasks : iterable A list or iterable of tasks. Each task can be itself an iterable (e.g., tuple) of values or data to pass in to the worker function. callback : callable, optional An optional callback function (or callable) that is called with the result from each worker run and is executed on the master process. This is useful for, e.g., saving results to a file, since the callback is only called on the master thread. Returns ------- results : list A list of results from the output of each ``worker()`` call.
https://github.com/adrn/schwimmbad/blob/d2538b77c821a56096f92eafecd1c08dd02f1f58/schwimmbad/mpi.py#L108-L180
adrn/schwimmbad
schwimmbad/mpi.py
MPIPool.close
def close(self): """ Tell all the workers to quit.""" if self.is_worker(): return for worker in self.workers: self.comm.send(None, worker, 0)
python
def close(self): """ Tell all the workers to quit.""" if self.is_worker(): return for worker in self.workers: self.comm.send(None, worker, 0)
Tell all the workers to quit.
https://github.com/adrn/schwimmbad/blob/d2538b77c821a56096f92eafecd1c08dd02f1f58/schwimmbad/mpi.py#L182-L188
adrn/schwimmbad
setup.py
update_git_devstr
def update_git_devstr(version, path=None): """ Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate. """ try: # Quick way to determine if we're in git or not - returns '' if not devstr = get_git_devstr(sha=True, show_warning=False, path=path) except OSError: return version if not devstr: # Probably not in git so just pass silently return version if 'dev' in version: # update to the current git revision version_base = version.split('.dev', 1)[0] devstr = get_git_devstr(sha=False, show_warning=False, path=path) return version_base + '.dev' + devstr else: # otherwise it's already the true/release version return version
python
def update_git_devstr(version, path=None): """ Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate. """ try: # Quick way to determine if we're in git or not - returns '' if not devstr = get_git_devstr(sha=True, show_warning=False, path=path) except OSError: return version if not devstr: # Probably not in git so just pass silently return version if 'dev' in version: # update to the current git revision version_base = version.split('.dev', 1)[0] devstr = get_git_devstr(sha=False, show_warning=False, path=path) return version_base + '.dev' + devstr else: # otherwise it's already the true/release version return version
Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate.
https://github.com/adrn/schwimmbad/blob/d2538b77c821a56096f92eafecd1c08dd02f1f58/setup.py#L58-L82
adrn/schwimmbad
setup.py
get_git_devstr
def get_git_devstr(sha=False, show_warning=True, path=None): """ Determines the number of revisions in this repository. Parameters ---------- sha : bool If True, the full SHA1 hash will be returned. Otherwise, the total count of commits in the repository will be used as a "revision number". show_warning : bool If True, issue a warning if git returns an error code, otherwise errors pass silently. path : str or None If a string, specifies the directory to look in to find the git repository. If `None`, the current working directory is used, and must be the root of the git repository. If given a filename it uses the directory containing that file. Returns ------- devversion : str Either a string with the revision number (if `sha` is False), the SHA1 hash of the current commit (if `sha` is True), or an empty string if git version info could not be identified. """ if path is None: path = os.getcwd() if not _get_repo_path(path, levels=0): return '' if not os.path.isdir(path): path = os.path.abspath(os.path.dirname(path)) if sha: # Faster for getting just the hash of HEAD cmd = ['rev-parse', 'HEAD'] else: cmd = ['rev-list', '--count', 'HEAD'] def run_git(cmd): try: p = subprocess.Popen(['git'] + cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() except OSError as e: if show_warning: warnings.warn('Error running git: ' + str(e)) return (None, b'', b'') if p.returncode == 128: if show_warning: warnings.warn('No git repository present at {0!r}! 
Using ' 'default dev version.'.format(path)) return (p.returncode, b'', b'') if p.returncode == 129: if show_warning: warnings.warn('Your git looks old (does it support {0}?); ' 'consider upgrading to v1.7.2 or ' 'later.'.format(cmd[0])) return (p.returncode, stdout, stderr) elif p.returncode != 0: if show_warning: warnings.warn('Git failed while determining revision ' 'count: {0}'.format(_decode_stdio(stderr))) return (p.returncode, stdout, stderr) return p.returncode, stdout, stderr returncode, stdout, stderr = run_git(cmd) if not sha and returncode == 129: # git returns 129 if a command option failed to parse; in # particular this could happen in git versions older than 1.7.2 # where the --count option is not supported # Also use --abbrev-commit and --abbrev=0 to display the minimum # number of characters needed per-commit (rather than the full hash) cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD'] returncode, stdout, stderr = run_git(cmd) # Fall back on the old method of getting all revisions and counting # the lines if returncode == 0: return str(stdout.count(b'\n')) else: return '' elif sha: return _decode_stdio(stdout)[:40] else: return _decode_stdio(stdout).strip()
python
def get_git_devstr(sha=False, show_warning=True, path=None): """ Determines the number of revisions in this repository. Parameters ---------- sha : bool If True, the full SHA1 hash will be returned. Otherwise, the total count of commits in the repository will be used as a "revision number". show_warning : bool If True, issue a warning if git returns an error code, otherwise errors pass silently. path : str or None If a string, specifies the directory to look in to find the git repository. If `None`, the current working directory is used, and must be the root of the git repository. If given a filename it uses the directory containing that file. Returns ------- devversion : str Either a string with the revision number (if `sha` is False), the SHA1 hash of the current commit (if `sha` is True), or an empty string if git version info could not be identified. """ if path is None: path = os.getcwd() if not _get_repo_path(path, levels=0): return '' if not os.path.isdir(path): path = os.path.abspath(os.path.dirname(path)) if sha: # Faster for getting just the hash of HEAD cmd = ['rev-parse', 'HEAD'] else: cmd = ['rev-list', '--count', 'HEAD'] def run_git(cmd): try: p = subprocess.Popen(['git'] + cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() except OSError as e: if show_warning: warnings.warn('Error running git: ' + str(e)) return (None, b'', b'') if p.returncode == 128: if show_warning: warnings.warn('No git repository present at {0!r}! 
Using ' 'default dev version.'.format(path)) return (p.returncode, b'', b'') if p.returncode == 129: if show_warning: warnings.warn('Your git looks old (does it support {0}?); ' 'consider upgrading to v1.7.2 or ' 'later.'.format(cmd[0])) return (p.returncode, stdout, stderr) elif p.returncode != 0: if show_warning: warnings.warn('Git failed while determining revision ' 'count: {0}'.format(_decode_stdio(stderr))) return (p.returncode, stdout, stderr) return p.returncode, stdout, stderr returncode, stdout, stderr = run_git(cmd) if not sha and returncode == 129: # git returns 129 if a command option failed to parse; in # particular this could happen in git versions older than 1.7.2 # where the --count option is not supported # Also use --abbrev-commit and --abbrev=0 to display the minimum # number of characters needed per-commit (rather than the full hash) cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD'] returncode, stdout, stderr = run_git(cmd) # Fall back on the old method of getting all revisions and counting # the lines if returncode == 0: return str(stdout.count(b'\n')) else: return '' elif sha: return _decode_stdio(stdout)[:40] else: return _decode_stdio(stdout).strip()
Determines the number of revisions in this repository. Parameters ---------- sha : bool If True, the full SHA1 hash will be returned. Otherwise, the total count of commits in the repository will be used as a "revision number". show_warning : bool If True, issue a warning if git returns an error code, otherwise errors pass silently. path : str or None If a string, specifies the directory to look in to find the git repository. If `None`, the current working directory is used, and must be the root of the git repository. If given a filename it uses the directory containing that file. Returns ------- devversion : str Either a string with the revision number (if `sha` is False), the SHA1 hash of the current commit (if `sha` is True), or an empty string if git version info could not be identified.
https://github.com/adrn/schwimmbad/blob/d2538b77c821a56096f92eafecd1c08dd02f1f58/setup.py#L85-L179
adrn/schwimmbad
setup.py
_get_repo_path
def _get_repo_path(pathname, levels=None): """ Given a file or directory name, determine the root of the git repository this path is under. If given, this won't look any higher than ``levels`` (that is, if ``levels=0`` then the given path must be the root of the git repository and is returned if so. Returns `None` if the given path could not be determined to belong to a git repo. """ if os.path.isfile(pathname): current_dir = os.path.abspath(os.path.dirname(pathname)) elif os.path.isdir(pathname): current_dir = os.path.abspath(pathname) else: return None current_level = 0 while levels is None or current_level <= levels: if os.path.exists(os.path.join(current_dir, '.git')): return current_dir current_level += 1 if current_dir == os.path.dirname(current_dir): break current_dir = os.path.dirname(current_dir) return None
python
def _get_repo_path(pathname, levels=None): """ Given a file or directory name, determine the root of the git repository this path is under. If given, this won't look any higher than ``levels`` (that is, if ``levels=0`` then the given path must be the root of the git repository and is returned if so. Returns `None` if the given path could not be determined to belong to a git repo. """ if os.path.isfile(pathname): current_dir = os.path.abspath(os.path.dirname(pathname)) elif os.path.isdir(pathname): current_dir = os.path.abspath(pathname) else: return None current_level = 0 while levels is None or current_level <= levels: if os.path.exists(os.path.join(current_dir, '.git')): return current_dir current_level += 1 if current_dir == os.path.dirname(current_dir): break current_dir = os.path.dirname(current_dir) return None
Given a file or directory name, determine the root of the git repository this path is under. If given, this won't look any higher than ``levels`` (that is, if ``levels=0`` then the given path must be the root of the git repository and is returned if so. Returns `None` if the given path could not be determined to belong to a git repo.
https://github.com/adrn/schwimmbad/blob/d2538b77c821a56096f92eafecd1c08dd02f1f58/setup.py#L182-L212
adrn/schwimmbad
schwimmbad/serial.py
SerialPool.map
def map(self, func, iterable, callback=None): """A wrapper around the built-in ``map()`` function to provide a consistent interface with the other ``Pool`` classes. Parameters ---------- worker : callable A function or callable object that is executed on each element of the specified ``tasks`` iterable. This object must be picklable (i.e. it can't be a function scoped within a function or a ``lambda`` function). This should accept a single positional argument and return a single object. tasks : iterable A list or iterable of tasks. Each task can be itself an iterable (e.g., tuple) of values or data to pass in to the worker function. callback : callable, optional An optional callback function (or callable) that is called with the result from each worker run and is executed on the master process. This is useful for, e.g., saving results to a file, since the callback is only called on the master thread. Returns ------- results : generator """ return self._call_callback(callback, map(func, iterable))
python
def map(self, func, iterable, callback=None): """A wrapper around the built-in ``map()`` function to provide a consistent interface with the other ``Pool`` classes. Parameters ---------- worker : callable A function or callable object that is executed on each element of the specified ``tasks`` iterable. This object must be picklable (i.e. it can't be a function scoped within a function or a ``lambda`` function). This should accept a single positional argument and return a single object. tasks : iterable A list or iterable of tasks. Each task can be itself an iterable (e.g., tuple) of values or data to pass in to the worker function. callback : callable, optional An optional callback function (or callable) that is called with the result from each worker run and is executed on the master process. This is useful for, e.g., saving results to a file, since the callback is only called on the master thread. Returns ------- results : generator """ return self._call_callback(callback, map(func, iterable))
A wrapper around the built-in ``map()`` function to provide a consistent interface with the other ``Pool`` classes. Parameters ---------- worker : callable A function or callable object that is executed on each element of the specified ``tasks`` iterable. This object must be picklable (i.e. it can't be a function scoped within a function or a ``lambda`` function). This should accept a single positional argument and return a single object. tasks : iterable A list or iterable of tasks. Each task can be itself an iterable (e.g., tuple) of values or data to pass in to the worker function. callback : callable, optional An optional callback function (or callable) that is called with the result from each worker run and is executed on the master process. This is useful for, e.g., saving results to a file, since the callback is only called on the master thread. Returns ------- results : generator
https://github.com/adrn/schwimmbad/blob/d2538b77c821a56096f92eafecd1c08dd02f1f58/schwimmbad/serial.py#L26-L52
openvax/varcode
varcode/effects/translate.py
translate_codon
def translate_codon(codon, aa_pos): """Translate a single codon into a single amino acid or stop '*' Parameters ---------- codon : str Expected to be of length 3 aa_pos : int Codon/amino acid offset into the protein (starting from 0) """ # not handling rare Leucine or Valine starts! if aa_pos == 0 and codon in START_CODONS: return "M" elif codon in STOP_CODONS: return "*" else: return DNA_CODON_TABLE[codon]
python
def translate_codon(codon, aa_pos): """Translate a single codon into a single amino acid or stop '*' Parameters ---------- codon : str Expected to be of length 3 aa_pos : int Codon/amino acid offset into the protein (starting from 0) """ # not handling rare Leucine or Valine starts! if aa_pos == 0 and codon in START_CODONS: return "M" elif codon in STOP_CODONS: return "*" else: return DNA_CODON_TABLE[codon]
Translate a single codon into a single amino acid or stop '*' Parameters ---------- codon : str Expected to be of length 3 aa_pos : int Codon/amino acid offset into the protein (starting from 0)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/translate.py#L31-L47
openvax/varcode
varcode/effects/translate.py
translate
def translate( nucleotide_sequence, first_codon_is_start=True, to_stop=True, truncate=False): """Translates cDNA coding sequence into amino acid protein sequence. Should typically start with a start codon but allowing non-methionine first residues since the CDS we're translating might have been affected by a start loss mutation. The sequence may include the 3' UTR but will stop translation at the first encountered stop codon. Parameters ---------- nucleotide_sequence : BioPython Seq cDNA sequence first_codon_is_start : bool Treat the beginning of nucleotide_sequence (translates methionin) truncate : bool Truncate sequence if it's not a multiple of 3 (default = False) Returns BioPython Seq of amino acids """ if not isinstance(nucleotide_sequence, Seq): nucleotide_sequence = Seq(nucleotide_sequence) if truncate: # if sequence isn't a multiple of 3, truncate it so BioPython # doesn't complain n_nucleotides = int(len(nucleotide_sequence) / 3) * 3 nucleotide_sequence = nucleotide_sequence[:n_nucleotides] else: n_nucleotides = len(nucleotide_sequence) assert n_nucleotides % 3 == 0, \ ("Expected nucleotide sequence to be multiple of 3" " but got %s of length %d") % ( nucleotide_sequence, n_nucleotides) # passing cds=False to translate since we may want to deal with premature # stop codons protein_sequence = nucleotide_sequence.translate(to_stop=to_stop, cds=False) if first_codon_is_start and ( len(protein_sequence) == 0 or protein_sequence[0] != "M"): if nucleotide_sequence[:3] in START_CODONS: # TODO: figure out when these should be made into methionines # and when left as whatever amino acid they normally code for # e.g. Leucine start codons # See: DOI: 10.1371/journal.pbio.0020397 return "M" + protein_sequence[1:] else: raise ValueError( ("Expected first codon of %s to be start codon" " (one of %s) but got %s") % ( protein_sequence[:10], START_CODONS, nucleotide_sequence)) return protein_sequence
python
def translate( nucleotide_sequence, first_codon_is_start=True, to_stop=True, truncate=False): """Translates cDNA coding sequence into amino acid protein sequence. Should typically start with a start codon but allowing non-methionine first residues since the CDS we're translating might have been affected by a start loss mutation. The sequence may include the 3' UTR but will stop translation at the first encountered stop codon. Parameters ---------- nucleotide_sequence : BioPython Seq cDNA sequence first_codon_is_start : bool Treat the beginning of nucleotide_sequence (translates methionin) truncate : bool Truncate sequence if it's not a multiple of 3 (default = False) Returns BioPython Seq of amino acids """ if not isinstance(nucleotide_sequence, Seq): nucleotide_sequence = Seq(nucleotide_sequence) if truncate: # if sequence isn't a multiple of 3, truncate it so BioPython # doesn't complain n_nucleotides = int(len(nucleotide_sequence) / 3) * 3 nucleotide_sequence = nucleotide_sequence[:n_nucleotides] else: n_nucleotides = len(nucleotide_sequence) assert n_nucleotides % 3 == 0, \ ("Expected nucleotide sequence to be multiple of 3" " but got %s of length %d") % ( nucleotide_sequence, n_nucleotides) # passing cds=False to translate since we may want to deal with premature # stop codons protein_sequence = nucleotide_sequence.translate(to_stop=to_stop, cds=False) if first_codon_is_start and ( len(protein_sequence) == 0 or protein_sequence[0] != "M"): if nucleotide_sequence[:3] in START_CODONS: # TODO: figure out when these should be made into methionines # and when left as whatever amino acid they normally code for # e.g. Leucine start codons # See: DOI: 10.1371/journal.pbio.0020397 return "M" + protein_sequence[1:] else: raise ValueError( ("Expected first codon of %s to be start codon" " (one of %s) but got %s") % ( protein_sequence[:10], START_CODONS, nucleotide_sequence)) return protein_sequence
Translates cDNA coding sequence into amino acid protein sequence. Should typically start with a start codon but allowing non-methionine first residues since the CDS we're translating might have been affected by a start loss mutation. The sequence may include the 3' UTR but will stop translation at the first encountered stop codon. Parameters ---------- nucleotide_sequence : BioPython Seq cDNA sequence first_codon_is_start : bool Treat the beginning of nucleotide_sequence (translates methionin) truncate : bool Truncate sequence if it's not a multiple of 3 (default = False) Returns BioPython Seq of amino acids
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/translate.py#L50-L113
openvax/varcode
varcode/effects/translate.py
find_first_stop_codon
def find_first_stop_codon(nucleotide_sequence): """ Given a sequence of codons (expected to have length multiple of three), return index of first stop codon, or -1 if none is in the sequence. """ n_mutant_codons = len(nucleotide_sequence) // 3 for i in range(n_mutant_codons): codon = nucleotide_sequence[3 * i:3 * i + 3] if codon in STOP_CODONS: return i return -1
python
def find_first_stop_codon(nucleotide_sequence): """ Given a sequence of codons (expected to have length multiple of three), return index of first stop codon, or -1 if none is in the sequence. """ n_mutant_codons = len(nucleotide_sequence) // 3 for i in range(n_mutant_codons): codon = nucleotide_sequence[3 * i:3 * i + 3] if codon in STOP_CODONS: return i return -1
Given a sequence of codons (expected to have length multiple of three), return index of first stop codon, or -1 if none is in the sequence.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/translate.py#L116-L126
openvax/varcode
varcode/effects/translate.py
translate_in_frame_mutation
def translate_in_frame_mutation( transcript, ref_codon_start_offset, ref_codon_end_offset, mutant_codons): """ Returns: - mutant amino acid sequence - offset of first stop codon in the mutant sequence (or -1 if there was none) - boolean flag indicating whether any codons from the 3' UTR were used Parameters ---------- transcript : pyensembl.Transcript Reference transcript to which a cDNA mutation should be applied. ref_codon_start_offset : int Starting (base 0) integer offset into codons (character triplets) of the transcript's reference coding sequence. ref_codon_end_offset : int Final (base 0) integer offset into codons of the transcript's reference coding sequence. mutant_codons : str Nucleotide sequence to replace the reference codons with (expected to have length that is a multiple of three) """ mutant_stop_codon_index = find_first_stop_codon(mutant_codons) using_three_prime_utr = False if mutant_stop_codon_index != -1: mutant_codons = mutant_codons[:3 * mutant_stop_codon_index] elif ref_codon_end_offset > len(transcript.protein_sequence): # if the mutant codons didn't contain a stop but did mutate the # true reference stop codon then the translated sequence might involve # the 3' UTR three_prime_utr = transcript.three_prime_utr_sequence n_utr_codons = len(three_prime_utr) // 3 # trim the 3' UTR sequence to have a length that is a multiple of 3 truncated_utr_sequence = three_prime_utr[:n_utr_codons * 3] # note the offset of the first stop codon in the combined # nucleotide sequence of both the end of the CDS and the 3' UTR first_utr_stop_codon_index = find_first_stop_codon(truncated_utr_sequence) if first_utr_stop_codon_index > 0: # if there is a stop codon in the 3' UTR sequence and it's not the # very first codon using_three_prime_utr = True n_mutant_codons_before_utr = len(mutant_codons) // 3 mutant_stop_codon_index = n_mutant_codons_before_utr + first_utr_stop_codon_index # combine the in-frame mutant codons with the truncated sequence of # the 3' UTR 
mutant_codons += truncated_utr_sequence[:first_utr_stop_codon_index * 3] elif first_utr_stop_codon_index == -1: # if there is no stop codon in the 3' UTR sequence using_three_prime_utr = True mutant_codons += truncated_utr_sequence amino_acids = translate( mutant_codons, first_codon_is_start=(ref_codon_start_offset == 0)) return amino_acids, mutant_stop_codon_index, using_three_prime_utr
python
def translate_in_frame_mutation( transcript, ref_codon_start_offset, ref_codon_end_offset, mutant_codons): """ Returns: - mutant amino acid sequence - offset of first stop codon in the mutant sequence (or -1 if there was none) - boolean flag indicating whether any codons from the 3' UTR were used Parameters ---------- transcript : pyensembl.Transcript Reference transcript to which a cDNA mutation should be applied. ref_codon_start_offset : int Starting (base 0) integer offset into codons (character triplets) of the transcript's reference coding sequence. ref_codon_end_offset : int Final (base 0) integer offset into codons of the transcript's reference coding sequence. mutant_codons : str Nucleotide sequence to replace the reference codons with (expected to have length that is a multiple of three) """ mutant_stop_codon_index = find_first_stop_codon(mutant_codons) using_three_prime_utr = False if mutant_stop_codon_index != -1: mutant_codons = mutant_codons[:3 * mutant_stop_codon_index] elif ref_codon_end_offset > len(transcript.protein_sequence): # if the mutant codons didn't contain a stop but did mutate the # true reference stop codon then the translated sequence might involve # the 3' UTR three_prime_utr = transcript.three_prime_utr_sequence n_utr_codons = len(three_prime_utr) // 3 # trim the 3' UTR sequence to have a length that is a multiple of 3 truncated_utr_sequence = three_prime_utr[:n_utr_codons * 3] # note the offset of the first stop codon in the combined # nucleotide sequence of both the end of the CDS and the 3' UTR first_utr_stop_codon_index = find_first_stop_codon(truncated_utr_sequence) if first_utr_stop_codon_index > 0: # if there is a stop codon in the 3' UTR sequence and it's not the # very first codon using_three_prime_utr = True n_mutant_codons_before_utr = len(mutant_codons) // 3 mutant_stop_codon_index = n_mutant_codons_before_utr + first_utr_stop_codon_index # combine the in-frame mutant codons with the truncated sequence of # the 3' UTR 
mutant_codons += truncated_utr_sequence[:first_utr_stop_codon_index * 3] elif first_utr_stop_codon_index == -1: # if there is no stop codon in the 3' UTR sequence using_three_prime_utr = True mutant_codons += truncated_utr_sequence amino_acids = translate( mutant_codons, first_codon_is_start=(ref_codon_start_offset == 0)) return amino_acids, mutant_stop_codon_index, using_three_prime_utr
Returns: - mutant amino acid sequence - offset of first stop codon in the mutant sequence (or -1 if there was none) - boolean flag indicating whether any codons from the 3' UTR were used Parameters ---------- transcript : pyensembl.Transcript Reference transcript to which a cDNA mutation should be applied. ref_codon_start_offset : int Starting (base 0) integer offset into codons (character triplets) of the transcript's reference coding sequence. ref_codon_end_offset : int Final (base 0) integer offset into codons of the transcript's reference coding sequence. mutant_codons : str Nucleotide sequence to replace the reference codons with (expected to have length that is a multiple of three)
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/translate.py#L129-L194
openvax/varcode
varcode/cli/genes_script.py
main
def main(args_list=None): """ Script which loads variants and annotates them with overlapping genes. Example usage: varcode-genes --vcf mutect.vcf \ --vcf strelka.vcf \ --maf tcga_brca.maf \ --variant chr1 498584 C G \ --json-variants more_variants.json """ print_version_info() if args_list is None: args_list = sys.argv[1:] args = arg_parser.parse_args(args_list) variants = variant_collection_from_args(args) variants_dataframe = variants.to_dataframe() logger.info('\n%s', variants_dataframe) if args.output_csv: variants_dataframe.to_csv(args.output_csv, index=False)
python
def main(args_list=None): """ Script which loads variants and annotates them with overlapping genes. Example usage: varcode-genes --vcf mutect.vcf \ --vcf strelka.vcf \ --maf tcga_brca.maf \ --variant chr1 498584 C G \ --json-variants more_variants.json """ print_version_info() if args_list is None: args_list = sys.argv[1:] args = arg_parser.parse_args(args_list) variants = variant_collection_from_args(args) variants_dataframe = variants.to_dataframe() logger.info('\n%s', variants_dataframe) if args.output_csv: variants_dataframe.to_csv(args.output_csv, index=False)
Script which loads variants and annotates them with overlapping genes. Example usage: varcode-genes --vcf mutect.vcf \ --vcf strelka.vcf \ --maf tcga_brca.maf \ --variant chr1 498584 C G \ --json-variants more_variants.json
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/cli/genes_script.py#L32-L52
openvax/varcode
varcode/maf.py
load_maf_dataframe
def load_maf_dataframe(path, nrows=None, raise_on_error=True, encoding=None): """ Load the guaranteed columns of a TCGA MAF file into a DataFrame Parameters ---------- path : str Path to MAF file nrows : int Optional limit to number of rows loaded raise_on_error : bool Raise an exception upon encountering an error or log an error encoding : str, optional Encoding to use for UTF when reading MAF file. """ require_string(path, "Path to MAF") n_basic_columns = len(MAF_COLUMN_NAMES) # pylint: disable=no-member # pylint gets confused by read_csv df = pandas.read_csv( path, comment="#", sep="\t", low_memory=False, skip_blank_lines=True, header=0, encoding=encoding) if len(df.columns) < n_basic_columns: error_message = ( "Too few columns in MAF file %s, expected %d but got %d : %s" % ( path, n_basic_columns, len(df.columns), df.columns)) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) # check each pair of expected/actual column names to make sure they match for expected, actual in zip(MAF_COLUMN_NAMES, df.columns): if expected != actual: # MAFs in the wild have capitalization differences in their # column names, normalize them to always use the names above if expected.lower() == actual.lower(): # using DataFrame.rename in Python 2.7.x doesn't seem to # work for some files, possibly because Pandas treats # unicode vs. str columns as different? df[expected] = df[actual] del df[actual] else: error_message = ( "Expected column %s but got %s" % (expected, actual)) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) return df
python
def load_maf_dataframe(path, nrows=None, raise_on_error=True, encoding=None): """ Load the guaranteed columns of a TCGA MAF file into a DataFrame Parameters ---------- path : str Path to MAF file nrows : int Optional limit to number of rows loaded raise_on_error : bool Raise an exception upon encountering an error or log an error encoding : str, optional Encoding to use for UTF when reading MAF file. """ require_string(path, "Path to MAF") n_basic_columns = len(MAF_COLUMN_NAMES) # pylint: disable=no-member # pylint gets confused by read_csv df = pandas.read_csv( path, comment="#", sep="\t", low_memory=False, skip_blank_lines=True, header=0, encoding=encoding) if len(df.columns) < n_basic_columns: error_message = ( "Too few columns in MAF file %s, expected %d but got %d : %s" % ( path, n_basic_columns, len(df.columns), df.columns)) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) # check each pair of expected/actual column names to make sure they match for expected, actual in zip(MAF_COLUMN_NAMES, df.columns): if expected != actual: # MAFs in the wild have capitalization differences in their # column names, normalize them to always use the names above if expected.lower() == actual.lower(): # using DataFrame.rename in Python 2.7.x doesn't seem to # work for some files, possibly because Pandas treats # unicode vs. str columns as different? df[expected] = df[actual] del df[actual] else: error_message = ( "Expected column %s but got %s" % (expected, actual)) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) return df
Load the guaranteed columns of a TCGA MAF file into a DataFrame Parameters ---------- path : str Path to MAF file nrows : int Optional limit to number of rows loaded raise_on_error : bool Raise an exception upon encountering an error or log an error encoding : str, optional Encoding to use for UTF when reading MAF file.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/maf.py#L51-L112
openvax/varcode
varcode/maf.py
load_maf
def load_maf( path, optional_cols=[], sort_key=variant_ascending_position_sort_key, distinct=True, raise_on_error=True, encoding=None): """ Load reference name and Variant objects from MAF filename. Parameters ---------- path : str Path to MAF (*.maf). optional_cols : list, optional A list of MAF columns to include as metadata if they are present in the MAF. Does not result in an error if those columns are not present. sort_key : fn Function which maps each element to a sorting criterion. Set to None to not to sort the variants. distinct : bool Don't keep repeated variants raise_on_error : bool Raise an exception upon encountering an error or just log a warning. encoding : str, optional Encoding to use for UTF when reading MAF file. """ # pylint: disable=no-member # pylint gets confused by read_csv inside load_maf_dataframe maf_df = load_maf_dataframe(path, raise_on_error=raise_on_error, encoding=encoding) if len(maf_df) == 0 and raise_on_error: raise ValueError("Empty MAF file %s" % path) ensembl_objects = {} variants = [] metadata = {} for _, x in maf_df.iterrows(): contig = x.Chromosome if isnull(contig): error_message = "Invalid contig name: %s" % (contig,) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) continue start_pos = x.Start_Position ref = x.Reference_Allele # it's possible in a MAF file to have multiple Ensembl releases # mixed in a single MAF file (the genome assembly is # specified by the NCBI_Build column) ncbi_build = x.NCBI_Build if ncbi_build in ensembl_objects: ensembl = ensembl_objects[ncbi_build] else: if isinstance(ncbi_build, int): reference_name = "B%d" % ncbi_build else: reference_name = str(ncbi_build) ensembl = infer_genome(reference_name) ensembl_objects[ncbi_build] = ensembl # have to try both Tumor_Seq_Allele1 and Tumor_Seq_Allele2 # to figure out which is different from the reference allele if x.Tumor_Seq_Allele1 != ref: alt = x.Tumor_Seq_Allele1 else: if x.Tumor_Seq_Allele2 == ref: error_message 
= ( "Both tumor alleles agree with reference %s: %s" % ( ref, x,)) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) continue alt = x.Tumor_Seq_Allele2 variant = Variant( contig, start_pos, str(ref), str(alt), ensembl=ensembl) # keep metadata about the variant and its TCGA annotation metadata[variant] = { 'Hugo_Symbol': x.Hugo_Symbol, 'Center': x.Center, 'Strand': x.Strand, 'Variant_Classification': x.Variant_Classification, 'Variant_Type': x.Variant_Type, 'dbSNP_RS': x.dbSNP_RS, 'dbSNP_Val_Status': x.dbSNP_Val_Status, 'Tumor_Sample_Barcode': x.Tumor_Sample_Barcode, 'Matched_Norm_Sample_Barcode': x.Matched_Norm_Sample_Barcode, } for optional_col in optional_cols: if optional_col in x: metadata[variant][optional_col] = x[optional_col] variants.append(variant) return VariantCollection( variants=variants, source_to_metadata_dict={path: metadata}, sort_key=sort_key, distinct=distinct)
python
def load_maf( path, optional_cols=[], sort_key=variant_ascending_position_sort_key, distinct=True, raise_on_error=True, encoding=None): """ Load reference name and Variant objects from MAF filename. Parameters ---------- path : str Path to MAF (*.maf). optional_cols : list, optional A list of MAF columns to include as metadata if they are present in the MAF. Does not result in an error if those columns are not present. sort_key : fn Function which maps each element to a sorting criterion. Set to None to not to sort the variants. distinct : bool Don't keep repeated variants raise_on_error : bool Raise an exception upon encountering an error or just log a warning. encoding : str, optional Encoding to use for UTF when reading MAF file. """ # pylint: disable=no-member # pylint gets confused by read_csv inside load_maf_dataframe maf_df = load_maf_dataframe(path, raise_on_error=raise_on_error, encoding=encoding) if len(maf_df) == 0 and raise_on_error: raise ValueError("Empty MAF file %s" % path) ensembl_objects = {} variants = [] metadata = {} for _, x in maf_df.iterrows(): contig = x.Chromosome if isnull(contig): error_message = "Invalid contig name: %s" % (contig,) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) continue start_pos = x.Start_Position ref = x.Reference_Allele # it's possible in a MAF file to have multiple Ensembl releases # mixed in a single MAF file (the genome assembly is # specified by the NCBI_Build column) ncbi_build = x.NCBI_Build if ncbi_build in ensembl_objects: ensembl = ensembl_objects[ncbi_build] else: if isinstance(ncbi_build, int): reference_name = "B%d" % ncbi_build else: reference_name = str(ncbi_build) ensembl = infer_genome(reference_name) ensembl_objects[ncbi_build] = ensembl # have to try both Tumor_Seq_Allele1 and Tumor_Seq_Allele2 # to figure out which is different from the reference allele if x.Tumor_Seq_Allele1 != ref: alt = x.Tumor_Seq_Allele1 else: if x.Tumor_Seq_Allele2 == ref: error_message 
= ( "Both tumor alleles agree with reference %s: %s" % ( ref, x,)) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) continue alt = x.Tumor_Seq_Allele2 variant = Variant( contig, start_pos, str(ref), str(alt), ensembl=ensembl) # keep metadata about the variant and its TCGA annotation metadata[variant] = { 'Hugo_Symbol': x.Hugo_Symbol, 'Center': x.Center, 'Strand': x.Strand, 'Variant_Classification': x.Variant_Classification, 'Variant_Type': x.Variant_Type, 'dbSNP_RS': x.dbSNP_RS, 'dbSNP_Val_Status': x.dbSNP_Val_Status, 'Tumor_Sample_Barcode': x.Tumor_Sample_Barcode, 'Matched_Norm_Sample_Barcode': x.Matched_Norm_Sample_Barcode, } for optional_col in optional_cols: if optional_col in x: metadata[variant][optional_col] = x[optional_col] variants.append(variant) return VariantCollection( variants=variants, source_to_metadata_dict={path: metadata}, sort_key=sort_key, distinct=distinct)
Load reference name and Variant objects from MAF filename. Parameters ---------- path : str Path to MAF (*.maf). optional_cols : list, optional A list of MAF columns to include as metadata if they are present in the MAF. Does not result in an error if those columns are not present. sort_key : fn Function which maps each element to a sorting criterion. Set to None to not to sort the variants. distinct : bool Don't keep repeated variants raise_on_error : bool Raise an exception upon encountering an error or just log a warning. encoding : str, optional Encoding to use for UTF when reading MAF file.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/maf.py#L114-L229
openvax/varcode
varcode/effects/effect_ordering.py
apply_to_field_if_exists
def apply_to_field_if_exists(effect, field_name, fn, default): """ Apply function to specified field of effect if it is not None, otherwise return default. """ value = getattr(effect, field_name, None) if value is None: return default else: return fn(value)
python
def apply_to_field_if_exists(effect, field_name, fn, default): """ Apply function to specified field of effect if it is not None, otherwise return default. """ value = getattr(effect, field_name, None) if value is None: return default else: return fn(value)
Apply function to specified field of effect if it is not None, otherwise return default.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L103-L112
openvax/varcode
varcode/effects/effect_ordering.py
apply_to_transcript_if_exists
def apply_to_transcript_if_exists(effect, fn, default): """ Apply function to transcript associated with effect, if it exists, otherwise return default. """ return apply_to_field_if_exists( effect=effect, field_name="transcript", fn=fn, default=default)
python
def apply_to_transcript_if_exists(effect, fn, default): """ Apply function to transcript associated with effect, if it exists, otherwise return default. """ return apply_to_field_if_exists( effect=effect, field_name="transcript", fn=fn, default=default)
Apply function to transcript associated with effect, if it exists, otherwise return default.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L115-L124
openvax/varcode
varcode/effects/effect_ordering.py
number_exons_in_associated_transcript
def number_exons_in_associated_transcript(effect): """ Number of exons on transcript associated with effect, if there is one (otherwise return 0). """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: len(t.exons), default=0)
python
def number_exons_in_associated_transcript(effect): """ Number of exons on transcript associated with effect, if there is one (otherwise return 0). """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: len(t.exons), default=0)
Number of exons on transcript associated with effect, if there is one (otherwise return 0).
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L135-L143
openvax/varcode
varcode/effects/effect_ordering.py
cds_length_of_associated_transcript
def cds_length_of_associated_transcript(effect): """ Length of coding sequence of transcript associated with effect, if there is one (otherwise return 0). """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: len(t.coding_sequence) if (t.complete and t.coding_sequence) else 0, default=0)
python
def cds_length_of_associated_transcript(effect): """ Length of coding sequence of transcript associated with effect, if there is one (otherwise return 0). """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: len(t.coding_sequence) if (t.complete and t.coding_sequence) else 0, default=0)
Length of coding sequence of transcript associated with effect, if there is one (otherwise return 0).
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L146-L154
openvax/varcode
varcode/effects/effect_ordering.py
length_of_associated_transcript
def length_of_associated_transcript(effect): """ Length of spliced mRNA sequence of transcript associated with effect, if there is one (otherwise return 0). """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: len(t.sequence), default=0)
python
def length_of_associated_transcript(effect): """ Length of spliced mRNA sequence of transcript associated with effect, if there is one (otherwise return 0). """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: len(t.sequence), default=0)
Length of spliced mRNA sequence of transcript associated with effect, if there is one (otherwise return 0).
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L157-L165
openvax/varcode
varcode/effects/effect_ordering.py
name_of_associated_transcript
def name_of_associated_transcript(effect): """ Name of transcript associated with effect, if there is one (otherwise return ""). """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.name, default="")
python
def name_of_associated_transcript(effect): """ Name of transcript associated with effect, if there is one (otherwise return ""). """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.name, default="")
Name of transcript associated with effect, if there is one (otherwise return "").
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L168-L176
openvax/varcode
varcode/effects/effect_ordering.py
gene_id_of_associated_transcript
def gene_id_of_associated_transcript(effect): """ Ensembl gene ID of transcript associated with effect, returns None if effect does not have transcript. """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.gene_id, default=None)
python
def gene_id_of_associated_transcript(effect): """ Ensembl gene ID of transcript associated with effect, returns None if effect does not have transcript. """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.gene_id, default=None)
Ensembl gene ID of transcript associated with effect, returns None if effect does not have transcript.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L179-L187
openvax/varcode
varcode/effects/effect_ordering.py
effect_has_complete_transcript
def effect_has_complete_transcript(effect): """ Parameters ---------- effect : subclass of MutationEffect Returns True if effect has transcript and that transcript has complete CDS """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.complete, default=False)
python
def effect_has_complete_transcript(effect): """ Parameters ---------- effect : subclass of MutationEffect Returns True if effect has transcript and that transcript has complete CDS """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.complete, default=False)
Parameters ---------- effect : subclass of MutationEffect Returns True if effect has transcript and that transcript has complete CDS
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L190-L201
openvax/varcode
varcode/effects/effect_ordering.py
effect_associated_with_protein_coding_gene
def effect_associated_with_protein_coding_gene(effect): """ Parameters ---------- effect : subclass of MutationEffect Returns True if effect is associated with a gene and that gene has a protein_coding biotype. """ return apply_to_gene_if_exists( effect=effect, fn=lambda g: g.biotype == "protein_coding", default=False)
python
def effect_associated_with_protein_coding_gene(effect): """ Parameters ---------- effect : subclass of MutationEffect Returns True if effect is associated with a gene and that gene has a protein_coding biotype. """ return apply_to_gene_if_exists( effect=effect, fn=lambda g: g.biotype == "protein_coding", default=False)
Parameters ---------- effect : subclass of MutationEffect Returns True if effect is associated with a gene and that gene has a protein_coding biotype.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L204-L216
openvax/varcode
varcode/effects/effect_ordering.py
effect_associated_with_protein_coding_transcript
def effect_associated_with_protein_coding_transcript(effect): """ Parameters ---------- effect : subclass of MutationEffect Returns True if effect is associated with a transcript and that transcript has a protein_coding biotype. """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.biotype == "protein_coding", default=False)
python
def effect_associated_with_protein_coding_transcript(effect): """ Parameters ---------- effect : subclass of MutationEffect Returns True if effect is associated with a transcript and that transcript has a protein_coding biotype. """ return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.biotype == "protein_coding", default=False)
Parameters ---------- effect : subclass of MutationEffect Returns True if effect is associated with a transcript and that transcript has a protein_coding biotype.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L219-L231
openvax/varcode
varcode/effects/effect_ordering.py
parse_transcript_number
def parse_transcript_number(effect): """ Try to parse the number at the end of a transcript name associated with an effect. e.g. TP53-001 returns the integer 1. Parameters ---------- effect : subclass of MutationEffect Returns int """ name = name_of_associated_transcript(effect) if "-" not in name: return 0 parts = name.split("-") last_part = parts[-1] if last_part.isdigit(): return int(last_part) else: return 0
python
def parse_transcript_number(effect): """ Try to parse the number at the end of a transcript name associated with an effect. e.g. TP53-001 returns the integer 1. Parameters ---------- effect : subclass of MutationEffect Returns int """ name = name_of_associated_transcript(effect) if "-" not in name: return 0 parts = name.split("-") last_part = parts[-1] if last_part.isdigit(): return int(last_part) else: return 0
Try to parse the number at the end of a transcript name associated with an effect. e.g. TP53-001 returns the integer 1. Parameters ---------- effect : subclass of MutationEffect Returns int
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L249-L270
openvax/varcode
varcode/effects/effect_ordering.py
multi_gene_effect_sort_key
def multi_gene_effect_sort_key(effect): """ This function acts as a sort key for choosing the highest priority effect across multiple genes (so does not assume that effects might involve the same start/stop codons). Returns tuple with the following elements: 1) Integer priority of the effect type. 2) Does the associated gene have a "protein_coding" biotype? False if no gene is associated with effect. 3) Does the associated transcript have a "protein_coding" biotype? False if no transcript is associated with effect. 4) Is the associated transcript complete? False if no transcript is associated with effect. 5) CDS length This value will be 0 if the effect has no associated transcript or if the transcript is noncoding or incomplete 6) Total length of the transcript This value will be 0 intra/intergenic variants effects without an associated transcript. 7) Number of exons This value will be 0 intra/intergenic variants effects without an associated transcript. 8) If everything is the same up this point then let's use the very sloppy heuristic of preferring transcripts like "TP53-201" over "TP53-206", so anything ending with "01" is considered better. 9) Lastly, if we end up with two transcripts like "TP53-202" and "TP53-203", prefer the one with the lowest number in the name. """ return tuple([ effect_priority(effect), effect_associated_with_protein_coding_gene(effect), effect_associated_with_protein_coding_transcript(effect), effect_has_complete_transcript(effect), cds_length_of_associated_transcript(effect), length_of_associated_transcript(effect), number_exons_in_associated_transcript(effect), transcript_name_ends_with_01(effect), -parse_transcript_number(effect) ])
python
def multi_gene_effect_sort_key(effect): """ This function acts as a sort key for choosing the highest priority effect across multiple genes (so does not assume that effects might involve the same start/stop codons). Returns tuple with the following elements: 1) Integer priority of the effect type. 2) Does the associated gene have a "protein_coding" biotype? False if no gene is associated with effect. 3) Does the associated transcript have a "protein_coding" biotype? False if no transcript is associated with effect. 4) Is the associated transcript complete? False if no transcript is associated with effect. 5) CDS length This value will be 0 if the effect has no associated transcript or if the transcript is noncoding or incomplete 6) Total length of the transcript This value will be 0 intra/intergenic variants effects without an associated transcript. 7) Number of exons This value will be 0 intra/intergenic variants effects without an associated transcript. 8) If everything is the same up this point then let's use the very sloppy heuristic of preferring transcripts like "TP53-201" over "TP53-206", so anything ending with "01" is considered better. 9) Lastly, if we end up with two transcripts like "TP53-202" and "TP53-203", prefer the one with the lowest number in the name. """ return tuple([ effect_priority(effect), effect_associated_with_protein_coding_gene(effect), effect_associated_with_protein_coding_transcript(effect), effect_has_complete_transcript(effect), cds_length_of_associated_transcript(effect), length_of_associated_transcript(effect), number_exons_in_associated_transcript(effect), transcript_name_ends_with_01(effect), -parse_transcript_number(effect) ])
This function acts as a sort key for choosing the highest priority effect across multiple genes (so does not assume that effects might involve the same start/stop codons). Returns tuple with the following elements: 1) Integer priority of the effect type. 2) Does the associated gene have a "protein_coding" biotype? False if no gene is associated with effect. 3) Does the associated transcript have a "protein_coding" biotype? False if no transcript is associated with effect. 4) Is the associated transcript complete? False if no transcript is associated with effect. 5) CDS length This value will be 0 if the effect has no associated transcript or if the transcript is noncoding or incomplete 6) Total length of the transcript This value will be 0 intra/intergenic variants effects without an associated transcript. 7) Number of exons This value will be 0 intra/intergenic variants effects without an associated transcript. 8) If everything is the same up this point then let's use the very sloppy heuristic of preferring transcripts like "TP53-201" over "TP53-206", so anything ending with "01" is considered better. 9) Lastly, if we end up with two transcripts like "TP53-202" and "TP53-203", prefer the one with the lowest number in the name.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L273-L313
openvax/varcode
varcode/effects/effect_ordering.py
select_between_exonic_splice_site_and_alternate_effect
def select_between_exonic_splice_site_and_alternate_effect(effect): """ If the given effect is an ExonicSpliceSite then it might contain an alternate effect of higher priority. In that case, return the alternate effect. Otherwise, this acts as an identity function. """ if effect.__class__ is not ExonicSpliceSite: return effect if effect.alternate_effect is None: return effect splice_priority = effect_priority(effect) alternate_priority = effect_priority(effect.alternate_effect) if splice_priority > alternate_priority: return effect else: return effect.alternate_effect
python
def select_between_exonic_splice_site_and_alternate_effect(effect): """ If the given effect is an ExonicSpliceSite then it might contain an alternate effect of higher priority. In that case, return the alternate effect. Otherwise, this acts as an identity function. """ if effect.__class__ is not ExonicSpliceSite: return effect if effect.alternate_effect is None: return effect splice_priority = effect_priority(effect) alternate_priority = effect_priority(effect.alternate_effect) if splice_priority > alternate_priority: return effect else: return effect.alternate_effect
If the given effect is an ExonicSpliceSite then it might contain an alternate effect of higher priority. In that case, return the alternate effect. Otherwise, this acts as an identity function.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L316-L331
openvax/varcode
varcode/effects/effect_ordering.py
keep_max_priority_effects
def keep_max_priority_effects(effects): """ Given a list of effects, only keep the ones with the maximum priority effect type. Parameters ---------- effects : list of MutationEffect subclasses Returns list of same length or shorter """ priority_values = map(effect_priority, effects) max_priority = max(priority_values) return [e for (e, p) in zip(effects, priority_values) if p == max_priority]
python
def keep_max_priority_effects(effects): """ Given a list of effects, only keep the ones with the maximum priority effect type. Parameters ---------- effects : list of MutationEffect subclasses Returns list of same length or shorter """ priority_values = map(effect_priority, effects) max_priority = max(priority_values) return [e for (e, p) in zip(effects, priority_values) if p == max_priority]
Given a list of effects, only keep the ones with the maximum priority effect type. Parameters ---------- effects : list of MutationEffect subclasses Returns list of same length or shorter
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L334-L347
openvax/varcode
varcode/effects/effect_ordering.py
filter_pipeline
def filter_pipeline(effects, filters): """ Apply each filter to the effect list sequentially. If any filter returns zero values then ignore it. As soon as only one effect is left, return it. Parameters ---------- effects : list of MutationEffect subclass instances filters : list of functions Each function takes a list of effects and returns a list of effects Returns list of effects """ for filter_fn in filters: filtered_effects = filter_fn(effects) if len(effects) == 1: return effects elif len(filtered_effects) > 1: effects = filtered_effects return effects
python
def filter_pipeline(effects, filters): """ Apply each filter to the effect list sequentially. If any filter returns zero values then ignore it. As soon as only one effect is left, return it. Parameters ---------- effects : list of MutationEffect subclass instances filters : list of functions Each function takes a list of effects and returns a list of effects Returns list of effects """ for filter_fn in filters: filtered_effects = filter_fn(effects) if len(effects) == 1: return effects elif len(filtered_effects) > 1: effects = filtered_effects return effects
Apply each filter to the effect list sequentially. If any filter returns zero values then ignore it. As soon as only one effect is left, return it. Parameters ---------- effects : list of MutationEffect subclass instances filters : list of functions Each function takes a list of effects and returns a list of effects Returns list of effects
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L394-L415
openvax/varcode
varcode/effects/effect_ordering.py
top_priority_effect_for_single_gene
def top_priority_effect_for_single_gene(effects): """ For effects which are from the same gene, check to see if there is a canonical transcript with both the maximum length CDS and maximum length full transcript sequence. If not, then use number of exons and transcript name as tie-breaking features. Parameters ---------- effects : list of MutationEffect subclass instances Returns single effect object """ # first filter effects to keep those on # 1) maximum priority effects # 2) protein coding genes # 3) protein coding transcripts # 4) complete transcripts # # If any of these filters drop all the effects then we move on to the next # filtering step. effects = filter_pipeline( effects=effects, filters=[ keep_max_priority_effects, keep_effects_on_protein_coding_genes, keep_effects_on_protein_coding_transcripts, keep_effects_on_complete_transcripts, ], ) if len(effects) == 1: return effects[0] # compare CDS length and transcript lengths of remaining effects # if one effect has the maximum of both categories then return it cds_lengths = [cds_length_of_associated_transcript(e) for e in effects] max_cds_length = max(cds_lengths) # get set of indices of all effects with maximum CDS length max_cds_length_indices = { i for (i, l) in enumerate(cds_lengths) if l == max_cds_length } seq_lengths = [length_of_associated_transcript(e) for e in effects] max_seq_length = max(seq_lengths) # get set of indices for all effects whose associated transcript # has maximum sequence length max_seq_length_indices = { i for (i, l) in enumerate(seq_lengths) if l == max_seq_length } # which effects have transcripts with both the longest CDS and # longest full transcript sequence? 
intersection_of_indices = \ max_cds_length_indices.intersection(max_seq_length_indices) n_candidates = len(intersection_of_indices) if n_candidates == 1: best_index = intersection_of_indices.pop() return effects[best_index] elif n_candidates == 0: # if set of max CDS effects and max sequence length effects is disjoint # then let's try to do the tie-breaking sort over their union union_of_indices = max_cds_length_indices.union(max_seq_length_indices) candidate_effects = [effects[i] for i in union_of_indices] else: # if multiple effects have transcripts with the max CDS length and # the max full sequence length then run the tie-breaking sort # over all these candidates candidate_effects = [effects[i] for i in intersection_of_indices] # break ties by number of exons, whether name of transcript ends if "01", # and all else being equal, prefer transcript names that end with lower # numbers return max( candidate_effects, key=tie_breaking_sort_key_for_single_gene_effects)
python
def top_priority_effect_for_single_gene(effects): """ For effects which are from the same gene, check to see if there is a canonical transcript with both the maximum length CDS and maximum length full transcript sequence. If not, then use number of exons and transcript name as tie-breaking features. Parameters ---------- effects : list of MutationEffect subclass instances Returns single effect object """ # first filter effects to keep those on # 1) maximum priority effects # 2) protein coding genes # 3) protein coding transcripts # 4) complete transcripts # # If any of these filters drop all the effects then we move on to the next # filtering step. effects = filter_pipeline( effects=effects, filters=[ keep_max_priority_effects, keep_effects_on_protein_coding_genes, keep_effects_on_protein_coding_transcripts, keep_effects_on_complete_transcripts, ], ) if len(effects) == 1: return effects[0] # compare CDS length and transcript lengths of remaining effects # if one effect has the maximum of both categories then return it cds_lengths = [cds_length_of_associated_transcript(e) for e in effects] max_cds_length = max(cds_lengths) # get set of indices of all effects with maximum CDS length max_cds_length_indices = { i for (i, l) in enumerate(cds_lengths) if l == max_cds_length } seq_lengths = [length_of_associated_transcript(e) for e in effects] max_seq_length = max(seq_lengths) # get set of indices for all effects whose associated transcript # has maximum sequence length max_seq_length_indices = { i for (i, l) in enumerate(seq_lengths) if l == max_seq_length } # which effects have transcripts with both the longest CDS and # longest full transcript sequence? 
intersection_of_indices = \ max_cds_length_indices.intersection(max_seq_length_indices) n_candidates = len(intersection_of_indices) if n_candidates == 1: best_index = intersection_of_indices.pop() return effects[best_index] elif n_candidates == 0: # if set of max CDS effects and max sequence length effects is disjoint # then let's try to do the tie-breaking sort over their union union_of_indices = max_cds_length_indices.union(max_seq_length_indices) candidate_effects = [effects[i] for i in union_of_indices] else: # if multiple effects have transcripts with the max CDS length and # the max full sequence length then run the tie-breaking sort # over all these candidates candidate_effects = [effects[i] for i in intersection_of_indices] # break ties by number of exons, whether name of transcript ends if "01", # and all else being equal, prefer transcript names that end with lower # numbers return max( candidate_effects, key=tie_breaking_sort_key_for_single_gene_effects)
For effects which are from the same gene, check to see if there is a canonical transcript with both the maximum length CDS and maximum length full transcript sequence. If not, then use number of exons and transcript name as tie-breaking features. Parameters ---------- effects : list of MutationEffect subclass instances Returns single effect object
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L439-L526
openvax/varcode
varcode/effects/effect_ordering.py
top_priority_effect
def top_priority_effect(effects): """ Given a collection of variant transcript effects, return the top priority object. ExonicSpliceSite variants require special treatment since they actually represent two effects -- the splicing modification and whatever else would happen to the exonic sequence if nothing else gets changed. In cases where multiple transcripts give rise to multiple effects, use a variety of filtering and sorting heuristics to pick the canonical transcript. """ if len(effects) == 0: raise ValueError("List of effects cannot be empty") effects = map( select_between_exonic_splice_site_and_alternate_effect, effects) effects_grouped_by_gene = apply_groupby( effects, fn=gene_id_of_associated_transcript, skip_none=False) if None in effects_grouped_by_gene: effects_without_genes = effects_grouped_by_gene.pop(None) else: effects_without_genes = [] # if we had any effects associated with genes then choose one of those if len(effects_grouped_by_gene) > 0: effects_with_genes = [ top_priority_effect_for_single_gene(gene_effects) for gene_effects in effects_grouped_by_gene.values() ] return max(effects_with_genes, key=multi_gene_effect_sort_key) else: # if all effects were without genes then choose the best among those assert len(effects_without_genes) > 0 return max(effects_without_genes, key=multi_gene_effect_sort_key)
python
def top_priority_effect(effects): """ Given a collection of variant transcript effects, return the top priority object. ExonicSpliceSite variants require special treatment since they actually represent two effects -- the splicing modification and whatever else would happen to the exonic sequence if nothing else gets changed. In cases where multiple transcripts give rise to multiple effects, use a variety of filtering and sorting heuristics to pick the canonical transcript. """ if len(effects) == 0: raise ValueError("List of effects cannot be empty") effects = map( select_between_exonic_splice_site_and_alternate_effect, effects) effects_grouped_by_gene = apply_groupby( effects, fn=gene_id_of_associated_transcript, skip_none=False) if None in effects_grouped_by_gene: effects_without_genes = effects_grouped_by_gene.pop(None) else: effects_without_genes = [] # if we had any effects associated with genes then choose one of those if len(effects_grouped_by_gene) > 0: effects_with_genes = [ top_priority_effect_for_single_gene(gene_effects) for gene_effects in effects_grouped_by_gene.values() ] return max(effects_with_genes, key=multi_gene_effect_sort_key) else: # if all effects were without genes then choose the best among those assert len(effects_without_genes) > 0 return max(effects_without_genes, key=multi_gene_effect_sort_key)
Given a collection of variant transcript effects, return the top priority object. ExonicSpliceSite variants require special treatment since they actually represent two effects -- the splicing modification and whatever else would happen to the exonic sequence if nothing else gets changed. In cases where multiple transcripts give rise to multiple effects, use a variety of filtering and sorting heuristics to pick the canonical transcript.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_ordering.py#L529-L564
openvax/varcode
varcode/variant_collection.py
VariantCollection.to_dict
def to_dict(self): """ Since Collection.to_dict() returns a state dictionary with an 'elements' field we have to rename it to 'variants'. """ return dict( variants=self.variants, distinct=self.distinct, sort_key=self.sort_key, sources=self.sources, source_to_metadata_dict=self.source_to_metadata_dict)
python
def to_dict(self): """ Since Collection.to_dict() returns a state dictionary with an 'elements' field we have to rename it to 'variants'. """ return dict( variants=self.variants, distinct=self.distinct, sort_key=self.sort_key, sources=self.sources, source_to_metadata_dict=self.source_to_metadata_dict)
Since Collection.to_dict() returns a state dictionary with an 'elements' field we have to rename it to 'variants'.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L83-L93
openvax/varcode
varcode/variant_collection.py
VariantCollection.clone_with_new_elements
def clone_with_new_elements(self, new_elements): """ Create another VariantCollection of the same class and with same state (including metadata) but possibly different entries. Warning: metadata is a dictionary keyed by variants. This method leaves that dictionary as-is, which may result in extraneous entries or missing entries. """ kwargs = self.to_dict() kwargs["variants"] = new_elements return self.from_dict(kwargs)
python
def clone_with_new_elements(self, new_elements): """ Create another VariantCollection of the same class and with same state (including metadata) but possibly different entries. Warning: metadata is a dictionary keyed by variants. This method leaves that dictionary as-is, which may result in extraneous entries or missing entries. """ kwargs = self.to_dict() kwargs["variants"] = new_elements return self.from_dict(kwargs)
Create another VariantCollection of the same class and with same state (including metadata) but possibly different entries. Warning: metadata is a dictionary keyed by variants. This method leaves that dictionary as-is, which may result in extraneous entries or missing entries.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L95-L106
openvax/varcode
varcode/variant_collection.py
VariantCollection.effects
def effects(self, raise_on_error=True): """ Parameters ---------- raise_on_error : bool, optional If exception is raised while determining effect of variant on a transcript, should it be raised? This default is True, meaning errors result in raised exceptions, otherwise they are only logged. """ return EffectCollection([ effect for variant in self for effect in variant.effects(raise_on_error=raise_on_error) ])
python
def effects(self, raise_on_error=True): """ Parameters ---------- raise_on_error : bool, optional If exception is raised while determining effect of variant on a transcript, should it be raised? This default is True, meaning errors result in raised exceptions, otherwise they are only logged. """ return EffectCollection([ effect for variant in self for effect in variant.effects(raise_on_error=raise_on_error) ])
Parameters ---------- raise_on_error : bool, optional If exception is raised while determining effect of variant on a transcript, should it be raised? This default is True, meaning errors result in raised exceptions, otherwise they are only logged.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L108-L122
openvax/varcode
varcode/variant_collection.py
VariantCollection.gene_counts
def gene_counts(self): """ Returns number of elements overlapping each gene name. Expects the derived class (VariantCollection or EffectCollection) to have an implementation of groupby_gene_name. """ return { gene_name: len(group) for (gene_name, group) in self.groupby_gene_name().items() }
python
def gene_counts(self): """ Returns number of elements overlapping each gene name. Expects the derived class (VariantCollection or EffectCollection) to have an implementation of groupby_gene_name. """ return { gene_name: len(group) for (gene_name, group) in self.groupby_gene_name().items() }
Returns number of elements overlapping each gene name. Expects the derived class (VariantCollection or EffectCollection) to have an implementation of groupby_gene_name.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L145-L155
openvax/varcode
varcode/variant_collection.py
VariantCollection.filter_by_transcript_expression
def filter_by_transcript_expression( self, transcript_expression_dict, min_expression_value=0.0): """ Filters variants down to those which have overlap a transcript whose expression value in the transcript_expression_dict argument is greater than min_expression_value. Parameters ---------- transcript_expression_dict : dict Dictionary mapping Ensembl transcript IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection """ return self.filter_any_above_threshold( multi_key_fn=lambda variant: variant.transcript_ids, value_dict=transcript_expression_dict, threshold=min_expression_value)
python
def filter_by_transcript_expression( self, transcript_expression_dict, min_expression_value=0.0): """ Filters variants down to those which have overlap a transcript whose expression value in the transcript_expression_dict argument is greater than min_expression_value. Parameters ---------- transcript_expression_dict : dict Dictionary mapping Ensembl transcript IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection """ return self.filter_any_above_threshold( multi_key_fn=lambda variant: variant.transcript_ids, value_dict=transcript_expression_dict, threshold=min_expression_value)
Filters variants down to those which have overlap a transcript whose expression value in the transcript_expression_dict argument is greater than min_expression_value. Parameters ---------- transcript_expression_dict : dict Dictionary mapping Ensembl transcript IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L175-L196
openvax/varcode
varcode/variant_collection.py
VariantCollection.filter_by_gene_expression
def filter_by_gene_expression( self, gene_expression_dict, min_expression_value=0.0): """ Filters variants down to those which have overlap a gene whose expression value in the transcript_expression_dict argument is greater than min_expression_value. Parameters ---------- gene_expression_dict : dict Dictionary mapping Ensembl gene IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection """ return self.filter_any_above_threshold( multi_key_fn=lambda effect: effect.gene_ids, value_dict=gene_expression_dict, threshold=min_expression_value)
python
def filter_by_gene_expression( self, gene_expression_dict, min_expression_value=0.0): """ Filters variants down to those which have overlap a gene whose expression value in the transcript_expression_dict argument is greater than min_expression_value. Parameters ---------- gene_expression_dict : dict Dictionary mapping Ensembl gene IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection """ return self.filter_any_above_threshold( multi_key_fn=lambda effect: effect.gene_ids, value_dict=gene_expression_dict, threshold=min_expression_value)
Filters variants down to those which have overlap a gene whose expression value in the transcript_expression_dict argument is greater than min_expression_value. Parameters ---------- gene_expression_dict : dict Dictionary mapping Ensembl gene IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L198-L219
openvax/varcode
varcode/variant_collection.py
VariantCollection.exactly_equal
def exactly_equal(self, other): ''' Comparison between VariantCollection instances that takes into account the info field of Variant instances. Returns ---------- True if the variants in this collection equal the variants in the other collection. The Variant.info fields are included in the comparison. ''' return ( self.__class__ == other.__class__ and len(self) == len(other) and all(x.exactly_equal(y) for (x, y) in zip(self, other)))
python
def exactly_equal(self, other): ''' Comparison between VariantCollection instances that takes into account the info field of Variant instances. Returns ---------- True if the variants in this collection equal the variants in the other collection. The Variant.info fields are included in the comparison. ''' return ( self.__class__ == other.__class__ and len(self) == len(other) and all(x.exactly_equal(y) for (x, y) in zip(self, other)))
Comparison between VariantCollection instances that takes into account the info field of Variant instances. Returns ---------- True if the variants in this collection equal the variants in the other collection. The Variant.info fields are included in the comparison.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L221-L234
openvax/varcode
varcode/variant_collection.py
VariantCollection._merge_metadata_dictionaries
def _merge_metadata_dictionaries(cls, dictionaries): """ Helper function for combining variant collections: given multiple dictionaries mapping: source name -> (variant -> (attribute -> value)) Returns dictionary with union of all variants and sources. """ # three levels of nested dictionaries! # {source name: {variant: {attribute: value}}} combined_dictionary = {} for source_to_metadata_dict in dictionaries: for source_name, variant_to_metadata_dict in source_to_metadata_dict.items(): combined_dictionary.setdefault(source_name, {}) combined_source_dict = combined_dictionary[source_name] for variant, metadata_dict in variant_to_metadata_dict.items(): combined_source_dict.setdefault(variant, {}) combined_source_dict[variant].update(metadata_dict) return combined_dictionary
python
def _merge_metadata_dictionaries(cls, dictionaries): """ Helper function for combining variant collections: given multiple dictionaries mapping: source name -> (variant -> (attribute -> value)) Returns dictionary with union of all variants and sources. """ # three levels of nested dictionaries! # {source name: {variant: {attribute: value}}} combined_dictionary = {} for source_to_metadata_dict in dictionaries: for source_name, variant_to_metadata_dict in source_to_metadata_dict.items(): combined_dictionary.setdefault(source_name, {}) combined_source_dict = combined_dictionary[source_name] for variant, metadata_dict in variant_to_metadata_dict.items(): combined_source_dict.setdefault(variant, {}) combined_source_dict[variant].update(metadata_dict) return combined_dictionary
Helper function for combining variant collections: given multiple dictionaries mapping: source name -> (variant -> (attribute -> value)) Returns dictionary with union of all variants and sources.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L237-L255
openvax/varcode
varcode/variant_collection.py
VariantCollection._combine_variant_collections
def _combine_variant_collections(cls, combine_fn, variant_collections, kwargs): """ Create a single VariantCollection from multiple different collections. Parameters ---------- cls : class Should be VariantCollection combine_fn : function Function which takes any number of sets of variants and returns some combination of them (typically union or intersection). variant_collections : tuple of VariantCollection kwargs : dict Optional dictionary of keyword arguments to pass to the initializer for VariantCollection. """ kwargs["variants"] = combine_fn(*[set(vc) for vc in variant_collections]) kwargs["source_to_metadata_dict"] = cls._merge_metadata_dictionaries( [vc.source_to_metadata_dict for vc in variant_collections]) kwargs["sources"] = set.union(*([vc.sources for vc in variant_collections])) for key, value in variant_collections[0].to_dict().items(): # If some optional parameter isn't explicitly specified as an # argument to union() or intersection() then use the same value # as the first VariantCollection. # # I'm doing this so that the meaning of VariantCollection.union # and VariantCollection.intersection with a single argument is # the identity function (rather than setting optional parameters # to their default values. if key not in kwargs: kwargs[key] = value return cls(**kwargs)
python
def _combine_variant_collections(cls, combine_fn, variant_collections, kwargs): """ Create a single VariantCollection from multiple different collections. Parameters ---------- cls : class Should be VariantCollection combine_fn : function Function which takes any number of sets of variants and returns some combination of them (typically union or intersection). variant_collections : tuple of VariantCollection kwargs : dict Optional dictionary of keyword arguments to pass to the initializer for VariantCollection. """ kwargs["variants"] = combine_fn(*[set(vc) for vc in variant_collections]) kwargs["source_to_metadata_dict"] = cls._merge_metadata_dictionaries( [vc.source_to_metadata_dict for vc in variant_collections]) kwargs["sources"] = set.union(*([vc.sources for vc in variant_collections])) for key, value in variant_collections[0].to_dict().items(): # If some optional parameter isn't explicitly specified as an # argument to union() or intersection() then use the same value # as the first VariantCollection. # # I'm doing this so that the meaning of VariantCollection.union # and VariantCollection.intersection with a single argument is # the identity function (rather than setting optional parameters # to their default values. if key not in kwargs: kwargs[key] = value return cls(**kwargs)
Create a single VariantCollection from multiple different collections. Parameters ---------- cls : class Should be VariantCollection combine_fn : function Function which takes any number of sets of variants and returns some combination of them (typically union or intersection). variant_collections : tuple of VariantCollection kwargs : dict Optional dictionary of keyword arguments to pass to the initializer for VariantCollection.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L258-L293
openvax/varcode
varcode/variant_collection.py
VariantCollection.union
def union(self, *others, **kwargs): """ Returns the union of variants in a several VariantCollection objects. """ return self._combine_variant_collections( combine_fn=set.union, variant_collections=(self,) + others, kwargs=kwargs)
python
def union(self, *others, **kwargs): """ Returns the union of variants in a several VariantCollection objects. """ return self._combine_variant_collections( combine_fn=set.union, variant_collections=(self,) + others, kwargs=kwargs)
Returns the union of variants in a several VariantCollection objects.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L295-L302
openvax/varcode
varcode/variant_collection.py
VariantCollection.intersection
def intersection(self, *others, **kwargs): """ Returns the intersection of variants in several VariantCollection objects. """ return self._combine_variant_collections( combine_fn=set.intersection, variant_collections=(self,) + others, kwargs=kwargs)
python
def intersection(self, *others, **kwargs): """ Returns the intersection of variants in several VariantCollection objects. """ return self._combine_variant_collections( combine_fn=set.intersection, variant_collections=(self,) + others, kwargs=kwargs)
Returns the intersection of variants in several VariantCollection objects.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L304-L311
openvax/varcode
varcode/variant_collection.py
VariantCollection.to_dataframe
def to_dataframe(self): """Build a DataFrame from this variant collection""" def row_from_variant(variant): return OrderedDict([ ("chr", variant.contig), ("start", variant.original_start), ("ref", variant.original_ref), ("alt", variant.original_alt), ("gene_name", ";".join(variant.gene_names)), ("gene_id", ";".join(variant.gene_ids)) ]) rows = [row_from_variant(v) for v in self] if len(rows) == 0: # TODO: return a DataFrame with the appropriate columns return pd.DataFrame() return pd.DataFrame.from_records(rows, columns=rows[0].keys())
python
def to_dataframe(self): """Build a DataFrame from this variant collection""" def row_from_variant(variant): return OrderedDict([ ("chr", variant.contig), ("start", variant.original_start), ("ref", variant.original_ref), ("alt", variant.original_alt), ("gene_name", ";".join(variant.gene_names)), ("gene_id", ";".join(variant.gene_ids)) ]) rows = [row_from_variant(v) for v in self] if len(rows) == 0: # TODO: return a DataFrame with the appropriate columns return pd.DataFrame() return pd.DataFrame.from_records(rows, columns=rows[0].keys())
Build a DataFrame from this variant collection
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/variant_collection.py#L313-L328
openvax/varcode
varcode/string_helpers.py
trim_shared_prefix
def trim_shared_prefix(ref, alt): """ Sometimes mutations are given with a shared prefix between the reference and alternate strings. Examples: C>CT (nucleotides) or GYFP>G (amino acids). This function trims the common prefix and returns the disjoint ref and alt strings, along with the shared prefix. """ n_ref = len(ref) n_alt = len(alt) n_min = min(n_ref, n_alt) i = 0 while i < n_min and ref[i] == alt[i]: i += 1 # guaranteed that ref and alt agree on all the characters # up to i'th position, so it doesn't matter which one we pull # the prefix out of prefix = ref[:i] ref_suffix = ref[i:] alt_suffix = alt[i:] return ref_suffix, alt_suffix, prefix
python
def trim_shared_prefix(ref, alt): """ Sometimes mutations are given with a shared prefix between the reference and alternate strings. Examples: C>CT (nucleotides) or GYFP>G (amino acids). This function trims the common prefix and returns the disjoint ref and alt strings, along with the shared prefix. """ n_ref = len(ref) n_alt = len(alt) n_min = min(n_ref, n_alt) i = 0 while i < n_min and ref[i] == alt[i]: i += 1 # guaranteed that ref and alt agree on all the characters # up to i'th position, so it doesn't matter which one we pull # the prefix out of prefix = ref[:i] ref_suffix = ref[i:] alt_suffix = alt[i:] return ref_suffix, alt_suffix, prefix
Sometimes mutations are given with a shared prefix between the reference and alternate strings. Examples: C>CT (nucleotides) or GYFP>G (amino acids). This function trims the common prefix and returns the disjoint ref and alt strings, along with the shared prefix.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/string_helpers.py#L18-L39
openvax/varcode
varcode/string_helpers.py
trim_shared_suffix
def trim_shared_suffix(ref, alt): """ Reuse the `trim_shared_prefix` function above to implement similar functionality for string suffixes. Given ref='ABC' and alt='BC', we first revese both strings: reverse_ref = 'CBA' reverse_alt = 'CB' and then the result of calling trim_shared_prefix will be: ('A', '', 'CB') We then reverse all three of the result strings to get back the shared suffix and both prefixes leading up to it: ('A', '', 'BC') """ n_ref = len(ref) n_alt = len(alt) n_min = min(n_ref, n_alt) i = 0 while i < n_min and ref[-i - 1] == alt[-i - 1]: i += 1 # i is length of shared suffix. if i == 0: return (ref, alt, '') return (ref[:-i], alt[:-i], ref[-i:])
python
def trim_shared_suffix(ref, alt): """ Reuse the `trim_shared_prefix` function above to implement similar functionality for string suffixes. Given ref='ABC' and alt='BC', we first revese both strings: reverse_ref = 'CBA' reverse_alt = 'CB' and then the result of calling trim_shared_prefix will be: ('A', '', 'CB') We then reverse all three of the result strings to get back the shared suffix and both prefixes leading up to it: ('A', '', 'BC') """ n_ref = len(ref) n_alt = len(alt) n_min = min(n_ref, n_alt) i = 0 while i < n_min and ref[-i - 1] == alt[-i - 1]: i += 1 # i is length of shared suffix. if i == 0: return (ref, alt, '') return (ref[:-i], alt[:-i], ref[-i:])
Reuse the `trim_shared_prefix` function above to implement similar functionality for string suffixes. Given ref='ABC' and alt='BC', we first revese both strings: reverse_ref = 'CBA' reverse_alt = 'CB' and then the result of calling trim_shared_prefix will be: ('A', '', 'CB') We then reverse all three of the result strings to get back the shared suffix and both prefixes leading up to it: ('A', '', 'BC')
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/string_helpers.py#L42-L66
openvax/varcode
varcode/string_helpers.py
trim_shared_flanking_strings
def trim_shared_flanking_strings(ref, alt): """ Given two nucleotide or amino acid strings, identify if they have a common prefix, a common suffix, and return their unique components along with the prefix and suffix. For example, if the input ref = "SYFFQGR" and alt = "SYMLLFIFQGR" then the result will be: ("F", "MLLFI", "SY", "FQGR") """ ref, alt, prefix = trim_shared_prefix(ref, alt) ref, alt, suffix = trim_shared_suffix(ref, alt) return ref, alt, prefix, suffix
python
def trim_shared_flanking_strings(ref, alt): """ Given two nucleotide or amino acid strings, identify if they have a common prefix, a common suffix, and return their unique components along with the prefix and suffix. For example, if the input ref = "SYFFQGR" and alt = "SYMLLFIFQGR" then the result will be: ("F", "MLLFI", "SY", "FQGR") """ ref, alt, prefix = trim_shared_prefix(ref, alt) ref, alt, suffix = trim_shared_suffix(ref, alt) return ref, alt, prefix, suffix
Given two nucleotide or amino acid strings, identify if they have a common prefix, a common suffix, and return their unique components along with the prefix and suffix. For example, if the input ref = "SYFFQGR" and alt = "SYMLLFIFQGR" then the result will be: ("F", "MLLFI", "SY", "FQGR")
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/string_helpers.py#L69-L81
openvax/varcode
varcode/cli/effects_script.py
main
def main(args_list=None): """ Script which loads variants and annotates them with overlapping genes and predicted coding effects. Example usage: varcode --vcf mutect.vcf \ --vcf strelka.vcf \ --maf tcga_brca.maf \ --variant chr1 498584 C G \ --json-variants more_variants.json """ print_version_info() if args_list is None: args_list = sys.argv[1:] args = arg_parser.parse_args(args_list) variants = variant_collection_from_args(args) effects = variants.effects() if args.only_coding: effects = effects.drop_silent_and_noncoding() if args.one_per_variant: variant_to_effect_dict = effects.top_priority_effect_per_variant() effects = effects.clone_with_new_elements(list(variant_to_effect_dict.values())) effects_dataframe = effects.to_dataframe() logger.info('\n%s', effects) if args.output_csv: effects_dataframe.to_csv(args.output_csv, index=False)
python
def main(args_list=None): """ Script which loads variants and annotates them with overlapping genes and predicted coding effects. Example usage: varcode --vcf mutect.vcf \ --vcf strelka.vcf \ --maf tcga_brca.maf \ --variant chr1 498584 C G \ --json-variants more_variants.json """ print_version_info() if args_list is None: args_list = sys.argv[1:] args = arg_parser.parse_args(args_list) variants = variant_collection_from_args(args) effects = variants.effects() if args.only_coding: effects = effects.drop_silent_and_noncoding() if args.one_per_variant: variant_to_effect_dict = effects.top_priority_effect_per_variant() effects = effects.clone_with_new_elements(list(variant_to_effect_dict.values())) effects_dataframe = effects.to_dataframe() logger.info('\n%s', effects) if args.output_csv: effects_dataframe.to_csv(args.output_csv, index=False)
Script which loads variants and annotates them with overlapping genes and predicted coding effects. Example usage: varcode --vcf mutect.vcf \ --vcf strelka.vcf \ --maf tcga_brca.maf \ --variant chr1 498584 C G \ --json-variants more_variants.json
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/cli/effects_script.py#L48-L77
openvax/varcode
varcode/effects/effect_prediction_coding_in_frame.py
get_codons
def get_codons( variant, trimmed_cdna_ref, trimmed_cdna_alt, sequence_from_start_codon, cds_offset): """ Returns indices of first and last reference codons affected by the variant, as well as the actual sequence of the mutated codons which replace those reference codons. Parameters ---------- variant : Variant trimmed_cdna_ref : str Trimmed reference cDNA nucleotides affected by the variant trimmed_cdna_alt : str Trimmed alternate cDNA nucleotides which replace the reference sequence_from_start_codon : str cDNA nucleotide coding sequence cds_offset : int Integer offset into the coding sequence where ref is replace with alt """ # index (starting from 0) of first affected reference codon ref_codon_start_offset = cds_offset // 3 # which nucleotide of the first codon got changed? nucleotide_offset_into_first_ref_codon = cds_offset % 3 n_ref_nucleotides = len(trimmed_cdna_ref) if n_ref_nucleotides == 0: if nucleotide_offset_into_first_ref_codon == 2: # if we're inserting between codons ref_codon_end_offset = ref_codon_start_offset else: # inserting inside a reference codon ref_codon_end_offset = ref_codon_start_offset + 1 ref_codons = sequence_from_start_codon[ ref_codon_start_offset * 3:ref_codon_end_offset * 3] # split the reference codon into nucleotides before/after insertion prefix = ref_codons[:nucleotide_offset_into_first_ref_codon + 1] suffix = ref_codons[nucleotide_offset_into_first_ref_codon + 1:] else: ref_codon_end_offset = (cds_offset + n_ref_nucleotides - 1) // 3 + 1 # codons in the reference sequence ref_codons = sequence_from_start_codon[ ref_codon_start_offset * 3:ref_codon_end_offset * 3] # We construct the new codons by taking the unmodified prefix # of the first ref codon, the unmodified suffix of the last ref codon # and sticking the alt nucleotides in between. # Since this is supposed to be an in-frame mutation, the concatenated # nucleotide string is expected to have a length that is a multiple of # three. 
prefix = ref_codons[:nucleotide_offset_into_first_ref_codon] offset_in_last_ref_codon = (cds_offset + n_ref_nucleotides - 1) % 3 if offset_in_last_ref_codon == 0: suffix = ref_codons[-2:] elif offset_in_last_ref_codon == 1: suffix = ref_codons[-1:] else: suffix = "" mutant_codons = prefix + trimmed_cdna_alt + suffix assert len(mutant_codons) % 3 == 0, \ "Expected in-frame mutation but got %s (length = %d)" % ( mutant_codons, len(mutant_codons)) return ref_codon_start_offset, ref_codon_end_offset, mutant_codons
python
def get_codons( variant, trimmed_cdna_ref, trimmed_cdna_alt, sequence_from_start_codon, cds_offset): """ Returns indices of first and last reference codons affected by the variant, as well as the actual sequence of the mutated codons which replace those reference codons. Parameters ---------- variant : Variant trimmed_cdna_ref : str Trimmed reference cDNA nucleotides affected by the variant trimmed_cdna_alt : str Trimmed alternate cDNA nucleotides which replace the reference sequence_from_start_codon : str cDNA nucleotide coding sequence cds_offset : int Integer offset into the coding sequence where ref is replace with alt """ # index (starting from 0) of first affected reference codon ref_codon_start_offset = cds_offset // 3 # which nucleotide of the first codon got changed? nucleotide_offset_into_first_ref_codon = cds_offset % 3 n_ref_nucleotides = len(trimmed_cdna_ref) if n_ref_nucleotides == 0: if nucleotide_offset_into_first_ref_codon == 2: # if we're inserting between codons ref_codon_end_offset = ref_codon_start_offset else: # inserting inside a reference codon ref_codon_end_offset = ref_codon_start_offset + 1 ref_codons = sequence_from_start_codon[ ref_codon_start_offset * 3:ref_codon_end_offset * 3] # split the reference codon into nucleotides before/after insertion prefix = ref_codons[:nucleotide_offset_into_first_ref_codon + 1] suffix = ref_codons[nucleotide_offset_into_first_ref_codon + 1:] else: ref_codon_end_offset = (cds_offset + n_ref_nucleotides - 1) // 3 + 1 # codons in the reference sequence ref_codons = sequence_from_start_codon[ ref_codon_start_offset * 3:ref_codon_end_offset * 3] # We construct the new codons by taking the unmodified prefix # of the first ref codon, the unmodified suffix of the last ref codon # and sticking the alt nucleotides in between. # Since this is supposed to be an in-frame mutation, the concatenated # nucleotide string is expected to have a length that is a multiple of # three. 
prefix = ref_codons[:nucleotide_offset_into_first_ref_codon] offset_in_last_ref_codon = (cds_offset + n_ref_nucleotides - 1) % 3 if offset_in_last_ref_codon == 0: suffix = ref_codons[-2:] elif offset_in_last_ref_codon == 1: suffix = ref_codons[-1:] else: suffix = "" mutant_codons = prefix + trimmed_cdna_alt + suffix assert len(mutant_codons) % 3 == 0, \ "Expected in-frame mutation but got %s (length = %d)" % ( mutant_codons, len(mutant_codons)) return ref_codon_start_offset, ref_codon_end_offset, mutant_codons
Returns indices of first and last reference codons affected by the variant, as well as the actual sequence of the mutated codons which replace those reference codons. Parameters ---------- variant : Variant trimmed_cdna_ref : str Trimmed reference cDNA nucleotides affected by the variant trimmed_cdna_alt : str Trimmed alternate cDNA nucleotides which replace the reference sequence_from_start_codon : str cDNA nucleotide coding sequence cds_offset : int Integer offset into the coding sequence where ref is replace with alt
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction_coding_in_frame.py#L36-L107
openvax/varcode
varcode/effects/effect_prediction_coding_in_frame.py
predict_in_frame_coding_effect
def predict_in_frame_coding_effect( variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, sequence_from_start_codon, cds_offset): """Coding effect of an in-frame nucleotide change Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : str Reference nucleotides from the coding sequence of the transcript trimmed_cdna_alt : str Nucleotides to insert in place of the reference nucleotides sequence_from_start_codon : Bio.Seq or str Transcript sequence from the CDS start codon (including the 3' UTR). This sequence includes the 3' UTR since a mutation may delete the stop codon and we'll have to translate past the normal end of the CDS to determine the new protein sequence. cds_offset : int Index of first ref nucleotide, starting from 0 = beginning of coding sequence. If variant is a pure insertion (no ref nucleotides) then this argument indicates the offset *after* which to insert the alt nucleotides. """ ref_codon_start_offset, ref_codon_end_offset, mutant_codons = get_codons( variant=variant, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt, sequence_from_start_codon=sequence_from_start_codon, cds_offset=cds_offset) mutation_affects_start_codon = (ref_codon_start_offset == 0) if mutation_affects_start_codon and mutant_codons[:3] not in START_CODONS: # if we changed a start codon to something else then # we no longer know where the protein begins (or even in # what frame). 
# TODO: use the Kozak consensus sequence or a predictive model # to identify the most likely start site return StartLoss( variant=variant, transcript=transcript) # rely on Ensembl's annotation of the protein sequence since we can't # easily predict whether the starting nucleotide is a methionine # (most common) or leucine aa_ref = transcript.protein_sequence[ref_codon_start_offset:ref_codon_end_offset] reference_protein_length = len(transcript.protein_sequence) aa_alt, mutant_stop_codon_index, using_three_prime_utr = \ translate_in_frame_mutation( transcript=transcript, ref_codon_start_offset=ref_codon_start_offset, ref_codon_end_offset=ref_codon_end_offset, mutant_codons=mutant_codons) mutant_codons_contain_stop = mutant_stop_codon_index != -1 # trim shared subsequences at the start and end of reference # and mutated amino acid sequences aa_ref, aa_alt, shared_prefix, shared_suffix = \ trim_shared_flanking_strings( aa_ref, aa_alt) n_aa_ref = len(aa_ref) n_aa_alt = len(aa_alt) n_aa_shared = len(shared_prefix) is_insertion = (ref_codon_start_offset == ref_codon_end_offset) # index of first amino acid which is different from the reference aa_mutation_start_offset = ( ref_codon_start_offset + n_aa_shared + is_insertion) if mutant_codons_contain_stop: mutant_stop_codon_index += n_aa_shared if mutation_affects_start_codon and (aa_ref == aa_alt): # Substitution between start codons gets special treatment since, # though superficially synonymous, this could still potentially # cause a start loss / change in reading frame and might be worth # closer scrutiny return AlternateStartCodon( variant=variant, transcript=transcript, ref_codon=transcript.sequence[:3], alt_codon=mutant_codons[:3]) n_ref_amino_acids_after_mutated_site = ( reference_protein_length - aa_mutation_start_offset - 1) if mutant_codons_contain_stop and ( n_aa_alt <= n_ref_amino_acids_after_mutated_site): # if the new coding sequence contains a stop codon, then this is a # PrematureStop mutation if it 
decreases the length of the protein return PrematureStop( variant=variant, transcript=transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref, aa_alt=aa_alt) if (aa_mutation_start_offset > reference_protein_length) or ( n_aa_ref == n_aa_alt == 0): # if inserted nucleotides go after original stop codon or if nothing # is changed in the amino acid sequence then this is a Silent variant return Silent( variant=variant, transcript=transcript, aa_pos=aa_mutation_start_offset, aa_ref=shared_prefix + shared_suffix) elif using_three_prime_utr: # if non-silent mutation is at the end of the protein then # should be a stop-loss return StopLoss( variant, transcript, aa_ref=aa_ref, aa_alt=aa_alt) elif n_aa_alt == 0: return Deletion( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref) elif n_aa_ref == 0: return Insertion( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_alt=aa_alt) elif n_aa_ref == n_aa_alt == 1: # simple substitution e.g. p.V600E return Substitution( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref, aa_alt=aa_alt) else: # multiple amino acids were substituted e.g. p.VQQ39FF return ComplexSubstitution( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref, aa_alt=aa_alt)
python
def predict_in_frame_coding_effect( variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, sequence_from_start_codon, cds_offset): """Coding effect of an in-frame nucleotide change Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : str Reference nucleotides from the coding sequence of the transcript trimmed_cdna_alt : str Nucleotides to insert in place of the reference nucleotides sequence_from_start_codon : Bio.Seq or str Transcript sequence from the CDS start codon (including the 3' UTR). This sequence includes the 3' UTR since a mutation may delete the stop codon and we'll have to translate past the normal end of the CDS to determine the new protein sequence. cds_offset : int Index of first ref nucleotide, starting from 0 = beginning of coding sequence. If variant is a pure insertion (no ref nucleotides) then this argument indicates the offset *after* which to insert the alt nucleotides. """ ref_codon_start_offset, ref_codon_end_offset, mutant_codons = get_codons( variant=variant, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt, sequence_from_start_codon=sequence_from_start_codon, cds_offset=cds_offset) mutation_affects_start_codon = (ref_codon_start_offset == 0) if mutation_affects_start_codon and mutant_codons[:3] not in START_CODONS: # if we changed a start codon to something else then # we no longer know where the protein begins (or even in # what frame). 
# TODO: use the Kozak consensus sequence or a predictive model # to identify the most likely start site return StartLoss( variant=variant, transcript=transcript) # rely on Ensembl's annotation of the protein sequence since we can't # easily predict whether the starting nucleotide is a methionine # (most common) or leucine aa_ref = transcript.protein_sequence[ref_codon_start_offset:ref_codon_end_offset] reference_protein_length = len(transcript.protein_sequence) aa_alt, mutant_stop_codon_index, using_three_prime_utr = \ translate_in_frame_mutation( transcript=transcript, ref_codon_start_offset=ref_codon_start_offset, ref_codon_end_offset=ref_codon_end_offset, mutant_codons=mutant_codons) mutant_codons_contain_stop = mutant_stop_codon_index != -1 # trim shared subsequences at the start and end of reference # and mutated amino acid sequences aa_ref, aa_alt, shared_prefix, shared_suffix = \ trim_shared_flanking_strings( aa_ref, aa_alt) n_aa_ref = len(aa_ref) n_aa_alt = len(aa_alt) n_aa_shared = len(shared_prefix) is_insertion = (ref_codon_start_offset == ref_codon_end_offset) # index of first amino acid which is different from the reference aa_mutation_start_offset = ( ref_codon_start_offset + n_aa_shared + is_insertion) if mutant_codons_contain_stop: mutant_stop_codon_index += n_aa_shared if mutation_affects_start_codon and (aa_ref == aa_alt): # Substitution between start codons gets special treatment since, # though superficially synonymous, this could still potentially # cause a start loss / change in reading frame and might be worth # closer scrutiny return AlternateStartCodon( variant=variant, transcript=transcript, ref_codon=transcript.sequence[:3], alt_codon=mutant_codons[:3]) n_ref_amino_acids_after_mutated_site = ( reference_protein_length - aa_mutation_start_offset - 1) if mutant_codons_contain_stop and ( n_aa_alt <= n_ref_amino_acids_after_mutated_site): # if the new coding sequence contains a stop codon, then this is a # PrematureStop mutation if it 
decreases the length of the protein return PrematureStop( variant=variant, transcript=transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref, aa_alt=aa_alt) if (aa_mutation_start_offset > reference_protein_length) or ( n_aa_ref == n_aa_alt == 0): # if inserted nucleotides go after original stop codon or if nothing # is changed in the amino acid sequence then this is a Silent variant return Silent( variant=variant, transcript=transcript, aa_pos=aa_mutation_start_offset, aa_ref=shared_prefix + shared_suffix) elif using_three_prime_utr: # if non-silent mutation is at the end of the protein then # should be a stop-loss return StopLoss( variant, transcript, aa_ref=aa_ref, aa_alt=aa_alt) elif n_aa_alt == 0: return Deletion( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref) elif n_aa_ref == 0: return Insertion( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_alt=aa_alt) elif n_aa_ref == n_aa_alt == 1: # simple substitution e.g. p.V600E return Substitution( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref, aa_alt=aa_alt) else: # multiple amino acids were substituted e.g. p.VQQ39FF return ComplexSubstitution( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref, aa_alt=aa_alt)
Coding effect of an in-frame nucleotide change Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : str Reference nucleotides from the coding sequence of the transcript trimmed_cdna_alt : str Nucleotides to insert in place of the reference nucleotides sequence_from_start_codon : Bio.Seq or str Transcript sequence from the CDS start codon (including the 3' UTR). This sequence includes the 3' UTR since a mutation may delete the stop codon and we'll have to translate past the normal end of the CDS to determine the new protein sequence. cds_offset : int Index of first ref nucleotide, starting from 0 = beginning of coding sequence. If variant is a pure insertion (no ref nucleotides) then this argument indicates the offset *after* which to insert the alt nucleotides.
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction_coding_in_frame.py#L110-L267
openvax/varcode
varcode/cli/variant_args.py
add_variant_args
def add_variant_args(arg_parser): """ Extends an ArgumentParser instance with the following commandline arguments: --vcf --genome --maf --variant --json-variants """ variant_arg_group = arg_parser.add_argument_group( title="Variants", description="Genomic variant files") variant_arg_group.add_argument( "--vcf", default=[], action="append", help="Genomic variants in VCF format") variant_arg_group.add_argument( "--maf", default=[], action="append", help="Genomic variants in TCGA's MAF format",) variant_arg_group.add_argument( "--variant", default=[], action="append", nargs=4, metavar=("CHR", "POS", "REF", "ALT"), help=( "Individual variant as 4 arguments giving chromsome, position, ref," " and alt. Example: chr1 3848 C G. Use '.' to indicate empty alleles" " for insertions or deletions.")) variant_arg_group.add_argument( "--genome", type=str, help=( "What reference assembly your variant coordinates are using. " "Examples: 'hg19', 'GRCh38', or 'mm9'. " "This argument is ignored for MAF files, since each row includes " "the reference. " "For VCF files, this is used if specified, and otherwise is guessed from " "the header. For variants specfied on the commandline with --variant, " "this option is required.")) variant_arg_group.add_argument( "--download-reference-genome-data", action="store_true", default=False, help=( ("Automatically download genome reference data required for " "annotation using PyEnsembl. Otherwise you must first run " "'pyensembl install' for the release/species corresponding " "to the genome used in your VCF."))) variant_arg_group.add_argument( "--json-variants", default=[], action="append", help="Path to Varcode.VariantCollection object serialized as a JSON file.") return variant_arg_group
python
def add_variant_args(arg_parser): """ Extends an ArgumentParser instance with the following commandline arguments: --vcf --genome --maf --variant --json-variants """ variant_arg_group = arg_parser.add_argument_group( title="Variants", description="Genomic variant files") variant_arg_group.add_argument( "--vcf", default=[], action="append", help="Genomic variants in VCF format") variant_arg_group.add_argument( "--maf", default=[], action="append", help="Genomic variants in TCGA's MAF format",) variant_arg_group.add_argument( "--variant", default=[], action="append", nargs=4, metavar=("CHR", "POS", "REF", "ALT"), help=( "Individual variant as 4 arguments giving chromsome, position, ref," " and alt. Example: chr1 3848 C G. Use '.' to indicate empty alleles" " for insertions or deletions.")) variant_arg_group.add_argument( "--genome", type=str, help=( "What reference assembly your variant coordinates are using. " "Examples: 'hg19', 'GRCh38', or 'mm9'. " "This argument is ignored for MAF files, since each row includes " "the reference. " "For VCF files, this is used if specified, and otherwise is guessed from " "the header. For variants specfied on the commandline with --variant, " "this option is required.")) variant_arg_group.add_argument( "--download-reference-genome-data", action="store_true", default=False, help=( ("Automatically download genome reference data required for " "annotation using PyEnsembl. Otherwise you must first run " "'pyensembl install' for the release/species corresponding " "to the genome used in your VCF."))) variant_arg_group.add_argument( "--json-variants", default=[], action="append", help="Path to Varcode.VariantCollection object serialized as a JSON file.") return variant_arg_group
Extends an ArgumentParser instance with the following commandline arguments: --vcf --genome --maf --variant --json-variants
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/cli/variant_args.py#L26-L90
openvax/varcode
varcode/effects/mutate.py
insert_before
def insert_before(sequence, offset, new_residues): """Mutate the given sequence by inserting the string `new_residues` before `offset`. Parameters ---------- sequence : sequence String of amino acids or DNA bases offset : int Base 0 offset from start of sequence, after which we should insert `new_residues`. new_residues : sequence """ assert 0 < offset <= len(sequence), \ "Invalid position %d for sequence of length %d" % ( offset, len(sequence)) prefix = sequence[:offset] suffix = sequence[offset:] return prefix + new_residues + suffix
python
def insert_before(sequence, offset, new_residues): """Mutate the given sequence by inserting the string `new_residues` before `offset`. Parameters ---------- sequence : sequence String of amino acids or DNA bases offset : int Base 0 offset from start of sequence, after which we should insert `new_residues`. new_residues : sequence """ assert 0 < offset <= len(sequence), \ "Invalid position %d for sequence of length %d" % ( offset, len(sequence)) prefix = sequence[:offset] suffix = sequence[offset:] return prefix + new_residues + suffix
Mutate the given sequence by inserting the string `new_residues` before `offset`. Parameters ---------- sequence : sequence String of amino acids or DNA bases offset : int Base 0 offset from start of sequence, after which we should insert `new_residues`. new_residues : sequence
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/mutate.py#L18-L38