Dataset columns (name: type, value range):
repo: string, 7 to 55 characters
path: string, 4 to 127 characters
func_name: string, 1 to 88 characters
original_string: string, 75 to 19.8k characters
language: string, 1 distinct value
code: string, 75 to 19.8k characters
code_tokens: list
docstring: string, 3 to 17.3k characters
docstring_tokens: list
sha: string, 40 characters
url: string, 87 to 242 characters
partition: string, 1 distinct value
metakermit/django-spa
spa/storage.py
PatchedManifestStaticFilesStorage.url_converter
def url_converter(self, *args, **kwargs):
    """
    Return the custom URL converter for the given file name.
    """
    upstream_converter = super(PatchedManifestStaticFilesStorage, self).url_converter(*args, **kwargs)

    def converter(matchobj):
        try:
            upstream_converter(matchobj)
        except ValueError:
            # e.g. a static file 'static/media/logo.6a30f15f.svg' could not be found
            # because the upstream converter stripped 'static/' from the path
            matched, url = matchobj.groups()
            return matched

    return converter
python
[ "def", "url_converter", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "upstream_converter", "=", "super", "(", "PatchedManifestStaticFilesStorage", ",", "self", ")", ".", "url_converter", "(", "*", "args", ",", "*", "*", "kwargs", ")", "def", "converter", "(", "matchobj", ")", ":", "try", ":", "upstream_converter", "(", "matchobj", ")", "except", "ValueError", ":", "# e.g. a static file 'static/media/logo.6a30f15f.svg' could not be found", "# because the upstream converter stripped 'static/' from the path", "matched", ",", "url", "=", "matchobj", ".", "groups", "(", ")", "return", "matched", "return", "converter" ]
Return the custom URL converter for the given file name.
[ "Return", "the", "custom", "URL", "converter", "for", "the", "given", "file", "name", "." ]
dbdfa6d06c1077fade729db25b3b137d44299db6
https://github.com/metakermit/django-spa/blob/dbdfa6d06c1077fade729db25b3b137d44299db6/spa/storage.py#L12-L27
train
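The converter above only matters once the patched storage class is active in a Django project. A minimal settings sketch, assuming the class is importable as spa.storage.PatchedManifestStaticFilesStorage (matching the repo path and class name in this record) and a Django version that still reads the STATICFILES_STORAGE setting; the static paths are placeholders, not taken from django-spa's documentation:

# settings.py sketch; values are placeholders
STATICFILES_STORAGE = 'spa.storage.PatchedManifestStaticFilesStorage'
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'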
TriOptima/tri.table
lib/tri/table/__init__.py
order_by_on_list
def order_by_on_list(objects, order_field, is_desc=False):
    """
    Utility function to sort objects django-style even for non-query set collections
    :param objects: list of objects to sort
    :param order_field: field name, follows django conventions, so "foo__bar" means `foo.bar`, can be a callable.
    :param is_desc: reverse the sorting
    :return:
    """
    if callable(order_field):
        objects.sort(key=order_field, reverse=is_desc)
        return

    def order_key(x):
        v = getattr_path(x, order_field)
        if v is None:
            return MIN
        return v

    objects.sort(key=order_key, reverse=is_desc)
python
[ "def", "order_by_on_list", "(", "objects", ",", "order_field", ",", "is_desc", "=", "False", ")", ":", "if", "callable", "(", "order_field", ")", ":", "objects", ".", "sort", "(", "key", "=", "order_field", ",", "reverse", "=", "is_desc", ")", "return", "def", "order_key", "(", "x", ")", ":", "v", "=", "getattr_path", "(", "x", ",", "order_field", ")", "if", "v", "is", "None", ":", "return", "MIN", "return", "v", "objects", ".", "sort", "(", "key", "=", "order_key", ",", "reverse", "=", "is_desc", ")" ]
Utility function to sort objects django-style even for non-query set collections

:param objects: list of objects to sort
:param order_field: field name, follows django conventions, so "foo__bar" means `foo.bar`, can be a callable.
:param is_desc: reverse the sorting
:return:
[ "Utility", "function", "to", "sort", "objects", "django", "-", "style", "even", "for", "non", "-", "query", "set", "collections" ]
fc38c02098a80a3fb336ac4cf502954d74e31484
https://github.com/TriOptima/tri.table/blob/fc38c02098a80a3fb336ac4cf502954d74e31484/lib/tri/table/__init__.py#L153-L172
train
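A usage sketch for the sorter above. The classes and data are invented for illustration, and the import path is assumed from the record's lib/tri/table/__init__.py location; resolving the "artist__name" path relies on tri.table's getattr_path helper, exactly as the docstring describes:

from tri.table import order_by_on_list

class Artist:
    def __init__(self, name):
        self.name = name

class Album:
    def __init__(self, title, artist):
        self.title = title
        self.artist = artist

albums = [Album('Dookie', Artist('Green Day')),
          Album('Amok', Artist('Atoms for Peace'))]

# "artist__name" follows the django convention and resolves to album.artist.name
order_by_on_list(albums, 'artist__name')

# a callable is used directly as the sort key; is_desc reverses the order
order_by_on_list(albums, lambda album: album.title, is_desc=True)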
TriOptima/tri.table
lib/tri/table/__init__.py
render_table
def render_table(request,
                 table,
                 links=None,
                 context=None,
                 template='tri_table/list.html',
                 blank_on_empty=False,
                 paginate_by=40,  # pragma: no mutate
                 page=None,
                 paginator=None,
                 show_hits=False,
                 hit_label='Items',
                 post_bulk_edit=lambda table, queryset, updates: None):
    """
    Render a table. This automatically handles pagination, sorting, filtering and bulk operations.

    :param request: the request object. This is set on the table object so that it is available for lambda expressions.
    :param table: an instance of Table
    :param links: a list of instances of Link
    :param context: dict of extra context parameters
    :param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance.
    :param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty
    :param show_hits: Display how many items there are total in the paginator.
    :param hit_label: Label for the show_hits display.
    :return: a string with the rendered HTML table
    """
    if not context:
        context = {}

    if isinstance(table, Namespace):
        table = table()

    assert isinstance(table, Table), table
    table.request = request

    should_return, dispatch_result = handle_dispatch(request=request, obj=table)
    if should_return:
        return dispatch_result

    context['bulk_form'] = table.bulk_form
    context['query_form'] = table.query_form
    context['tri_query_error'] = table.query_error

    if table.bulk_form and request.method == 'POST':
        if table.bulk_form.is_valid():
            queryset = table.bulk_queryset()

            updates = {
                field.name: field.value
                for field in table.bulk_form.fields
                if field.value is not None and field.value != '' and field.attr is not None
            }
            queryset.update(**updates)

            post_bulk_edit(table=table, queryset=queryset, updates=updates)

            return HttpResponseRedirect(request.META['HTTP_REFERER'])

    table.context = table_context(
        request,
        table=table,
        links=links,
        paginate_by=paginate_by,
        page=page,
        extra_context=context,
        paginator=paginator,
        show_hits=show_hits,
        hit_label=hit_label,
    )

    if not table.data and blank_on_empty:
        return ''

    if table.query_form and not table.query_form.is_valid():
        table.data = None
        table.context['invalid_form_message'] = mark_safe('<i class="fa fa-meh-o fa-5x" aria-hidden="true"></i>')

    return render_template(request, template, table.context)
python
[ "def", "render_table", "(", "request", ",", "table", ",", "links", "=", "None", ",", "context", "=", "None", ",", "template", "=", "'tri_table/list.html'", ",", "blank_on_empty", "=", "False", ",", "paginate_by", "=", "40", ",", "# pragma: no mutate", "page", "=", "None", ",", "paginator", "=", "None", ",", "show_hits", "=", "False", ",", "hit_label", "=", "'Items'", ",", "post_bulk_edit", "=", "lambda", "table", ",", "queryset", ",", "updates", ":", "None", ")", ":", "if", "not", "context", ":", "context", "=", "{", "}", "if", "isinstance", "(", "table", ",", "Namespace", ")", ":", "table", "=", "table", "(", ")", "assert", "isinstance", "(", "table", ",", "Table", ")", ",", "table", "table", ".", "request", "=", "request", "should_return", ",", "dispatch_result", "=", "handle_dispatch", "(", "request", "=", "request", ",", "obj", "=", "table", ")", "if", "should_return", ":", "return", "dispatch_result", "context", "[", "'bulk_form'", "]", "=", "table", ".", "bulk_form", "context", "[", "'query_form'", "]", "=", "table", ".", "query_form", "context", "[", "'tri_query_error'", "]", "=", "table", ".", "query_error", "if", "table", ".", "bulk_form", "and", "request", ".", "method", "==", "'POST'", ":", "if", "table", ".", "bulk_form", ".", "is_valid", "(", ")", ":", "queryset", "=", "table", ".", "bulk_queryset", "(", ")", "updates", "=", "{", "field", ".", "name", ":", "field", ".", "value", "for", "field", "in", "table", ".", "bulk_form", ".", "fields", "if", "field", ".", "value", "is", "not", "None", "and", "field", ".", "value", "!=", "''", "and", "field", ".", "attr", "is", "not", "None", "}", "queryset", ".", "update", "(", "*", "*", "updates", ")", "post_bulk_edit", "(", "table", "=", "table", ",", "queryset", "=", "queryset", ",", "updates", "=", "updates", ")", "return", "HttpResponseRedirect", "(", "request", ".", "META", "[", "'HTTP_REFERER'", "]", ")", "table", ".", "context", "=", "table_context", "(", "request", ",", "table", "=", "table", ",", "links", "=", "links", ",", "paginate_by", "=", "paginate_by", ",", "page", "=", "page", ",", "extra_context", "=", "context", ",", "paginator", "=", "paginator", ",", "show_hits", "=", "show_hits", ",", "hit_label", "=", "hit_label", ",", ")", "if", "not", "table", ".", "data", "and", "blank_on_empty", ":", "return", "''", "if", "table", ".", "query_form", "and", "not", "table", ".", "query_form", ".", "is_valid", "(", ")", ":", "table", ".", "data", "=", "None", "table", ".", "context", "[", "'invalid_form_message'", "]", "=", "mark_safe", "(", "'<i class=\"fa fa-meh-o fa-5x\" aria-hidden=\"true\"></i>'", ")", "return", "render_template", "(", "request", ",", "template", ",", "table", ".", "context", ")" ]
Render a table. This automatically handles pagination, sorting, filtering and bulk operations.

:param request: the request object. This is set on the table object so that it is available for lambda expressions.
:param table: an instance of Table
:param links: a list of instances of Link
:param context: dict of extra context parameters
:param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance.
:param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty
:param show_hits: Display how many items there are total in the paginator.
:param hit_label: Label for the show_hits display.
:return: a string with the rendered HTML table
[ "Render", "a", "table", ".", "This", "automatically", "handles", "pagination", "sorting", "filtering", "and", "bulk", "operations", "." ]
fc38c02098a80a3fb336ac4cf502954d74e31484
https://github.com/TriOptima/tri.table/blob/fc38c02098a80a3fb336ac4cf502954d74e31484/lib/tri/table/__init__.py#L1595-L1671
train
infobloxopen/infoblox-client
infoblox_client/utils.py
generate_duid
def generate_duid(mac):
    """DUID is consisted of 10 hex numbers.

    0x00 + mac with last 3 hex + mac with 6 hex
    """
    valid = mac and isinstance(mac, six.string_types)
    if not valid:
        raise ValueError("Invalid argument was passed")
    return "00:" + mac[9:] + ":" + mac
python
[ "def", "generate_duid", "(", "mac", ")", ":", "valid", "=", "mac", "and", "isinstance", "(", "mac", ",", "six", ".", "string_types", ")", "if", "not", "valid", ":", "raise", "ValueError", "(", "\"Invalid argument was passed\"", ")", "return", "\"00:\"", "+", "mac", "[", "9", ":", "]", "+", "\":\"", "+", "mac" ]
DUID is consisted of 10 hex numbers. 0x00 + mac with last 3 hex + mac with 6 hex
[ "DUID", "is", "consisted", "of", "10", "hex", "numbers", "." ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/utils.py#L41-L49
train
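To make the slicing concrete: for a colon-separated MAC string, mac[9:] is the last three octets, so the result is 00 followed by those three octets and then the full six-octet MAC, ten hex groups in total. A small sketch with a made-up address:

from infoblox_client.utils import generate_duid

mac = 'aa:bb:cc:dd:ee:ff'   # made-up MAC address
# mac[9:] == 'dd:ee:ff', so the DUID is ten colon-separated hex groups
assert generate_duid(mac) == '00:dd:ee:ff:aa:bb:cc:dd:ee:ff'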
infobloxopen/infoblox-client
infoblox_client/utils.py
try_value_to_bool
def try_value_to_bool(value, strict_mode=True):
    """Tries to convert value into boolean.

    strict_mode is True:
        - Only string representation of str(True) and str(False)
          are converted into booleans;
        - Otherwise unchanged incoming value is returned;

    strict_mode is False:
        - Anything that looks like True or False is converted into booleans.
          Values accepted as True:
            - 'true', 'on', 'yes' (case independent)
          Values accepted as False:
            - 'false', 'off', 'no' (case independent)
        - all other values are returned unchanged
    """
    if strict_mode:
        true_list = ('True',)
        false_list = ('False',)
        val = value
    else:
        true_list = ('true', 'on', 'yes')
        false_list = ('false', 'off', 'no')
        val = str(value).lower()

    if val in true_list:
        return True
    elif val in false_list:
        return False
    return value
python
[ "def", "try_value_to_bool", "(", "value", ",", "strict_mode", "=", "True", ")", ":", "if", "strict_mode", ":", "true_list", "=", "(", "'True'", ",", ")", "false_list", "=", "(", "'False'", ",", ")", "val", "=", "value", "else", ":", "true_list", "=", "(", "'true'", ",", "'on'", ",", "'yes'", ")", "false_list", "=", "(", "'false'", ",", "'off'", ",", "'no'", ")", "val", "=", "str", "(", "value", ")", ".", "lower", "(", ")", "if", "val", "in", "true_list", ":", "return", "True", "elif", "val", "in", "false_list", ":", "return", "False", "return", "value" ]
Tries to convert value into boolean.

strict_mode is True:
    - Only string representation of str(True) and str(False) are converted into booleans;
    - Otherwise unchanged incoming value is returned;

strict_mode is False:
    - Anything that looks like True or False is converted into booleans.
      Values accepted as True:
        - 'true', 'on', 'yes' (case independent)
      Values accepted as False:
        - 'false', 'off', 'no' (case independent)
    - all other values are returned unchanged
[ "Tries", "to", "convert", "value", "into", "boolean", "." ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/utils.py#L85-L114
train
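A few illustrative calls showing the difference between the two modes; the input values are arbitrary:

from infoblox_client.utils import try_value_to_bool

# strict mode: only the exact str(True) / str(False) strings are converted
assert try_value_to_bool('True') is True
assert try_value_to_bool('true') == 'true'            # returned unchanged

# relaxed mode: anything that looks like a boolean is converted
assert try_value_to_bool('yes', strict_mode=False) is True
assert try_value_to_bool('OFF', strict_mode=False) is False
assert try_value_to_bool('maybe', strict_mode=False) == 'maybe'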
infobloxopen/infoblox-client
infoblox_client/object_manager.py
InfobloxObjectManager.create_network
def create_network(self, net_view_name, cidr, nameservers=None,
                   members=None, gateway_ip=None, dhcp_trel_ip=None,
                   network_extattrs=None):
    """Create NIOS Network and prepare DHCP options.

    Some DHCP options are valid for IPv4 only, so just skip processing
    them for IPv6 case.

    :param net_view_name: network view name
    :param cidr: network to allocate, example '172.23.23.0/24'
    :param nameservers: list of name servers hosts/ip
    :param members: list of objects.AnyMember objects that are expected
        to serve dhcp for created network
    :param gateway_ip: gateway ip for the network (valid for IPv4 only)
    :param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
    :param network_extattrs: extensible attributes for network
        (instance of objects.EA)
    :returns: created network (instance of objects.Network)
    """
    ipv4 = ib_utils.determine_ip_version(cidr) == 4
    options = []
    if nameservers:
        options.append(obj.DhcpOption(name='domain-name-servers',
                                      value=",".join(nameservers)))
    if ipv4 and gateway_ip:
        options.append(obj.DhcpOption(name='routers',
                                      value=gateway_ip))
    if ipv4 and dhcp_trel_ip:
        options.append(obj.DhcpOption(name='dhcp-server-identifier',
                                      num=54,
                                      value=dhcp_trel_ip))
    return obj.Network.create(self.connector,
                              network_view=net_view_name,
                              cidr=cidr,
                              members=members,
                              options=options,
                              extattrs=network_extattrs,
                              check_if_exists=False)
python
[ "def", "create_network", "(", "self", ",", "net_view_name", ",", "cidr", ",", "nameservers", "=", "None", ",", "members", "=", "None", ",", "gateway_ip", "=", "None", ",", "dhcp_trel_ip", "=", "None", ",", "network_extattrs", "=", "None", ")", ":", "ipv4", "=", "ib_utils", ".", "determine_ip_version", "(", "cidr", ")", "==", "4", "options", "=", "[", "]", "if", "nameservers", ":", "options", ".", "append", "(", "obj", ".", "DhcpOption", "(", "name", "=", "'domain-name-servers'", ",", "value", "=", "\",\"", ".", "join", "(", "nameservers", ")", ")", ")", "if", "ipv4", "and", "gateway_ip", ":", "options", ".", "append", "(", "obj", ".", "DhcpOption", "(", "name", "=", "'routers'", ",", "value", "=", "gateway_ip", ")", ")", "if", "ipv4", "and", "dhcp_trel_ip", ":", "options", ".", "append", "(", "obj", ".", "DhcpOption", "(", "name", "=", "'dhcp-server-identifier'", ",", "num", "=", "54", ",", "value", "=", "dhcp_trel_ip", ")", ")", "return", "obj", ".", "Network", ".", "create", "(", "self", ".", "connector", ",", "network_view", "=", "net_view_name", ",", "cidr", "=", "cidr", ",", "members", "=", "members", ",", "options", "=", "options", ",", "extattrs", "=", "network_extattrs", ",", "check_if_exists", "=", "False", ")" ]
Create NIOS Network and prepare DHCP options.

Some DHCP options are valid for IPv4 only, so just skip processing them for IPv6 case.

:param net_view_name: network view name
:param cidr: network to allocate, example '172.23.23.0/24'
:param nameservers: list of name servers hosts/ip
:param members: list of objects.AnyMember objects that are expected to serve dhcp for created network
:param gateway_ip: gateway ip for the network (valid for IPv4 only)
:param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
:param network_extattrs: extensible attributes for network (instance of objects.EA)
:returns: created network (instance of objects.Network)
[ "Create", "NIOS", "Network", "and", "prepare", "DHCP", "options", "." ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/object_manager.py#L58-L96
train
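A usage sketch for the manager method above, assuming a Connector configured with placeholder credentials and that InfobloxObjectManager is built directly from it, which is the usual wiring for this library; the view name and addresses are examples only:

from infoblox_client import connector, object_manager

conn = connector.Connector({'host': 'infoblox.example.com',
                            'username': 'admin',
                            'password': 'secret'})
manager = object_manager.InfobloxObjectManager(conn)

network = manager.create_network(
    net_view_name='default',
    cidr='172.23.23.0/24',
    nameservers=['172.23.23.2'],
    gateway_ip='172.23.23.1',   # IPv4-only: becomes a 'routers' DhcpOption
)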
infobloxopen/infoblox-client
infoblox_client/object_manager.py
InfobloxObjectManager.create_ip_range
def create_ip_range(self, network_view, start_ip, end_ip, network,
                    disable, range_extattrs):
    """Creates IPRange or fails if already exists."""
    return obj.IPRange.create(self.connector,
                              network_view=network_view,
                              start_addr=start_ip,
                              end_addr=end_ip,
                              cidr=network,
                              disable=disable,
                              extattrs=range_extattrs,
                              check_if_exists=False)
python
[ "def", "create_ip_range", "(", "self", ",", "network_view", ",", "start_ip", ",", "end_ip", ",", "network", ",", "disable", ",", "range_extattrs", ")", ":", "return", "obj", ".", "IPRange", ".", "create", "(", "self", ".", "connector", ",", "network_view", "=", "network_view", ",", "start_addr", "=", "start_ip", ",", "end_addr", "=", "end_ip", ",", "cidr", "=", "network", ",", "disable", "=", "disable", ",", "extattrs", "=", "range_extattrs", ",", "check_if_exists", "=", "False", ")" ]
Creates IPRange or fails if already exists.
[ "Creates", "IPRange", "or", "fails", "if", "already", "exists", "." ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/object_manager.py#L103-L113
train
infobloxopen/infoblox-client
infoblox_client/connector.py
Connector._parse_options
def _parse_options(self, options):
    """Copy needed options to self"""
    attributes = ('host', 'wapi_version', 'username', 'password',
                  'ssl_verify', 'http_request_timeout', 'max_retries',
                  'http_pool_connections', 'http_pool_maxsize',
                  'silent_ssl_warnings', 'log_api_calls_as_info',
                  'max_results', 'paging')
    for attr in attributes:
        if isinstance(options, dict) and attr in options:
            setattr(self, attr, options[attr])
        elif hasattr(options, attr):
            value = getattr(options, attr)
            setattr(self, attr, value)
        elif attr in self.DEFAULT_OPTIONS:
            setattr(self, attr, self.DEFAULT_OPTIONS[attr])
        else:
            msg = "WAPI config error. Option %s is not defined" % attr
            raise ib_ex.InfobloxConfigException(msg=msg)

    for attr in ('host', 'username', 'password'):
        if not getattr(self, attr):
            msg = "WAPI config error. Option %s can not be blank" % attr
            raise ib_ex.InfobloxConfigException(msg=msg)

    self.wapi_url = "https://%s/wapi/v%s/" % (self.host,
                                              self.wapi_version)
    self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version)
python
[ "def", "_parse_options", "(", "self", ",", "options", ")", ":", "attributes", "=", "(", "'host'", ",", "'wapi_version'", ",", "'username'", ",", "'password'", ",", "'ssl_verify'", ",", "'http_request_timeout'", ",", "'max_retries'", ",", "'http_pool_connections'", ",", "'http_pool_maxsize'", ",", "'silent_ssl_warnings'", ",", "'log_api_calls_as_info'", ",", "'max_results'", ",", "'paging'", ")", "for", "attr", "in", "attributes", ":", "if", "isinstance", "(", "options", ",", "dict", ")", "and", "attr", "in", "options", ":", "setattr", "(", "self", ",", "attr", ",", "options", "[", "attr", "]", ")", "elif", "hasattr", "(", "options", ",", "attr", ")", ":", "value", "=", "getattr", "(", "options", ",", "attr", ")", "setattr", "(", "self", ",", "attr", ",", "value", ")", "elif", "attr", "in", "self", ".", "DEFAULT_OPTIONS", ":", "setattr", "(", "self", ",", "attr", ",", "self", ".", "DEFAULT_OPTIONS", "[", "attr", "]", ")", "else", ":", "msg", "=", "\"WAPI config error. Option %s is not defined\"", "%", "attr", "raise", "ib_ex", ".", "InfobloxConfigException", "(", "msg", "=", "msg", ")", "for", "attr", "in", "(", "'host'", ",", "'username'", ",", "'password'", ")", ":", "if", "not", "getattr", "(", "self", ",", "attr", ")", ":", "msg", "=", "\"WAPI config error. Option %s can not be blank\"", "%", "attr", "raise", "ib_ex", ".", "InfobloxConfigException", "(", "msg", "=", "msg", ")", "self", ".", "wapi_url", "=", "\"https://%s/wapi/v%s/\"", "%", "(", "self", ".", "host", ",", "self", ".", "wapi_version", ")", "self", ".", "cloud_api_enabled", "=", "self", ".", "is_cloud_wapi", "(", "self", ".", "wapi_version", ")" ]
Copy needed options to self
[ "Copy", "needed", "options", "to", "self" ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L89-L115
train
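The parser above accepts either a plain dict or any object exposing the same attribute names, falls back to DEFAULT_OPTIONS for anything omitted, and rejects a blank host, username or password. A minimal sketch of the dict form with placeholder values:

from infoblox_client import connector

opts = {
    'host': 'infoblox.example.com',   # required, may not be blank
    'username': 'admin',              # required
    'password': 'secret',             # required
    'wapi_version': '2.5',            # optional, otherwise the default is used
    'ssl_verify': False,
}
conn = connector.Connector(opts)
# after parsing, the WAPI base URL is derived from host and wapi_version:
# conn.wapi_url == 'https://infoblox.example.com/wapi/v2.5/'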
infobloxopen/infoblox-client
infoblox_client/connector.py
Connector._parse_reply
def _parse_reply(request):
    """Tries to parse reply from NIOS.

    Raises exception with content if reply is not in json format
    """
    try:
        return jsonutils.loads(request.content)
    except ValueError:
        raise ib_ex.InfobloxConnectionError(reason=request.content)
python
[ "def", "_parse_reply", "(", "request", ")", ":", "try", ":", "return", "jsonutils", ".", "loads", "(", "request", ".", "content", ")", "except", "ValueError", ":", "raise", "ib_ex", ".", "InfobloxConnectionError", "(", "reason", "=", "request", ".", "content", ")" ]
Tries to parse reply from NIOS. Raises exception with content if reply is not in json format
[ "Tries", "to", "parse", "reply", "from", "NIOS", "." ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L212-L220
train
infobloxopen/infoblox-client
infoblox_client/connector.py
Connector.get_object
def get_object(self, obj_type, payload=None, return_fields=None,
               extattrs=None, force_proxy=False, max_results=None,
               paging=False):
    """Retrieve a list of Infoblox objects of type 'obj_type'

    Some get requests like 'ipv4address' should be always
    proxied to GM on Hellfire
    If request is cloud and proxy is not forced yet,
    then plan to do 2 request:
    - the first one is not proxied to GM
    - the second is proxied to GM

    Args:
        obj_type (str): Infoblox object type, e.g. 'network', 'range', etc.
        payload (dict): Payload with data to send
        return_fields (list): List of fields to be returned
        extattrs (dict): List of Extensible Attributes
        force_proxy (bool): Set _proxy_search flag to process requests on GM
        max_results (int): Maximum number of objects to be returned.
            If set to a negative number the appliance will return an error
            when the number of returned objects would exceed the setting.
            The default is -1000. If this is set to a positive number,
            the results will be truncated when necessary.
        paging (bool): Enables paging to wapi calls if paging = True,
            it uses _max_results to set paging size of the wapi calls.
            If _max_results is negative it will take paging size as 1000.

    Returns:
        A list of the Infoblox objects requested
    Raises:
        InfobloxObjectNotFound
    """
    self._validate_obj_type_or_die(obj_type, obj_type_expected=False)

    # max_results passed to get_object has priority over
    # one defined as connector option
    if max_results is None and self.max_results:
        max_results = self.max_results

    if paging is False and self.paging:
        paging = self.paging

    query_params = self._build_query_params(payload=payload,
                                            return_fields=return_fields,
                                            max_results=max_results,
                                            paging=paging)

    # Clear proxy flag if wapi version is too old (non-cloud)
    proxy_flag = self.cloud_api_enabled and force_proxy

    ib_object = self._handle_get_object(obj_type, query_params, extattrs,
                                        proxy_flag)
    if ib_object:
        return ib_object

    # Do second get call with force_proxy if not done yet
    if self.cloud_api_enabled and not force_proxy:
        ib_object = self._handle_get_object(obj_type, query_params,
                                            extattrs, proxy_flag=True)
        if ib_object:
            return ib_object

    return None
python
[ "def", "get_object", "(", "self", ",", "obj_type", ",", "payload", "=", "None", ",", "return_fields", "=", "None", ",", "extattrs", "=", "None", ",", "force_proxy", "=", "False", ",", "max_results", "=", "None", ",", "paging", "=", "False", ")", ":", "self", ".", "_validate_obj_type_or_die", "(", "obj_type", ",", "obj_type_expected", "=", "False", ")", "# max_results passed to get_object has priority over", "# one defined as connector option", "if", "max_results", "is", "None", "and", "self", ".", "max_results", ":", "max_results", "=", "self", ".", "max_results", "if", "paging", "is", "False", "and", "self", ".", "paging", ":", "paging", "=", "self", ".", "paging", "query_params", "=", "self", ".", "_build_query_params", "(", "payload", "=", "payload", ",", "return_fields", "=", "return_fields", ",", "max_results", "=", "max_results", ",", "paging", "=", "paging", ")", "# Clear proxy flag if wapi version is too old (non-cloud)", "proxy_flag", "=", "self", ".", "cloud_api_enabled", "and", "force_proxy", "ib_object", "=", "self", ".", "_handle_get_object", "(", "obj_type", ",", "query_params", ",", "extattrs", ",", "proxy_flag", ")", "if", "ib_object", ":", "return", "ib_object", "# Do second get call with force_proxy if not done yet", "if", "self", ".", "cloud_api_enabled", "and", "not", "force_proxy", ":", "ib_object", "=", "self", ".", "_handle_get_object", "(", "obj_type", ",", "query_params", ",", "extattrs", ",", "proxy_flag", "=", "True", ")", "if", "ib_object", ":", "return", "ib_object", "return", "None" ]
Retrieve a list of Infoblox objects of type 'obj_type'

Some get requests like 'ipv4address' should be always proxied to GM on Hellfire
If request is cloud and proxy is not forced yet, then plan to do 2 request:
- the first one is not proxied to GM
- the second is proxied to GM

Args:
    obj_type (str): Infoblox object type, e.g. 'network', 'range', etc.
    payload (dict): Payload with data to send
    return_fields (list): List of fields to be returned
    extattrs (dict): List of Extensible Attributes
    force_proxy (bool): Set _proxy_search flag to process requests on GM
    max_results (int): Maximum number of objects to be returned. If set to a negative number the appliance will return an error when the number of returned objects would exceed the setting. The default is -1000. If this is set to a positive number, the results will be truncated when necessary.
    paging (bool): Enables paging to wapi calls if paging = True, it uses _max_results to set paging size of the wapi calls. If _max_results is negative it will take paging size as 1000.

Returns:
    A list of the Infoblox objects requested
Raises:
    InfobloxObjectNotFound
[ "Retrieve", "a", "list", "of", "Infoblox", "objects", "of", "type", "obj_type" ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L231-L293
train
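A call sketch for get_object, assuming conn is a Connector configured as in the option-parsing sketch above; the object type, filter payload and return fields are examples only:

# fetch at most 100 networks from the 'default' view, returning only
# selected fields; paging pages through large result sets using
# max_results as the page size
networks = conn.get_object(
    'network',
    payload={'network_view': 'default'},
    return_fields=['network', 'comment'],
    max_results=100,
    paging=True,
)
if networks:
    for net in networks:
        print(net['_ref'], net.get('network'))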
infobloxopen/infoblox-client
infoblox_client/connector.py
Connector.create_object
def create_object(self, obj_type, payload, return_fields=None):
    """Create an Infoblox object of type 'obj_type'

    Args:
        obj_type (str): Infoblox object type, e.g. 'network', 'range', etc.
        payload (dict): Payload with data to send
        return_fields (list): List of fields to be returned
    Returns:
        The object reference of the newly create object
    Raises:
        InfobloxException
    """
    self._validate_obj_type_or_die(obj_type)

    query_params = self._build_query_params(return_fields=return_fields)

    url = self._construct_url(obj_type, query_params)
    opts = self._get_request_options(data=payload)
    self._log_request('post', url, opts)
    if(self.session.cookies):
        # the first 'get' or 'post' action will generate a cookie
        # after that, we don't need to re-authenticate
        self.session.auth = None
    r = self.session.post(url, **opts)

    self._validate_authorized(r)

    if r.status_code != requests.codes.CREATED:
        response = utils.safe_json_load(r.content)
        already_assigned = 'is assigned to another network view'
        if response and already_assigned in response.get('text'):
            exception = ib_ex.InfobloxMemberAlreadyAssigned
        else:
            exception = ib_ex.InfobloxCannotCreateObject
        raise exception(
            response=response,
            obj_type=obj_type,
            content=r.content,
            args=payload,
            code=r.status_code)

    return self._parse_reply(r)
python
[ "def", "create_object", "(", "self", ",", "obj_type", ",", "payload", ",", "return_fields", "=", "None", ")", ":", "self", ".", "_validate_obj_type_or_die", "(", "obj_type", ")", "query_params", "=", "self", ".", "_build_query_params", "(", "return_fields", "=", "return_fields", ")", "url", "=", "self", ".", "_construct_url", "(", "obj_type", ",", "query_params", ")", "opts", "=", "self", ".", "_get_request_options", "(", "data", "=", "payload", ")", "self", ".", "_log_request", "(", "'post'", ",", "url", ",", "opts", ")", "if", "(", "self", ".", "session", ".", "cookies", ")", ":", "# the first 'get' or 'post' action will generate a cookie", "# after that, we don't need to re-authenticate", "self", ".", "session", ".", "auth", "=", "None", "r", "=", "self", ".", "session", ".", "post", "(", "url", ",", "*", "*", "opts", ")", "self", ".", "_validate_authorized", "(", "r", ")", "if", "r", ".", "status_code", "!=", "requests", ".", "codes", ".", "CREATED", ":", "response", "=", "utils", ".", "safe_json_load", "(", "r", ".", "content", ")", "already_assigned", "=", "'is assigned to another network view'", "if", "response", "and", "already_assigned", "in", "response", ".", "get", "(", "'text'", ")", ":", "exception", "=", "ib_ex", ".", "InfobloxMemberAlreadyAssigned", "else", ":", "exception", "=", "ib_ex", ".", "InfobloxCannotCreateObject", "raise", "exception", "(", "response", "=", "response", ",", "obj_type", "=", "obj_type", ",", "content", "=", "r", ".", "content", ",", "args", "=", "payload", ",", "code", "=", "r", ".", "status_code", ")", "return", "self", ".", "_parse_reply", "(", "r", ")" ]
Create an Infoblox object of type 'obj_type'

Args:
    obj_type (str): Infoblox object type, e.g. 'network', 'range', etc.
    payload (dict): Payload with data to send
    return_fields (list): List of fields to be returned
Returns:
    The object reference of the newly create object
Raises:
    InfobloxException
[ "Create", "an", "Infoblox", "object", "of", "type", "obj_type" ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L345-L387
train
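A matching sketch for the raw create call; conn again stands for a configured Connector and the payload values are placeholders:

new_object = conn.create_object(
    'network',
    payload={'network': '10.10.0.0/24',
             'network_view': 'default',
             'comment': 'created via WAPI'},
    return_fields=['network', 'comment'],
)
# on success the parsed reply for the new object is returned; any non-201
# status raises InfobloxCannotCreateObject, or InfobloxMemberAlreadyAssigned
# when the member is already bound to another network view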
infobloxopen/infoblox-client
infoblox_client/connector.py
Connector.update_object
def update_object(self, ref, payload, return_fields=None):
    """Update an Infoblox object

    Args:
        ref (str): Infoblox object reference
        payload (dict): Payload with data to send
    Returns:
        The object reference of the updated object
    Raises:
        InfobloxException
    """
    query_params = self._build_query_params(return_fields=return_fields)

    opts = self._get_request_options(data=payload)
    url = self._construct_url(ref, query_params)
    self._log_request('put', url, opts)
    r = self.session.put(url, **opts)

    self._validate_authorized(r)

    if r.status_code != requests.codes.ok:
        self._check_service_availability('update', r, ref)

        raise ib_ex.InfobloxCannotUpdateObject(
            response=jsonutils.loads(r.content),
            ref=ref,
            content=r.content,
            code=r.status_code)

    return self._parse_reply(r)
python
[ "def", "update_object", "(", "self", ",", "ref", ",", "payload", ",", "return_fields", "=", "None", ")", ":", "query_params", "=", "self", ".", "_build_query_params", "(", "return_fields", "=", "return_fields", ")", "opts", "=", "self", ".", "_get_request_options", "(", "data", "=", "payload", ")", "url", "=", "self", ".", "_construct_url", "(", "ref", ",", "query_params", ")", "self", ".", "_log_request", "(", "'put'", ",", "url", ",", "opts", ")", "r", "=", "self", ".", "session", ".", "put", "(", "url", ",", "*", "*", "opts", ")", "self", ".", "_validate_authorized", "(", "r", ")", "if", "r", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "self", ".", "_check_service_availability", "(", "'update'", ",", "r", ",", "ref", ")", "raise", "ib_ex", ".", "InfobloxCannotUpdateObject", "(", "response", "=", "jsonutils", ".", "loads", "(", "r", ".", "content", ")", ",", "ref", "=", "ref", ",", "content", "=", "r", ".", "content", ",", "code", "=", "r", ".", "status_code", ")", "return", "self", ".", "_parse_reply", "(", "r", ")" ]
Update an Infoblox object

Args:
    ref (str): Infoblox object reference
    payload (dict): Payload with data to send
Returns:
    The object reference of the updated object
Raises:
    InfobloxException
[ "Update", "an", "Infoblox", "object" ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L424-L453
train
infobloxopen/infoblox-client
infoblox_client/connector.py
Connector.delete_object
def delete_object(self, ref, delete_arguments=None):
    """Remove an Infoblox object

    Args:
        ref (str): Object reference
        delete_arguments (dict): Extra delete arguments
    Returns:
        The object reference of the removed object
    Raises:
        InfobloxException
    """
    opts = self._get_request_options()
    if not isinstance(delete_arguments, dict):
        delete_arguments = {}

    url = self._construct_url(ref, query_params=delete_arguments)
    self._log_request('delete', url, opts)
    r = self.session.delete(url, **opts)

    self._validate_authorized(r)

    if r.status_code != requests.codes.ok:
        self._check_service_availability('delete', r, ref)

        raise ib_ex.InfobloxCannotDeleteObject(
            response=jsonutils.loads(r.content),
            ref=ref,
            content=r.content,
            code=r.status_code)

    return self._parse_reply(r)
python
[ "def", "delete_object", "(", "self", ",", "ref", ",", "delete_arguments", "=", "None", ")", ":", "opts", "=", "self", ".", "_get_request_options", "(", ")", "if", "not", "isinstance", "(", "delete_arguments", ",", "dict", ")", ":", "delete_arguments", "=", "{", "}", "url", "=", "self", ".", "_construct_url", "(", "ref", ",", "query_params", "=", "delete_arguments", ")", "self", ".", "_log_request", "(", "'delete'", ",", "url", ",", "opts", ")", "r", "=", "self", ".", "session", ".", "delete", "(", "url", ",", "*", "*", "opts", ")", "self", ".", "_validate_authorized", "(", "r", ")", "if", "r", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "self", ".", "_check_service_availability", "(", "'delete'", ",", "r", ",", "ref", ")", "raise", "ib_ex", ".", "InfobloxCannotDeleteObject", "(", "response", "=", "jsonutils", ".", "loads", "(", "r", ".", "content", ")", ",", "ref", "=", "ref", ",", "content", "=", "r", ".", "content", ",", "code", "=", "r", ".", "status_code", ")", "return", "self", ".", "_parse_reply", "(", "r", ")" ]
Remove an Infoblox object

Args:
    ref (str): Object reference
    delete_arguments (dict): Extra delete arguments
Returns:
    The object reference of the removed object
Raises:
    InfobloxException
[ "Remove", "an", "Infoblox", "object" ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/connector.py#L456-L485
train
infobloxopen/infoblox-client
infoblox_client/objects.py
BaseObject._remap_fields
def _remap_fields(cls, kwargs):
    """Map fields from kwargs into dict acceptable by NIOS"""
    mapped = {}
    for key in kwargs:
        if key in cls._remap:
            mapped[cls._remap[key]] = kwargs[key]
        else:
            mapped[key] = kwargs[key]
    return mapped
python
[ "def", "_remap_fields", "(", "cls", ",", "kwargs", ")", ":", "mapped", "=", "{", "}", "for", "key", "in", "kwargs", ":", "if", "key", "in", "cls", ".", "_remap", ":", "mapped", "[", "cls", ".", "_remap", "[", "key", "]", "]", "=", "kwargs", "[", "key", "]", "else", ":", "mapped", "[", "key", "]", "=", "kwargs", "[", "key", "]", "return", "mapped" ]
Map fields from kwargs into dict acceptable by NIOS
[ "Map", "fields", "from", "kwargs", "into", "dict", "acceptable", "by", "NIOS" ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L87-L95
train
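A sketch of what the remapping does. The _remap mapping lives on each object class; the WrappedNetwork class below is made up purely to illustrate the key translation and repeats the same logic as the method shown above:

class WrappedNetwork:
    # hypothetical mapping from friendly kwargs to NIOS field names
    _remap = {'cidr': 'network'}

    @classmethod
    def _remap_fields(cls, kwargs):
        # same logic as the BaseObject method in this record
        mapped = {}
        for key in kwargs:
            if key in cls._remap:
                mapped[cls._remap[key]] = kwargs[key]
            else:
                mapped[key] = kwargs[key]
        return mapped

assert WrappedNetwork._remap_fields({'cidr': '10.0.0.0/24', 'comment': 'lab'}) == \
    {'network': '10.0.0.0/24', 'comment': 'lab'}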
infobloxopen/infoblox-client
infoblox_client/objects.py
EA.from_dict
def from_dict(cls, eas_from_nios):
    """Converts extensible attributes from the NIOS reply."""
    if not eas_from_nios:
        return
    return cls({name: cls._process_value(ib_utils.try_value_to_bool,
                                         eas_from_nios[name]['value'])
                for name in eas_from_nios})
python
[ "def", "from_dict", "(", "cls", ",", "eas_from_nios", ")", ":", "if", "not", "eas_from_nios", ":", "return", "return", "cls", "(", "{", "name", ":", "cls", ".", "_process_value", "(", "ib_utils", ".", "try_value_to_bool", ",", "eas_from_nios", "[", "name", "]", "[", "'value'", "]", ")", "for", "name", "in", "eas_from_nios", "}", ")" ]
Converts extensible attributes from the NIOS reply.
[ "Converts", "extensible", "attributes", "from", "the", "NIOS", "reply", "." ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L141-L147
train
infobloxopen/infoblox-client
infoblox_client/objects.py
EA.to_dict
def to_dict(self):
    """Converts extensible attributes into the format suitable for NIOS."""
    return {name: {'value': self._process_value(str, value)}
            for name, value in self._ea_dict.items()
            if not (value is None or value == "" or value == [])}
python
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "name", ":", "{", "'value'", ":", "self", ".", "_process_value", "(", "str", ",", "value", ")", "}", "for", "name", ",", "value", "in", "self", ".", "_ea_dict", ".", "items", "(", ")", "if", "not", "(", "value", "is", "None", "or", "value", "==", "\"\"", "or", "value", "==", "[", "]", ")", "}" ]
Converts extensible attributes into the format suitable for NIOS.
[ "Converts", "extensible", "attributes", "into", "the", "format", "suitable", "for", "NIOS", "." ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L149-L153
train
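The two EA conversions above are close to inverses: to_dict wraps each attribute in the {'value': ...} structure NIOS expects and stringifies the value, while from_dict unwraps it and turns 'True'/'False' strings back into booleans via try_value_to_bool. A round-trip sketch with made-up attribute names, assuming EA is constructed from a plain dict of attribute names to values, which is its usual form:

from infoblox_client import objects

ea = objects.EA({'Cloud API Owned': True, 'Tenant ID': 'tenant-1'})

wire_format = ea.to_dict()
# {'Cloud API Owned': {'value': 'True'}, 'Tenant ID': {'value': 'tenant-1'}}

restored = objects.EA.from_dict(wire_format)
# 'True' is converted back to the boolean True by try_value_to_bool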
infobloxopen/infoblox-client
infoblox_client/objects.py
EA._process_value
def _process_value(func, value):
    """Applies processing method for value or each element in it.

    :param func: method to be called with value
    :param value: value to process
    :return: if 'value' is list/tupe, returns iterable with func results,
             else func result is returned
    """
    if isinstance(value, (list, tuple)):
        return [func(item) for item in value]
    return func(value)
python
[ "def", "_process_value", "(", "func", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "func", "(", "item", ")", "for", "item", "in", "value", "]", "return", "func", "(", "value", ")" ]
Applies processing method for value or each element in it.

:param func: method to be called with value
:param value: value to process
:return: if 'value' is list/tupe, returns iterable with func results, else func result is returned
[ "Applies", "processing", "method", "for", "value", "or", "each", "element", "in", "it", "." ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L156-L166
train
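The helper simply maps func over list or tuple values and applies it directly otherwise; a short illustration, assuming it stays reachable as EA._process_value as in this record:

from infoblox_client.objects import EA

assert EA._process_value(str, ['a', 1, True]) == ['a', '1', 'True']   # list: map over items
assert EA._process_value(str, 42) == '42'                             # scalar: apply directly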
infobloxopen/infoblox-client
infoblox_client/objects.py
InfobloxObject.from_dict
def from_dict(cls, connector, ip_dict):
    """Build dict fields as SubObjects if needed.

    Checks if lambda for building object from dict exists.
    _global_field_processing and _custom_field_processing rules
    are checked.
    """
    mapping = cls._global_field_processing.copy()
    mapping.update(cls._custom_field_processing)
    # Process fields that require building themselves as objects
    for field in mapping:
        if field in ip_dict:
            ip_dict[field] = mapping[field](ip_dict[field])
    return cls(connector, **ip_dict)
python
[ "def", "from_dict", "(", "cls", ",", "connector", ",", "ip_dict", ")", ":", "mapping", "=", "cls", ".", "_global_field_processing", ".", "copy", "(", ")", "mapping", ".", "update", "(", "cls", ".", "_custom_field_processing", ")", "# Process fields that require building themselves as objects", "for", "field", "in", "mapping", ":", "if", "field", "in", "ip_dict", ":", "ip_dict", "[", "field", "]", "=", "mapping", "[", "field", "]", "(", "ip_dict", "[", "field", "]", ")", "return", "cls", "(", "connector", ",", "*", "*", "ip_dict", ")" ]
Build dict fields as SubObjects if needed. Checks if lambda for building object from dict exists. _global_field_processing and _custom_field_processing rules are checked.
[ "Build", "dict", "fields", "as", "SubObjects", "if", "needed", "." ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L243-L256
train
infobloxopen/infoblox-client
infoblox_client/objects.py
InfobloxObject.field_to_dict
def field_to_dict(self, field):
    """Read field value and converts to dict if possible"""
    value = getattr(self, field)
    if isinstance(value, (list, tuple)):
        return [self.value_to_dict(val) for val in value]
    return self.value_to_dict(value)
python
[ "def", "field_to_dict", "(", "self", ",", "field", ")", ":", "value", "=", "getattr", "(", "self", ",", "field", ")", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "self", ".", "value_to_dict", "(", "val", ")", "for", "val", "in", "value", "]", "return", "self", ".", "value_to_dict", "(", "value", ")" ]
Read field value and converts to dict if possible
[ "Read", "field", "value", "and", "converts", "to", "dict", "if", "possible" ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L262-L267
train
infobloxopen/infoblox-client
infoblox_client/objects.py
InfobloxObject.to_dict
def to_dict(self, search_fields=None):
    """Builds dict without None object fields"""
    fields = self._fields
    if search_fields == 'update':
        fields = self._search_for_update_fields
    elif search_fields == 'all':
        fields = self._all_searchable_fields
    elif search_fields == 'exclude':
        # exclude search fields for update actions,
        # but include updateable_search_fields
        fields = [field for field in self._fields
                  if field in self._updateable_search_fields or
                  field not in self._search_for_update_fields]

    return {field: self.field_to_dict(field) for field in fields
            if getattr(self, field, None) is not None}
python
[ "def", "to_dict", "(", "self", ",", "search_fields", "=", "None", ")", ":", "fields", "=", "self", ".", "_fields", "if", "search_fields", "==", "'update'", ":", "fields", "=", "self", ".", "_search_for_update_fields", "elif", "search_fields", "==", "'all'", ":", "fields", "=", "self", ".", "_all_searchable_fields", "elif", "search_fields", "==", "'exclude'", ":", "# exclude search fields for update actions,", "# but include updateable_search_fields", "fields", "=", "[", "field", "for", "field", "in", "self", ".", "_fields", "if", "field", "in", "self", ".", "_updateable_search_fields", "or", "field", "not", "in", "self", ".", "_search_for_update_fields", "]", "return", "{", "field", ":", "self", ".", "field_to_dict", "(", "field", ")", "for", "field", "in", "fields", "if", "getattr", "(", "self", ",", "field", ",", "None", ")", "is", "not", "None", "}" ]
Builds dict without None object fields
[ "Builds", "dict", "without", "None", "object", "fields" ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L269-L284
train
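A stripped-down illustration of the serialization pattern in field_to_dict/to_dict above: only fields that are not None are emitted, and values that expose their own to_dict are converted recursively, including inside lists. The field names and the Ext class are invented for the example.

class Ext(object):
    def __init__(self, value):
        self.value = value

    def to_dict(self):
        return {'value': self.value}


def value_to_dict(value):
    # delegate to the value's own to_dict() when it has one
    return value.to_dict() if hasattr(value, 'to_dict') else value


def to_dict(obj, fields):
    out = {}
    for field in fields:
        value = getattr(obj, field, None)
        if value is None:
            continue   # None fields are not serialized
        if isinstance(value, (list, tuple)):
            out[field] = [value_to_dict(v) for v in value]
        else:
            out[field] = value_to_dict(value)
    return out


class Fake(object):
    name = 'host1'
    comment = None
    extattrs = [Ext('a'), Ext('b')]


print(to_dict(Fake(), ['name', 'comment', 'extattrs']))
# {'name': 'host1', 'extattrs': [{'value': 'a'}, {'value': 'b'}]}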
infobloxopen/infoblox-client
infoblox_client/objects.py
InfobloxObject.fetch
def fetch(self, only_ref=False): """Fetch object from NIOS by _ref or searchfields Update existent object with fields returned from NIOS Return True on successful object fetch """ if self.ref: reply = self.connector.get_object( self.ref, return_fields=self.return_fields) if reply: self.update_from_dict(reply) return True search_dict = self.to_dict(search_fields='update') return_fields = [] if only_ref else self.return_fields reply = self.connector.get_object(self.infoblox_type, search_dict, return_fields=return_fields) if reply: self.update_from_dict(reply[0], only_ref=only_ref) return True return False
python
def fetch(self, only_ref=False): """Fetch object from NIOS by _ref or searchfields Update existent object with fields returned from NIOS Return True on successful object fetch """ if self.ref: reply = self.connector.get_object( self.ref, return_fields=self.return_fields) if reply: self.update_from_dict(reply) return True search_dict = self.to_dict(search_fields='update') return_fields = [] if only_ref else self.return_fields reply = self.connector.get_object(self.infoblox_type, search_dict, return_fields=return_fields) if reply: self.update_from_dict(reply[0], only_ref=only_ref) return True return False
[ "def", "fetch", "(", "self", ",", "only_ref", "=", "False", ")", ":", "if", "self", ".", "ref", ":", "reply", "=", "self", ".", "connector", ".", "get_object", "(", "self", ".", "ref", ",", "return_fields", "=", "self", ".", "return_fields", ")", "if", "reply", ":", "self", ".", "update_from_dict", "(", "reply", ")", "return", "True", "search_dict", "=", "self", ".", "to_dict", "(", "search_fields", "=", "'update'", ")", "return_fields", "=", "[", "]", "if", "only_ref", "else", "self", ".", "return_fields", "reply", "=", "self", ".", "connector", ".", "get_object", "(", "self", ".", "infoblox_type", ",", "search_dict", ",", "return_fields", "=", "return_fields", ")", "if", "reply", ":", "self", ".", "update_from_dict", "(", "reply", "[", "0", "]", ",", "only_ref", "=", "only_ref", ")", "return", "True", "return", "False" ]
Fetch object from NIOS by _ref or searchfields Update existent object with fields returned from NIOS Return True on successful object fetch
[ "Fetch", "object", "from", "NIOS", "by", "_ref", "or", "searchfields" ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L378-L399
train
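The fetch method above follows a "search, then hydrate" shape: query by reference if one is known, otherwise search by the update fields, then copy the first reply back onto the object. The sketch below mimics that flow against a stubbed connector whose get_object signature is simplified for the example and is not the real infoblox-client interface.

class StubConnector(object):
    """Pretends to be a WAPI connector; filters canned results by the search dict."""
    def __init__(self, results):
        self._results = results

    def get_object(self, obj_type, search=None, return_fields=None):
        search = search or {}
        return [r for r in self._results
                if all(r.get(k) == v for k, v in search.items())]


def fetch(connector, obj_type, search, target):
    """Search the stub and update the target dict in place on a hit."""
    reply = connector.get_object(obj_type, search=search)
    if reply:
        target.update(reply[0])
        return True
    return False


conn = StubConnector([{'_ref': 'record:host/abc', 'name': 'host1', 'view': 'default'}])
record = {'name': 'host1'}
print(fetch(conn, 'record:host', {'name': 'host1'}, record), record['_ref'])
# True record:host/abc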
infobloxopen/infoblox-client
infoblox_client/objects.py
HostRecord._ip_setter
def _ip_setter(self, ipaddr_name, ipaddrs_name, ips): """Setter for ip fields Accepts a string or a list of IP instances as input. String case: only ipvXaddr is going to be filled, that is enough to perform host record search using ip List of IP instances case: ipvXaddrs is going to be filled with ips content, so create can be issued, since fully prepared IP objects are in place. ipXaddr is also filled to be able to perform a search on NIOS and verify that no such host record exists yet. """ if isinstance(ips, six.string_types): setattr(self, ipaddr_name, ips) elif isinstance(ips, (list, tuple)) and isinstance(ips[0], IP): setattr(self, ipaddr_name, ips[0].ip) setattr(self, ipaddrs_name, ips) elif isinstance(ips, IP): setattr(self, ipaddr_name, ips.ip) setattr(self, ipaddrs_name, [ips]) elif ips is None: setattr(self, ipaddr_name, None) setattr(self, ipaddrs_name, None) else: raise ValueError( "Invalid format of ip passed in: %s." "Should be string or list of NIOS IP objects." % ips)
python
def _ip_setter(self, ipaddr_name, ipaddrs_name, ips): """Setter for ip fields Accepts a string or a list of IP instances as input. String case: only ipvXaddr is going to be filled, that is enough to perform host record search using ip List of IP instances case: ipvXaddrs is going to be filled with ips content, so create can be issued, since fully prepared IP objects are in place. ipXaddr is also filled to be able to perform a search on NIOS and verify that no such host record exists yet. """ if isinstance(ips, six.string_types): setattr(self, ipaddr_name, ips) elif isinstance(ips, (list, tuple)) and isinstance(ips[0], IP): setattr(self, ipaddr_name, ips[0].ip) setattr(self, ipaddrs_name, ips) elif isinstance(ips, IP): setattr(self, ipaddr_name, ips.ip) setattr(self, ipaddrs_name, [ips]) elif ips is None: setattr(self, ipaddr_name, None) setattr(self, ipaddrs_name, None) else: raise ValueError( "Invalid format of ip passed in: %s." "Should be string or list of NIOS IP objects." % ips)
[ "def", "_ip_setter", "(", "self", ",", "ipaddr_name", ",", "ipaddrs_name", ",", "ips", ")", ":", "if", "isinstance", "(", "ips", ",", "six", ".", "string_types", ")", ":", "setattr", "(", "self", ",", "ipaddr_name", ",", "ips", ")", "elif", "isinstance", "(", "ips", ",", "(", "list", ",", "tuple", ")", ")", "and", "isinstance", "(", "ips", "[", "0", "]", ",", "IP", ")", ":", "setattr", "(", "self", ",", "ipaddr_name", ",", "ips", "[", "0", "]", ".", "ip", ")", "setattr", "(", "self", ",", "ipaddrs_name", ",", "ips", ")", "elif", "isinstance", "(", "ips", ",", "IP", ")", ":", "setattr", "(", "self", ",", "ipaddr_name", ",", "ips", ".", "ip", ")", "setattr", "(", "self", ",", "ipaddrs_name", ",", "[", "ips", "]", ")", "elif", "ips", "is", "None", ":", "setattr", "(", "self", ",", "ipaddr_name", ",", "None", ")", "setattr", "(", "self", ",", "ipaddrs_name", ",", "None", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid format of ip passed in: %s.\"", "\"Should be string or list of NIOS IP objects.\"", "%", "ips", ")" ]
Setter for ip fields Accepts a string or a list of IP instances as input. String case: only ipvXaddr is going to be filled, that is enough to perform host record search using ip List of IP instances case: ipvXaddrs is going to be filled with ips content, so create can be issued, since fully prepared IP objects are in place. ipXaddr is also filled to be able to perform a search on NIOS and verify that no such host record exists yet.
[ "Setter", "for", "ip", "fields" ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L527-L554
train
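A compact sketch of the normalization _ip_setter performs: a plain string fills only the single-address slot, while an IP object (or a list of them) fills both the single-address and the address-list slots. The IP namedtuple below is a stand-in for the library's IP sub-objects, and returning a tuple instead of calling setattr is a simplification for the example.

from collections import namedtuple

IP = namedtuple('IP', ['ip'])   # stand-in for the library's IP sub-object


def normalize_ips(ips):
    """Return (ipvXaddr, ipvXaddrs) the way the setter above would fill them."""
    if isinstance(ips, str):
        return ips, None                          # string: address only, no list
    if isinstance(ips, IP):
        return ips.ip, [ips]                      # single object: wrap in a list
    if isinstance(ips, (list, tuple)) and ips and isinstance(ips[0], IP):
        return ips[0].ip, list(ips)               # list: first address + full list
    if ips is None:
        return None, None
    raise ValueError('Should be string or list of IP objects: %r' % (ips,))


print(normalize_ips('192.168.1.10'))
# ('192.168.1.10', None)
print(normalize_ips([IP('10.0.0.1'), IP('10.0.0.2')]))
# ('10.0.0.1', [IP(ip='10.0.0.1'), IP(ip='10.0.0.2')])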
infobloxopen/infoblox-client
infoblox_client/objects.py
FixedAddressV6.mac
def mac(self, mac): """Set mac and duid fields To have common interface with FixedAddress accept mac address and set duid as a side effect. 'mac' was added to _shadow_fields to prevent sending it out over wapi. """ self._mac = mac if mac: self.duid = ib_utils.generate_duid(mac) elif not hasattr(self, 'duid'): self.duid = None
python
def mac(self, mac): """Set mac and duid fields To have common interface with FixedAddress accept mac address and set duid as a side effect. 'mac' was added to _shadow_fields to prevent sending it out over wapi. """ self._mac = mac if mac: self.duid = ib_utils.generate_duid(mac) elif not hasattr(self, 'duid'): self.duid = None
[ "def", "mac", "(", "self", ",", "mac", ")", ":", "self", ".", "_mac", "=", "mac", "if", "mac", ":", "self", ".", "duid", "=", "ib_utils", ".", "generate_duid", "(", "mac", ")", "elif", "not", "hasattr", "(", "self", ",", "'duid'", ")", ":", "self", ".", "duid", "=", "None" ]
Set mac and duid fields To have common interface with FixedAddress accept mac address and set duid as a side effect. 'mac' was added to _shadow_fields to prevent sending it out over wapi.
[ "Set", "mac", "and", "duid", "fields" ]
edeec62db1935784c728731b2ae7cf0fcc9bf84d
https://github.com/infobloxopen/infoblox-client/blob/edeec62db1935784c728731b2ae7cf0fcc9bf84d/infoblox_client/objects.py#L821-L832
train
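The property setter above derives one field (duid) from another (mac). The sketch below shows the same pattern with an invented derivation (a fixed '00:03:' prefix); the real generate_duid helper in infoblox-client computes the DUID differently, so treat this purely as an illustration of the setter side effect.

class FixedAddressV6(object):
    def __init__(self, mac=None):
        self.mac = mac                # goes through the property setter below

    @property
    def mac(self):
        return self._mac

    @mac.setter
    def mac(self, mac):
        # setting the MAC also (re)derives the DUID as a side effect
        self._mac = mac
        self.duid = ('00:03:' + mac) if mac else None   # illustrative derivation only


fa = FixedAddressV6('aa:bb:cc:dd:ee:ff')
print(fa.duid)   # 00:03:aa:bb:cc:dd:ee:ff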
cf-platform-eng/tile-generator
tile_generator/template.py
render_property
def render_property(property): """Render a property for bosh manifest, according to its type.""" # This ain't the prettiest thing, but it should get the job done. # I don't think we have anything more elegant available at bosh-manifest-generation time. # See https://docs.pivotal.io/partners/product-template-reference.html for list. if 'type' in property and property['type'] in PROPERTY_FIELDS: fields = {} for field in PROPERTY_FIELDS[property['type']]: if type(field) is tuple: fields[field[0]] = '(( .properties.{}.{} ))'.format(property['name'], field[1]) else: fields[field] = '(( .properties.{}.{} ))'.format(property['name'], field) out = { property['name']: fields } else: if property.get('is_reference', False): out = { property['name']: property['default'] } else: out = { property['name']: '(( .properties.{}.value ))'.format(property['name']) } return out
python
def render_property(property): """Render a property for bosh manifest, according to its type.""" # This ain't the prettiest thing, but it should get the job done. # I don't think we have anything more elegant available at bosh-manifest-generation time. # See https://docs.pivotal.io/partners/product-template-reference.html for list. if 'type' in property and property['type'] in PROPERTY_FIELDS: fields = {} for field in PROPERTY_FIELDS[property['type']]: if type(field) is tuple: fields[field[0]] = '(( .properties.{}.{} ))'.format(property['name'], field[1]) else: fields[field] = '(( .properties.{}.{} ))'.format(property['name'], field) out = { property['name']: fields } else: if property.get('is_reference', False): out = { property['name']: property['default'] } else: out = { property['name']: '(( .properties.{}.value ))'.format(property['name']) } return out
[ "def", "render_property", "(", "property", ")", ":", "# This ain't the prettiest thing, but it should get the job done.", "# I don't think we have anything more elegant available at bosh-manifest-generation time.", "# See https://docs.pivotal.io/partners/product-template-reference.html for list.", "if", "'type'", "in", "property", "and", "property", "[", "'type'", "]", "in", "PROPERTY_FIELDS", ":", "fields", "=", "{", "}", "for", "field", "in", "PROPERTY_FIELDS", "[", "property", "[", "'type'", "]", "]", ":", "if", "type", "(", "field", ")", "is", "tuple", ":", "fields", "[", "field", "[", "0", "]", "]", "=", "'(( .properties.{}.{} ))'", ".", "format", "(", "property", "[", "'name'", "]", ",", "field", "[", "1", "]", ")", "else", ":", "fields", "[", "field", "]", "=", "'(( .properties.{}.{} ))'", ".", "format", "(", "property", "[", "'name'", "]", ",", "field", ")", "out", "=", "{", "property", "[", "'name'", "]", ":", "fields", "}", "else", ":", "if", "property", ".", "get", "(", "'is_reference'", ",", "False", ")", ":", "out", "=", "{", "property", "[", "'name'", "]", ":", "property", "[", "'default'", "]", "}", "else", ":", "out", "=", "{", "property", "[", "'name'", "]", ":", "'(( .properties.{}.value ))'", ".", "format", "(", "property", "[", "'name'", "]", ")", "}", "return", "out" ]
Render a property for bosh manifest, according to its type.
[ "Render", "a", "property", "for", "bosh", "manifest", "according", "to", "its", "type", "." ]
56b602334edb38639bc7e01b1e9e68e43f9e6828
https://github.com/cf-platform-eng/tile-generator/blob/56b602334edb38639bc7e01b1e9e68e43f9e6828/tile_generator/template.py#L152-L170
train
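To see what render_property produces, here is a small self-contained rerun of its core logic with a one-entry stand-in for tile-generator's PROPERTY_FIELDS table (the real table is larger and also drives the is_reference branch, omitted here). It shows the two output shapes: a nested dict of placeholders for typed properties, and a single value placeholder otherwise.

PROPERTY_FIELDS = {
    'simple_credentials': ['identity', 'password'],   # trimmed-down stand-in
}


def render_property(prop):
    if prop.get('type') in PROPERTY_FIELDS:
        fields = {f: '(( .properties.{}.{} ))'.format(prop['name'], f)
                  for f in PROPERTY_FIELDS[prop['type']]}
        return {prop['name']: fields}
    return {prop['name']: '(( .properties.{}.value ))'.format(prop['name'])}


print(render_property({'name': 'db_creds', 'type': 'simple_credentials'}))
# {'db_creds': {'identity': '(( .properties.db_creds.identity ))',
#               'password': '(( .properties.db_creds.password ))'}}
print(render_property({'name': 'instances', 'type': 'integer'}))
# {'instances': '(( .properties.instances.value ))'}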
h2non/filetype.py
filetype/match.py
match
def match(obj, matchers=TYPES): """ Matches the given input against the available file type matchers. Args: obj: path to file, bytes or bytearray. Returns: Type instance if type matches. Otherwise None. Raises: TypeError: if obj is not a supported type. """ buf = get_bytes(obj) for matcher in matchers: if matcher.match(buf): return matcher return None
python
def match(obj, matchers=TYPES): """ Matches the given input against the available file type matchers. Args: obj: path to file, bytes or bytearray. Returns: Type instance if type matches. Otherwise None. Raises: TypeError: if obj is not a supported type. """ buf = get_bytes(obj) for matcher in matchers: if matcher.match(buf): return matcher return None
[ "def", "match", "(", "obj", ",", "matchers", "=", "TYPES", ")", ":", "buf", "=", "get_bytes", "(", "obj", ")", "for", "matcher", "in", "matchers", ":", "if", "matcher", ".", "match", "(", "buf", ")", ":", "return", "matcher", "return", "None" ]
Matches the given input against the available file type matchers. Args: obj: path to file, bytes or bytearray. Returns: Type instance if type matches. Otherwise None. Raises: TypeError: if obj is not a supported type.
[ "Matches", "the", "given", "input", "against", "the", "available", "file", "type", "matchers", "." ]
37e7fd1a9eed1a9eab55ac43f62da98f10970675
https://github.com/h2non/filetype.py/blob/37e7fd1a9eed1a9eab55ac43f62da98f10970675/filetype/match.py#L14-L34
train
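A minimal stand-alone version of the matcher loop above: each matcher inspects a signature buffer and the first hit wins. The toy PNG matcher is illustrative only; the real filetype package ships its own matcher classes and TYPES registry.

class PngMatcher(object):
    extension = 'png'
    mime = 'image/png'

    def match(self, buf):
        # PNG files start with the signature bytes \x89 P N G
        return len(buf) > 3 and buf[:4] == b'\x89PNG'


def match(buf, matchers):
    for matcher in matchers:
        if matcher.match(buf):
            return matcher
    return None


kind = match(b'\x89PNG\r\n\x1a\n' + b'\x00' * 16, [PngMatcher()])
print(kind.mime if kind else 'unknown')   # image/png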
h2non/filetype.py
filetype/utils.py
signature
def signature(array): """ Returns the first 262 bytes of the given bytearray as part of the file header signature. Args: array: bytearray to extract the header signature. Returns: First 262 bytes of the file content as bytearray type. """ length = len(array) index = _NUM_SIGNATURE_BYTES if length > _NUM_SIGNATURE_BYTES else length return array[:index]
python
def signature(array): """ Returns the first 262 bytes of the given bytearray as part of the file header signature. Args: array: bytearray to extract the header signature. Returns: First 262 bytes of the file content as bytearray type. """ length = len(array) index = _NUM_SIGNATURE_BYTES if length > _NUM_SIGNATURE_BYTES else length return array[:index]
[ "def", "signature", "(", "array", ")", ":", "length", "=", "len", "(", "array", ")", "index", "=", "_NUM_SIGNATURE_BYTES", "if", "length", ">", "_NUM_SIGNATURE_BYTES", "else", "length", "return", "array", "[", ":", "index", "]" ]
Returns the first 262 bytes of the given bytearray as part of the file header signature. Args: array: bytearray to extract the header signature. Returns: First 262 bytes of the file content as bytearray type.
[ "Returns", "the", "first", "262", "bytes", "of", "the", "given", "bytearray", "as", "part", "of", "the", "file", "header", "signature", "." ]
37e7fd1a9eed1a9eab55ac43f62da98f10970675
https://github.com/h2non/filetype.py/blob/37e7fd1a9eed1a9eab55ac43f62da98f10970675/filetype/utils.py#L21-L35
train
h2non/filetype.py
filetype/utils.py
get_bytes
def get_bytes(obj): """ Infers the input type and reads the first 262 bytes, returning a sliced bytearray. Args: obj: path to readable, file, bytes or bytearray. Returns: First 262 bytes of the file content as bytearray type. Raises: TypeError: if obj is not a supported type. """ try: obj = obj.read(_NUM_SIGNATURE_BYTES) except AttributeError: # duck-typing as readable failed - we'll try the other options pass kind = type(obj) if kind is bytearray: return signature(obj) if kind is str: return get_signature_bytes(obj) if kind is bytes: return signature(obj) if kind is memoryview: return signature(obj).tolist() raise TypeError('Unsupported type as file input: %s' % kind)
python
def get_bytes(obj): """ Infers the input type and reads the first 262 bytes, returning a sliced bytearray. Args: obj: path to readable, file, bytes or bytearray. Returns: First 262 bytes of the file content as bytearray type. Raises: TypeError: if obj is not a supported type. """ try: obj = obj.read(_NUM_SIGNATURE_BYTES) except AttributeError: # duck-typing as readable failed - we'll try the other options pass kind = type(obj) if kind is bytearray: return signature(obj) if kind is str: return get_signature_bytes(obj) if kind is bytes: return signature(obj) if kind is memoryview: return signature(obj).tolist() raise TypeError('Unsupported type as file input: %s' % kind)
[ "def", "get_bytes", "(", "obj", ")", ":", "try", ":", "obj", "=", "obj", ".", "read", "(", "_NUM_SIGNATURE_BYTES", ")", "except", "AttributeError", ":", "# duck-typing as readable failed - we'll try the other options", "pass", "kind", "=", "type", "(", "obj", ")", "if", "kind", "is", "bytearray", ":", "return", "signature", "(", "obj", ")", "if", "kind", "is", "str", ":", "return", "get_signature_bytes", "(", "obj", ")", "if", "kind", "is", "bytes", ":", "return", "signature", "(", "obj", ")", "if", "kind", "is", "memoryview", ":", "return", "signature", "(", "obj", ")", ".", "tolist", "(", ")", "raise", "TypeError", "(", "'Unsupported type as file input: %s'", "%", "kind", ")" ]
Infers the input type and reads the first 262 bytes, returning a sliced bytearray. Args: obj: path to readable, file, bytes or bytearray. Returns: First 262 bytes of the file content as bytearray type. Raises: TypeError: if obj is not a supported type.
[ "Infers", "the", "input", "type", "and", "reads", "the", "first", "262", "bytes", "returning", "a", "sliced", "bytearray", "." ]
37e7fd1a9eed1a9eab55ac43f62da98f10970675
https://github.com/h2non/filetype.py/blob/37e7fd1a9eed1a9eab55ac43f62da98f10970675/filetype/utils.py#L38-L72
train
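get_bytes above normalizes several input shapes (readable object, path string, bytes, bytearray, memoryview) down to at most the first 262 signature bytes. A condensed, Python 3-flavored sketch of the same duck-typing dispatch (the original also handles Python 2 paths and returns a list for memoryview input):

import io

_NUM_SIGNATURE_BYTES = 262


def get_bytes(obj):
    """Return up to the first 262 bytes of obj, whatever shape it arrives in."""
    if hasattr(obj, 'read'):                     # readable file-like object
        obj = obj.read(_NUM_SIGNATURE_BYTES)
    if isinstance(obj, str):                     # treat strings as file paths
        with open(obj, 'rb') as fp:
            obj = fp.read(_NUM_SIGNATURE_BYTES)
    if isinstance(obj, memoryview):
        obj = obj.tobytes()
    if isinstance(obj, (bytes, bytearray)):
        return obj[:_NUM_SIGNATURE_BYTES]
    raise TypeError('Unsupported type as file input: %s' % type(obj))


print(len(get_bytes(io.BytesIO(b'\xff' * 1000))))   # 262
print(get_bytes(bytearray(b'GIF89a')))               # bytearray(b'GIF89a')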
h2non/filetype.py
filetype/filetype.py
get_type
def get_type(mime=None, ext=None): """ Returns the file type instance searching by MIME type or file extension. Args: ext: file extension string. E.g: jpg, png, mp4, mp3 mime: MIME string. E.g: image/jpeg, video/mpeg Returns: The matched file type instance. Otherwise None. """ for kind in types: if kind.extension is ext or kind.mime is mime: return kind return None
python
def get_type(mime=None, ext=None): """ Returns the file type instance searching by MIME type or file extension. Args: ext: file extension string. E.g: jpg, png, mp4, mp3 mime: MIME string. E.g: image/jpeg, video/mpeg Returns: The matched file type instance. Otherwise None. """ for kind in types: if kind.extension is ext or kind.mime is mime: return kind return None
[ "def", "get_type", "(", "mime", "=", "None", ",", "ext", "=", "None", ")", ":", "for", "kind", "in", "types", ":", "if", "kind", ".", "extension", "is", "ext", "or", "kind", ".", "mime", "is", "mime", ":", "return", "kind", "return", "None" ]
Returns the file type instance searching by MIME type or file extension. Args: ext: file extension string. E.g: jpg, png, mp4, mp3 mime: MIME string. E.g: image/jpeg, video/mpeg Returns: The matched file type instance. Otherwise None.
[ "Returns", "the", "file", "type", "instance", "searching", "by", "MIME", "type", "or", "file", "extension", "." ]
37e7fd1a9eed1a9eab55ac43f62da98f10970675
https://github.com/h2non/filetype.py/blob/37e7fd1a9eed1a9eab55ac43f62da98f10970675/filetype/filetype.py#L67-L82
train
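One thing to note in get_type above: the lookup compares with "is" rather than "==", which relies on the interpreter reusing identical string objects for the extension and MIME literals involved. A more defensive, equality-based version of the same lookup over a toy type table:

class FileType(object):
    def __init__(self, extension, mime):
        self.extension = extension
        self.mime = mime


TYPES = [FileType('jpg', 'image/jpeg'), FileType('png', 'image/png')]


def get_type(mime=None, ext=None):
    for kind in TYPES:
        if kind.extension == ext or kind.mime == mime:
            return kind
    return None


print(get_type(ext='png').mime)               # image/png
print(get_type(mime='image/jpeg').extension)  # jpg
print(get_type(ext='mp4'))                    # None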
python-beaver/python-beaver
beaver/worker/tail.py
Tail.open
def open(self, encoding=None): """Opens the file with the appropriate call""" try: if IS_GZIPPED_FILE.search(self._filename): _file = gzip.open(self._filename, 'rb') else: if encoding: _file = io.open(self._filename, 'r', encoding=encoding, errors='replace') elif self._encoding: _file = io.open(self._filename, 'r', encoding=self._encoding, errors='replace') else: _file = io.open(self._filename, 'r', errors='replace') except IOError, e: self._log_warning(str(e)) _file = None self.close() return _file
python
def open(self, encoding=None): """Opens the file with the appropriate call""" try: if IS_GZIPPED_FILE.search(self._filename): _file = gzip.open(self._filename, 'rb') else: if encoding: _file = io.open(self._filename, 'r', encoding=encoding, errors='replace') elif self._encoding: _file = io.open(self._filename, 'r', encoding=self._encoding, errors='replace') else: _file = io.open(self._filename, 'r', errors='replace') except IOError, e: self._log_warning(str(e)) _file = None self.close() return _file
[ "def", "open", "(", "self", ",", "encoding", "=", "None", ")", ":", "try", ":", "if", "IS_GZIPPED_FILE", ".", "search", "(", "self", ".", "_filename", ")", ":", "_file", "=", "gzip", ".", "open", "(", "self", ".", "_filename", ",", "'rb'", ")", "else", ":", "if", "encoding", ":", "_file", "=", "io", ".", "open", "(", "self", ".", "_filename", ",", "'r'", ",", "encoding", "=", "encoding", ",", "errors", "=", "'replace'", ")", "elif", "self", ".", "_encoding", ":", "_file", "=", "io", ".", "open", "(", "self", ".", "_filename", ",", "'r'", ",", "encoding", "=", "self", ".", "_encoding", ",", "errors", "=", "'replace'", ")", "else", ":", "_file", "=", "io", ".", "open", "(", "self", ".", "_filename", ",", "'r'", ",", "errors", "=", "'replace'", ")", "except", "IOError", ",", "e", ":", "self", ".", "_log_warning", "(", "str", "(", "e", ")", ")", "_file", "=", "None", "self", ".", "close", "(", ")", "return", "_file" ]
Opens the file with the appropriate call
[ "Opens", "the", "file", "with", "the", "appropriate", "call" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L79-L96
train
python-beaver/python-beaver
beaver/worker/tail.py
Tail.close
def close(self): """Closes all currently open file pointers""" if not self.active: return self.active = False if self._file: self._file.close() self._sincedb_update_position(force_update=True) if self._current_event: event = '\n'.join(self._current_event) self._current_event.clear() self._callback_wrapper([event])
python
def close(self): """Closes all currently open file pointers""" if not self.active: return self.active = False if self._file: self._file.close() self._sincedb_update_position(force_update=True) if self._current_event: event = '\n'.join(self._current_event) self._current_event.clear() self._callback_wrapper([event])
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "active", ":", "return", "self", ".", "active", "=", "False", "if", "self", ".", "_file", ":", "self", ".", "_file", ".", "close", "(", ")", "self", ".", "_sincedb_update_position", "(", "force_update", "=", "True", ")", "if", "self", ".", "_current_event", ":", "event", "=", "'\\n'", ".", "join", "(", "self", ".", "_current_event", ")", "self", ".", "_current_event", ".", "clear", "(", ")", "self", ".", "_callback_wrapper", "(", "[", "event", "]", ")" ]
Closes all currently open file pointers
[ "Closes", "all", "currently", "open", "file", "pointers" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L98-L111
train
python-beaver/python-beaver
beaver/worker/tail.py
Tail._ensure_file_is_good
def _ensure_file_is_good(self, current_time): """Every N seconds, ensures that the file we are tailing is the file we expect to be tailing""" if self._last_file_mapping_update and current_time - self._last_file_mapping_update <= self._stat_interval: return self._last_file_mapping_update = time.time() try: st = os.stat(self._filename) except EnvironmentError, err: if err.errno == errno.ENOENT: self._log_info('file removed') self.close() return raise fid = self.get_file_id(st) if fid != self._fid: self._log_info('file rotated') self.close() elif self._file.tell() > st.st_size: if st.st_size == 0 and self._ignore_truncate: self._logger.info("[{0}] - file size is 0 {1}. ".format(fid, self._filename) + "If you use another tool (i.e. logrotate) to truncate " + "the file, your application may continue to write to " + "the offset it last wrote later. In such a case, we'd " + "better do nothing here") return self._log_info('file truncated') self._update_file(seek_to_end=False) elif REOPEN_FILES: self._log_debug('file reloaded (non-linux)') position = self._file.tell() self._update_file(seek_to_end=False) if self.active: self._file.seek(position, os.SEEK_SET)
python
def _ensure_file_is_good(self, current_time): """Every N seconds, ensures that the file we are tailing is the file we expect to be tailing""" if self._last_file_mapping_update and current_time - self._last_file_mapping_update <= self._stat_interval: return self._last_file_mapping_update = time.time() try: st = os.stat(self._filename) except EnvironmentError, err: if err.errno == errno.ENOENT: self._log_info('file removed') self.close() return raise fid = self.get_file_id(st) if fid != self._fid: self._log_info('file rotated') self.close() elif self._file.tell() > st.st_size: if st.st_size == 0 and self._ignore_truncate: self._logger.info("[{0}] - file size is 0 {1}. ".format(fid, self._filename) + "If you use another tool (i.e. logrotate) to truncate " + "the file, your application may continue to write to " + "the offset it last wrote later. In such a case, we'd " + "better do nothing here") return self._log_info('file truncated') self._update_file(seek_to_end=False) elif REOPEN_FILES: self._log_debug('file reloaded (non-linux)') position = self._file.tell() self._update_file(seek_to_end=False) if self.active: self._file.seek(position, os.SEEK_SET)
[ "def", "_ensure_file_is_good", "(", "self", ",", "current_time", ")", ":", "if", "self", ".", "_last_file_mapping_update", "and", "current_time", "-", "self", ".", "_last_file_mapping_update", "<=", "self", ".", "_stat_interval", ":", "return", "self", ".", "_last_file_mapping_update", "=", "time", ".", "time", "(", ")", "try", ":", "st", "=", "os", ".", "stat", "(", "self", ".", "_filename", ")", "except", "EnvironmentError", ",", "err", ":", "if", "err", ".", "errno", "==", "errno", ".", "ENOENT", ":", "self", ".", "_log_info", "(", "'file removed'", ")", "self", ".", "close", "(", ")", "return", "raise", "fid", "=", "self", ".", "get_file_id", "(", "st", ")", "if", "fid", "!=", "self", ".", "_fid", ":", "self", ".", "_log_info", "(", "'file rotated'", ")", "self", ".", "close", "(", ")", "elif", "self", ".", "_file", ".", "tell", "(", ")", ">", "st", ".", "st_size", ":", "if", "st", ".", "st_size", "==", "0", "and", "self", ".", "_ignore_truncate", ":", "self", ".", "_logger", ".", "info", "(", "\"[{0}] - file size is 0 {1}. \"", ".", "format", "(", "fid", ",", "self", ".", "_filename", ")", "+", "\"If you use another tool (i.e. logrotate) to truncate \"", "+", "\"the file, your application may continue to write to \"", "+", "\"the offset it last wrote later. In such a case, we'd \"", "+", "\"better do nothing here\"", ")", "return", "self", ".", "_log_info", "(", "'file truncated'", ")", "self", ".", "_update_file", "(", "seek_to_end", "=", "False", ")", "elif", "REOPEN_FILES", ":", "self", ".", "_log_debug", "(", "'file reloaded (non-linux)'", ")", "position", "=", "self", ".", "_file", ".", "tell", "(", ")", "self", ".", "_update_file", "(", "seek_to_end", "=", "False", ")", "if", "self", ".", "active", ":", "self", ".", "_file", ".", "seek", "(", "position", ",", "os", ".", "SEEK_SET", ")" ]
Every N seconds, ensures that the file we are tailing is the file we expect to be tailing
[ "Every", "N", "seconds", "ensures", "that", "the", "file", "we", "are", "tailing", "is", "the", "file", "we", "expect", "to", "be", "tailing" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L197-L232
train
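Rotation detection in the tailer above hinges on a stable file id derived from stat(): if the (device, inode) pair behind a path changes, the handle we hold points at a rotated or replaced file. A self-contained sketch of that check (the id format mirrors the one beaver appears to use, but treat it as illustrative):

import os
import tempfile


def get_file_id(st):
    # device + inode uniquely identify the underlying file on POSIX systems
    return '%xg%x' % (st.st_dev, st.st_ino)


def rotated(path, known_fid):
    """True if the file currently at `path` is not the one we opened."""
    try:
        st = os.stat(path)
    except OSError:
        return True                     # removed counts as "not the same file"
    return get_file_id(st) != known_fid


with tempfile.NamedTemporaryFile(delete=False) as tmp:
    path = tmp.name
fid = get_file_id(os.stat(path))
print(rotated(path, fid))               # False: same inode
os.remove(path)
open(path, 'w').close()                 # recreate the path -> usually a new inode
print(rotated(path, fid))               # True on most filesystems
os.remove(path)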
python-beaver/python-beaver
beaver/worker/tail.py
Tail._run_pass
def _run_pass(self): """Read lines from a file and performs a callback against them""" while True: try: data = self._file.read(4096) except IOError, e: if e.errno == errno.ESTALE: self.active = False return False lines = self._buffer_extract(data) if not lines: # Before returning, check if an event (maybe partial) is waiting for too long. if self._current_event and time.time() - self._last_activity > 1: event = '\n'.join(self._current_event) self._current_event.clear() self._callback_wrapper([event]) break self._last_activity = time.time() if self._multiline_regex_after or self._multiline_regex_before: # Multiline is enabled for this file. events = multiline_merge( lines, self._current_event, self._multiline_regex_after, self._multiline_regex_before) else: events = lines if events: self._callback_wrapper(events) if self._sincedb_path: current_line_count = len(lines) self._sincedb_update_position(lines=current_line_count) self._sincedb_update_position()
python
def _run_pass(self): """Read lines from a file and performs a callback against them""" while True: try: data = self._file.read(4096) except IOError, e: if e.errno == errno.ESTALE: self.active = False return False lines = self._buffer_extract(data) if not lines: # Before returning, check if an event (maybe partial) is waiting for too long. if self._current_event and time.time() - self._last_activity > 1: event = '\n'.join(self._current_event) self._current_event.clear() self._callback_wrapper([event]) break self._last_activity = time.time() if self._multiline_regex_after or self._multiline_regex_before: # Multiline is enabled for this file. events = multiline_merge( lines, self._current_event, self._multiline_regex_after, self._multiline_regex_before) else: events = lines if events: self._callback_wrapper(events) if self._sincedb_path: current_line_count = len(lines) self._sincedb_update_position(lines=current_line_count) self._sincedb_update_position()
[ "def", "_run_pass", "(", "self", ")", ":", "while", "True", ":", "try", ":", "data", "=", "self", ".", "_file", ".", "read", "(", "4096", ")", "except", "IOError", ",", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ESTALE", ":", "self", ".", "active", "=", "False", "return", "False", "lines", "=", "self", ".", "_buffer_extract", "(", "data", ")", "if", "not", "lines", ":", "# Before returning, check if an event (maybe partial) is waiting for too long.", "if", "self", ".", "_current_event", "and", "time", ".", "time", "(", ")", "-", "self", ".", "_last_activity", ">", "1", ":", "event", "=", "'\\n'", ".", "join", "(", "self", ".", "_current_event", ")", "self", ".", "_current_event", ".", "clear", "(", ")", "self", ".", "_callback_wrapper", "(", "[", "event", "]", ")", "break", "self", ".", "_last_activity", "=", "time", ".", "time", "(", ")", "if", "self", ".", "_multiline_regex_after", "or", "self", ".", "_multiline_regex_before", ":", "# Multiline is enabled for this file.", "events", "=", "multiline_merge", "(", "lines", ",", "self", ".", "_current_event", ",", "self", ".", "_multiline_regex_after", ",", "self", ".", "_multiline_regex_before", ")", "else", ":", "events", "=", "lines", "if", "events", ":", "self", ".", "_callback_wrapper", "(", "events", ")", "if", "self", ".", "_sincedb_path", ":", "current_line_count", "=", "len", "(", "lines", ")", "self", ".", "_sincedb_update_position", "(", "lines", "=", "current_line_count", ")", "self", ".", "_sincedb_update_position", "(", ")" ]
Read lines from a file and performs a callback against them
[ "Read", "lines", "from", "a", "file", "and", "performs", "a", "callback", "against", "them" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L234-L273
train
python-beaver/python-beaver
beaver/worker/tail.py
Tail._sincedb_init
def _sincedb_init(self): """Initializes the sincedb schema in an sqlite db""" if not self._sincedb_path: return if not os.path.exists(self._sincedb_path): self._log_debug('initializing sincedb sqlite schema') conn = sqlite3.connect(self._sincedb_path, isolation_level=None) conn.execute(""" create table sincedb ( fid text primary key, filename text, position integer default 1 ); """) conn.close()
python
def _sincedb_init(self): """Initializes the sincedb schema in an sqlite db""" if not self._sincedb_path: return if not os.path.exists(self._sincedb_path): self._log_debug('initializing sincedb sqlite schema') conn = sqlite3.connect(self._sincedb_path, isolation_level=None) conn.execute(""" create table sincedb ( fid text primary key, filename text, position integer default 1 ); """) conn.close()
[ "def", "_sincedb_init", "(", "self", ")", ":", "if", "not", "self", ".", "_sincedb_path", ":", "return", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_sincedb_path", ")", ":", "self", ".", "_log_debug", "(", "'initializing sincedb sqlite schema'", ")", "conn", "=", "sqlite3", ".", "connect", "(", "self", ".", "_sincedb_path", ",", "isolation_level", "=", "None", ")", "conn", ".", "execute", "(", "\"\"\"\n create table sincedb (\n fid text primary key,\n filename text,\n position integer default 1\n );\n \"\"\"", ")", "conn", ".", "close", "(", ")" ]
Initializes the sincedb schema in an sqlite db
[ "Initializes", "the", "sincedb", "schema", "in", "an", "sqlite", "db" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L381-L396
train
python-beaver/python-beaver
beaver/worker/tail.py
Tail._sincedb_update_position
def _sincedb_update_position(self, lines=0, force_update=False): """Retrieves the starting position from the sincedb sql db for a given file Returns a boolean representing whether or not it updated the record """ if not self._sincedb_path: return False self._line_count = self._line_count + lines old_count = self._line_count_sincedb lines = self._line_count current_time = int(time.time()) if not force_update: if self._last_sincedb_write and current_time - self._last_sincedb_write <= self._sincedb_write_interval: return False if old_count == lines: return False self._sincedb_init() self._last_sincedb_write = current_time self._log_debug('updating sincedb to {0}'.format(lines)) conn = sqlite3.connect(self._sincedb_path, isolation_level=None) cursor = conn.cursor() query = 'insert or replace into sincedb (fid, filename) values (:fid, :filename);' cursor.execute(query, { 'fid': self._fid, 'filename': self._filename }) query = 'update sincedb set position = :position where fid = :fid and filename = :filename' cursor.execute(query, { 'fid': self._fid, 'filename': self._filename, 'position': lines, }) conn.close() self._line_count_sincedb = lines return True
python
def _sincedb_update_position(self, lines=0, force_update=False): """Retrieves the starting position from the sincedb sql db for a given file Returns a boolean representing whether or not it updated the record """ if not self._sincedb_path: return False self._line_count = self._line_count + lines old_count = self._line_count_sincedb lines = self._line_count current_time = int(time.time()) if not force_update: if self._last_sincedb_write and current_time - self._last_sincedb_write <= self._sincedb_write_interval: return False if old_count == lines: return False self._sincedb_init() self._last_sincedb_write = current_time self._log_debug('updating sincedb to {0}'.format(lines)) conn = sqlite3.connect(self._sincedb_path, isolation_level=None) cursor = conn.cursor() query = 'insert or replace into sincedb (fid, filename) values (:fid, :filename);' cursor.execute(query, { 'fid': self._fid, 'filename': self._filename }) query = 'update sincedb set position = :position where fid = :fid and filename = :filename' cursor.execute(query, { 'fid': self._fid, 'filename': self._filename, 'position': lines, }) conn.close() self._line_count_sincedb = lines return True
[ "def", "_sincedb_update_position", "(", "self", ",", "lines", "=", "0", ",", "force_update", "=", "False", ")", ":", "if", "not", "self", ".", "_sincedb_path", ":", "return", "False", "self", ".", "_line_count", "=", "self", ".", "_line_count", "+", "lines", "old_count", "=", "self", ".", "_line_count_sincedb", "lines", "=", "self", ".", "_line_count", "current_time", "=", "int", "(", "time", ".", "time", "(", ")", ")", "if", "not", "force_update", ":", "if", "self", ".", "_last_sincedb_write", "and", "current_time", "-", "self", ".", "_last_sincedb_write", "<=", "self", ".", "_sincedb_write_interval", ":", "return", "False", "if", "old_count", "==", "lines", ":", "return", "False", "self", ".", "_sincedb_init", "(", ")", "self", ".", "_last_sincedb_write", "=", "current_time", "self", ".", "_log_debug", "(", "'updating sincedb to {0}'", ".", "format", "(", "lines", ")", ")", "conn", "=", "sqlite3", ".", "connect", "(", "self", ".", "_sincedb_path", ",", "isolation_level", "=", "None", ")", "cursor", "=", "conn", ".", "cursor", "(", ")", "query", "=", "'insert or replace into sincedb (fid, filename) values (:fid, :filename);'", "cursor", ".", "execute", "(", "query", ",", "{", "'fid'", ":", "self", ".", "_fid", ",", "'filename'", ":", "self", ".", "_filename", "}", ")", "query", "=", "'update sincedb set position = :position where fid = :fid and filename = :filename'", "cursor", ".", "execute", "(", "query", ",", "{", "'fid'", ":", "self", ".", "_fid", ",", "'filename'", ":", "self", ".", "_filename", ",", "'position'", ":", "lines", ",", "}", ")", "conn", ".", "close", "(", ")", "self", ".", "_line_count_sincedb", "=", "lines", "return", "True" ]
Retrieves the starting position from the sincedb sql db for a given file Returns a boolean representing whether or not it updated the record
[ "Retrieves", "the", "starting", "position", "from", "the", "sincedb", "sql", "db", "for", "a", "given", "file", "Returns", "a", "boolean", "representing", "whether", "or", "not", "it", "updated", "the", "record" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L398-L441
train
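The sincedb methods above persist how far each file has been read. A compact, runnable version of the same sqlite pattern (create the table from the schema shown in _sincedb_init, upsert the position keyed by file id, read it back); the single insert-or-replace here collapses the two statements the original issues:

import sqlite3


def sincedb_open(path=':memory:'):
    conn = sqlite3.connect(path, isolation_level=None)
    conn.execute('create table if not exists sincedb ('
                 ' fid text primary key, filename text, position integer default 1)')
    return conn


def save_position(conn, fid, filename, position):
    conn.execute('insert or replace into sincedb (fid, filename, position) '
                 'values (?, ?, ?)', (fid, filename, position))


def start_position(conn, fid, filename):
    row = conn.execute('select position from sincedb where fid = ? and filename = ?',
                       (fid, filename)).fetchone()
    return row[0] if row else None


conn = sincedb_open()
save_position(conn, 'dead-beef', '/var/log/app.log', 1042)
print(start_position(conn, 'dead-beef', '/var/log/app.log'))   # 1042
print(start_position(conn, 'no-such-fid', '/var/log/app.log')) # None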
python-beaver/python-beaver
beaver/worker/tail.py
Tail._sincedb_start_position
def _sincedb_start_position(self): """Retrieves the starting position from the sincedb sql db for a given file """ if not self._sincedb_path: return None self._sincedb_init() self._log_debug('retrieving start_position from sincedb') conn = sqlite3.connect(self._sincedb_path, isolation_level=None) cursor = conn.cursor() cursor.execute('select position from sincedb where fid = :fid and filename = :filename', { 'fid': self._fid, 'filename': self._filename }) start_position = None for row in cursor.fetchall(): start_position, = row return start_position
python
def _sincedb_start_position(self): """Retrieves the starting position from the sincedb sql db for a given file """ if not self._sincedb_path: return None self._sincedb_init() self._log_debug('retrieving start_position from sincedb') conn = sqlite3.connect(self._sincedb_path, isolation_level=None) cursor = conn.cursor() cursor.execute('select position from sincedb where fid = :fid and filename = :filename', { 'fid': self._fid, 'filename': self._filename }) start_position = None for row in cursor.fetchall(): start_position, = row return start_position
[ "def", "_sincedb_start_position", "(", "self", ")", ":", "if", "not", "self", ".", "_sincedb_path", ":", "return", "None", "self", ".", "_sincedb_init", "(", ")", "self", ".", "_log_debug", "(", "'retrieving start_position from sincedb'", ")", "conn", "=", "sqlite3", ".", "connect", "(", "self", ".", "_sincedb_path", ",", "isolation_level", "=", "None", ")", "cursor", "=", "conn", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'select position from sincedb where fid = :fid and filename = :filename'", ",", "{", "'fid'", ":", "self", ".", "_fid", ",", "'filename'", ":", "self", ".", "_filename", "}", ")", "start_position", "=", "None", "for", "row", "in", "cursor", ".", "fetchall", "(", ")", ":", "start_position", ",", "=", "row", "return", "start_position" ]
Retrieves the starting position from the sincedb sql db for a given file
[ "Retrieves", "the", "starting", "position", "from", "the", "sincedb", "sql", "db", "for", "a", "given", "file" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L443-L463
train
python-beaver/python-beaver
beaver/worker/tail.py
Tail._update_file
def _update_file(self, seek_to_end=True): """Open the file for tailing""" try: self.close() self._file = self.open() except IOError: pass else: if not self._file: return self.active = True try: st = os.stat(self._filename) except EnvironmentError, err: if err.errno == errno.ENOENT: self._log_info('file removed') self.close() fid = self.get_file_id(st) if not self._fid: self._fid = fid if fid != self._fid: self._log_info('file rotated') self.close() elif seek_to_end: self._seek_to_end()
python
def _update_file(self, seek_to_end=True): """Open the file for tailing""" try: self.close() self._file = self.open() except IOError: pass else: if not self._file: return self.active = True try: st = os.stat(self._filename) except EnvironmentError, err: if err.errno == errno.ENOENT: self._log_info('file removed') self.close() fid = self.get_file_id(st) if not self._fid: self._fid = fid if fid != self._fid: self._log_info('file rotated') self.close() elif seek_to_end: self._seek_to_end()
[ "def", "_update_file", "(", "self", ",", "seek_to_end", "=", "True", ")", ":", "try", ":", "self", ".", "close", "(", ")", "self", ".", "_file", "=", "self", ".", "open", "(", ")", "except", "IOError", ":", "pass", "else", ":", "if", "not", "self", ".", "_file", ":", "return", "self", ".", "active", "=", "True", "try", ":", "st", "=", "os", ".", "stat", "(", "self", ".", "_filename", ")", "except", "EnvironmentError", ",", "err", ":", "if", "err", ".", "errno", "==", "errno", ".", "ENOENT", ":", "self", ".", "_log_info", "(", "'file removed'", ")", "self", ".", "close", "(", ")", "fid", "=", "self", ".", "get_file_id", "(", "st", ")", "if", "not", "self", ".", "_fid", ":", "self", ".", "_fid", "=", "fid", "if", "fid", "!=", "self", ".", "_fid", ":", "self", ".", "_log_info", "(", "'file rotated'", ")", "self", ".", "close", "(", ")", "elif", "seek_to_end", ":", "self", ".", "_seek_to_end", "(", ")" ]
Open the file for tailing
[ "Open", "the", "file", "for", "tailing" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L465-L492
train
python-beaver/python-beaver
beaver/worker/tail.py
Tail.tail
def tail(self, fname, encoding, window, position=None): """Read last N lines from file fname.""" if window <= 0: raise ValueError('invalid window %r' % window) encodings = ENCODINGS if encoding: encodings = [encoding] + ENCODINGS for enc in encodings: try: f = self.open(encoding=enc) if f: return self.tail_read(f, window, position=position) return False except IOError, err: if err.errno == errno.ENOENT: return [] raise except UnicodeDecodeError: pass
python
def tail(self, fname, encoding, window, position=None): """Read last N lines from file fname.""" if window <= 0: raise ValueError('invalid window %r' % window) encodings = ENCODINGS if encoding: encodings = [encoding] + ENCODINGS for enc in encodings: try: f = self.open(encoding=enc) if f: return self.tail_read(f, window, position=position) return False except IOError, err: if err.errno == errno.ENOENT: return [] raise except UnicodeDecodeError: pass
[ "def", "tail", "(", "self", ",", "fname", ",", "encoding", ",", "window", ",", "position", "=", "None", ")", ":", "if", "window", "<=", "0", ":", "raise", "ValueError", "(", "'invalid window %r'", "%", "window", ")", "encodings", "=", "ENCODINGS", "if", "encoding", ":", "encodings", "=", "[", "encoding", "]", "+", "ENCODINGS", "for", "enc", "in", "encodings", ":", "try", ":", "f", "=", "self", ".", "open", "(", "encoding", "=", "enc", ")", "if", "f", ":", "return", "self", ".", "tail_read", "(", "f", ",", "window", ",", "position", "=", "position", ")", "return", "False", "except", "IOError", ",", "err", ":", "if", "err", ".", "errno", "==", "errno", ".", "ENOENT", ":", "return", "[", "]", "raise", "except", "UnicodeDecodeError", ":", "pass" ]
Read last N lines from file fname.
[ "Read", "last", "N", "lines", "from", "file", "fname", "." ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L494-L515
train
python-beaver/python-beaver
beaver/transports/__init__.py
create_transport
def create_transport(beaver_config, logger): """Creates and returns a transport object""" transport_str = beaver_config.get('transport') if '.' not in transport_str: # allow simple names like 'redis' to load a beaver built-in transport module_path = 'beaver.transports.%s_transport' % transport_str.lower() class_name = '%sTransport' % transport_str.title() else: # allow dotted path names to load a custom transport class try: module_path, class_name = transport_str.rsplit('.', 1) except ValueError: raise Exception('Invalid transport {0}'.format(beaver_config.get('transport'))) _module = __import__(module_path, globals(), locals(), class_name, -1) transport_class = getattr(_module, class_name) transport = transport_class(beaver_config=beaver_config, logger=logger) return transport
python
def create_transport(beaver_config, logger): """Creates and returns a transport object""" transport_str = beaver_config.get('transport') if '.' not in transport_str: # allow simple names like 'redis' to load a beaver built-in transport module_path = 'beaver.transports.%s_transport' % transport_str.lower() class_name = '%sTransport' % transport_str.title() else: # allow dotted path names to load a custom transport class try: module_path, class_name = transport_str.rsplit('.', 1) except ValueError: raise Exception('Invalid transport {0}'.format(beaver_config.get('transport'))) _module = __import__(module_path, globals(), locals(), class_name, -1) transport_class = getattr(_module, class_name) transport = transport_class(beaver_config=beaver_config, logger=logger) return transport
[ "def", "create_transport", "(", "beaver_config", ",", "logger", ")", ":", "transport_str", "=", "beaver_config", ".", "get", "(", "'transport'", ")", "if", "'.'", "not", "in", "transport_str", ":", "# allow simple names like 'redis' to load a beaver built-in transport", "module_path", "=", "'beaver.transports.%s_transport'", "%", "transport_str", ".", "lower", "(", ")", "class_name", "=", "'%sTransport'", "%", "transport_str", ".", "title", "(", ")", "else", ":", "# allow dotted path names to load a custom transport class", "try", ":", "module_path", ",", "class_name", "=", "transport_str", ".", "rsplit", "(", "'.'", ",", "1", ")", "except", "ValueError", ":", "raise", "Exception", "(", "'Invalid transport {0}'", ".", "format", "(", "beaver_config", ".", "get", "(", "'transport'", ")", ")", ")", "_module", "=", "__import__", "(", "module_path", ",", "globals", "(", ")", ",", "locals", "(", ")", ",", "class_name", ",", "-", "1", ")", "transport_class", "=", "getattr", "(", "_module", ",", "class_name", ")", "transport", "=", "transport_class", "(", "beaver_config", "=", "beaver_config", ",", "logger", "=", "logger", ")", "return", "transport" ]
Creates and returns a transport object
[ "Creates", "and", "returns", "a", "transport", "object" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/__init__.py#L4-L22
train
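create_transport above turns a configuration string into a class: a short name maps to a built-in beaver transport module, while a dotted path loads a custom class. The runnable sketch below shows the same dotted-path idea with importlib against standard-library classes, purely for illustration (the original uses the older __import__ form and beaver-specific name mangling for short names):

import importlib


def load_class(spec, default_module='collections'):
    """Resolve 'OrderedDict' or 'json.decoder.JSONDecoder' style specs to a class."""
    if '.' in spec:
        module_path, class_name = spec.rsplit('.', 1)
    else:
        module_path, class_name = default_module, spec
    return getattr(importlib.import_module(module_path), class_name)


print(load_class('OrderedDict'))                 # <class 'collections.OrderedDict'>
print(load_class('json.decoder.JSONDecoder'))    # <class 'json.decoder.JSONDecoder'>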
python-beaver/python-beaver
beaver/worker/tail_manager.py
TailManager.update_files
def update_files(self): """Ensures all files are properly loaded. Detects new files, file removals, file rotation, and truncation. On non-linux platforms, it will also manually reload the file for tailing. Note that this hack is necessary because EOF is cached on BSD systems. """ if self._update_time and int(time.time()) - self._update_time < self._discover_interval: return self._update_time = int(time.time()) possible_files = [] files = [] if len(self._beaver_config.get('globs')) > 0: extend_files = files.extend for name, exclude in self._beaver_config.get('globs').items(): globbed = [os.path.realpath(filename) for filename in eglob(name, exclude)] extend_files(globbed) self._beaver_config.addglob(name, globbed) self._callback(("addglob", (name, globbed))) else: append_files = files.append for name in self.listdir(): append_files(os.path.realpath(os.path.join(self._folder, name))) for absname in files: try: st = os.stat(absname) except EnvironmentError, err: if err.errno != errno.ENOENT: raise else: if not stat.S_ISREG(st.st_mode): continue append_possible_files = possible_files.append fid = self.get_file_id(st) append_possible_files((fid, absname)) # add new ones new_files = [fname for fid, fname in possible_files if fid not in self._tails] self.watch(new_files)
python
def update_files(self): """Ensures all files are properly loaded. Detects new files, file removals, file rotation, and truncation. On non-linux platforms, it will also manually reload the file for tailing. Note that this hack is necessary because EOF is cached on BSD systems. """ if self._update_time and int(time.time()) - self._update_time < self._discover_interval: return self._update_time = int(time.time()) possible_files = [] files = [] if len(self._beaver_config.get('globs')) > 0: extend_files = files.extend for name, exclude in self._beaver_config.get('globs').items(): globbed = [os.path.realpath(filename) for filename in eglob(name, exclude)] extend_files(globbed) self._beaver_config.addglob(name, globbed) self._callback(("addglob", (name, globbed))) else: append_files = files.append for name in self.listdir(): append_files(os.path.realpath(os.path.join(self._folder, name))) for absname in files: try: st = os.stat(absname) except EnvironmentError, err: if err.errno != errno.ENOENT: raise else: if not stat.S_ISREG(st.st_mode): continue append_possible_files = possible_files.append fid = self.get_file_id(st) append_possible_files((fid, absname)) # add new ones new_files = [fname for fid, fname in possible_files if fid not in self._tails] self.watch(new_files)
[ "def", "update_files", "(", "self", ")", ":", "if", "self", ".", "_update_time", "and", "int", "(", "time", ".", "time", "(", ")", ")", "-", "self", ".", "_update_time", "<", "self", ".", "_discover_interval", ":", "return", "self", ".", "_update_time", "=", "int", "(", "time", ".", "time", "(", ")", ")", "possible_files", "=", "[", "]", "files", "=", "[", "]", "if", "len", "(", "self", ".", "_beaver_config", ".", "get", "(", "'globs'", ")", ")", ">", "0", ":", "extend_files", "=", "files", ".", "extend", "for", "name", ",", "exclude", "in", "self", ".", "_beaver_config", ".", "get", "(", "'globs'", ")", ".", "items", "(", ")", ":", "globbed", "=", "[", "os", ".", "path", ".", "realpath", "(", "filename", ")", "for", "filename", "in", "eglob", "(", "name", ",", "exclude", ")", "]", "extend_files", "(", "globbed", ")", "self", ".", "_beaver_config", ".", "addglob", "(", "name", ",", "globbed", ")", "self", ".", "_callback", "(", "(", "\"addglob\"", ",", "(", "name", ",", "globbed", ")", ")", ")", "else", ":", "append_files", "=", "files", ".", "append", "for", "name", "in", "self", ".", "listdir", "(", ")", ":", "append_files", "(", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_folder", ",", "name", ")", ")", ")", "for", "absname", "in", "files", ":", "try", ":", "st", "=", "os", ".", "stat", "(", "absname", ")", "except", "EnvironmentError", ",", "err", ":", "if", "err", ".", "errno", "!=", "errno", ".", "ENOENT", ":", "raise", "else", ":", "if", "not", "stat", ".", "S_ISREG", "(", "st", ".", "st_mode", ")", ":", "continue", "append_possible_files", "=", "possible_files", ".", "append", "fid", "=", "self", ".", "get_file_id", "(", "st", ")", "append_possible_files", "(", "(", "fid", ",", "absname", ")", ")", "# add new ones", "new_files", "=", "[", "fname", "for", "fid", ",", "fname", "in", "possible_files", "if", "fid", "not", "in", "self", ".", "_tails", "]", "self", ".", "watch", "(", "new_files", ")" ]
Ensures all files are properly loaded. Detects new files, file removals, file rotation, and truncation. On non-linux platforms, it will also manually reload the file for tailing. Note that this hack is necessary because EOF is cached on BSD systems.
[ "Ensures", "all", "files", "are", "properly", "loaded", ".", "Detects", "new", "files", "file", "removals", "file", "rotation", "and", "truncation", ".", "On", "non", "-", "linux", "platforms", "it", "will", "also", "manually", "reload", "the", "file", "for", "tailing", ".", "Note", "that", "this", "hack", "is", "necessary", "because", "EOF", "is", "cached", "on", "BSD", "systems", "." ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail_manager.py#L85-L125
train
python-beaver/python-beaver
beaver/worker/tail_manager.py
TailManager.close
def close(self, signalnum=None, frame=None): self._running = False """Closes all currently open Tail objects""" self._log_debug("Closing all tail objects") self._active = False for fid in self._tails: self._tails[fid].close() for n in range(0,self._number_of_consumer_processes): if self._proc[n] is not None and self._proc[n].is_alive(): self._logger.debug("Terminate Process: " + str(n)) self._proc[n].terminate() self._proc[n].join()
python
def close(self, signalnum=None, frame=None): self._running = False """Closes all currently open Tail objects""" self._log_debug("Closing all tail objects") self._active = False for fid in self._tails: self._tails[fid].close() for n in range(0,self._number_of_consumer_processes): if self._proc[n] is not None and self._proc[n].is_alive(): self._logger.debug("Terminate Process: " + str(n)) self._proc[n].terminate() self._proc[n].join()
[ "def", "close", "(", "self", ",", "signalnum", "=", "None", ",", "frame", "=", "None", ")", ":", "self", ".", "_running", "=", "False", "self", ".", "_log_debug", "(", "\"Closing all tail objects\"", ")", "self", ".", "_active", "=", "False", "for", "fid", "in", "self", ".", "_tails", ":", "self", ".", "_tails", "[", "fid", "]", ".", "close", "(", ")", "for", "n", "in", "range", "(", "0", ",", "self", ".", "_number_of_consumer_processes", ")", ":", "if", "self", ".", "_proc", "[", "n", "]", "is", "not", "None", "and", "self", ".", "_proc", "[", "n", "]", ".", "is_alive", "(", ")", ":", "self", ".", "_logger", ".", "debug", "(", "\"Terminate Process: \"", "+", "str", "(", "n", ")", ")", "self", ".", "_proc", "[", "n", "]", ".", "terminate", "(", ")", "self", ".", "_proc", "[", "n", "]", ".", "join", "(", ")" ]
Closes all currently open Tail objects
[ "Closes", "all", "currently", "open", "Tail", "objects" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail_manager.py#L127-L138
train
python-beaver/python-beaver
beaver/utils.py
expand_paths
def expand_paths(path): """When given a path with brackets, expands it to return all permutations of the path with expanded brackets, similar to ant. >>> expand_paths('../{a,b}/{c,d}') ['../a/c', '../a/d', '../b/c', '../b/d'] >>> expand_paths('../{a,b}/{a,b}.py') ['../a/a.py', '../a/b.py', '../b/a.py', '../b/b.py'] >>> expand_paths('../{a,b,c}/{a,b,c}') ['../a/a', '../a/b', '../a/c', '../b/a', '../b/b', '../b/c', '../c/a', '../c/b', '../c/c'] >>> expand_paths('test') ['test'] >>> expand_paths('') """ pr = itertools.product parts = MAGIC_BRACKETS.findall(path) if not path: return if not parts: return [path] permutations = [[(p[0], i, 1) for i in p[1].split(',')] for p in parts] return [_replace_all(path, i) for i in pr(*permutations)]
python
def expand_paths(path): """When given a path with brackets, expands it to return all permutations of the path with expanded brackets, similar to ant. >>> expand_paths('../{a,b}/{c,d}') ['../a/c', '../a/d', '../b/c', '../b/d'] >>> expand_paths('../{a,b}/{a,b}.py') ['../a/a.py', '../a/b.py', '../b/a.py', '../b/b.py'] >>> expand_paths('../{a,b,c}/{a,b,c}') ['../a/a', '../a/b', '../a/c', '../b/a', '../b/b', '../b/c', '../c/a', '../c/b', '../c/c'] >>> expand_paths('test') ['test'] >>> expand_paths('') """ pr = itertools.product parts = MAGIC_BRACKETS.findall(path) if not path: return if not parts: return [path] permutations = [[(p[0], i, 1) for i in p[1].split(',')] for p in parts] return [_replace_all(path, i) for i in pr(*permutations)]
[ "def", "expand_paths", "(", "path", ")", ":", "pr", "=", "itertools", ".", "product", "parts", "=", "MAGIC_BRACKETS", ".", "findall", "(", "path", ")", "if", "not", "path", ":", "return", "if", "not", "parts", ":", "return", "[", "path", "]", "permutations", "=", "[", "[", "(", "p", "[", "0", "]", ",", "i", ",", "1", ")", "for", "i", "in", "p", "[", "1", "]", ".", "split", "(", "','", ")", "]", "for", "p", "in", "parts", "]", "return", "[", "_replace_all", "(", "path", ",", "i", ")", "for", "i", "in", "pr", "(", "*", "permutations", ")", "]" ]
When given a path with brackets, expands it to return all permutations of the path with expanded brackets, similar to ant. >>> expand_paths('../{a,b}/{c,d}') ['../a/c', '../a/d', '../b/c', '../b/d'] >>> expand_paths('../{a,b}/{a,b}.py') ['../a/a.py', '../a/b.py', '../b/a.py', '../b/b.py'] >>> expand_paths('../{a,b,c}/{a,b,c}') ['../a/a', '../a/b', '../a/c', '../b/a', '../b/b', '../b/c', '../c/a', '../c/b', '../c/c'] >>> expand_paths('test') ['test'] >>> expand_paths('')
[ "When", "given", "a", "path", "with", "brackets", "expands", "it", "to", "return", "all", "permutations", "of", "the", "path", "with", "expanded", "brackets", "similar", "to", "ant", "." ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/utils.py#L147-L171
train
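The expansion above hinges on a regex that captures each {a,b} group and an itertools.product over the comma-separated alternatives. beaver's MAGIC_BRACKETS pattern and _replace_all helper are not shown in this entry, so the following self-contained sketch re-implements the same idea with simpler stand-ins.

import itertools
import re

BRACKETS = re.compile(r'\{([^}]*)\}')

def expand_paths_sketch(path):
    parts = BRACKETS.findall(path)          # e.g. ['a,b', 'c,d']
    if not path:
        return
    if not parts:
        return [path]
    alternatives = [p.split(',') for p in parts]
    expanded = []
    for combo in itertools.product(*alternatives):
        out = path
        for choice in combo:
            out = BRACKETS.sub(choice, out, count=1)   # replace one brace group per choice
        expanded.append(out)
    return expanded

print(expand_paths_sketch('../{a,b}/{c,d}'))
# ['../a/c', '../a/d', '../b/c', '../b/d']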
python-beaver/python-beaver
beaver/utils.py
multiline_merge
def multiline_merge(lines, current_event, re_after, re_before): """ Merge multi-line events based. Some event (like Python trackback or Java stracktrace) spawn on multiple line. This method will merge them using two regular expression: regex_after and regex_before. If a line match re_after, it will be merged with next line. If a line match re_before, it will be merged with previous line. This function return a list of complet event. Note that because we don't know if an event is complet before another new event start, the last event will not be returned but stored in current_event. You should pass the same current_event to successive call to multiline_merge. current_event is a list of lines whose belong to the same event. """ events = [] for line in lines: if re_before and re_before.match(line): current_event.append(line) elif re_after and current_event and re_after.match(current_event[-1]): current_event.append(line) else: if current_event: events.append('\n'.join(current_event)) current_event.clear() current_event.append(line) return events
python
def multiline_merge(lines, current_event, re_after, re_before): """ Merge multi-line events based. Some event (like Python trackback or Java stracktrace) spawn on multiple line. This method will merge them using two regular expression: regex_after and regex_before. If a line match re_after, it will be merged with next line. If a line match re_before, it will be merged with previous line. This function return a list of complet event. Note that because we don't know if an event is complet before another new event start, the last event will not be returned but stored in current_event. You should pass the same current_event to successive call to multiline_merge. current_event is a list of lines whose belong to the same event. """ events = [] for line in lines: if re_before and re_before.match(line): current_event.append(line) elif re_after and current_event and re_after.match(current_event[-1]): current_event.append(line) else: if current_event: events.append('\n'.join(current_event)) current_event.clear() current_event.append(line) return events
[ "def", "multiline_merge", "(", "lines", ",", "current_event", ",", "re_after", ",", "re_before", ")", ":", "events", "=", "[", "]", "for", "line", "in", "lines", ":", "if", "re_before", "and", "re_before", ".", "match", "(", "line", ")", ":", "current_event", ".", "append", "(", "line", ")", "elif", "re_after", "and", "current_event", "and", "re_after", ".", "match", "(", "current_event", "[", "-", "1", "]", ")", ":", "current_event", ".", "append", "(", "line", ")", "else", ":", "if", "current_event", ":", "events", ".", "append", "(", "'\\n'", ".", "join", "(", "current_event", ")", ")", "current_event", ".", "clear", "(", ")", "current_event", ".", "append", "(", "line", ")", "return", "events" ]
Merge multi-line events into single events. Some events (like a Python traceback or a Java stack trace) span multiple lines. This method merges them using two regular expressions: regex_after and regex_before. If a line matches re_after, it is merged with the next line. If a line matches re_before, it is merged with the previous line. This function returns a list of complete events. Note that because we don't know whether an event is complete before another new event starts, the last event is not returned but stored in current_event. You should pass the same current_event to successive calls to multiline_merge. current_event is a list of lines that belong to the same event.
[ "Merge", "multi", "-", "line", "events", "based", "." ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/utils.py#L180-L210
train
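The merge keeps an open buffer (current_event) and only flushes it when a line starts a new event. The sketch below paraphrases that logic in self-contained form and merges a Python traceback using a "before" pattern that matches continuation lines; it is an illustration, not the installed beaver API.

import re

def merge_sketch(lines, current_event, re_after, re_before):
    events = []
    for line in lines:
        if re_before and re_before.match(line):
            current_event.append(line)                      # belongs with the previous line
        elif re_after and current_event and re_after.match(current_event[-1]):
            current_event.append(line)                      # previous line asked to be continued
        else:
            if current_event:
                events.append('\n'.join(current_event))     # flush the finished event
                current_event.clear()
            current_event.append(line)
    return events

re_before = re.compile(r'^(\s+|Traceback)')   # traceback and indented lines continue the event
buf = []
lines = [
    'ERROR boom',
    'Traceback (most recent call last):',
    '  File "x.py", line 1, in <module>',
    'INFO next event',
]
print(merge_sketch(lines, buf, None, re_before))
# ['ERROR boom\nTraceback (most recent call last):\n  File "x.py", line 1, in <module>']
# buf still holds the unfinished last event: ['INFO next event']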
python-beaver/python-beaver
beaver/ssh_tunnel.py
create_ssh_tunnel
def create_ssh_tunnel(beaver_config, logger=None): """Returns a BeaverSshTunnel object if the current config requires us to""" if not beaver_config.use_ssh_tunnel(): return None logger.info("Proxying transport using through local ssh tunnel") return BeaverSshTunnel(beaver_config, logger=logger)
python
def create_ssh_tunnel(beaver_config, logger=None): """Returns a BeaverSshTunnel object if the current config requires us to""" if not beaver_config.use_ssh_tunnel(): return None logger.info("Proxying transport using through local ssh tunnel") return BeaverSshTunnel(beaver_config, logger=logger)
[ "def", "create_ssh_tunnel", "(", "beaver_config", ",", "logger", "=", "None", ")", ":", "if", "not", "beaver_config", ".", "use_ssh_tunnel", "(", ")", ":", "return", "None", "logger", ".", "info", "(", "\"Proxying transport using through local ssh tunnel\"", ")", "return", "BeaverSshTunnel", "(", "beaver_config", ",", "logger", "=", "logger", ")" ]
Returns a BeaverSshTunnel object if the current config requires us to use an SSH tunnel
[ "Returns", "a", "BeaverSshTunnel", "object", "if", "the", "current", "config", "requires", "us", "to" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/ssh_tunnel.py#L10-L16
train
python-beaver/python-beaver
beaver/ssh_tunnel.py
BeaverSubprocess.poll
def poll(self): """Poll attached subprocess until it is available""" if self._subprocess is not None: self._subprocess.poll() time.sleep(self._beaver_config.get('subprocess_poll_sleep'))
python
def poll(self): """Poll attached subprocess until it is available""" if self._subprocess is not None: self._subprocess.poll() time.sleep(self._beaver_config.get('subprocess_poll_sleep'))
[ "def", "poll", "(", "self", ")", ":", "if", "self", ".", "_subprocess", "is", "not", "None", ":", "self", ".", "_subprocess", ".", "poll", "(", ")", "time", ".", "sleep", "(", "self", ".", "_beaver_config", ".", "get", "(", "'subprocess_poll_sleep'", ")", ")" ]
Poll attached subprocess until it is available
[ "Poll", "attached", "subprocess", "until", "it", "is", "available" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/ssh_tunnel.py#L43-L48
train
python-beaver/python-beaver
beaver/ssh_tunnel.py
BeaverSubprocess.close
def close(self): """Close child subprocess""" if self._subprocess is not None: os.killpg(self._subprocess.pid, signal.SIGTERM) self._subprocess = None
python
def close(self): """Close child subprocess""" if self._subprocess is not None: os.killpg(self._subprocess.pid, signal.SIGTERM) self._subprocess = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_subprocess", "is", "not", "None", ":", "os", ".", "killpg", "(", "self", ".", "_subprocess", ".", "pid", ",", "signal", ".", "SIGTERM", ")", "self", ".", "_subprocess", "=", "None" ]
Close child subprocess
[ "Close", "child", "subprocess" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/ssh_tunnel.py#L50-L54
train
python-beaver/python-beaver
beaver/unicode_dammit.py
_to_unicode
def _to_unicode(self, data, encoding, errors='strict'): '''Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding, errors) return newdata
python
def _to_unicode(self, data, encoding, errors='strict'): '''Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding, errors) return newdata
[ "def", "_to_unicode", "(", "self", ",", "data", ",", "encoding", ",", "errors", "=", "'strict'", ")", ":", "# strip Byte Order Mark (if present)", "if", "(", "len", "(", "data", ")", ">=", "4", ")", "and", "(", "data", "[", ":", "2", "]", "==", "'\\xfe\\xff'", ")", "and", "(", "data", "[", "2", ":", "4", "]", "!=", "'\\x00\\x00'", ")", ":", "encoding", "=", "'utf-16be'", "data", "=", "data", "[", "2", ":", "]", "elif", "(", "len", "(", "data", ")", ">=", "4", ")", "and", "(", "data", "[", ":", "2", "]", "==", "'\\xff\\xfe'", ")", "and", "(", "data", "[", "2", ":", "4", "]", "!=", "'\\x00\\x00'", ")", ":", "encoding", "=", "'utf-16le'", "data", "=", "data", "[", "2", ":", "]", "elif", "data", "[", ":", "3", "]", "==", "'\\xef\\xbb\\xbf'", ":", "encoding", "=", "'utf-8'", "data", "=", "data", "[", "3", ":", "]", "elif", "data", "[", ":", "4", "]", "==", "'\\x00\\x00\\xfe\\xff'", ":", "encoding", "=", "'utf-32be'", "data", "=", "data", "[", "4", ":", "]", "elif", "data", "[", ":", "4", "]", "==", "'\\xff\\xfe\\x00\\x00'", ":", "encoding", "=", "'utf-32le'", "data", "=", "data", "[", "4", ":", "]", "newdata", "=", "unicode", "(", "data", ",", "encoding", ",", "errors", ")", "return", "newdata" ]
Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases
[ "Given", "a", "string", "and", "its", "encoding", "decodes", "the", "string", "into", "Unicode", ".", "%encoding", "is", "a", "string", "recognized", "by", "encodings", ".", "aliases" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/unicode_dammit.py#L38-L59
train
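The helper above sniffs a byte-order mark to pick the encoding before decoding, and is written for Python 2 (byte strings plus the removed unicode() builtin). A Python 3 sketch of the same idea, using the BOM constants from the standard codecs module:

import codecs

def decode_with_bom(data, fallback='utf-8', errors='strict'):
    # UTF-32 BOMs must be checked before UTF-16, since the UTF-32-LE BOM
    # starts with the same two bytes as the UTF-16-LE BOM.
    boms = [
        (codecs.BOM_UTF32_BE, 'utf-32-be'),
        (codecs.BOM_UTF32_LE, 'utf-32-le'),
        (codecs.BOM_UTF8, 'utf-8'),
        (codecs.BOM_UTF16_BE, 'utf-16-be'),
        (codecs.BOM_UTF16_LE, 'utf-16-le'),
    ]
    for bom, encoding in boms:
        if data.startswith(bom):
            return data[len(bom):].decode(encoding, errors)
    return data.decode(fallback, errors)

print(decode_with_bom(codecs.BOM_UTF8 + 'héllo'.encode('utf-8')))   # héllo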
python-beaver/python-beaver
beaver/transports/stomp_transport.py
StompTransport.reconnect
def reconnect(self): """Allows reconnection from when a handled TransportException is thrown""" try: self.conn.close() except Exception,e: self.logger.warn(e) self.createConnection() return True
python
def reconnect(self): """Allows reconnection from when a handled TransportException is thrown""" try: self.conn.close() except Exception,e: self.logger.warn(e) self.createConnection() return True
[ "def", "reconnect", "(", "self", ")", ":", "try", ":", "self", ".", "conn", ".", "close", "(", ")", "except", "Exception", ",", "e", ":", "self", ".", "logger", ".", "warn", "(", "e", ")", "self", ".", "createConnection", "(", ")", "return", "True" ]
Allows reconnection from when a handled TransportException is thrown
[ "Allows", "reconnection", "from", "when", "a", "handled", "TransportException", "is", "thrown" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/stomp_transport.py#L64-L74
train
python-beaver/python-beaver
beaver/transports/redis_transport.py
RedisTransport._check_connections
def _check_connections(self): """Checks if all configured redis servers are reachable""" for server in self._servers: if self._is_reachable(server): server['down_until'] = 0 else: server['down_until'] = time.time() + 5
python
def _check_connections(self): """Checks if all configured redis servers are reachable""" for server in self._servers: if self._is_reachable(server): server['down_until'] = 0 else: server['down_until'] = time.time() + 5
[ "def", "_check_connections", "(", "self", ")", ":", "for", "server", "in", "self", ".", "_servers", ":", "if", "self", ".", "_is_reachable", "(", "server", ")", ":", "server", "[", "'down_until'", "]", "=", "0", "else", ":", "server", "[", "'down_until'", "]", "=", "time", ".", "time", "(", ")", "+", "5" ]
Checks if all configured redis servers are reachable
[ "Checks", "if", "all", "configured", "redis", "servers", "are", "reachable" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/redis_transport.py#L38-L45
train
python-beaver/python-beaver
beaver/transports/redis_transport.py
RedisTransport._is_reachable
def _is_reachable(self, server): """Checks if the given redis server is reachable""" try: server['redis'].ping() return True except UserWarning: self._logger.warn('Cannot reach redis server: ' + server['url']) except Exception: self._logger.warn('Cannot reach redis server: ' + server['url']) return False
python
def _is_reachable(self, server): """Checks if the given redis server is reachable""" try: server['redis'].ping() return True except UserWarning: self._logger.warn('Cannot reach redis server: ' + server['url']) except Exception: self._logger.warn('Cannot reach redis server: ' + server['url']) return False
[ "def", "_is_reachable", "(", "self", ",", "server", ")", ":", "try", ":", "server", "[", "'redis'", "]", ".", "ping", "(", ")", "return", "True", "except", "UserWarning", ":", "self", ".", "_logger", ".", "warn", "(", "'Cannot reach redis server: '", "+", "server", "[", "'url'", "]", ")", "except", "Exception", ":", "self", ".", "_logger", ".", "warn", "(", "'Cannot reach redis server: '", "+", "server", "[", "'url'", "]", ")", "return", "False" ]
Checks if the given redis server is reachable
[ "Checks", "if", "the", "given", "redis", "server", "is", "reachable" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/redis_transport.py#L47-L58
train
python-beaver/python-beaver
beaver/transports/redis_transport.py
RedisTransport.invalidate
def invalidate(self): """Invalidates the current transport and disconnects all redis connections""" super(RedisTransport, self).invalidate() for server in self._servers: server['redis'].connection_pool.disconnect() return False
python
def invalidate(self): """Invalidates the current transport and disconnects all redis connections""" super(RedisTransport, self).invalidate() for server in self._servers: server['redis'].connection_pool.disconnect() return False
[ "def", "invalidate", "(", "self", ")", ":", "super", "(", "RedisTransport", ",", "self", ")", ".", "invalidate", "(", ")", "for", "server", "in", "self", ".", "_servers", ":", "server", "[", "'redis'", "]", ".", "connection_pool", ".", "disconnect", "(", ")", "return", "False" ]
Invalidates the current transport and disconnects all redis connections
[ "Invalidates", "the", "current", "transport", "and", "disconnects", "all", "redis", "connections" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/redis_transport.py#L63-L69
train
python-beaver/python-beaver
beaver/transports/redis_transport.py
RedisTransport.callback
def callback(self, filename, lines, **kwargs): """Sends log lines to redis servers""" self._logger.debug('Redis transport called') timestamp = self.get_timestamp(**kwargs) if kwargs.get('timestamp', False): del kwargs['timestamp'] namespaces = self._beaver_config.get_field('redis_namespace', filename) if not namespaces: namespaces = self._namespace namespaces = namespaces.split(",") self._logger.debug('Got namespaces: '.join(namespaces)) data_type = self._data_type self._logger.debug('Got data type: ' + data_type) server = self._get_next_server() self._logger.debug('Got redis server: ' + server['url']) pipeline = server['redis'].pipeline(transaction=False) callback_map = { self.LIST_DATA_TYPE: pipeline.rpush, self.CHANNEL_DATA_TYPE: pipeline.publish, } callback_method = callback_map[data_type] for line in lines: for namespace in namespaces: callback_method( namespace.strip(), self.format(filename, line, timestamp, **kwargs) ) try: pipeline.execute() except redis.exceptions.RedisError, exception: self._logger.warn('Cannot push lines to redis server: ' + server['url']) raise TransportException(exception)
python
def callback(self, filename, lines, **kwargs): """Sends log lines to redis servers""" self._logger.debug('Redis transport called') timestamp = self.get_timestamp(**kwargs) if kwargs.get('timestamp', False): del kwargs['timestamp'] namespaces = self._beaver_config.get_field('redis_namespace', filename) if not namespaces: namespaces = self._namespace namespaces = namespaces.split(",") self._logger.debug('Got namespaces: '.join(namespaces)) data_type = self._data_type self._logger.debug('Got data type: ' + data_type) server = self._get_next_server() self._logger.debug('Got redis server: ' + server['url']) pipeline = server['redis'].pipeline(transaction=False) callback_map = { self.LIST_DATA_TYPE: pipeline.rpush, self.CHANNEL_DATA_TYPE: pipeline.publish, } callback_method = callback_map[data_type] for line in lines: for namespace in namespaces: callback_method( namespace.strip(), self.format(filename, line, timestamp, **kwargs) ) try: pipeline.execute() except redis.exceptions.RedisError, exception: self._logger.warn('Cannot push lines to redis server: ' + server['url']) raise TransportException(exception)
[ "def", "callback", "(", "self", ",", "filename", ",", "lines", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_logger", ".", "debug", "(", "'Redis transport called'", ")", "timestamp", "=", "self", ".", "get_timestamp", "(", "*", "*", "kwargs", ")", "if", "kwargs", ".", "get", "(", "'timestamp'", ",", "False", ")", ":", "del", "kwargs", "[", "'timestamp'", "]", "namespaces", "=", "self", ".", "_beaver_config", ".", "get_field", "(", "'redis_namespace'", ",", "filename", ")", "if", "not", "namespaces", ":", "namespaces", "=", "self", ".", "_namespace", "namespaces", "=", "namespaces", ".", "split", "(", "\",\"", ")", "self", ".", "_logger", ".", "debug", "(", "'Got namespaces: '", ".", "join", "(", "namespaces", ")", ")", "data_type", "=", "self", ".", "_data_type", "self", ".", "_logger", ".", "debug", "(", "'Got data type: '", "+", "data_type", ")", "server", "=", "self", ".", "_get_next_server", "(", ")", "self", ".", "_logger", ".", "debug", "(", "'Got redis server: '", "+", "server", "[", "'url'", "]", ")", "pipeline", "=", "server", "[", "'redis'", "]", ".", "pipeline", "(", "transaction", "=", "False", ")", "callback_map", "=", "{", "self", ".", "LIST_DATA_TYPE", ":", "pipeline", ".", "rpush", ",", "self", ".", "CHANNEL_DATA_TYPE", ":", "pipeline", ".", "publish", ",", "}", "callback_method", "=", "callback_map", "[", "data_type", "]", "for", "line", "in", "lines", ":", "for", "namespace", "in", "namespaces", ":", "callback_method", "(", "namespace", ".", "strip", "(", ")", ",", "self", ".", "format", "(", "filename", ",", "line", ",", "timestamp", ",", "*", "*", "kwargs", ")", ")", "try", ":", "pipeline", ".", "execute", "(", ")", "except", "redis", ".", "exceptions", ".", "RedisError", ",", "exception", ":", "self", ".", "_logger", ".", "warn", "(", "'Cannot push lines to redis server: '", "+", "server", "[", "'url'", "]", ")", "raise", "TransportException", "(", "exception", ")" ]
Sends log lines to redis servers
[ "Sends", "log", "lines", "to", "redis", "servers" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/redis_transport.py#L71-L112
train
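Each line is formatted into an event and pushed through a non-transactional pipeline, either RPUSH onto a list key or PUBLISH to a channel. A hedged usage sketch of that pattern with redis-py directly; it assumes a reachable local redis and the redis package, and beaver's formatting is reduced to a plain json.dumps here.

import json
import redis

client = redis.StrictRedis(host='localhost', port=6379, db=0)
pipeline = client.pipeline(transaction=False)

namespace = 'logstash:beaver'
for line in ['first log line', 'second log line']:
    event = json.dumps({'message': line, 'file': '/var/log/app.log'})
    pipeline.rpush(namespace, event)        # list data type; use pipeline.publish() for channels

try:
    pipeline.execute()
except redis.exceptions.RedisError as exc:
    print('Cannot push lines to redis server:', exc)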
python-beaver/python-beaver
beaver/transports/redis_transport.py
RedisTransport._get_next_server
def _get_next_server(self): """Returns a valid redis server or raises a TransportException""" current_try = 0 max_tries = len(self._servers) while current_try < max_tries: server_index = self._raise_server_index() server = self._servers[server_index] down_until = server['down_until'] self._logger.debug('Checking server ' + str(current_try + 1) + '/' + str(max_tries) + ': ' + server['url']) if down_until == 0: self._logger.debug('Elected server: ' + server['url']) return server if down_until < time.time(): if self._is_reachable(server): server['down_until'] = 0 self._logger.debug('Elected server: ' + server['url']) return server else: self._logger.debug('Server still unavailable: ' + server['url']) server['down_until'] = time.time() + 5 current_try += 1 raise TransportException('Cannot reach any redis server')
python
def _get_next_server(self): """Returns a valid redis server or raises a TransportException""" current_try = 0 max_tries = len(self._servers) while current_try < max_tries: server_index = self._raise_server_index() server = self._servers[server_index] down_until = server['down_until'] self._logger.debug('Checking server ' + str(current_try + 1) + '/' + str(max_tries) + ': ' + server['url']) if down_until == 0: self._logger.debug('Elected server: ' + server['url']) return server if down_until < time.time(): if self._is_reachable(server): server['down_until'] = 0 self._logger.debug('Elected server: ' + server['url']) return server else: self._logger.debug('Server still unavailable: ' + server['url']) server['down_until'] = time.time() + 5 current_try += 1 raise TransportException('Cannot reach any redis server')
[ "def", "_get_next_server", "(", "self", ")", ":", "current_try", "=", "0", "max_tries", "=", "len", "(", "self", ".", "_servers", ")", "while", "current_try", "<", "max_tries", ":", "server_index", "=", "self", ".", "_raise_server_index", "(", ")", "server", "=", "self", ".", "_servers", "[", "server_index", "]", "down_until", "=", "server", "[", "'down_until'", "]", "self", ".", "_logger", ".", "debug", "(", "'Checking server '", "+", "str", "(", "current_try", "+", "1", ")", "+", "'/'", "+", "str", "(", "max_tries", ")", "+", "': '", "+", "server", "[", "'url'", "]", ")", "if", "down_until", "==", "0", ":", "self", ".", "_logger", ".", "debug", "(", "'Elected server: '", "+", "server", "[", "'url'", "]", ")", "return", "server", "if", "down_until", "<", "time", ".", "time", "(", ")", ":", "if", "self", ".", "_is_reachable", "(", "server", ")", ":", "server", "[", "'down_until'", "]", "=", "0", "self", ".", "_logger", ".", "debug", "(", "'Elected server: '", "+", "server", "[", "'url'", "]", ")", "return", "server", "else", ":", "self", ".", "_logger", ".", "debug", "(", "'Server still unavailable: '", "+", "server", "[", "'url'", "]", ")", "server", "[", "'down_until'", "]", "=", "time", ".", "time", "(", ")", "+", "5", "current_try", "+=", "1", "raise", "TransportException", "(", "'Cannot reach any redis server'", ")" ]
Returns a valid redis server or raises a TransportException
[ "Returns", "a", "valid", "redis", "server", "or", "raises", "a", "TransportException" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/redis_transport.py#L114-L144
train
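A server is skipped while its down_until timestamp lies in the future and is re-probed once the back-off expires, walking the server list round-robin. A self-contained sketch of that election loop; is_reachable stands in for the redis ping above and all names are illustrative.

import time

def get_next_server(servers, state, is_reachable, backoff=5):
    tries = 0
    while tries < len(servers):
        state['index'] = (state['index'] + 1) % len(servers)   # round robin
        server = servers[state['index']]
        if server['down_until'] <= time.time():
            if server['down_until'] == 0 or is_reachable(server):
                server['down_until'] = 0                       # healthy: elect it
                return server
            server['down_until'] = time.time() + backoff       # still down, back off again
        tries += 1
    raise RuntimeError('Cannot reach any server')

servers = [{'url': 'redis://a', 'down_until': 0},
           {'url': 'redis://b', 'down_until': time.time() + 60}]
state = {'index': -1}
print(get_next_server(servers, state, lambda s: True)['url'])   # redis://a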
python-beaver/python-beaver
beaver/transports/redis_transport.py
RedisTransport.valid
def valid(self): """Returns whether or not the transport can send data to any redis server""" valid_servers = 0 for server in self._servers: if server['down_until'] <= time.time(): valid_servers += 1 return valid_servers > 0
python
def valid(self): """Returns whether or not the transport can send data to any redis server""" valid_servers = 0 for server in self._servers: if server['down_until'] <= time.time(): valid_servers += 1 return valid_servers > 0
[ "def", "valid", "(", "self", ")", ":", "valid_servers", "=", "0", "for", "server", "in", "self", ".", "_servers", ":", "if", "server", "[", "'down_until'", "]", "<=", "time", ".", "time", "(", ")", ":", "valid_servers", "+=", "1", "return", "valid_servers", ">", "0" ]
Returns whether or not the transport can send data to any redis server
[ "Returns", "whether", "or", "not", "the", "transport", "can", "send", "data", "to", "any", "redis", "server" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/redis_transport.py#L154-L162
train
python-beaver/python-beaver
beaver/transports/base_transport.py
BaseTransport.format
def format(self, filename, line, timestamp, **kwargs): """Returns a formatted log line""" line = unicode(line.encode("utf-8"), "utf-8", errors="ignore") formatter = self._beaver_config.get_field('format', filename) if formatter not in self._formatters: formatter = self._default_formatter data = { self._fields.get('type'): kwargs.get('type'), self._fields.get('tags'): kwargs.get('tags'), '@timestamp': timestamp, self._fields.get('host'): self._current_host, self._fields.get('file'): filename, self._fields.get('message'): line } if self._logstash_version == 0: data['@source'] = 'file://{0}'.format(filename) data['@fields'] = kwargs.get('fields') else: data['@version'] = self._logstash_version fields = kwargs.get('fields') for key in fields: data[key] = fields.get(key) return self._formatters[formatter](data)
python
def format(self, filename, line, timestamp, **kwargs): """Returns a formatted log line""" line = unicode(line.encode("utf-8"), "utf-8", errors="ignore") formatter = self._beaver_config.get_field('format', filename) if formatter not in self._formatters: formatter = self._default_formatter data = { self._fields.get('type'): kwargs.get('type'), self._fields.get('tags'): kwargs.get('tags'), '@timestamp': timestamp, self._fields.get('host'): self._current_host, self._fields.get('file'): filename, self._fields.get('message'): line } if self._logstash_version == 0: data['@source'] = 'file://{0}'.format(filename) data['@fields'] = kwargs.get('fields') else: data['@version'] = self._logstash_version fields = kwargs.get('fields') for key in fields: data[key] = fields.get(key) return self._formatters[formatter](data)
[ "def", "format", "(", "self", ",", "filename", ",", "line", ",", "timestamp", ",", "*", "*", "kwargs", ")", ":", "line", "=", "unicode", "(", "line", ".", "encode", "(", "\"utf-8\"", ")", ",", "\"utf-8\"", ",", "errors", "=", "\"ignore\"", ")", "formatter", "=", "self", ".", "_beaver_config", ".", "get_field", "(", "'format'", ",", "filename", ")", "if", "formatter", "not", "in", "self", ".", "_formatters", ":", "formatter", "=", "self", ".", "_default_formatter", "data", "=", "{", "self", ".", "_fields", ".", "get", "(", "'type'", ")", ":", "kwargs", ".", "get", "(", "'type'", ")", ",", "self", ".", "_fields", ".", "get", "(", "'tags'", ")", ":", "kwargs", ".", "get", "(", "'tags'", ")", ",", "'@timestamp'", ":", "timestamp", ",", "self", ".", "_fields", ".", "get", "(", "'host'", ")", ":", "self", ".", "_current_host", ",", "self", ".", "_fields", ".", "get", "(", "'file'", ")", ":", "filename", ",", "self", ".", "_fields", ".", "get", "(", "'message'", ")", ":", "line", "}", "if", "self", ".", "_logstash_version", "==", "0", ":", "data", "[", "'@source'", "]", "=", "'file://{0}'", ".", "format", "(", "filename", ")", "data", "[", "'@fields'", "]", "=", "kwargs", ".", "get", "(", "'fields'", ")", "else", ":", "data", "[", "'@version'", "]", "=", "self", ".", "_logstash_version", "fields", "=", "kwargs", ".", "get", "(", "'fields'", ")", "for", "key", "in", "fields", ":", "data", "[", "key", "]", "=", "fields", ".", "get", "(", "key", ")", "return", "self", ".", "_formatters", "[", "formatter", "]", "(", "data", ")" ]
Returns a formatted log line
[ "Returns", "a", "formatted", "log", "line" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/base_transport.py#L117-L142
train
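For logstash_version 1 the event is a flat dict carrying @timestamp and @version with any custom fields merged at the top level; for version 0 the custom fields are nested under @fields and an @source file URI is added. A small sketch of the version-1 shape serialized with a JSON formatter; the host name and file name are made up for illustration.

import json

def format_v1(filename, line, timestamp, type_='file', tags=None, fields=None):
    data = {
        'type': type_,
        'tags': tags or [],
        '@timestamp': timestamp,
        'host': 'example-host',
        'file': filename,
        'message': line,
        '@version': 1,
    }
    data.update(fields or {})          # v1 merges custom fields at the top level
    return json.dumps(data)

print(format_v1('/var/log/app.log', 'hello world', '2017-01-01T00:00:00.000Z',
                fields={'env': 'prod'}))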
python-beaver/python-beaver
beaver/transports/base_transport.py
BaseTransport.get_timestamp
def get_timestamp(self, **kwargs): """Retrieves the timestamp for a given set of data""" timestamp = kwargs.get('timestamp') if not timestamp: now = datetime.datetime.utcnow() timestamp = now.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (now.microsecond / 1000) + "Z" return timestamp
python
def get_timestamp(self, **kwargs): """Retrieves the timestamp for a given set of data""" timestamp = kwargs.get('timestamp') if not timestamp: now = datetime.datetime.utcnow() timestamp = now.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (now.microsecond / 1000) + "Z" return timestamp
[ "def", "get_timestamp", "(", "self", ",", "*", "*", "kwargs", ")", ":", "timestamp", "=", "kwargs", ".", "get", "(", "'timestamp'", ")", "if", "not", "timestamp", ":", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "timestamp", "=", "now", ".", "strftime", "(", "\"%Y-%m-%dT%H:%M:%S\"", ")", "+", "\".%03d\"", "%", "(", "now", ".", "microsecond", "/", "1000", ")", "+", "\"Z\"", "return", "timestamp" ]
Retrieves the timestamp for a given set of data
[ "Retrieves", "the", "timestamp", "for", "a", "given", "set", "of", "data" ]
93941e968016c5a962dffed9e7a9f6dc1d23236c
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/transports/base_transport.py#L144-L151
train
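The timestamp is a UTC strftime down to seconds plus a hand-formatted millisecond suffix and a literal Z. A one-liner sketch of the same trick, using integer division for the microseconds so it also runs cleanly under Python 3:

import datetime

now = datetime.datetime.utcnow()
timestamp = now.strftime('%Y-%m-%dT%H:%M:%S') + '.%03d' % (now.microsecond // 1000) + 'Z'
print(timestamp)   # e.g. 2017-01-01T12:34:56.789Z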
gqmelo/exec-wrappers
exec_wrappers/create_wrappers.py
_make_executable
def _make_executable(path): """Make the file at path executable.""" os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
python
def _make_executable(path): """Make the file at path executable.""" os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
[ "def", "_make_executable", "(", "path", ")", ":", "os", ".", "chmod", "(", "path", ",", "os", ".", "stat", "(", "path", ")", ".", "st_mode", "|", "stat", ".", "S_IXUSR", "|", "stat", ".", "S_IXGRP", "|", "stat", ".", "S_IXOTH", ")" ]
Make the file at path executable.
[ "Make", "the", "file", "at", "path", "executable", "." ]
0faf892a103cf03d005f1dbdc71ca52d279b4e3b
https://github.com/gqmelo/exec-wrappers/blob/0faf892a103cf03d005f1dbdc71ca52d279b4e3b/exec_wrappers/create_wrappers.py#L300-L302
train
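The helper ORs the user, group and other execute bits into the file's existing mode. A quick usage sketch on a scratch file:

import os
import stat
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
print(oct(stat.S_IMODE(os.stat(path).st_mode)))   # 0o711: mkstemp starts at 0o600, plus the three x bits
os.remove(path)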
cmap/cmapPy
cmapPy/pandasGEXpress/subset.py
build_parser
def build_parser(): """Build argument parser.""" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter) # Required args parser.add_argument("--in_path", "-i", required=True, help="file path to input GCT(x) file") parser.add_argument("--rid", nargs="+", help="filepath to grp file or string array for including rows") parser.add_argument("--cid", nargs="+", help="filepath to grp file or string array for including cols") parser.add_argument("--exclude_rid", "-er", nargs="+", help="filepath to grp file or string array for excluding rows") parser.add_argument("--exclude_cid", "-ec", nargs="+", help="filepath to grp file or string array for excluding cols") parser.add_argument("--out_name", "-o", default="ds_subsetted.gct", help="what to name the output file") parser.add_argument("--out_type", default="gct", choices=["gct", "gctx"], help="whether to write output as GCT or GCTx") parser.add_argument("--verbose", "-v", action="store_true", default=False, help="whether to increase the # of messages reported") return parser
python
def build_parser(): """Build argument parser.""" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter) # Required args parser.add_argument("--in_path", "-i", required=True, help="file path to input GCT(x) file") parser.add_argument("--rid", nargs="+", help="filepath to grp file or string array for including rows") parser.add_argument("--cid", nargs="+", help="filepath to grp file or string array for including cols") parser.add_argument("--exclude_rid", "-er", nargs="+", help="filepath to grp file or string array for excluding rows") parser.add_argument("--exclude_cid", "-ec", nargs="+", help="filepath to grp file or string array for excluding cols") parser.add_argument("--out_name", "-o", default="ds_subsetted.gct", help="what to name the output file") parser.add_argument("--out_type", default="gct", choices=["gct", "gctx"], help="whether to write output as GCT or GCTx") parser.add_argument("--verbose", "-v", action="store_true", default=False, help="whether to increase the # of messages reported") return parser
[ "def", "build_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "__doc__", ",", "formatter_class", "=", "argparse", ".", "ArgumentDefaultsHelpFormatter", ")", "# Required args", "parser", ".", "add_argument", "(", "\"--in_path\"", ",", "\"-i\"", ",", "required", "=", "True", ",", "help", "=", "\"file path to input GCT(x) file\"", ")", "parser", ".", "add_argument", "(", "\"--rid\"", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"filepath to grp file or string array for including rows\"", ")", "parser", ".", "add_argument", "(", "\"--cid\"", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"filepath to grp file or string array for including cols\"", ")", "parser", ".", "add_argument", "(", "\"--exclude_rid\"", ",", "\"-er\"", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"filepath to grp file or string array for excluding rows\"", ")", "parser", ".", "add_argument", "(", "\"--exclude_cid\"", ",", "\"-ec\"", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"filepath to grp file or string array for excluding cols\"", ")", "parser", ".", "add_argument", "(", "\"--out_name\"", ",", "\"-o\"", ",", "default", "=", "\"ds_subsetted.gct\"", ",", "help", "=", "\"what to name the output file\"", ")", "parser", ".", "add_argument", "(", "\"--out_type\"", ",", "default", "=", "\"gct\"", ",", "choices", "=", "[", "\"gct\"", ",", "\"gctx\"", "]", ",", "help", "=", "\"whether to write output as GCT or GCTx\"", ")", "parser", ".", "add_argument", "(", "\"--verbose\"", ",", "\"-v\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ",", "help", "=", "\"whether to increase the # of messages reported\"", ")", "return", "parser" ]
Build argument parser.
[ "Build", "argument", "parser", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/subset.py#L28-L49
train
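A hedged usage sketch of the parser above; it assumes cmapPy is installed and the file names are made up.

from cmapPy.pandasGEXpress.subset import build_parser

args = build_parser().parse_args([
    '--in_path', 'my_data.gctx',
    '--rid', 'row1', 'row2',
    '--out_name', 'subset.gct',
])
print(args.in_path, args.rid, args.out_type)   # my_data.gctx ['row1', 'row2'] gct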
cmap/cmapPy
cmapPy/pandasGEXpress/subset.py
_read_arg
def _read_arg(arg): """ If arg is a list with 1 element that corresponds to a valid file path, use set_io.grp to read the grp file. Otherwise, check that arg is a list of strings. Args: arg (list or None) Returns: arg_out (list or None) """ # If arg is None, just return it back if arg is None: arg_out = arg else: # If len(arg) == 1 and arg[0] is a valid filepath, read it as a grp file if len(arg) == 1 and os.path.exists(arg[0]): arg_out = grp.read(arg[0]) else: arg_out = arg # Make sure that arg_out is a list of strings assert isinstance(arg_out, list), "arg_out must be a list." assert type(arg_out[0]) == str, "arg_out must be a list of strings." return arg_out
python
def _read_arg(arg): """ If arg is a list with 1 element that corresponds to a valid file path, use set_io.grp to read the grp file. Otherwise, check that arg is a list of strings. Args: arg (list or None) Returns: arg_out (list or None) """ # If arg is None, just return it back if arg is None: arg_out = arg else: # If len(arg) == 1 and arg[0] is a valid filepath, read it as a grp file if len(arg) == 1 and os.path.exists(arg[0]): arg_out = grp.read(arg[0]) else: arg_out = arg # Make sure that arg_out is a list of strings assert isinstance(arg_out, list), "arg_out must be a list." assert type(arg_out[0]) == str, "arg_out must be a list of strings." return arg_out
[ "def", "_read_arg", "(", "arg", ")", ":", "# If arg is None, just return it back", "if", "arg", "is", "None", ":", "arg_out", "=", "arg", "else", ":", "# If len(arg) == 1 and arg[0] is a valid filepath, read it as a grp file", "if", "len", "(", "arg", ")", "==", "1", "and", "os", ".", "path", ".", "exists", "(", "arg", "[", "0", "]", ")", ":", "arg_out", "=", "grp", ".", "read", "(", "arg", "[", "0", "]", ")", "else", ":", "arg_out", "=", "arg", "# Make sure that arg_out is a list of strings", "assert", "isinstance", "(", "arg_out", ",", "list", ")", ",", "\"arg_out must be a list.\"", "assert", "type", "(", "arg_out", "[", "0", "]", ")", "==", "str", ",", "\"arg_out must be a list of strings.\"", "return", "arg_out" ]
If arg is a list with 1 element that corresponds to a valid file path, use set_io.grp to read the grp file. Otherwise, check that arg is a list of strings. Args: arg (list or None) Returns: arg_out (list or None)
[ "If", "arg", "is", "a", "list", "with", "1", "element", "that", "corresponds", "to", "a", "valid", "file", "path", "use", "set_io", ".", "grp", "to", "read", "the", "grp", "file", ".", "Otherwise", "check", "that", "arg", "is", "a", "list", "of", "strings", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/subset.py#L94-L121
train
cmap/cmapPy
cmapPy/set_io/gmt.py
read
def read(file_path): """ Read a gmt file at the path specified by file_path. Args: file_path (string): path to gmt file Returns: gmt (GMT object): list of dicts, where each dict corresponds to one line of the GMT file """ # Read in file actual_file_path = os.path.expanduser(file_path) with open(actual_file_path, 'r') as f: lines = f.readlines() # Create GMT object gmt = [] # Iterate over each line for line_num, line in enumerate(lines): # Separate along tabs fields = line.split('\t') assert len(fields) > 2, ( "Each line must have at least 3 tab-delimited items. " + "line_num: {}, fields: {}").format(line_num, fields) # Get rid of trailing whitespace fields[-1] = fields[-1].rstrip() # Collect entries entries = fields[2:] # Remove empty entries entries = [x for x in entries if x] assert len(set(entries)) == len(entries), ( "There should not be duplicate entries for the same set. " + "line_num: {}, entries: {}").format(line_num, entries) # Store this line as a dictionary line_dict = {SET_IDENTIFIER_FIELD: fields[0], SET_DESC_FIELD: fields[1], SET_MEMBERS_FIELD: entries} gmt.append(line_dict) verify_gmt_integrity(gmt) return gmt
python
def read(file_path): """ Read a gmt file at the path specified by file_path. Args: file_path (string): path to gmt file Returns: gmt (GMT object): list of dicts, where each dict corresponds to one line of the GMT file """ # Read in file actual_file_path = os.path.expanduser(file_path) with open(actual_file_path, 'r') as f: lines = f.readlines() # Create GMT object gmt = [] # Iterate over each line for line_num, line in enumerate(lines): # Separate along tabs fields = line.split('\t') assert len(fields) > 2, ( "Each line must have at least 3 tab-delimited items. " + "line_num: {}, fields: {}").format(line_num, fields) # Get rid of trailing whitespace fields[-1] = fields[-1].rstrip() # Collect entries entries = fields[2:] # Remove empty entries entries = [x for x in entries if x] assert len(set(entries)) == len(entries), ( "There should not be duplicate entries for the same set. " + "line_num: {}, entries: {}").format(line_num, entries) # Store this line as a dictionary line_dict = {SET_IDENTIFIER_FIELD: fields[0], SET_DESC_FIELD: fields[1], SET_MEMBERS_FIELD: entries} gmt.append(line_dict) verify_gmt_integrity(gmt) return gmt
[ "def", "read", "(", "file_path", ")", ":", "# Read in file", "actual_file_path", "=", "os", ".", "path", ".", "expanduser", "(", "file_path", ")", "with", "open", "(", "actual_file_path", ",", "'r'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "# Create GMT object", "gmt", "=", "[", "]", "# Iterate over each line", "for", "line_num", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "# Separate along tabs", "fields", "=", "line", ".", "split", "(", "'\\t'", ")", "assert", "len", "(", "fields", ")", ">", "2", ",", "(", "\"Each line must have at least 3 tab-delimited items. \"", "+", "\"line_num: {}, fields: {}\"", ")", ".", "format", "(", "line_num", ",", "fields", ")", "# Get rid of trailing whitespace", "fields", "[", "-", "1", "]", "=", "fields", "[", "-", "1", "]", ".", "rstrip", "(", ")", "# Collect entries", "entries", "=", "fields", "[", "2", ":", "]", "# Remove empty entries", "entries", "=", "[", "x", "for", "x", "in", "entries", "if", "x", "]", "assert", "len", "(", "set", "(", "entries", ")", ")", "==", "len", "(", "entries", ")", ",", "(", "\"There should not be duplicate entries for the same set. \"", "+", "\"line_num: {}, entries: {}\"", ")", ".", "format", "(", "line_num", ",", "entries", ")", "# Store this line as a dictionary", "line_dict", "=", "{", "SET_IDENTIFIER_FIELD", ":", "fields", "[", "0", "]", ",", "SET_DESC_FIELD", ":", "fields", "[", "1", "]", ",", "SET_MEMBERS_FIELD", ":", "entries", "}", "gmt", ".", "append", "(", "line_dict", ")", "verify_gmt_integrity", "(", "gmt", ")", "return", "gmt" ]
Read a gmt file at the path specified by file_path. Args: file_path (string): path to gmt file Returns: gmt (GMT object): list of dicts, where each dict corresponds to one line of the GMT file
[ "Read", "a", "gmt", "file", "at", "the", "path", "specified", "by", "file_path", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/set_io/gmt.py#L24-L73
train
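Each GMT line is tab-separated: a set identifier, a description, then the set members. A tiny self-contained sketch of parsing one such line into a dict of the same shape; the dict keys here are illustrative names rather than the module's SET_*_FIELD constants.

line = 'MY_SET\tan example gene set\tTP53\tEGFR\tMYC\t\n'

fields = line.split('\t')
assert len(fields) > 2, 'Each line must have at least 3 tab-delimited items.'
fields[-1] = fields[-1].rstrip()                 # drop trailing newline/whitespace
entry = {
    'set_id': fields[0],                         # SET_IDENTIFIER_FIELD in the module
    'description': fields[1],                    # SET_DESC_FIELD in the module
    'members': [x for x in fields[2:] if x],     # SET_MEMBERS_FIELD, empty cells dropped
}
print(entry)
# {'set_id': 'MY_SET', 'description': 'an example gene set', 'members': ['TP53', 'EGFR', 'MYC']}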
cmap/cmapPy
cmapPy/set_io/gmt.py
verify_gmt_integrity
def verify_gmt_integrity(gmt): """ Make sure that set ids are unique. Args: gmt (GMT object): list of dicts Returns: None """ # Verify that set ids are unique set_ids = [d[SET_IDENTIFIER_FIELD] for d in gmt] assert len(set(set_ids)) == len(set_ids), ( "Set identifiers should be unique. set_ids: {}".format(set_ids))
python
def verify_gmt_integrity(gmt): """ Make sure that set ids are unique. Args: gmt (GMT object): list of dicts Returns: None """ # Verify that set ids are unique set_ids = [d[SET_IDENTIFIER_FIELD] for d in gmt] assert len(set(set_ids)) == len(set_ids), ( "Set identifiers should be unique. set_ids: {}".format(set_ids))
[ "def", "verify_gmt_integrity", "(", "gmt", ")", ":", "# Verify that set ids are unique", "set_ids", "=", "[", "d", "[", "SET_IDENTIFIER_FIELD", "]", "for", "d", "in", "gmt", "]", "assert", "len", "(", "set", "(", "set_ids", ")", ")", "==", "len", "(", "set_ids", ")", ",", "(", "\"Set identifiers should be unique. set_ids: {}\"", ".", "format", "(", "set_ids", ")", ")" ]
Make sure that set ids are unique. Args: gmt (GMT object): list of dicts Returns: None
[ "Make", "sure", "that", "set", "ids", "are", "unique", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/set_io/gmt.py#L76-L90
train
cmap/cmapPy
cmapPy/set_io/gmt.py
write
def write(gmt, out_path): """ Write a GMT to a text file. Args: gmt (GMT object): list of dicts out_path (string): output path Returns: None """ with open(out_path, 'w') as f: for _, each_dict in enumerate(gmt): f.write(each_dict[SET_IDENTIFIER_FIELD] + '\t') f.write(each_dict[SET_DESC_FIELD] + '\t') f.write('\t'.join([str(entry) for entry in each_dict[SET_MEMBERS_FIELD]])) f.write('\n')
python
def write(gmt, out_path): """ Write a GMT to a text file. Args: gmt (GMT object): list of dicts out_path (string): output path Returns: None """ with open(out_path, 'w') as f: for _, each_dict in enumerate(gmt): f.write(each_dict[SET_IDENTIFIER_FIELD] + '\t') f.write(each_dict[SET_DESC_FIELD] + '\t') f.write('\t'.join([str(entry) for entry in each_dict[SET_MEMBERS_FIELD]])) f.write('\n')
[ "def", "write", "(", "gmt", ",", "out_path", ")", ":", "with", "open", "(", "out_path", ",", "'w'", ")", "as", "f", ":", "for", "_", ",", "each_dict", "in", "enumerate", "(", "gmt", ")", ":", "f", ".", "write", "(", "each_dict", "[", "SET_IDENTIFIER_FIELD", "]", "+", "'\\t'", ")", "f", ".", "write", "(", "each_dict", "[", "SET_DESC_FIELD", "]", "+", "'\\t'", ")", "f", ".", "write", "(", "'\\t'", ".", "join", "(", "[", "str", "(", "entry", ")", "for", "entry", "in", "each_dict", "[", "SET_MEMBERS_FIELD", "]", "]", ")", ")", "f", ".", "write", "(", "'\\n'", ")" ]
Write a GMT to a text file. Args: gmt (GMT object): list of dicts out_path (string): output path Returns: None
[ "Write", "a", "GMT", "to", "a", "text", "file", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/set_io/gmt.py#L93-L109
train
cmap/cmapPy
cmapPy/pandasGEXpress/parse_gctx.py
parse
def parse(gctx_file_path, convert_neg_666=True, rid=None, cid=None, ridx=None, cidx=None, row_meta_only=False, col_meta_only=False, make_multiindex=False): """ Primary method of script. Reads in path to a gctx file and parses into GCToo object. Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not (see Note below for more details on this). Default = False. - rid (list of strings): list of row ids to specifically keep from gctx. Default=None. - cid (list of strings): list of col ids to specifically keep from gctx. Default=None. - ridx (list of integers): only read the rows corresponding to this list of integer ids. Default=None. - cidx (list of integers): only read the columns corresponding to this list of integer ids. Default=None. - row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True) as pandas DataFrame - col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True) as pandas DataFrame - make_multiindex (bool): whether to create a multi-index df combining the 3 component dfs Output: - myGCToo (GCToo): A GCToo instance containing content of parsed gctx file. Note: if meta_only = True, this will be a GCToo instance where the data_df is empty, i.e. data_df = pd.DataFrame(index=rids, columns = cids) Note: why does convert_neg_666 exist? - In CMap--for somewhat obscure historical reasons--we use "-666" as our null value for metadata. However (so that users can take full advantage of pandas' methods, including those for filtering nan's etc) we provide the option of converting these into numpy.NaN values, the pandas default. """ full_path = os.path.expanduser(gctx_file_path) # Verify that the path exists if not os.path.exists(full_path): err_msg = "The given path to the gctx file cannot be found. 
full_path: {}" logger.error(err_msg.format(full_path)) raise Exception(err_msg.format(full_path)) logger.info("Reading GCTX: {}".format(full_path)) # open file gctx_file = h5py.File(full_path, "r") if row_meta_only: # read in row metadata row_dset = gctx_file[row_meta_group_node] row_meta = parse_metadata_df("row", row_dset, convert_neg_666) # validate optional input ids & get indexes to subset by (sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta, None) gctx_file.close() # subset if specified, then return row_meta = row_meta.iloc[sorted_ridx] return row_meta elif col_meta_only: # read in col metadata col_dset = gctx_file[col_meta_group_node] col_meta = parse_metadata_df("col", col_dset, convert_neg_666) # validate optional input ids & get indexes to subset by (sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, None, col_meta) gctx_file.close() # subset if specified, then return col_meta = col_meta.iloc[sorted_cidx] return col_meta else: # read in row metadata row_dset = gctx_file[row_meta_group_node] row_meta = parse_metadata_df("row", row_dset, convert_neg_666) # read in col metadata col_dset = gctx_file[col_meta_group_node] col_meta = parse_metadata_df("col", col_dset, convert_neg_666) # validate optional input ids & get indexes to subset by (sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta, col_meta) data_dset = gctx_file[data_node] data_df = parse_data_df(data_dset, sorted_ridx, sorted_cidx, row_meta, col_meta) # (if subsetting) subset metadata row_meta = row_meta.iloc[sorted_ridx] col_meta = col_meta.iloc[sorted_cidx] # get version my_version = gctx_file.attrs[version_node] if type(my_version) == np.ndarray: my_version = my_version[0] gctx_file.close() # make GCToo instance my_gctoo = GCToo.GCToo(data_df=data_df, row_metadata_df=row_meta, col_metadata_df=col_meta, src=full_path, version=my_version, make_multiindex=make_multiindex) return my_gctoo
python
def parse(gctx_file_path, convert_neg_666=True, rid=None, cid=None, ridx=None, cidx=None, row_meta_only=False, col_meta_only=False, make_multiindex=False): """ Primary method of script. Reads in path to a gctx file and parses into GCToo object. Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not (see Note below for more details on this). Default = False. - rid (list of strings): list of row ids to specifically keep from gctx. Default=None. - cid (list of strings): list of col ids to specifically keep from gctx. Default=None. - ridx (list of integers): only read the rows corresponding to this list of integer ids. Default=None. - cidx (list of integers): only read the columns corresponding to this list of integer ids. Default=None. - row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True) as pandas DataFrame - col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True) as pandas DataFrame - make_multiindex (bool): whether to create a multi-index df combining the 3 component dfs Output: - myGCToo (GCToo): A GCToo instance containing content of parsed gctx file. Note: if meta_only = True, this will be a GCToo instance where the data_df is empty, i.e. data_df = pd.DataFrame(index=rids, columns = cids) Note: why does convert_neg_666 exist? - In CMap--for somewhat obscure historical reasons--we use "-666" as our null value for metadata. However (so that users can take full advantage of pandas' methods, including those for filtering nan's etc) we provide the option of converting these into numpy.NaN values, the pandas default. """ full_path = os.path.expanduser(gctx_file_path) # Verify that the path exists if not os.path.exists(full_path): err_msg = "The given path to the gctx file cannot be found. 
full_path: {}" logger.error(err_msg.format(full_path)) raise Exception(err_msg.format(full_path)) logger.info("Reading GCTX: {}".format(full_path)) # open file gctx_file = h5py.File(full_path, "r") if row_meta_only: # read in row metadata row_dset = gctx_file[row_meta_group_node] row_meta = parse_metadata_df("row", row_dset, convert_neg_666) # validate optional input ids & get indexes to subset by (sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta, None) gctx_file.close() # subset if specified, then return row_meta = row_meta.iloc[sorted_ridx] return row_meta elif col_meta_only: # read in col metadata col_dset = gctx_file[col_meta_group_node] col_meta = parse_metadata_df("col", col_dset, convert_neg_666) # validate optional input ids & get indexes to subset by (sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, None, col_meta) gctx_file.close() # subset if specified, then return col_meta = col_meta.iloc[sorted_cidx] return col_meta else: # read in row metadata row_dset = gctx_file[row_meta_group_node] row_meta = parse_metadata_df("row", row_dset, convert_neg_666) # read in col metadata col_dset = gctx_file[col_meta_group_node] col_meta = parse_metadata_df("col", col_dset, convert_neg_666) # validate optional input ids & get indexes to subset by (sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta, col_meta) data_dset = gctx_file[data_node] data_df = parse_data_df(data_dset, sorted_ridx, sorted_cidx, row_meta, col_meta) # (if subsetting) subset metadata row_meta = row_meta.iloc[sorted_ridx] col_meta = col_meta.iloc[sorted_cidx] # get version my_version = gctx_file.attrs[version_node] if type(my_version) == np.ndarray: my_version = my_version[0] gctx_file.close() # make GCToo instance my_gctoo = GCToo.GCToo(data_df=data_df, row_metadata_df=row_meta, col_metadata_df=col_meta, src=full_path, version=my_version, make_multiindex=make_multiindex) return my_gctoo
[ "def", "parse", "(", "gctx_file_path", ",", "convert_neg_666", "=", "True", ",", "rid", "=", "None", ",", "cid", "=", "None", ",", "ridx", "=", "None", ",", "cidx", "=", "None", ",", "row_meta_only", "=", "False", ",", "col_meta_only", "=", "False", ",", "make_multiindex", "=", "False", ")", ":", "full_path", "=", "os", ".", "path", ".", "expanduser", "(", "gctx_file_path", ")", "# Verify that the path exists", "if", "not", "os", ".", "path", ".", "exists", "(", "full_path", ")", ":", "err_msg", "=", "\"The given path to the gctx file cannot be found. full_path: {}\"", "logger", ".", "error", "(", "err_msg", ".", "format", "(", "full_path", ")", ")", "raise", "Exception", "(", "err_msg", ".", "format", "(", "full_path", ")", ")", "logger", ".", "info", "(", "\"Reading GCTX: {}\"", ".", "format", "(", "full_path", ")", ")", "# open file", "gctx_file", "=", "h5py", ".", "File", "(", "full_path", ",", "\"r\"", ")", "if", "row_meta_only", ":", "# read in row metadata", "row_dset", "=", "gctx_file", "[", "row_meta_group_node", "]", "row_meta", "=", "parse_metadata_df", "(", "\"row\"", ",", "row_dset", ",", "convert_neg_666", ")", "# validate optional input ids & get indexes to subset by", "(", "sorted_ridx", ",", "sorted_cidx", ")", "=", "check_and_order_id_inputs", "(", "rid", ",", "ridx", ",", "cid", ",", "cidx", ",", "row_meta", ",", "None", ")", "gctx_file", ".", "close", "(", ")", "# subset if specified, then return", "row_meta", "=", "row_meta", ".", "iloc", "[", "sorted_ridx", "]", "return", "row_meta", "elif", "col_meta_only", ":", "# read in col metadata", "col_dset", "=", "gctx_file", "[", "col_meta_group_node", "]", "col_meta", "=", "parse_metadata_df", "(", "\"col\"", ",", "col_dset", ",", "convert_neg_666", ")", "# validate optional input ids & get indexes to subset by", "(", "sorted_ridx", ",", "sorted_cidx", ")", "=", "check_and_order_id_inputs", "(", "rid", ",", "ridx", ",", "cid", ",", "cidx", ",", "None", ",", "col_meta", ")", "gctx_file", ".", "close", "(", ")", "# subset if specified, then return", "col_meta", "=", "col_meta", ".", "iloc", "[", "sorted_cidx", "]", "return", "col_meta", "else", ":", "# read in row metadata", "row_dset", "=", "gctx_file", "[", "row_meta_group_node", "]", "row_meta", "=", "parse_metadata_df", "(", "\"row\"", ",", "row_dset", ",", "convert_neg_666", ")", "# read in col metadata", "col_dset", "=", "gctx_file", "[", "col_meta_group_node", "]", "col_meta", "=", "parse_metadata_df", "(", "\"col\"", ",", "col_dset", ",", "convert_neg_666", ")", "# validate optional input ids & get indexes to subset by", "(", "sorted_ridx", ",", "sorted_cidx", ")", "=", "check_and_order_id_inputs", "(", "rid", ",", "ridx", ",", "cid", ",", "cidx", ",", "row_meta", ",", "col_meta", ")", "data_dset", "=", "gctx_file", "[", "data_node", "]", "data_df", "=", "parse_data_df", "(", "data_dset", ",", "sorted_ridx", ",", "sorted_cidx", ",", "row_meta", ",", "col_meta", ")", "# (if subsetting) subset metadata", "row_meta", "=", "row_meta", ".", "iloc", "[", "sorted_ridx", "]", "col_meta", "=", "col_meta", ".", "iloc", "[", "sorted_cidx", "]", "# get version", "my_version", "=", "gctx_file", ".", "attrs", "[", "version_node", "]", "if", "type", "(", "my_version", ")", "==", "np", ".", "ndarray", ":", "my_version", "=", "my_version", "[", "0", "]", "gctx_file", ".", "close", "(", ")", "# make GCToo instance", "my_gctoo", "=", "GCToo", ".", "GCToo", "(", "data_df", "=", "data_df", ",", "row_metadata_df", "=", "row_meta", ",", "col_metadata_df", "=", "col_meta", ",", "src", "=", 
"full_path", ",", "version", "=", "my_version", ",", "make_multiindex", "=", "make_multiindex", ")", "return", "my_gctoo" ]
Primary method of script. Reads in path to a gctx file and parses into GCToo object. Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not (see Note below for more details on this). Default = True. - rid (list of strings): list of row ids to specifically keep from gctx. Default=None. - cid (list of strings): list of col ids to specifically keep from gctx. Default=None. - ridx (list of integers): only read the rows corresponding to this list of integer ids. Default=None. - cidx (list of integers): only read the columns corresponding to this list of integer ids. Default=None. - row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True) as pandas DataFrame - col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True) as pandas DataFrame - make_multiindex (bool): whether to create a multi-index df combining the 3 component dfs Output: - myGCToo (GCToo): A GCToo instance containing content of parsed gctx file. Note: if meta_only = True, this will be a GCToo instance where the data_df is empty, i.e. data_df = pd.DataFrame(index=rids, columns = cids) Note: why does convert_neg_666 exist? - In CMap--for somewhat obscure historical reasons--we use "-666" as our null value for metadata. However (so that users can take full advantage of pandas' methods, including those for filtering nan's etc) we provide the option of converting these into numpy.NaN values, the pandas default.
[ "Primary", "method", "of", "script", ".", "Reads", "in", "path", "to", "a", "gctx", "file", "and", "parses", "into", "GCToo", "object", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse_gctx.py#L23-L126
train
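A minimal usage sketch for parse_gctx.parse; "level5_data.gctx" is a placeholder path and the cid values are made-up sample ids, so substitute ids that actually exist in your file:

from cmapPy.pandasGEXpress import parse_gctx

# read only two columns by id; rid/ridx/cidx subset the same way
gctoo = parse_gctx.parse("level5_data.gctx", cid=["sample_1", "sample_2"])
print(gctoo.data_df.shape)

# metadata-only reads skip the data matrix entirely
row_meta_df = parse_gctx.parse("level5_data.gctx", row_meta_only=True)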
cmap/cmapPy
cmapPy/pandasGEXpress/parse_gctx.py
check_id_idx_exclusivity
def check_id_idx_exclusivity(id, idx): """ Makes sure user didn't provide both ids and idx values to subset by. Input: - id (list or None): if not None, a list of string id names - idx (list or None): if not None, a list of integer id indexes Output: - a tuple: first element is subset type, second is subset content """ if (id is not None and idx is not None): msg = ("'id' and 'idx' fields can't both not be None," + " please specify subset in only one of these fields") logger.error(msg) raise Exception("parse_gctx.check_id_idx_exclusivity: " + msg) elif id is not None: return ("id", id) elif idx is not None: return ("idx", idx) else: return (None, [])
python
def check_id_idx_exclusivity(id, idx): """ Makes sure user didn't provide both ids and idx values to subset by. Input: - id (list or None): if not None, a list of string id names - idx (list or None): if not None, a list of integer id indexes Output: - a tuple: first element is subset type, second is subset content """ if (id is not None and idx is not None): msg = ("'id' and 'idx' fields can't both not be None," + " please specify subset in only one of these fields") logger.error(msg) raise Exception("parse_gctx.check_id_idx_exclusivity: " + msg) elif id is not None: return ("id", id) elif idx is not None: return ("idx", idx) else: return (None, [])
[ "def", "check_id_idx_exclusivity", "(", "id", ",", "idx", ")", ":", "if", "(", "id", "is", "not", "None", "and", "idx", "is", "not", "None", ")", ":", "msg", "=", "(", "\"'id' and 'idx' fields can't both not be None,\"", "+", "\" please specify subset in only one of these fields\"", ")", "logger", ".", "error", "(", "msg", ")", "raise", "Exception", "(", "\"parse_gctx.check_id_idx_exclusivity: \"", "+", "msg", ")", "elif", "id", "is", "not", "None", ":", "return", "(", "\"id\"", ",", "id", ")", "elif", "idx", "is", "not", "None", ":", "return", "(", "\"idx\"", ",", "idx", ")", "else", ":", "return", "(", "None", ",", "[", "]", ")" ]
Makes sure user didn't provide both ids and idx values to subset by. Input: - id (list or None): if not None, a list of string id names - idx (list or None): if not None, a list of integer id indexes Output: - a tuple: first element is subset type, second is subset content
[ "Makes", "sure", "user", "didn", "t", "provide", "both", "ids", "and", "idx", "values", "to", "subset", "by", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse_gctx.py#L151-L172
train
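A quick sketch of the three branches of check_id_idx_exclusivity (the gene symbols below are arbitrary examples):

from cmapPy.pandasGEXpress import parse_gctx

print(parse_gctx.check_id_idx_exclusivity(["AKT1", "TP53"], None))  # ('id', ['AKT1', 'TP53'])
print(parse_gctx.check_id_idx_exclusivity(None, [0, 5]))            # ('idx', [0, 5])
print(parse_gctx.check_id_idx_exclusivity(None, None))              # (None, [])
# parse_gctx.check_id_idx_exclusivity(["AKT1"], [0]) raises an Exception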
cmap/cmapPy
cmapPy/pandasGEXpress/parse_gctx.py
parse_data_df
def parse_data_df(data_dset, ridx, cidx, row_meta, col_meta): """ Parses in data_df from hdf5, subsetting if specified. Input: -data_dset (h5py dset): HDF5 dataset from which to read data_df -ridx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -cidx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -row_meta (pandas DataFrame): the parsed in row metadata -col_meta (pandas DataFrame): the parsed in col metadata """ if len(ridx) == len(row_meta.index) and len(cidx) == len(col_meta.index): # no subset data_array = np.empty(data_dset.shape, dtype=np.float32) data_dset.read_direct(data_array) data_array = data_array.transpose() elif len(ridx) <= len(cidx): first_subset = data_dset[:, ridx].astype(np.float32) data_array = first_subset[cidx, :].transpose() elif len(cidx) < len(ridx): first_subset = data_dset[cidx, :].astype(np.float32) data_array = first_subset[:, ridx].transpose() # make DataFrame instance data_df = pd.DataFrame(data_array, index=row_meta.index[ridx], columns=col_meta.index[cidx]) return data_df
python
def parse_data_df(data_dset, ridx, cidx, row_meta, col_meta): """ Parses in data_df from hdf5, subsetting if specified. Input: -data_dset (h5py dset): HDF5 dataset from which to read data_df -ridx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -cidx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -row_meta (pandas DataFrame): the parsed in row metadata -col_meta (pandas DataFrame): the parsed in col metadata """ if len(ridx) == len(row_meta.index) and len(cidx) == len(col_meta.index): # no subset data_array = np.empty(data_dset.shape, dtype=np.float32) data_dset.read_direct(data_array) data_array = data_array.transpose() elif len(ridx) <= len(cidx): first_subset = data_dset[:, ridx].astype(np.float32) data_array = first_subset[cidx, :].transpose() elif len(cidx) < len(ridx): first_subset = data_dset[cidx, :].astype(np.float32) data_array = first_subset[:, ridx].transpose() # make DataFrame instance data_df = pd.DataFrame(data_array, index=row_meta.index[ridx], columns=col_meta.index[cidx]) return data_df
[ "def", "parse_data_df", "(", "data_dset", ",", "ridx", ",", "cidx", ",", "row_meta", ",", "col_meta", ")", ":", "if", "len", "(", "ridx", ")", "==", "len", "(", "row_meta", ".", "index", ")", "and", "len", "(", "cidx", ")", "==", "len", "(", "col_meta", ".", "index", ")", ":", "# no subset", "data_array", "=", "np", ".", "empty", "(", "data_dset", ".", "shape", ",", "dtype", "=", "np", ".", "float32", ")", "data_dset", ".", "read_direct", "(", "data_array", ")", "data_array", "=", "data_array", ".", "transpose", "(", ")", "elif", "len", "(", "ridx", ")", "<=", "len", "(", "cidx", ")", ":", "first_subset", "=", "data_dset", "[", ":", ",", "ridx", "]", ".", "astype", "(", "np", ".", "float32", ")", "data_array", "=", "first_subset", "[", "cidx", ",", ":", "]", ".", "transpose", "(", ")", "elif", "len", "(", "cidx", ")", "<", "len", "(", "ridx", ")", ":", "first_subset", "=", "data_dset", "[", "cidx", ",", ":", "]", ".", "astype", "(", "np", ".", "float32", ")", "data_array", "=", "first_subset", "[", ":", ",", "ridx", "]", ".", "transpose", "(", ")", "# make DataFrame instance", "data_df", "=", "pd", ".", "DataFrame", "(", "data_array", ",", "index", "=", "row_meta", ".", "index", "[", "ridx", "]", ",", "columns", "=", "col_meta", ".", "index", "[", "cidx", "]", ")", "return", "data_df" ]
Parses in data_df from hdf5, subsetting if specified. Input: -data_dset (h5py dset): HDF5 dataset from which to read data_df -ridx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -cidx (list): list of indexes to subset from data_df (may be all of them if no subsetting) -row_meta (pandas DataFrame): the parsed in row metadata -col_meta (pandas DataFrame): the parsed in col metadata
[ "Parses", "in", "data_df", "from", "hdf5", "subsetting", "if", "specified", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse_gctx.py#L320-L345
train
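parse_data_df is an internal helper, but its slicing order is easier to see on a small in-memory HDF5 dataset; this sketch assumes the GCTX convention that the matrix is stored columns x rows, which is why the function transposes after subsetting:

import h5py
import numpy as np
import pandas as pd
from cmapPy.pandasGEXpress import parse_gctx

# purely in-memory HDF5 file: 4 stored columns x 3 stored rows
f = h5py.File("in_memory.h5", "w", driver="core", backing_store=False)
dset = f.create_dataset("data", data=np.arange(12, dtype=np.float32).reshape(4, 3))
row_meta = pd.DataFrame(index=["r0", "r1", "r2"])
col_meta = pd.DataFrame(index=["c0", "c1", "c2", "c3"])

# keep rows 0 and 2, columns 1 and 3
df = parse_gctx.parse_data_df(dset, [0, 2], [1, 3], row_meta, col_meta)
print(df)  # 2 x 2 frame indexed by ["r0", "r2"] / ["c1", "c3"]
f.close()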
cmap/cmapPy
cmapPy/pandasGEXpress/parse_gctx.py
get_column_metadata
def get_column_metadata(gctx_file_path, convert_neg_666=True): """ Opens .gctx file and returns only column metadata Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to num Output: - col_meta (pandas DataFrame): a DataFrame of all column metadata values. """ full_path = os.path.expanduser(gctx_file_path) # open file gctx_file = h5py.File(full_path, "r") col_dset = gctx_file[col_meta_group_node] col_meta = parse_metadata_df("col", col_dset, convert_neg_666) gctx_file.close() return col_meta
python
def get_column_metadata(gctx_file_path, convert_neg_666=True): """ Opens .gctx file and returns only column metadata Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to num Output: - col_meta (pandas DataFrame): a DataFrame of all column metadata values. """ full_path = os.path.expanduser(gctx_file_path) # open file gctx_file = h5py.File(full_path, "r") col_dset = gctx_file[col_meta_group_node] col_meta = parse_metadata_df("col", col_dset, convert_neg_666) gctx_file.close() return col_meta
[ "def", "get_column_metadata", "(", "gctx_file_path", ",", "convert_neg_666", "=", "True", ")", ":", "full_path", "=", "os", ".", "path", ".", "expanduser", "(", "gctx_file_path", ")", "# open file", "gctx_file", "=", "h5py", ".", "File", "(", "full_path", ",", "\"r\"", ")", "col_dset", "=", "gctx_file", "[", "col_meta_group_node", "]", "col_meta", "=", "parse_metadata_df", "(", "\"col\"", ",", "col_dset", ",", "convert_neg_666", ")", "gctx_file", ".", "close", "(", ")", "return", "col_meta" ]
Opens .gctx file and returns only column metadata Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to num Output: - col_meta (pandas DataFrame): a DataFrame of all column metadata values.
[ "Opens", ".", "gctx", "file", "and", "returns", "only", "column", "metadata" ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse_gctx.py#L348-L368
train
cmap/cmapPy
cmapPy/pandasGEXpress/parse_gctx.py
get_row_metadata
def get_row_metadata(gctx_file_path, convert_neg_666=True): """ Opens .gctx file and returns only row metadata Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to num Output: - row_meta (pandas DataFrame): a DataFrame of all row metadata values. """ full_path = os.path.expanduser(gctx_file_path) # open file gctx_file = h5py.File(full_path, "r") row_dset = gctx_file[row_meta_group_node] row_meta = parse_metadata_df("row", row_dset, convert_neg_666) gctx_file.close() return row_meta
python
def get_row_metadata(gctx_file_path, convert_neg_666=True): """ Opens .gctx file and returns only row metadata Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to num Output: - row_meta (pandas DataFrame): a DataFrame of all row metadata values. """ full_path = os.path.expanduser(gctx_file_path) # open file gctx_file = h5py.File(full_path, "r") row_dset = gctx_file[row_meta_group_node] row_meta = parse_metadata_df("row", row_dset, convert_neg_666) gctx_file.close() return row_meta
[ "def", "get_row_metadata", "(", "gctx_file_path", ",", "convert_neg_666", "=", "True", ")", ":", "full_path", "=", "os", ".", "path", ".", "expanduser", "(", "gctx_file_path", ")", "# open file", "gctx_file", "=", "h5py", ".", "File", "(", "full_path", ",", "\"r\"", ")", "row_dset", "=", "gctx_file", "[", "row_meta_group_node", "]", "row_meta", "=", "parse_metadata_df", "(", "\"row\"", ",", "row_dset", ",", "convert_neg_666", ")", "gctx_file", ".", "close", "(", ")", "return", "row_meta" ]
Opens .gctx file and returns only row metadata Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to num Output: - row_meta (pandas DataFrame): a DataFrame of all row metadata values.
[ "Opens", ".", "gctx", "file", "and", "returns", "only", "row", "metadata" ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse_gctx.py#L371-L391
train
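A usage sketch covering both get_row_metadata and get_column_metadata; "my_data.gctx" is a placeholder path for an existing GCTX file:

from cmapPy.pandasGEXpress import parse_gctx

row_meta = parse_gctx.get_row_metadata("my_data.gctx")
col_meta = parse_gctx.get_column_metadata("my_data.gctx", convert_neg_666=False)
print(row_meta.shape, col_meta.shape)  # (n_rows, n_row_headers), (n_cols, n_col_headers)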
cmap/cmapPy
cmapPy/pandasGEXpress/GCToo.py
multi_index_df_to_component_dfs
def multi_index_df_to_component_dfs(multi_index_df, rid="rid", cid="cid"): """ Convert a multi-index df into 3 component dfs. """ # Id level of the multiindex will become the index rids = list(multi_index_df.index.get_level_values(rid)) cids = list(multi_index_df.columns.get_level_values(cid)) # It's possible that the index and/or columns of multi_index_df are not # actually multi-index; need to check for this and there are more than one level in index(python3) if isinstance(multi_index_df.index, pd.MultiIndex): # check if there are more than one levels in index (python3) if len(multi_index_df.index.names) > 1: # If so, drop rid because it won't go into the body of the metadata mi_df_index = multi_index_df.index.droplevel(rid) # Names of the multiindex levels become the headers rhds = list(mi_df_index.names) # Assemble metadata values row_metadata = np.array([mi_df_index.get_level_values(level).values for level in list(rhds)]).T # if there is one level in index (python3), then rhds and row metadata should be empty else: rhds = [] row_metadata = [] # If the index is not multi-index, then rhds and row metadata should be empty else: rhds = [] row_metadata = [] # Check if columns of multi_index_df are in fact multi-index if isinstance(multi_index_df.columns, pd.MultiIndex): # Check if there are more than one levels in columns(python3) if len(multi_index_df.columns.names) > 1: # If so, drop cid because it won't go into the body of the metadata mi_df_columns = multi_index_df.columns.droplevel(cid) # Names of the multiindex levels become the headers chds = list(mi_df_columns.names) # Assemble metadata values col_metadata = np.array([mi_df_columns.get_level_values(level).values for level in list(chds)]).T # If there is one level in columns (python3), then rhds and row metadata should be empty else: chds = [] col_metadata = [] # If the columns are not multi-index, then rhds and row metadata should be empty else: chds = [] col_metadata = [] # Create component dfs row_metadata_df = pd.DataFrame.from_records(row_metadata, index=pd.Index(rids, name="rid"), columns=pd.Index(rhds, name="rhd")) col_metadata_df = pd.DataFrame.from_records(col_metadata, index=pd.Index(cids, name="cid"), columns=pd.Index(chds, name="chd")) data_df = pd.DataFrame(multi_index_df.values, index=pd.Index(rids, name="rid"), columns=pd.Index(cids, name="cid")) return data_df, row_metadata_df, col_metadata_df
python
def multi_index_df_to_component_dfs(multi_index_df, rid="rid", cid="cid"): """ Convert a multi-index df into 3 component dfs. """ # Id level of the multiindex will become the index rids = list(multi_index_df.index.get_level_values(rid)) cids = list(multi_index_df.columns.get_level_values(cid)) # It's possible that the index and/or columns of multi_index_df are not # actually multi-index; need to check for this and there are more than one level in index(python3) if isinstance(multi_index_df.index, pd.MultiIndex): # check if there are more than one levels in index (python3) if len(multi_index_df.index.names) > 1: # If so, drop rid because it won't go into the body of the metadata mi_df_index = multi_index_df.index.droplevel(rid) # Names of the multiindex levels become the headers rhds = list(mi_df_index.names) # Assemble metadata values row_metadata = np.array([mi_df_index.get_level_values(level).values for level in list(rhds)]).T # if there is one level in index (python3), then rhds and row metadata should be empty else: rhds = [] row_metadata = [] # If the index is not multi-index, then rhds and row metadata should be empty else: rhds = [] row_metadata = [] # Check if columns of multi_index_df are in fact multi-index if isinstance(multi_index_df.columns, pd.MultiIndex): # Check if there are more than one levels in columns(python3) if len(multi_index_df.columns.names) > 1: # If so, drop cid because it won't go into the body of the metadata mi_df_columns = multi_index_df.columns.droplevel(cid) # Names of the multiindex levels become the headers chds = list(mi_df_columns.names) # Assemble metadata values col_metadata = np.array([mi_df_columns.get_level_values(level).values for level in list(chds)]).T # If there is one level in columns (python3), then rhds and row metadata should be empty else: chds = [] col_metadata = [] # If the columns are not multi-index, then rhds and row metadata should be empty else: chds = [] col_metadata = [] # Create component dfs row_metadata_df = pd.DataFrame.from_records(row_metadata, index=pd.Index(rids, name="rid"), columns=pd.Index(rhds, name="rhd")) col_metadata_df = pd.DataFrame.from_records(col_metadata, index=pd.Index(cids, name="cid"), columns=pd.Index(chds, name="chd")) data_df = pd.DataFrame(multi_index_df.values, index=pd.Index(rids, name="rid"), columns=pd.Index(cids, name="cid")) return data_df, row_metadata_df, col_metadata_df
[ "def", "multi_index_df_to_component_dfs", "(", "multi_index_df", ",", "rid", "=", "\"rid\"", ",", "cid", "=", "\"cid\"", ")", ":", "# Id level of the multiindex will become the index", "rids", "=", "list", "(", "multi_index_df", ".", "index", ".", "get_level_values", "(", "rid", ")", ")", "cids", "=", "list", "(", "multi_index_df", ".", "columns", ".", "get_level_values", "(", "cid", ")", ")", "# It's possible that the index and/or columns of multi_index_df are not", "# actually multi-index; need to check for this and there are more than one level in index(python3)", "if", "isinstance", "(", "multi_index_df", ".", "index", ",", "pd", ".", "MultiIndex", ")", ":", "# check if there are more than one levels in index (python3)", "if", "len", "(", "multi_index_df", ".", "index", ".", "names", ")", ">", "1", ":", "# If so, drop rid because it won't go into the body of the metadata", "mi_df_index", "=", "multi_index_df", ".", "index", ".", "droplevel", "(", "rid", ")", "# Names of the multiindex levels become the headers", "rhds", "=", "list", "(", "mi_df_index", ".", "names", ")", "# Assemble metadata values", "row_metadata", "=", "np", ".", "array", "(", "[", "mi_df_index", ".", "get_level_values", "(", "level", ")", ".", "values", "for", "level", "in", "list", "(", "rhds", ")", "]", ")", ".", "T", "# if there is one level in index (python3), then rhds and row metadata should be empty", "else", ":", "rhds", "=", "[", "]", "row_metadata", "=", "[", "]", "# If the index is not multi-index, then rhds and row metadata should be empty", "else", ":", "rhds", "=", "[", "]", "row_metadata", "=", "[", "]", "# Check if columns of multi_index_df are in fact multi-index", "if", "isinstance", "(", "multi_index_df", ".", "columns", ",", "pd", ".", "MultiIndex", ")", ":", "# Check if there are more than one levels in columns(python3)", "if", "len", "(", "multi_index_df", ".", "columns", ".", "names", ")", ">", "1", ":", "# If so, drop cid because it won't go into the body of the metadata", "mi_df_columns", "=", "multi_index_df", ".", "columns", ".", "droplevel", "(", "cid", ")", "# Names of the multiindex levels become the headers", "chds", "=", "list", "(", "mi_df_columns", ".", "names", ")", "# Assemble metadata values", "col_metadata", "=", "np", ".", "array", "(", "[", "mi_df_columns", ".", "get_level_values", "(", "level", ")", ".", "values", "for", "level", "in", "list", "(", "chds", ")", "]", ")", ".", "T", "# If there is one level in columns (python3), then rhds and row metadata should be empty", "else", ":", "chds", "=", "[", "]", "col_metadata", "=", "[", "]", "# If the columns are not multi-index, then rhds and row metadata should be empty", "else", ":", "chds", "=", "[", "]", "col_metadata", "=", "[", "]", "# Create component dfs", "row_metadata_df", "=", "pd", ".", "DataFrame", ".", "from_records", "(", "row_metadata", ",", "index", "=", "pd", ".", "Index", "(", "rids", ",", "name", "=", "\"rid\"", ")", ",", "columns", "=", "pd", ".", "Index", "(", "rhds", ",", "name", "=", "\"rhd\"", ")", ")", "col_metadata_df", "=", "pd", ".", "DataFrame", ".", "from_records", "(", "col_metadata", ",", "index", "=", "pd", ".", "Index", "(", "cids", ",", "name", "=", "\"cid\"", ")", ",", "columns", "=", "pd", ".", "Index", "(", "chds", ",", "name", "=", "\"chd\"", ")", ")", "data_df", "=", "pd", ".", "DataFrame", "(", "multi_index_df", ".", "values", ",", "index", "=", "pd", ".", "Index", "(", "rids", ",", "name", "=", "\"rid\"", ")", ",", "columns", "=", "pd", ".", "Index", "(", "cids", ",", "name", "=", 
"\"cid\"", ")", ")", "return", "data_df", ",", "row_metadata_df", ",", "col_metadata_df" ]
Convert a multi-index df into 3 component dfs.
[ "Convert", "a", "multi", "-", "index", "df", "into", "3", "component", "dfs", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/GCToo.py#L222-L284
train
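A self-contained sketch of multi_index_df_to_component_dfs on a tiny hand-built multi-index frame; the metadata header names (pr_gene_symbol, pert_time) are only illustrative:

import numpy as np
import pandas as pd
from cmapPy.pandasGEXpress import GCToo

row_index = pd.MultiIndex.from_arrays([["r1", "r2"], ["gene_a", "gene_b"]],
                                       names=["rid", "pr_gene_symbol"])
col_index = pd.MultiIndex.from_arrays([["c1", "c2"], ["24 h", "6 h"]],
                                       names=["cid", "pert_time"])
mi_df = pd.DataFrame(np.arange(4.0).reshape(2, 2), index=row_index, columns=col_index)

data_df, row_meta, col_meta = GCToo.multi_index_df_to_component_dfs(mi_df)
print(row_meta)  # pr_gene_symbol column indexed by rid
print(col_meta)  # pert_time column indexed by cid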
cmap/cmapPy
cmapPy/pandasGEXpress/GCToo.py
GCToo.check_df
def check_df(self, df): """ Verifies that df is a pandas DataFrame instance and that its index and column values are unique. """ if isinstance(df, pd.DataFrame): if not df.index.is_unique: repeats = df.index[df.index.duplicated()].values msg = "Index values must be unique but aren't. The following entries appear more than once: {}".format(repeats) self.logger.error(msg) raise Exception("GCToo GCToo.check_df " + msg) if not df.columns.is_unique: repeats = df.columns[df.columns.duplicated()].values msg = "Columns values must be unique but aren't. The following entries appear more than once: {}".format(repeats) raise Exception("GCToo GCToo.check_df " + msg) else: return True else: msg = "expected Pandas DataFrame, got something else: {} of type: {}".format(df, type(df)) self.logger.error(msg) raise Exception("GCToo GCToo.check_df " + msg)
python
def check_df(self, df): """ Verifies that df is a pandas DataFrame instance and that its index and column values are unique. """ if isinstance(df, pd.DataFrame): if not df.index.is_unique: repeats = df.index[df.index.duplicated()].values msg = "Index values must be unique but aren't. The following entries appear more than once: {}".format(repeats) self.logger.error(msg) raise Exception("GCToo GCToo.check_df " + msg) if not df.columns.is_unique: repeats = df.columns[df.columns.duplicated()].values msg = "Columns values must be unique but aren't. The following entries appear more than once: {}".format(repeats) raise Exception("GCToo GCToo.check_df " + msg) else: return True else: msg = "expected Pandas DataFrame, got something else: {} of type: {}".format(df, type(df)) self.logger.error(msg) raise Exception("GCToo GCToo.check_df " + msg)
[ "def", "check_df", "(", "self", ",", "df", ")", ":", "if", "isinstance", "(", "df", ",", "pd", ".", "DataFrame", ")", ":", "if", "not", "df", ".", "index", ".", "is_unique", ":", "repeats", "=", "df", ".", "index", "[", "df", ".", "index", ".", "duplicated", "(", ")", "]", ".", "values", "msg", "=", "\"Index values must be unique but aren't. The following entries appear more than once: {}\"", ".", "format", "(", "repeats", ")", "self", ".", "logger", ".", "error", "(", "msg", ")", "raise", "Exception", "(", "\"GCToo GCToo.check_df \"", "+", "msg", ")", "if", "not", "df", ".", "columns", ".", "is_unique", ":", "repeats", "=", "df", ".", "columns", "[", "df", ".", "columns", ".", "duplicated", "(", ")", "]", ".", "values", "msg", "=", "\"Columns values must be unique but aren't. The following entries appear more than once: {}\"", ".", "format", "(", "repeats", ")", "raise", "Exception", "(", "\"GCToo GCToo.check_df \"", "+", "msg", ")", "else", ":", "return", "True", "else", ":", "msg", "=", "\"expected Pandas DataFrame, got something else: {} of type: {}\"", ".", "format", "(", "df", ",", "type", "(", "df", ")", ")", "self", ".", "logger", ".", "error", "(", "msg", ")", "raise", "Exception", "(", "\"GCToo GCToo.check_df \"", "+", "msg", ")" ]
Verifies that df is a pandas DataFrame instance and that its index and column values are unique.
[ "Verifies", "that", "df", "is", "a", "pandas", "DataFrame", "instance", "and", "that", "its", "index", "and", "column", "values", "are", "unique", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/GCToo.py#L125-L145
train
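check_df runs inside the GCToo constructor, so rather than instantiate a full GCToo here, this sketch only shows the pandas uniqueness checks the method relies on:

import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]], index=["r1", "r1"], columns=["c1", "c2"])
print(df.index.is_unique)                      # False -> check_df would raise
print(df.index[df.index.duplicated()].values)  # ['r1'], the offending id reported in the error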
cmap/cmapPy
cmapPy/clue_api_client/gene_queries.py
are_genes_in_api
def are_genes_in_api(my_clue_api_client, gene_symbols): """determine if genes are present in the API Args: my_clue_api_client: gene_symbols: collection of gene symbols to query the API with Returns: set of the found gene symbols """ if len(gene_symbols) > 0: query_gene_symbols = gene_symbols if type(gene_symbols) is list else list(gene_symbols) query_result = my_clue_api_client.run_filter_query(resource_name, {"where":{"gene_symbol":{"inq":query_gene_symbols}}, "fields":{"gene_symbol":True}}) logger.debug("query_result: {}".format(query_result)) r = set([x["gene_symbol"] for x in query_result]) return r else: logger.warning("provided gene_symbols was empty, cannot run query") return set()
python
def are_genes_in_api(my_clue_api_client, gene_symbols): """determine if genes are present in the API Args: my_clue_api_client: gene_symbols: collection of gene symbols to query the API with Returns: set of the found gene symbols """ if len(gene_symbols) > 0: query_gene_symbols = gene_symbols if type(gene_symbols) is list else list(gene_symbols) query_result = my_clue_api_client.run_filter_query(resource_name, {"where":{"gene_symbol":{"inq":query_gene_symbols}}, "fields":{"gene_symbol":True}}) logger.debug("query_result: {}".format(query_result)) r = set([x["gene_symbol"] for x in query_result]) return r else: logger.warning("provided gene_symbols was empty, cannot run query") return set()
[ "def", "are_genes_in_api", "(", "my_clue_api_client", ",", "gene_symbols", ")", ":", "if", "len", "(", "gene_symbols", ")", ">", "0", ":", "query_gene_symbols", "=", "gene_symbols", "if", "type", "(", "gene_symbols", ")", "is", "list", "else", "list", "(", "gene_symbols", ")", "query_result", "=", "my_clue_api_client", ".", "run_filter_query", "(", "resource_name", ",", "{", "\"where\"", ":", "{", "\"gene_symbol\"", ":", "{", "\"inq\"", ":", "query_gene_symbols", "}", "}", ",", "\"fields\"", ":", "{", "\"gene_symbol\"", ":", "True", "}", "}", ")", "logger", ".", "debug", "(", "\"query_result: {}\"", ".", "format", "(", "query_result", ")", ")", "r", "=", "set", "(", "[", "x", "[", "\"gene_symbol\"", "]", "for", "x", "in", "query_result", "]", ")", "return", "r", "else", ":", "logger", ".", "warning", "(", "\"provided gene_symbols was empty, cannot run query\"", ")", "return", "set", "(", ")" ]
determine if genes are present in the API Args: my_clue_api_client: gene_symbols: collection of gene symbols to query the API with Returns: set of the found gene symbols
[ "determine", "if", "genes", "are", "present", "in", "the", "API" ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/clue_api_client/gene_queries.py#L13-L34
train
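Real use of are_genes_in_api needs an authenticated ClueApiClient; to keep this sketch self-contained it substitutes a hypothetical stub exposing only the single method the function calls:

from cmapPy.clue_api_client import gene_queries

class FakeClueApiClient(object):
    """Hypothetical stand-in exposing only run_filter_query."""
    def run_filter_query(self, resource_name, filter_clause):
        # pretend the API only recognizes AKT1
        return [{"gene_symbol": "AKT1"}]

found = gene_queries.are_genes_in_api(FakeClueApiClient(), {"AKT1", "NOT_A_GENE"})
print(found)  # {'AKT1'}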
cmap/cmapPy
cmapPy/pandasGEXpress/write_gct.py
write
def write(gctoo, out_fname, data_null="NaN", metadata_null="-666", filler_null="-666", data_float_format="%.4f"): """Write a gctoo object to a gct file. Args: gctoo (gctoo object) out_fname (string): filename for output gct file data_null (string): how to represent missing values in the data (default = "NaN") metadata_null (string): how to represent missing values in the metadata (default = "-666") filler_null (string): what value to fill the top-left filler block with (default = "-666") data_float_format (string): how many decimal points to keep in representing data (default = 4 digits; None will keep all digits) Returns: None """ # Create handle for output file if not out_fname.endswith(".gct"): out_fname += ".gct" f = open(out_fname, "w") # Write first two lines dims = [str(gctoo.data_df.shape[0]), str(gctoo.data_df.shape[1]), str(gctoo.row_metadata_df.shape[1]), str(gctoo.col_metadata_df.shape[1])] write_version_and_dims(VERSION, dims, f) # Write top half of the gct write_top_half(f, gctoo.row_metadata_df, gctoo.col_metadata_df, metadata_null, filler_null) # Write bottom half of the gct write_bottom_half(f, gctoo.row_metadata_df, gctoo.data_df, data_null, data_float_format, metadata_null) f.close() logger.info("GCT has been written to {}".format(out_fname))
python
def write(gctoo, out_fname, data_null="NaN", metadata_null="-666", filler_null="-666", data_float_format="%.4f"): """Write a gctoo object to a gct file. Args: gctoo (gctoo object) out_fname (string): filename for output gct file data_null (string): how to represent missing values in the data (default = "NaN") metadata_null (string): how to represent missing values in the metadata (default = "-666") filler_null (string): what value to fill the top-left filler block with (default = "-666") data_float_format (string): how many decimal points to keep in representing data (default = 4 digits; None will keep all digits) Returns: None """ # Create handle for output file if not out_fname.endswith(".gct"): out_fname += ".gct" f = open(out_fname, "w") # Write first two lines dims = [str(gctoo.data_df.shape[0]), str(gctoo.data_df.shape[1]), str(gctoo.row_metadata_df.shape[1]), str(gctoo.col_metadata_df.shape[1])] write_version_and_dims(VERSION, dims, f) # Write top half of the gct write_top_half(f, gctoo.row_metadata_df, gctoo.col_metadata_df, metadata_null, filler_null) # Write bottom half of the gct write_bottom_half(f, gctoo.row_metadata_df, gctoo.data_df, data_null, data_float_format, metadata_null) f.close() logger.info("GCT has been written to {}".format(out_fname))
[ "def", "write", "(", "gctoo", ",", "out_fname", ",", "data_null", "=", "\"NaN\"", ",", "metadata_null", "=", "\"-666\"", ",", "filler_null", "=", "\"-666\"", ",", "data_float_format", "=", "\"%.4f\"", ")", ":", "# Create handle for output file", "if", "not", "out_fname", ".", "endswith", "(", "\".gct\"", ")", ":", "out_fname", "+=", "\".gct\"", "f", "=", "open", "(", "out_fname", ",", "\"w\"", ")", "# Write first two lines", "dims", "=", "[", "str", "(", "gctoo", ".", "data_df", ".", "shape", "[", "0", "]", ")", ",", "str", "(", "gctoo", ".", "data_df", ".", "shape", "[", "1", "]", ")", ",", "str", "(", "gctoo", ".", "row_metadata_df", ".", "shape", "[", "1", "]", ")", ",", "str", "(", "gctoo", ".", "col_metadata_df", ".", "shape", "[", "1", "]", ")", "]", "write_version_and_dims", "(", "VERSION", ",", "dims", ",", "f", ")", "# Write top half of the gct", "write_top_half", "(", "f", ",", "gctoo", ".", "row_metadata_df", ",", "gctoo", ".", "col_metadata_df", ",", "metadata_null", ",", "filler_null", ")", "# Write bottom half of the gct", "write_bottom_half", "(", "f", ",", "gctoo", ".", "row_metadata_df", ",", "gctoo", ".", "data_df", ",", "data_null", ",", "data_float_format", ",", "metadata_null", ")", "f", ".", "close", "(", ")", "logger", ".", "info", "(", "\"GCT has been written to {}\"", ".", "format", "(", "out_fname", ")", ")" ]
Write a gctoo object to a gct file. Args: gctoo (gctoo object) out_fname (string): filename for output gct file data_null (string): how to represent missing values in the data (default = "NaN") metadata_null (string): how to represent missing values in the metadata (default = "-666") filler_null (string): what value to fill the top-left filler block with (default = "-666") data_float_format (string): how many decimal points to keep in representing data (default = 4 digits; None will keep all digits) Returns: None
[ "Write", "a", "gctoo", "object", "to", "a", "gct", "file", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/write_gct.py#L16-L51
train
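A minimal sketch that builds a 2 x 2 GCToo and writes it to disk; the metadata headers and values are invented, and the index/column names follow the rid/cid convention used elsewhere in the package:

import pandas as pd
from cmapPy.pandasGEXpress import GCToo, write_gct

data_df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
                       index=pd.Index(["r1", "r2"], name="rid"),
                       columns=pd.Index(["c1", "c2"], name="cid"))
row_meta = pd.DataFrame({"pr_gene_symbol": ["gene_a", "gene_b"]},
                        index=pd.Index(["r1", "r2"], name="rid"))
col_meta = pd.DataFrame({"pert_iname": ["dmso", "drug_x"]},
                        index=pd.Index(["c1", "c2"], name="cid"))

gctoo = GCToo.GCToo(data_df=data_df, row_metadata_df=row_meta, col_metadata_df=col_meta)
write_gct.write(gctoo, "tiny_example", data_float_format="%.1f")  # writes tiny_example.gct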
cmap/cmapPy
cmapPy/pandasGEXpress/write_gct.py
write_version_and_dims
def write_version_and_dims(version, dims, f): """Write first two lines of gct file. Args: version (string): 1.3 by default dims (list of strings): length = 4 f (file handle): handle of output file Returns: nothing """ f.write(("#" + version + "\n")) f.write((dims[0] + "\t" + dims[1] + "\t" + dims[2] + "\t" + dims[3] + "\n"))
python
def write_version_and_dims(version, dims, f): """Write first two lines of gct file. Args: version (string): 1.3 by default dims (list of strings): length = 4 f (file handle): handle of output file Returns: nothing """ f.write(("#" + version + "\n")) f.write((dims[0] + "\t" + dims[1] + "\t" + dims[2] + "\t" + dims[3] + "\n"))
[ "def", "write_version_and_dims", "(", "version", ",", "dims", ",", "f", ")", ":", "f", ".", "write", "(", "(", "\"#\"", "+", "version", "+", "\"\\n\"", ")", ")", "f", ".", "write", "(", "(", "dims", "[", "0", "]", "+", "\"\\t\"", "+", "dims", "[", "1", "]", "+", "\"\\t\"", "+", "dims", "[", "2", "]", "+", "\"\\t\"", "+", "dims", "[", "3", "]", "+", "\"\\n\"", ")", ")" ]
Write first two lines of gct file. Args: version (string): 1.3 by default dims (list of strings): length = 4 f (file handle): handle of output file Returns: nothing
[ "Write", "first", "two", "lines", "of", "gct", "file", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/write_gct.py#L54-L65
train
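The header-writing helper can be exercised against an in-memory buffer instead of a real file handle:

import io
from cmapPy.pandasGEXpress import write_gct

buf = io.StringIO()
write_gct.write_version_and_dims("1.3", ["978", "371", "11", "35"], buf)
print(buf.getvalue())  # "#1.3" line followed by the tab-separated dims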
cmap/cmapPy
cmapPy/pandasGEXpress/write_gct.py
append_dims_and_file_extension
def append_dims_and_file_extension(fname, data_df): """Append dimensions and file extension to output filename. N.B. Dimensions are cols x rows. Args: fname (string): output filename data_df (pandas df) Returns: out_fname (string): output filename with matrix dims and .gct appended """ # If there's no .gct at the end of output file name, add the dims and .gct if not fname.endswith(".gct"): out_fname = '{0}_n{1}x{2}.gct'.format(fname, data_df.shape[1], data_df.shape[0]) return out_fname # Otherwise, only add the dims else: basename = os.path.splitext(fname)[0] out_fname = '{0}_n{1}x{2}.gct'.format(basename, data_df.shape[1], data_df.shape[0]) return out_fname
python
def append_dims_and_file_extension(fname, data_df): """Append dimensions and file extension to output filename. N.B. Dimensions are cols x rows. Args: fname (string): output filename data_df (pandas df) Returns: out_fname (string): output filename with matrix dims and .gct appended """ # If there's no .gct at the end of output file name, add the dims and .gct if not fname.endswith(".gct"): out_fname = '{0}_n{1}x{2}.gct'.format(fname, data_df.shape[1], data_df.shape[0]) return out_fname # Otherwise, only add the dims else: basename = os.path.splitext(fname)[0] out_fname = '{0}_n{1}x{2}.gct'.format(basename, data_df.shape[1], data_df.shape[0]) return out_fname
[ "def", "append_dims_and_file_extension", "(", "fname", ",", "data_df", ")", ":", "# If there's no .gct at the end of output file name, add the dims and .gct", "if", "not", "fname", ".", "endswith", "(", "\".gct\"", ")", ":", "out_fname", "=", "'{0}_n{1}x{2}.gct'", ".", "format", "(", "fname", ",", "data_df", ".", "shape", "[", "1", "]", ",", "data_df", ".", "shape", "[", "0", "]", ")", "return", "out_fname", "# Otherwise, only add the dims", "else", ":", "basename", "=", "os", ".", "path", ".", "splitext", "(", "fname", ")", "[", "0", "]", "out_fname", "=", "'{0}_n{1}x{2}.gct'", ".", "format", "(", "basename", ",", "data_df", ".", "shape", "[", "1", "]", ",", "data_df", ".", "shape", "[", "0", "]", ")", "return", "out_fname" ]
Append dimensions and file extension to output filename. N.B. Dimensions are cols x rows. Args: fname (string): output filename data_df (pandas df) Returns: out_fname (string): output filename with matrix dims and .gct appended
[ "Append", "dimensions", "and", "file", "extension", "to", "output", "filename", ".", "N", ".", "B", ".", "Dimensions", "are", "cols", "x", "rows", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/write_gct.py#L142-L161
train
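A quick sketch showing that append_dims_and_file_extension encodes the matrix shape as n<cols>x<rows> whether or not the name already ends in .gct:

import pandas as pd
from cmapPy.pandasGEXpress import write_gct

df = pd.DataFrame([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])  # 2 rows x 3 columns
print(write_gct.append_dims_and_file_extension("my_output", df))      # my_output_n3x2.gct
print(write_gct.append_dims_and_file_extension("my_output.gct", df))  # my_output_n3x2.gct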
cmap/cmapPy
cmapPy/math/robust_zscore.py
robust_zscore
def robust_zscore(mat, ctrl_mat=None, min_mad=0.1): ''' Robustly z-score a pandas df along the rows. Args: mat (pandas df): Matrix of data that z-scoring will be applied to ctrl_mat (pandas df): Optional matrix from which to compute medians and MADs (e.g. vehicle control) min_mad (float): Minimum MAD to threshold to; tiny MAD values will cause z-scores to blow up Returns: zscore_df (pandas_df): z-scored data ''' # If optional df exists, calc medians and mads from it if ctrl_mat is not None: medians = ctrl_mat.median(axis=1) median_devs = abs(ctrl_mat.subtract(medians, axis=0)) # Else just use plate medians else: medians = mat.median(axis=1) median_devs = abs(mat.subtract(medians, axis=0)) sub = mat.subtract(medians, axis='index') mads = median_devs.median(axis=1) # Threshold mads mads = mads.clip(lower=min_mad) # Must multiply values by 1.4826 to make MAD comparable to SD # (https://en.wikipedia.org/wiki/Median_absolute_deviation) zscore_df = sub.divide(mads * 1.4826, axis='index') return zscore_df.round(rounding_precision)
python
def robust_zscore(mat, ctrl_mat=None, min_mad=0.1): ''' Robustly z-score a pandas df along the rows. Args: mat (pandas df): Matrix of data that z-scoring will be applied to ctrl_mat (pandas df): Optional matrix from which to compute medians and MADs (e.g. vehicle control) min_mad (float): Minimum MAD to threshold to; tiny MAD values will cause z-scores to blow up Returns: zscore_df (pandas_df): z-scored data ''' # If optional df exists, calc medians and mads from it if ctrl_mat is not None: medians = ctrl_mat.median(axis=1) median_devs = abs(ctrl_mat.subtract(medians, axis=0)) # Else just use plate medians else: medians = mat.median(axis=1) median_devs = abs(mat.subtract(medians, axis=0)) sub = mat.subtract(medians, axis='index') mads = median_devs.median(axis=1) # Threshold mads mads = mads.clip(lower=min_mad) # Must multiply values by 1.4826 to make MAD comparable to SD # (https://en.wikipedia.org/wiki/Median_absolute_deviation) zscore_df = sub.divide(mads * 1.4826, axis='index') return zscore_df.round(rounding_precision)
[ "def", "robust_zscore", "(", "mat", ",", "ctrl_mat", "=", "None", ",", "min_mad", "=", "0.1", ")", ":", "# If optional df exists, calc medians and mads from it", "if", "ctrl_mat", "is", "not", "None", ":", "medians", "=", "ctrl_mat", ".", "median", "(", "axis", "=", "1", ")", "median_devs", "=", "abs", "(", "ctrl_mat", ".", "subtract", "(", "medians", ",", "axis", "=", "0", ")", ")", "# Else just use plate medians", "else", ":", "medians", "=", "mat", ".", "median", "(", "axis", "=", "1", ")", "median_devs", "=", "abs", "(", "mat", ".", "subtract", "(", "medians", ",", "axis", "=", "0", ")", ")", "sub", "=", "mat", ".", "subtract", "(", "medians", ",", "axis", "=", "'index'", ")", "mads", "=", "median_devs", ".", "median", "(", "axis", "=", "1", ")", "# Threshold mads", "mads", "=", "mads", ".", "clip", "(", "lower", "=", "min_mad", ")", "# Must multiply values by 1.4826 to make MAD comparable to SD", "# (https://en.wikipedia.org/wiki/Median_absolute_deviation)", "zscore_df", "=", "sub", ".", "divide", "(", "mads", "*", "1.4826", ",", "axis", "=", "'index'", ")", "return", "zscore_df", ".", "round", "(", "rounding_precision", ")" ]
Robustly z-score a pandas df along the rows. Args: mat (pandas df): Matrix of data that z-scoring will be applied to ctrl_mat (pandas df): Optional matrix from which to compute medians and MADs (e.g. vehicle control) min_mad (float): Minimum MAD to threshold to; tiny MAD values will cause z-scores to blow up Returns: zscore_df (pandas_df): z-scored data
[ "Robustly", "z", "-", "score", "a", "pandas", "df", "along", "the", "rows", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/math/robust_zscore.py#L24-L58
train
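A small sketch of robust_zscore on invented numbers; gene_b has zero spread, which is exactly the case the min_mad clipping guards against:

import pandas as pd
from cmapPy.math import robust_zscore

mat = pd.DataFrame([[1.0, 2.0, 10.0],
                    [5.0, 5.0, 5.0]],
                   index=["gene_a", "gene_b"], columns=["s1", "s2", "s3"])
print(robust_zscore.robust_zscore(mat))
# gene_b's MAD of 0 is clipped to 0.1, so its z-scores come out as 0 instead of blowing up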
cmap/cmapPy
cmapPy/pandasGEXpress/parse.py
parse
def parse(file_path, convert_neg_666=True, rid=None, cid=None, ridx=None, cidx=None, row_meta_only=False, col_meta_only=False, make_multiindex=False): """ Identifies whether file_path corresponds to a .gct or .gctx file and calls the correct corresponding parse method. Input: Mandatory: - gct(x)_file_path (str): full path to gct(x) file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not (see Note below for more details on this). Default = False. - rid (list of strings): list of row ids to specifically keep from gctx. Default=None. - cid (list of strings): list of col ids to specifically keep from gctx. Default=None. - ridx (list of integers): only read the rows corresponding to this list of integer ids. Default=None. - cidx (list of integers): only read the columns corresponding to this list of integer ids. Default=None. - row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True) as pandas DataFrame - col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True) as pandas DataFrame - make_multiindex (bool): whether to create a multi-index df combining the 3 component dfs Output: - out (GCToo object or pandas df): if row_meta_only or col_meta_only, then out is a metadata df; otherwise, it's a GCToo instance containing content of parsed gct(x) file Note: why does convert_neg_666 exist? - In CMap--for somewhat obscure historical reasons--we use "-666" as our null value for metadata. However (so that users can take full advantage of pandas' methods, including those for filtering nan's etc) we provide the option of converting these into numpy.NaN values, the pandas default. """ if file_path.endswith(".gct"): out = parse_gct.parse(file_path, convert_neg_666=convert_neg_666, rid=rid, cid=cid, ridx=ridx, cidx=cidx, row_meta_only=row_meta_only, col_meta_only=col_meta_only, make_multiindex=make_multiindex) elif file_path.endswith(".gctx"): out = parse_gctx.parse(file_path, convert_neg_666=convert_neg_666, rid=rid, cid=cid, ridx=ridx, cidx=cidx, row_meta_only=row_meta_only, col_meta_only=col_meta_only, make_multiindex=make_multiindex) else: err_msg = "File to parse must be .gct or .gctx!" logger.error(err_msg) raise Exception(err_msg) return out
python
def parse(file_path, convert_neg_666=True, rid=None, cid=None, ridx=None, cidx=None, row_meta_only=False, col_meta_only=False, make_multiindex=False): """ Identifies whether file_path corresponds to a .gct or .gctx file and calls the correct corresponding parse method. Input: Mandatory: - gct(x)_file_path (str): full path to gct(x) file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not (see Note below for more details on this). Default = False. - rid (list of strings): list of row ids to specifically keep from gctx. Default=None. - cid (list of strings): list of col ids to specifically keep from gctx. Default=None. - ridx (list of integers): only read the rows corresponding to this list of integer ids. Default=None. - cidx (list of integers): only read the columns corresponding to this list of integer ids. Default=None. - row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True) as pandas DataFrame - col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True) as pandas DataFrame - make_multiindex (bool): whether to create a multi-index df combining the 3 component dfs Output: - out (GCToo object or pandas df): if row_meta_only or col_meta_only, then out is a metadata df; otherwise, it's a GCToo instance containing content of parsed gct(x) file Note: why does convert_neg_666 exist? - In CMap--for somewhat obscure historical reasons--we use "-666" as our null value for metadata. However (so that users can take full advantage of pandas' methods, including those for filtering nan's etc) we provide the option of converting these into numpy.NaN values, the pandas default. """ if file_path.endswith(".gct"): out = parse_gct.parse(file_path, convert_neg_666=convert_neg_666, rid=rid, cid=cid, ridx=ridx, cidx=cidx, row_meta_only=row_meta_only, col_meta_only=col_meta_only, make_multiindex=make_multiindex) elif file_path.endswith(".gctx"): out = parse_gctx.parse(file_path, convert_neg_666=convert_neg_666, rid=rid, cid=cid, ridx=ridx, cidx=cidx, row_meta_only=row_meta_only, col_meta_only=col_meta_only, make_multiindex=make_multiindex) else: err_msg = "File to parse must be .gct or .gctx!" logger.error(err_msg) raise Exception(err_msg) return out
[ "def", "parse", "(", "file_path", ",", "convert_neg_666", "=", "True", ",", "rid", "=", "None", ",", "cid", "=", "None", ",", "ridx", "=", "None", ",", "cidx", "=", "None", ",", "row_meta_only", "=", "False", ",", "col_meta_only", "=", "False", ",", "make_multiindex", "=", "False", ")", ":", "if", "file_path", ".", "endswith", "(", "\".gct\"", ")", ":", "out", "=", "parse_gct", ".", "parse", "(", "file_path", ",", "convert_neg_666", "=", "convert_neg_666", ",", "rid", "=", "rid", ",", "cid", "=", "cid", ",", "ridx", "=", "ridx", ",", "cidx", "=", "cidx", ",", "row_meta_only", "=", "row_meta_only", ",", "col_meta_only", "=", "col_meta_only", ",", "make_multiindex", "=", "make_multiindex", ")", "elif", "file_path", ".", "endswith", "(", "\".gctx\"", ")", ":", "out", "=", "parse_gctx", ".", "parse", "(", "file_path", ",", "convert_neg_666", "=", "convert_neg_666", ",", "rid", "=", "rid", ",", "cid", "=", "cid", ",", "ridx", "=", "ridx", ",", "cidx", "=", "cidx", ",", "row_meta_only", "=", "row_meta_only", ",", "col_meta_only", "=", "col_meta_only", ",", "make_multiindex", "=", "make_multiindex", ")", "else", ":", "err_msg", "=", "\"File to parse must be .gct or .gctx!\"", "logger", ".", "error", "(", "err_msg", ")", "raise", "Exception", "(", "err_msg", ")", "return", "out" ]
Identifies whether file_path corresponds to a .gct or .gctx file and calls the correct corresponding parse method. Input: Mandatory: - gct(x)_file_path (str): full path to gct(x) file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not (see Note below for more details on this). Default = False. - rid (list of strings): list of row ids to specifically keep from gctx. Default=None. - cid (list of strings): list of col ids to specifically keep from gctx. Default=None. - ridx (list of integers): only read the rows corresponding to this list of integer ids. Default=None. - cidx (list of integers): only read the columns corresponding to this list of integer ids. Default=None. - row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True) as pandas DataFrame - col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True) as pandas DataFrame - make_multiindex (bool): whether to create a multi-index df combining the 3 component dfs Output: - out (GCToo object or pandas df): if row_meta_only or col_meta_only, then out is a metadata df; otherwise, it's a GCToo instance containing content of parsed gct(x) file Note: why does convert_neg_666 exist? - In CMap--for somewhat obscure historical reasons--we use "-666" as our null value for metadata. However (so that users can take full advantage of pandas' methods, including those for filtering nan's etc) we provide the option of converting these into numpy.NaN values, the pandas default.
[ "Identifies", "whether", "file_path", "corresponds", "to", "a", ".", "gct", "or", ".", "gctx", "file", "and", "calls", "the", "correct", "corresponding", "parse", "method", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse.py#L21-L75
train
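A usage sketch of the extension-based dispatcher; "sig_data.gctx" is a placeholder path and the cid values are made up:

from cmapPy.pandasGEXpress import parse

gctoo = parse.parse("sig_data.gctx", cid=["sample_1", "sample_2"])
col_meta_df = parse.parse("sig_data.gctx", col_meta_only=True)
# parse.parse("sig_data.txt") would raise, since only .gct and .gctx are accepted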
cmap/cmapPy
cmapPy/math/agg_wt_avg.py
get_upper_triangle
def get_upper_triangle(correlation_matrix): ''' Extract upper triangle from a square matrix. Negative values are set to 0. Args: correlation_matrix (pandas df): Correlations between all replicates Returns: upper_tri_df (pandas df): Upper triangle extracted from correlation_matrix; rid is the row index, cid is the column index, corr is the extracted correlation value ''' upper_triangle = correlation_matrix.where(np.triu(np.ones(correlation_matrix.shape), k=1).astype(np.bool)) # convert matrix into long form description upper_tri_df = upper_triangle.stack().reset_index(level=1) upper_tri_df.columns = ['rid', 'corr'] # Index at this point is cid, it now becomes a column upper_tri_df.reset_index(level=0, inplace=True) # Get rid of negative values upper_tri_df['corr'] = upper_tri_df['corr'].clip(lower=0) return upper_tri_df.round(rounding_precision)
python
def get_upper_triangle(correlation_matrix): ''' Extract upper triangle from a square matrix. Negative values are set to 0. Args: correlation_matrix (pandas df): Correlations between all replicates Returns: upper_tri_df (pandas df): Upper triangle extracted from correlation_matrix; rid is the row index, cid is the column index, corr is the extracted correlation value ''' upper_triangle = correlation_matrix.where(np.triu(np.ones(correlation_matrix.shape), k=1).astype(np.bool)) # convert matrix into long form description upper_tri_df = upper_triangle.stack().reset_index(level=1) upper_tri_df.columns = ['rid', 'corr'] # Index at this point is cid, it now becomes a column upper_tri_df.reset_index(level=0, inplace=True) # Get rid of negative values upper_tri_df['corr'] = upper_tri_df['corr'].clip(lower=0) return upper_tri_df.round(rounding_precision)
[ "def", "get_upper_triangle", "(", "correlation_matrix", ")", ":", "upper_triangle", "=", "correlation_matrix", ".", "where", "(", "np", ".", "triu", "(", "np", ".", "ones", "(", "correlation_matrix", ".", "shape", ")", ",", "k", "=", "1", ")", ".", "astype", "(", "np", ".", "bool", ")", ")", "# convert matrix into long form description", "upper_tri_df", "=", "upper_triangle", ".", "stack", "(", ")", ".", "reset_index", "(", "level", "=", "1", ")", "upper_tri_df", ".", "columns", "=", "[", "'rid'", ",", "'corr'", "]", "# Index at this point is cid, it now becomes a column", "upper_tri_df", ".", "reset_index", "(", "level", "=", "0", ",", "inplace", "=", "True", ")", "# Get rid of negative values", "upper_tri_df", "[", "'corr'", "]", "=", "upper_tri_df", "[", "'corr'", "]", ".", "clip", "(", "lower", "=", "0", ")", "return", "upper_tri_df", ".", "round", "(", "rounding_precision", ")" ]
Extract upper triangle from a square matrix. Negative values are set to 0. Args: correlation_matrix (pandas df): Correlations between all replicates Returns: upper_tri_df (pandas df): Upper triangle extracted from correlation_matrix; rid is the row index, cid is the column index, corr is the extracted correlation value
[ "Extract", "upper", "triangle", "from", "a", "square", "matrix", ".", "Negative", "values", "are", "set", "to", "0", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/math/agg_wt_avg.py#L17-L41
train
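A sketch of get_upper_triangle on a hand-written 3 x 3 correlation matrix (note the function relies on the np.bool alias, so it assumes a numpy version that still provides it):

import pandas as pd
from cmapPy.math import agg_wt_avg

corr = pd.DataFrame([[1.0, 0.8, -0.2],
                     [0.8, 1.0, 0.5],
                     [-0.2, 0.5, 1.0]],
                    index=["a", "b", "c"], columns=["a", "b", "c"])
print(agg_wt_avg.get_upper_triangle(corr))
# three long-form rows (a-b, a-c, b-c); the negative a-c correlation is clipped to 0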
cmap/cmapPy
cmapPy/math/agg_wt_avg.py
calculate_weights
def calculate_weights(correlation_matrix, min_wt): ''' Calculate a weight for each profile based on its correlation to other replicates. Negative correlations are clipped to 0, and weights are clipped to be min_wt at the least. Args: correlation_matrix (pandas df): Correlations between all replicates min_wt (float): Minimum raw weight when calculating weighted average Returns: raw weights (pandas series): Mean correlation to other replicates weights (pandas series): raw_weights normalized such that they add to 1 ''' # fill diagonal of correlation_matrix with np.nan np.fill_diagonal(correlation_matrix.values, np.nan) # remove negative values correlation_matrix = correlation_matrix.clip(lower=0) # get average correlation for each profile (will ignore NaN) raw_weights = correlation_matrix.mean(axis=1) # threshold weights raw_weights = raw_weights.clip(lower=min_wt) # normalize raw_weights so that they add to 1 weights = raw_weights / sum(raw_weights) return raw_weights.round(rounding_precision), weights.round(rounding_precision)
python
def calculate_weights(correlation_matrix, min_wt): ''' Calculate a weight for each profile based on its correlation to other replicates. Negative correlations are clipped to 0, and weights are clipped to be min_wt at the least. Args: correlation_matrix (pandas df): Correlations between all replicates min_wt (float): Minimum raw weight when calculating weighted average Returns: raw weights (pandas series): Mean correlation to other replicates weights (pandas series): raw_weights normalized such that they add to 1 ''' # fill diagonal of correlation_matrix with np.nan np.fill_diagonal(correlation_matrix.values, np.nan) # remove negative values correlation_matrix = correlation_matrix.clip(lower=0) # get average correlation for each profile (will ignore NaN) raw_weights = correlation_matrix.mean(axis=1) # threshold weights raw_weights = raw_weights.clip(lower=min_wt) # normalize raw_weights so that they add to 1 weights = raw_weights / sum(raw_weights) return raw_weights.round(rounding_precision), weights.round(rounding_precision)
[ "def", "calculate_weights", "(", "correlation_matrix", ",", "min_wt", ")", ":", "# fill diagonal of correlation_matrix with np.nan", "np", ".", "fill_diagonal", "(", "correlation_matrix", ".", "values", ",", "np", ".", "nan", ")", "# remove negative values", "correlation_matrix", "=", "correlation_matrix", ".", "clip", "(", "lower", "=", "0", ")", "# get average correlation for each profile (will ignore NaN)", "raw_weights", "=", "correlation_matrix", ".", "mean", "(", "axis", "=", "1", ")", "# threshold weights", "raw_weights", "=", "raw_weights", ".", "clip", "(", "lower", "=", "min_wt", ")", "# normalize raw_weights so that they add to 1", "weights", "=", "raw_weights", "/", "sum", "(", "raw_weights", ")", "return", "raw_weights", ".", "round", "(", "rounding_precision", ")", ",", "weights", ".", "round", "(", "rounding_precision", ")" ]
Calculate a weight for each profile based on its correlation to other replicates. Negative correlations are clipped to 0, and weights are clipped to be min_wt at the least. Args: correlation_matrix (pandas df): Correlations between all replicates min_wt (float): Minimum raw weight when calculating weighted average Returns: raw weights (pandas series): Mean correlation to other replicates weights (pandas series): raw_weights normalized such that they add to 1
[ "Calculate", "a", "weight", "for", "each", "profile", "based", "on", "its", "correlation", "to", "other", "replicates", ".", "Negative", "correlations", "are", "clipped", "to", "0", "and", "weights", "are", "clipped", "to", "be", "min_wt", "at", "the", "least", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/math/agg_wt_avg.py#L44-L72
train
cmap/cmapPy
cmapPy/math/agg_wt_avg.py
agg_wt_avg
def agg_wt_avg(mat, min_wt = 0.01, corr_metric='spearman'): ''' Aggregate a set of replicate profiles into a single signature using a weighted average. Args: mat (pandas df): a matrix of replicate profiles, where the columns are samples and the rows are features; columns correspond to the replicates of a single perturbagen min_wt (float): Minimum raw weight when calculating weighted average corr_metric (string): Spearman or Pearson; the correlation method Returns: out_sig (pandas series): weighted average values upper_tri_df (pandas df): the correlations between each profile that went into the signature raw weights (pandas series): weights before normalization weights (pandas series): weights after normalization ''' assert mat.shape[1] > 0, "mat is empty! mat: {}".format(mat) if mat.shape[1] == 1: out_sig = mat upper_tri_df = None raw_weights = None weights = None else: assert corr_metric in ["spearman", "pearson"] # Make correlation matrix column wise corr_mat = mat.corr(method=corr_metric) # Save the values in the upper triangle upper_tri_df = get_upper_triangle(corr_mat) # Calculate weight per replicate raw_weights, weights = calculate_weights(corr_mat, min_wt) # Apply weights to values weighted_values = mat * weights out_sig = weighted_values.sum(axis=1) return out_sig, upper_tri_df, raw_weights, weights
python
def agg_wt_avg(mat, min_wt = 0.01, corr_metric='spearman'): ''' Aggregate a set of replicate profiles into a single signature using a weighted average. Args: mat (pandas df): a matrix of replicate profiles, where the columns are samples and the rows are features; columns correspond to the replicates of a single perturbagen min_wt (float): Minimum raw weight when calculating weighted average corr_metric (string): Spearman or Pearson; the correlation method Returns: out_sig (pandas series): weighted average values upper_tri_df (pandas df): the correlations between each profile that went into the signature raw weights (pandas series): weights before normalization weights (pandas series): weights after normalization ''' assert mat.shape[1] > 0, "mat is empty! mat: {}".format(mat) if mat.shape[1] == 1: out_sig = mat upper_tri_df = None raw_weights = None weights = None else: assert corr_metric in ["spearman", "pearson"] # Make correlation matrix column wise corr_mat = mat.corr(method=corr_metric) # Save the values in the upper triangle upper_tri_df = get_upper_triangle(corr_mat) # Calculate weight per replicate raw_weights, weights = calculate_weights(corr_mat, min_wt) # Apply weights to values weighted_values = mat * weights out_sig = weighted_values.sum(axis=1) return out_sig, upper_tri_df, raw_weights, weights
[ "def", "agg_wt_avg", "(", "mat", ",", "min_wt", "=", "0.01", ",", "corr_metric", "=", "'spearman'", ")", ":", "assert", "mat", ".", "shape", "[", "1", "]", ">", "0", ",", "\"mat is empty! mat: {}\"", ".", "format", "(", "mat", ")", "if", "mat", ".", "shape", "[", "1", "]", "==", "1", ":", "out_sig", "=", "mat", "upper_tri_df", "=", "None", "raw_weights", "=", "None", "weights", "=", "None", "else", ":", "assert", "corr_metric", "in", "[", "\"spearman\"", ",", "\"pearson\"", "]", "# Make correlation matrix column wise", "corr_mat", "=", "mat", ".", "corr", "(", "method", "=", "corr_metric", ")", "# Save the values in the upper triangle", "upper_tri_df", "=", "get_upper_triangle", "(", "corr_mat", ")", "# Calculate weight per replicate", "raw_weights", ",", "weights", "=", "calculate_weights", "(", "corr_mat", ",", "min_wt", ")", "# Apply weights to values", "weighted_values", "=", "mat", "*", "weights", "out_sig", "=", "weighted_values", ".", "sum", "(", "axis", "=", "1", ")", "return", "out_sig", ",", "upper_tri_df", ",", "raw_weights", ",", "weights" ]
Aggregate a set of replicate profiles into a single signature using a weighted average. Args: mat (pandas df): a matrix of replicate profiles, where the columns are samples and the rows are features; columns correspond to the replicates of a single perturbagen min_wt (float): Minimum raw weight when calculating weighted average corr_metric (string): Spearman or Pearson; the correlation method Returns: out_sig (pandas series): weighted average values upper_tri_df (pandas df): the correlations between each profile that went into the signature raw weights (pandas series): weights before normalization weights (pandas series): weights after normalization
[ "Aggregate", "a", "set", "of", "replicate", "profiles", "into", "a", "single", "signature", "using", "a", "weighted", "average", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/math/agg_wt_avg.py#L75-L118
train
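An illustrative sketch of aggregating three made-up replicate profiles with agg_wt_avg, under the same import assumption as above.

import pandas as pd
from cmapPy.math import agg_wt_avg

# rows are features, columns are replicates of a single perturbagen
mat = pd.DataFrame(
    {"rep1": [1.0, 2.0, 3.0, 4.0],
     "rep2": [1.1, 2.1, 2.9, 4.2],
     "rep3": [0.9, 1.8, 3.2, 3.9]},
    index=["feat1", "feat2", "feat3", "feat4"])

out_sig, upper_tri_df, raw_weights, weights = agg_wt_avg.agg_wt_avg(
    mat, min_wt=0.01, corr_metric="spearman")
# out_sig holds one weighted-average value per feature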
cmap/cmapPy
cmapPy/pandasGEXpress/concat.py
get_file_list
def get_file_list(wildcard): """ Search for files to be concatenated. Currently very basic, but could expand to be more sophisticated. Args: wildcard (regular expression string) Returns: files (list of full file paths) """ files = glob.glob(os.path.expanduser(wildcard)) return files
python
def get_file_list(wildcard): """ Search for files to be concatenated. Currently very basic, but could expand to be more sophisticated. Args: wildcard (regular expression string) Returns: files (list of full file paths) """ files = glob.glob(os.path.expanduser(wildcard)) return files
[ "def", "get_file_list", "(", "wildcard", ")", ":", "files", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "expanduser", "(", "wildcard", ")", ")", "return", "files" ]
Search for files to be concatenated. Currently very basic, but could expand to be more sophisticated. Args: wildcard (regular expression string) Returns: files (list of full file paths)
[ "Search", "for", "files", "to", "be", "concatenated", ".", "Currently", "very", "basic", "but", "could", "expand", "to", "be", "more", "sophisticated", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/concat.py#L158-L170
train
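A usage sketch for get_file_list; the wildcard below points at a hypothetical directory, and the only behavior relied on is glob plus user-path expansion as shown in the function body.

from cmapPy.pandasGEXpress import concat

# "~" is expanded before globbing; returns a list of matching paths
files = concat.get_file_list("~/gct_files/*.gct")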
cmap/cmapPy
cmapPy/pandasGEXpress/concat.py
hstack
def hstack(gctoos, remove_all_metadata_fields=False, error_report_file=None, fields_to_remove=[], reset_ids=False): """ Horizontally concatenate gctoos. Args: gctoos (list of gctoo objects) remove_all_metadata_fields (bool): ignore/strip all common metadata when combining gctoos error_report_file (string): path to write file containing error report indicating problems that occurred during hstack, mainly for inconsistencies in common metadata fields_to_remove (list of strings): fields to be removed from the common metadata because they don't agree across files reset_ids (bool): set to True if sample ids are not unique Return: concated (gctoo object) """ # Separate each gctoo into its component dfs row_meta_dfs = [] col_meta_dfs = [] data_dfs = [] srcs = [] for g in gctoos: row_meta_dfs.append(g.row_metadata_df) col_meta_dfs.append(g.col_metadata_df) data_dfs.append(g.data_df) srcs.append(g.src) logger.debug("shapes of row_meta_dfs: {}".format([x.shape for x in row_meta_dfs])) # Concatenate row metadata all_row_metadata_df = assemble_common_meta(row_meta_dfs, fields_to_remove, srcs, remove_all_metadata_fields, error_report_file) # Concatenate col metadata all_col_metadata_df = assemble_concatenated_meta(col_meta_dfs, remove_all_metadata_fields) # Concatenate the data_dfs all_data_df = assemble_data(data_dfs, "horiz") # Make sure df shapes are correct assert all_data_df.shape[0] == all_row_metadata_df.shape[0], "Number of rows in metadata does not match number of rows in data - all_data_df.shape[0]: {} all_row_metadata_df.shape[0]: {}".format(all_data_df.shape[0], all_row_metadata_df.shape[0]) assert all_data_df.shape[1] == all_col_metadata_df.shape[0], "Number of columns in data does not match number of columns metadata - all_data_df.shape[1]: {} all_col_metadata_df.shape[0]: {}".format(all_data_df.shape[1], all_col_metadata_df.shape[0]) # If requested, reset sample ids to be unique integers and move old sample # ids into column metadata if reset_ids: do_reset_ids(all_col_metadata_df, all_data_df, "horiz") logger.info("Build GCToo of all...") concated = GCToo.GCToo(row_metadata_df=all_row_metadata_df, col_metadata_df=all_col_metadata_df, data_df=all_data_df) return concated
python
def hstack(gctoos, remove_all_metadata_fields=False, error_report_file=None, fields_to_remove=[], reset_ids=False): """ Horizontally concatenate gctoos. Args: gctoos (list of gctoo objects) remove_all_metadata_fields (bool): ignore/strip all common metadata when combining gctoos error_report_file (string): path to write file containing error report indicating problems that occurred during hstack, mainly for inconsistencies in common metadata fields_to_remove (list of strings): fields to be removed from the common metadata because they don't agree across files reset_ids (bool): set to True if sample ids are not unique Return: concated (gctoo object) """ # Separate each gctoo into its component dfs row_meta_dfs = [] col_meta_dfs = [] data_dfs = [] srcs = [] for g in gctoos: row_meta_dfs.append(g.row_metadata_df) col_meta_dfs.append(g.col_metadata_df) data_dfs.append(g.data_df) srcs.append(g.src) logger.debug("shapes of row_meta_dfs: {}".format([x.shape for x in row_meta_dfs])) # Concatenate row metadata all_row_metadata_df = assemble_common_meta(row_meta_dfs, fields_to_remove, srcs, remove_all_metadata_fields, error_report_file) # Concatenate col metadata all_col_metadata_df = assemble_concatenated_meta(col_meta_dfs, remove_all_metadata_fields) # Concatenate the data_dfs all_data_df = assemble_data(data_dfs, "horiz") # Make sure df shapes are correct assert all_data_df.shape[0] == all_row_metadata_df.shape[0], "Number of rows in metadata does not match number of rows in data - all_data_df.shape[0]: {} all_row_metadata_df.shape[0]: {}".format(all_data_df.shape[0], all_row_metadata_df.shape[0]) assert all_data_df.shape[1] == all_col_metadata_df.shape[0], "Number of columns in data does not match number of columns metadata - all_data_df.shape[1]: {} all_col_metadata_df.shape[0]: {}".format(all_data_df.shape[1], all_col_metadata_df.shape[0]) # If requested, reset sample ids to be unique integers and move old sample # ids into column metadata if reset_ids: do_reset_ids(all_col_metadata_df, all_data_df, "horiz") logger.info("Build GCToo of all...") concated = GCToo.GCToo(row_metadata_df=all_row_metadata_df, col_metadata_df=all_col_metadata_df, data_df=all_data_df) return concated
[ "def", "hstack", "(", "gctoos", ",", "remove_all_metadata_fields", "=", "False", ",", "error_report_file", "=", "None", ",", "fields_to_remove", "=", "[", "]", ",", "reset_ids", "=", "False", ")", ":", "# Separate each gctoo into its component dfs", "row_meta_dfs", "=", "[", "]", "col_meta_dfs", "=", "[", "]", "data_dfs", "=", "[", "]", "srcs", "=", "[", "]", "for", "g", "in", "gctoos", ":", "row_meta_dfs", ".", "append", "(", "g", ".", "row_metadata_df", ")", "col_meta_dfs", ".", "append", "(", "g", ".", "col_metadata_df", ")", "data_dfs", ".", "append", "(", "g", ".", "data_df", ")", "srcs", ".", "append", "(", "g", ".", "src", ")", "logger", ".", "debug", "(", "\"shapes of row_meta_dfs: {}\"", ".", "format", "(", "[", "x", ".", "shape", "for", "x", "in", "row_meta_dfs", "]", ")", ")", "# Concatenate row metadata", "all_row_metadata_df", "=", "assemble_common_meta", "(", "row_meta_dfs", ",", "fields_to_remove", ",", "srcs", ",", "remove_all_metadata_fields", ",", "error_report_file", ")", "# Concatenate col metadata", "all_col_metadata_df", "=", "assemble_concatenated_meta", "(", "col_meta_dfs", ",", "remove_all_metadata_fields", ")", "# Concatenate the data_dfs", "all_data_df", "=", "assemble_data", "(", "data_dfs", ",", "\"horiz\"", ")", "# Make sure df shapes are correct", "assert", "all_data_df", ".", "shape", "[", "0", "]", "==", "all_row_metadata_df", ".", "shape", "[", "0", "]", ",", "\"Number of rows in metadata does not match number of rows in data - all_data_df.shape[0]: {} all_row_metadata_df.shape[0]: {}\"", ".", "format", "(", "all_data_df", ".", "shape", "[", "0", "]", ",", "all_row_metadata_df", ".", "shape", "[", "0", "]", ")", "assert", "all_data_df", ".", "shape", "[", "1", "]", "==", "all_col_metadata_df", ".", "shape", "[", "0", "]", ",", "\"Number of columns in data does not match number of columns metadata - all_data_df.shape[1]: {} all_col_metadata_df.shape[0]: {}\"", ".", "format", "(", "all_data_df", ".", "shape", "[", "1", "]", ",", "all_col_metadata_df", ".", "shape", "[", "0", "]", ")", "# If requested, reset sample ids to be unique integers and move old sample", "# ids into column metadata", "if", "reset_ids", ":", "do_reset_ids", "(", "all_col_metadata_df", ",", "all_data_df", ",", "\"horiz\"", ")", "logger", ".", "info", "(", "\"Build GCToo of all...\"", ")", "concated", "=", "GCToo", ".", "GCToo", "(", "row_metadata_df", "=", "all_row_metadata_df", ",", "col_metadata_df", "=", "all_col_metadata_df", ",", "data_df", "=", "all_data_df", ")", "return", "concated" ]
Horizontally concatenate gctoos. Args: gctoos (list of gctoo objects) remove_all_metadata_fields (bool): ignore/strip all common metadata when combining gctoos error_report_file (string): path to write file containing error report indicating problems that occurred during hstack, mainly for inconsistencies in common metadata fields_to_remove (list of strings): fields to be removed from the common metadata because they don't agree across files reset_ids (bool): set to True if sample ids are not unique Return: concated (gctoo object)
[ "Horizontally", "concatenate", "gctoos", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/concat.py#L173-L224
train
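A small end-to-end sketch of hstack on two in-memory GCToo objects that share row metadata; GCToo is constructed directly here (as the functions in this file do internally), and all ids and metadata values are invented.

import pandas as pd
from cmapPy.pandasGEXpress import GCToo, concat

row_meta = pd.DataFrame({"pr_gene_symbol": ["A", "B"]}, index=["rid1", "rid2"])
g1 = GCToo.GCToo(
    data_df=pd.DataFrame([[1.0], [2.0]], index=["rid1", "rid2"], columns=["cid1"]),
    row_metadata_df=row_meta.copy(),
    col_metadata_df=pd.DataFrame({"pert_id": ["x"]}, index=["cid1"]))
g2 = GCToo.GCToo(
    data_df=pd.DataFrame([[3.0], [4.0]], index=["rid1", "rid2"], columns=["cid2"]),
    row_metadata_df=row_meta.copy(),
    col_metadata_df=pd.DataFrame({"pert_id": ["y"]}, index=["cid2"]))

# 2 rows x 2 columns; the shared row metadata must agree across the inputs
combined = concat.hstack([g1, g2])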
cmap/cmapPy
cmapPy/pandasGEXpress/concat.py
assemble_concatenated_meta
def assemble_concatenated_meta(concated_meta_dfs, remove_all_metadata_fields): """ Assemble the concatenated metadata dfs together. For example, if horizontally concatenating, the concatenated metadata dfs are the column metadata dfs. Both indices are sorted. Args: concated_meta_dfs (list of pandas dfs) Returns: all_concated_meta_df_sorted (pandas df) """ # Concatenate the concated_meta_dfs if remove_all_metadata_fields: for df in concated_meta_dfs: df.drop(df.columns, axis=1, inplace=True) all_concated_meta_df = pd.concat(concated_meta_dfs, axis=0) # Sanity check: the number of rows in all_concated_meta_df should correspond # to the sum of the number of rows in the input dfs n_rows = all_concated_meta_df.shape[0] logger.debug("all_concated_meta_df.shape[0]: {}".format(n_rows)) n_rows_cumulative = sum([df.shape[0] for df in concated_meta_dfs]) assert n_rows == n_rows_cumulative # Sort the index and columns all_concated_meta_df_sorted = all_concated_meta_df.sort_index(axis=0).sort_index(axis=1) return all_concated_meta_df_sorted
python
def assemble_concatenated_meta(concated_meta_dfs, remove_all_metadata_fields): """ Assemble the concatenated metadata dfs together. For example, if horizontally concatenating, the concatenated metadata dfs are the column metadata dfs. Both indices are sorted. Args: concated_meta_dfs (list of pandas dfs) Returns: all_concated_meta_df_sorted (pandas df) """ # Concatenate the concated_meta_dfs if remove_all_metadata_fields: for df in concated_meta_dfs: df.drop(df.columns, axis=1, inplace=True) all_concated_meta_df = pd.concat(concated_meta_dfs, axis=0) # Sanity check: the number of rows in all_concated_meta_df should correspond # to the sum of the number of rows in the input dfs n_rows = all_concated_meta_df.shape[0] logger.debug("all_concated_meta_df.shape[0]: {}".format(n_rows)) n_rows_cumulative = sum([df.shape[0] for df in concated_meta_dfs]) assert n_rows == n_rows_cumulative # Sort the index and columns all_concated_meta_df_sorted = all_concated_meta_df.sort_index(axis=0).sort_index(axis=1) return all_concated_meta_df_sorted
[ "def", "assemble_concatenated_meta", "(", "concated_meta_dfs", ",", "remove_all_metadata_fields", ")", ":", "# Concatenate the concated_meta_dfs", "if", "remove_all_metadata_fields", ":", "for", "df", "in", "concated_meta_dfs", ":", "df", ".", "drop", "(", "df", ".", "columns", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "all_concated_meta_df", "=", "pd", ".", "concat", "(", "concated_meta_dfs", ",", "axis", "=", "0", ")", "# Sanity check: the number of rows in all_concated_meta_df should correspond", "# to the sum of the number of rows in the input dfs", "n_rows", "=", "all_concated_meta_df", ".", "shape", "[", "0", "]", "logger", ".", "debug", "(", "\"all_concated_meta_df.shape[0]: {}\"", ".", "format", "(", "n_rows", ")", ")", "n_rows_cumulative", "=", "sum", "(", "[", "df", ".", "shape", "[", "0", "]", "for", "df", "in", "concated_meta_dfs", "]", ")", "assert", "n_rows", "==", "n_rows_cumulative", "# Sort the index and columns", "all_concated_meta_df_sorted", "=", "all_concated_meta_df", ".", "sort_index", "(", "axis", "=", "0", ")", ".", "sort_index", "(", "axis", "=", "1", ")", "return", "all_concated_meta_df_sorted" ]
Assemble the concatenated metadata dfs together. For example, if horizontally concatenating, the concatenated metadata dfs are the column metadata dfs. Both indices are sorted. Args: concated_meta_dfs (list of pandas dfs) Returns: all_concated_meta_df_sorted (pandas df)
[ "Assemble", "the", "concatenated", "metadata", "dfs", "together", ".", "For", "example", "if", "horizontally", "concatenating", "the", "concatenated", "metadata", "dfs", "are", "the", "column", "metadata", "dfs", ".", "Both", "indices", "are", "sorted", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/concat.py#L423-L452
train
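A sketch of assemble_concatenated_meta on two invented column-metadata frames; the row count of the result equals the sum of the inputs, and both axes come back sorted.

import pandas as pd
from cmapPy.pandasGEXpress import concat

col_meta_a = pd.DataFrame({"pert_id": ["A", "B"]}, index=["s1", "s2"])
col_meta_b = pd.DataFrame({"pert_id": ["C"]}, index=["s3"])

all_col_meta = concat.assemble_concatenated_meta(
    [col_meta_a, col_meta_b], remove_all_metadata_fields=False)
# 3 rows, sorted by id; remove_all_metadata_fields=True would drop every field first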
cmap/cmapPy
cmapPy/pandasGEXpress/concat.py
assemble_data
def assemble_data(data_dfs, concat_direction): """ Assemble the data dfs together. Both indices are sorted. Args: data_dfs (list of pandas dfs) concat_direction (string): 'horiz' or 'vert' Returns: all_data_df_sorted (pandas df) """ if concat_direction == "horiz": # Concatenate the data_dfs horizontally all_data_df = pd.concat(data_dfs, axis=1) # Sanity check: the number of columns in all_data_df should # correspond to the sum of the number of columns in the input dfs n_cols = all_data_df.shape[1] logger.debug("all_data_df.shape[1]: {}".format(n_cols)) n_cols_cumulative = sum([df.shape[1] for df in data_dfs]) assert n_cols == n_cols_cumulative elif concat_direction == "vert": # Concatenate the data_dfs vertically all_data_df = pd.concat(data_dfs, axis=0) # Sanity check: the number of rows in all_data_df should # correspond to the sum of the number of rows in the input dfs n_rows = all_data_df.shape[0] logger.debug("all_data_df.shape[0]: {}".format(n_rows)) n_rows_cumulative = sum([df.shape[0] for df in data_dfs]) assert n_rows == n_rows_cumulative # Sort both indices all_data_df_sorted = all_data_df.sort_index(axis=0).sort_index(axis=1) return all_data_df_sorted
python
def assemble_data(data_dfs, concat_direction): """ Assemble the data dfs together. Both indices are sorted. Args: data_dfs (list of pandas dfs) concat_direction (string): 'horiz' or 'vert' Returns: all_data_df_sorted (pandas df) """ if concat_direction == "horiz": # Concatenate the data_dfs horizontally all_data_df = pd.concat(data_dfs, axis=1) # Sanity check: the number of columns in all_data_df should # correspond to the sum of the number of columns in the input dfs n_cols = all_data_df.shape[1] logger.debug("all_data_df.shape[1]: {}".format(n_cols)) n_cols_cumulative = sum([df.shape[1] for df in data_dfs]) assert n_cols == n_cols_cumulative elif concat_direction == "vert": # Concatenate the data_dfs vertically all_data_df = pd.concat(data_dfs, axis=0) # Sanity check: the number of rows in all_data_df should # correspond to the sum of the number of rows in the input dfs n_rows = all_data_df.shape[0] logger.debug("all_data_df.shape[0]: {}".format(n_rows)) n_rows_cumulative = sum([df.shape[0] for df in data_dfs]) assert n_rows == n_rows_cumulative # Sort both indices all_data_df_sorted = all_data_df.sort_index(axis=0).sort_index(axis=1) return all_data_df_sorted
[ "def", "assemble_data", "(", "data_dfs", ",", "concat_direction", ")", ":", "if", "concat_direction", "==", "\"horiz\"", ":", "# Concatenate the data_dfs horizontally", "all_data_df", "=", "pd", ".", "concat", "(", "data_dfs", ",", "axis", "=", "1", ")", "# Sanity check: the number of columns in all_data_df should", "# correspond to the sum of the number of columns in the input dfs", "n_cols", "=", "all_data_df", ".", "shape", "[", "1", "]", "logger", ".", "debug", "(", "\"all_data_df.shape[1]: {}\"", ".", "format", "(", "n_cols", ")", ")", "n_cols_cumulative", "=", "sum", "(", "[", "df", ".", "shape", "[", "1", "]", "for", "df", "in", "data_dfs", "]", ")", "assert", "n_cols", "==", "n_cols_cumulative", "elif", "concat_direction", "==", "\"vert\"", ":", "# Concatenate the data_dfs vertically", "all_data_df", "=", "pd", ".", "concat", "(", "data_dfs", ",", "axis", "=", "0", ")", "# Sanity check: the number of rows in all_data_df should", "# correspond to the sum of the number of rows in the input dfs", "n_rows", "=", "all_data_df", ".", "shape", "[", "0", "]", "logger", ".", "debug", "(", "\"all_data_df.shape[0]: {}\"", ".", "format", "(", "n_rows", ")", ")", "n_rows_cumulative", "=", "sum", "(", "[", "df", ".", "shape", "[", "0", "]", "for", "df", "in", "data_dfs", "]", ")", "assert", "n_rows", "==", "n_rows_cumulative", "# Sort both indices", "all_data_df_sorted", "=", "all_data_df", ".", "sort_index", "(", "axis", "=", "0", ")", ".", "sort_index", "(", "axis", "=", "1", ")", "return", "all_data_df_sorted" ]
Assemble the data dfs together. Both indices are sorted. Args: data_dfs (list of pandas dfs) concat_direction (string): 'horiz' or 'vert' Returns: all_data_df_sorted (pandas df)
[ "Assemble", "the", "data", "dfs", "together", ".", "Both", "indices", "are", "sorted", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/concat.py#L455-L492
train
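A sketch of assemble_data in the horizontal direction on two invented data frames that share row ids.

import pandas as pd
from cmapPy.pandasGEXpress import concat

data_a = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], index=["r1", "r2"], columns=["s1", "s2"])
data_b = pd.DataFrame([[5.0], [6.0]], index=["r1", "r2"], columns=["s3"])

# columns are appended ("horiz"), then both indices are sorted
all_data = concat.assemble_data([data_a, data_b], "horiz")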
cmap/cmapPy
cmapPy/pandasGEXpress/concat.py
do_reset_ids
def do_reset_ids(concatenated_meta_df, data_df, concat_direction): """ Reset ids in concatenated metadata and data dfs to unique integers and save the old ids in a metadata column. Note that the dataframes are modified in-place. Args: concatenated_meta_df (pandas df) data_df (pandas df) concat_direction (string): 'horiz' or 'vert' Returns: None (dfs modified in-place) """ if concat_direction == "horiz": # Make sure cids agree between data_df and concatenated_meta_df assert concatenated_meta_df.index.equals(data_df.columns), ( "cids in concatenated_meta_df do not agree with cids in data_df.") # Reset cids in concatenated_meta_df reset_ids_in_meta_df(concatenated_meta_df) # Replace cids in data_df with the new ones from concatenated_meta_df # (just an array of unique integers, zero-indexed) data_df.columns = pd.Index(concatenated_meta_df.index.values) elif concat_direction == "vert": # Make sure rids agree between data_df and concatenated_meta_df assert concatenated_meta_df.index.equals(data_df.index), ( "rids in concatenated_meta_df do not agree with rids in data_df.") # Reset rids in concatenated_meta_df reset_ids_in_meta_df(concatenated_meta_df) # Replace rids in data_df with the new ones from concatenated_meta_df # (just an array of unique integers, zero-indexed) data_df.index = pd.Index(concatenated_meta_df.index.values)
python
def do_reset_ids(concatenated_meta_df, data_df, concat_direction): """ Reset ids in concatenated metadata and data dfs to unique integers and save the old ids in a metadata column. Note that the dataframes are modified in-place. Args: concatenated_meta_df (pandas df) data_df (pandas df) concat_direction (string): 'horiz' or 'vert' Returns: None (dfs modified in-place) """ if concat_direction == "horiz": # Make sure cids agree between data_df and concatenated_meta_df assert concatenated_meta_df.index.equals(data_df.columns), ( "cids in concatenated_meta_df do not agree with cids in data_df.") # Reset cids in concatenated_meta_df reset_ids_in_meta_df(concatenated_meta_df) # Replace cids in data_df with the new ones from concatenated_meta_df # (just an array of unique integers, zero-indexed) data_df.columns = pd.Index(concatenated_meta_df.index.values) elif concat_direction == "vert": # Make sure rids agree between data_df and concatenated_meta_df assert concatenated_meta_df.index.equals(data_df.index), ( "rids in concatenated_meta_df do not agree with rids in data_df.") # Reset rids in concatenated_meta_df reset_ids_in_meta_df(concatenated_meta_df) # Replace rids in data_df with the new ones from concatenated_meta_df # (just an array of unique integers, zero-indexed) data_df.index = pd.Index(concatenated_meta_df.index.values)
[ "def", "do_reset_ids", "(", "concatenated_meta_df", ",", "data_df", ",", "concat_direction", ")", ":", "if", "concat_direction", "==", "\"horiz\"", ":", "# Make sure cids agree between data_df and concatenated_meta_df", "assert", "concatenated_meta_df", ".", "index", ".", "equals", "(", "data_df", ".", "columns", ")", ",", "(", "\"cids in concatenated_meta_df do not agree with cids in data_df.\"", ")", "# Reset cids in concatenated_meta_df", "reset_ids_in_meta_df", "(", "concatenated_meta_df", ")", "# Replace cids in data_df with the new ones from concatenated_meta_df", "# (just an array of unique integers, zero-indexed)", "data_df", ".", "columns", "=", "pd", ".", "Index", "(", "concatenated_meta_df", ".", "index", ".", "values", ")", "elif", "concat_direction", "==", "\"vert\"", ":", "# Make sure rids agree between data_df and concatenated_meta_df", "assert", "concatenated_meta_df", ".", "index", ".", "equals", "(", "data_df", ".", "index", ")", ",", "(", "\"rids in concatenated_meta_df do not agree with rids in data_df.\"", ")", "# Reset rids in concatenated_meta_df", "reset_ids_in_meta_df", "(", "concatenated_meta_df", ")", "# Replace rids in data_df with the new ones from concatenated_meta_df", "# (just an array of unique integers, zero-indexed)", "data_df", ".", "index", "=", "pd", ".", "Index", "(", "concatenated_meta_df", ".", "index", ".", "values", ")" ]
Reset ids in concatenated metadata and data dfs to unique integers and save the old ids in a metadata column. Note that the dataframes are modified in-place. Args: concatenated_meta_df (pandas df) data_df (pandas df) concat_direction (string): 'horiz' or 'vert' Returns: None (dfs modified in-place)
[ "Reset", "ids", "in", "concatenated", "metadata", "and", "data", "dfs", "to", "unique", "integers", "and", "save", "the", "old", "ids", "in", "a", "metadata", "column", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/concat.py#L495-L534
train
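A sketch of do_reset_ids on invented frames with a duplicated sample id; both inputs are modified in place, the old ids move into an old_id metadata column, and the data columns become the new integer ids.

import pandas as pd
from cmapPy.pandasGEXpress import concat

col_meta = pd.DataFrame({"pert_id": ["A", "A"]},
                        index=pd.Index(["s1", "s1"], name="cid"))
data = pd.DataFrame([[1.0, 2.0]], index=["r1"], columns=["s1", "s1"])

concat.do_reset_ids(col_meta, data, "horiz")
# col_meta.index and data.columns are now 0 and 1; col_meta["old_id"] holds "s1", "s1"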
cmap/cmapPy
cmapPy/pandasGEXpress/concat.py
reset_ids_in_meta_df
def reset_ids_in_meta_df(meta_df): """ Meta_df is modified inplace. """ # Record original index name, and then change it so that the column that it # becomes will be appropriately named original_index_name = meta_df.index.name meta_df.index.name = "old_id" # Reset index meta_df.reset_index(inplace=True) # Change the index name back to what it was meta_df.index.name = original_index_name
python
def reset_ids_in_meta_df(meta_df): """ Meta_df is modified inplace. """ # Record original index name, and then change it so that the column that it # becomes will be appropriately named original_index_name = meta_df.index.name meta_df.index.name = "old_id" # Reset index meta_df.reset_index(inplace=True) # Change the index name back to what it was meta_df.index.name = original_index_name
[ "def", "reset_ids_in_meta_df", "(", "meta_df", ")", ":", "# Record original index name, and then change it so that the column that it", "# becomes will be appropriately named", "original_index_name", "=", "meta_df", ".", "index", ".", "name", "meta_df", ".", "index", ".", "name", "=", "\"old_id\"", "# Reset index", "meta_df", ".", "reset_index", "(", "inplace", "=", "True", ")", "# Change the index name back to what it was", "meta_df", ".", "index", ".", "name", "=", "original_index_name" ]
Meta_df is modified inplace.
[ "Meta_df", "is", "modified", "inplace", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/concat.py#L537-L549
train
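A sketch of reset_ids_in_meta_df on an invented metadata frame.

import pandas as pd
from cmapPy.pandasGEXpress import concat

meta = pd.DataFrame({"pert_id": ["A", "B"]}, index=pd.Index(["s1", "s2"], name="cid"))
concat.reset_ids_in_meta_df(meta)
# meta now has an integer index still named "cid" and an "old_id" column with "s1", "s2"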
cmap/cmapPy
cmapPy/pandasGEXpress/subset_gctoo.py
subset_gctoo
def subset_gctoo(gctoo, row_bool=None, col_bool=None, rid=None, cid=None, ridx=None, cidx=None, exclude_rid=None, exclude_cid=None): """ Extract a subset of data from a GCToo object in a variety of ways. The order of rows and columns will be preserved. Args: gctoo (GCToo object) row_bool (list of bools): length must equal gctoo.data_df.shape[0] col_bool (list of bools): length must equal gctoo.data_df.shape[1] rid (list of strings): rids to include cid (list of strings): cids to include ridx (list of integers): row integer ids to include cidx (list of integers): col integer ids to include exclude_rid (list of strings): rids to exclude exclude_cid (list of strings): cids to exclude Returns: out_gctoo (GCToo object): gctoo after subsetting """ assert sum([(rid is not None), (row_bool is not None), (ridx is not None)]) <= 1, ( "Only one of rid, row_bool, and ridx can be provided.") assert sum([(cid is not None), (col_bool is not None), (cidx is not None)]) <= 1, ( "Only one of cid, col_bool, and cidx can be provided.") # Figure out what rows and columns to keep rows_to_keep = get_rows_to_keep(gctoo, rid, row_bool, ridx, exclude_rid) cols_to_keep = get_cols_to_keep(gctoo, cid, col_bool, cidx, exclude_cid) # Convert labels to boolean array to preserve order rows_to_keep_bools = gctoo.data_df.index.isin(rows_to_keep) cols_to_keep_bools = gctoo.data_df.columns.isin(cols_to_keep) # Make the output gct out_gctoo = GCToo.GCToo( src=gctoo.src, version=gctoo.version, data_df=gctoo.data_df.loc[rows_to_keep_bools, cols_to_keep_bools], row_metadata_df=gctoo.row_metadata_df.loc[rows_to_keep_bools, :], col_metadata_df=gctoo.col_metadata_df.loc[cols_to_keep_bools, :]) assert out_gctoo.data_df.size > 0, "Subsetting yielded an empty gct!" logger.info(("Initial GCToo with {} rows and {} columns subsetted down to " + "{} rows and {} columns.").format( gctoo.data_df.shape[0], gctoo.data_df.shape[1], out_gctoo.data_df.shape[0], out_gctoo.data_df.shape[1])) return out_gctoo
python
def subset_gctoo(gctoo, row_bool=None, col_bool=None, rid=None, cid=None, ridx=None, cidx=None, exclude_rid=None, exclude_cid=None): """ Extract a subset of data from a GCToo object in a variety of ways. The order of rows and columns will be preserved. Args: gctoo (GCToo object) row_bool (list of bools): length must equal gctoo.data_df.shape[0] col_bool (list of bools): length must equal gctoo.data_df.shape[1] rid (list of strings): rids to include cid (list of strings): cids to include ridx (list of integers): row integer ids to include cidx (list of integers): col integer ids to include exclude_rid (list of strings): rids to exclude exclude_cid (list of strings): cids to exclude Returns: out_gctoo (GCToo object): gctoo after subsetting """ assert sum([(rid is not None), (row_bool is not None), (ridx is not None)]) <= 1, ( "Only one of rid, row_bool, and ridx can be provided.") assert sum([(cid is not None), (col_bool is not None), (cidx is not None)]) <= 1, ( "Only one of cid, col_bool, and cidx can be provided.") # Figure out what rows and columns to keep rows_to_keep = get_rows_to_keep(gctoo, rid, row_bool, ridx, exclude_rid) cols_to_keep = get_cols_to_keep(gctoo, cid, col_bool, cidx, exclude_cid) # Convert labels to boolean array to preserve order rows_to_keep_bools = gctoo.data_df.index.isin(rows_to_keep) cols_to_keep_bools = gctoo.data_df.columns.isin(cols_to_keep) # Make the output gct out_gctoo = GCToo.GCToo( src=gctoo.src, version=gctoo.version, data_df=gctoo.data_df.loc[rows_to_keep_bools, cols_to_keep_bools], row_metadata_df=gctoo.row_metadata_df.loc[rows_to_keep_bools, :], col_metadata_df=gctoo.col_metadata_df.loc[cols_to_keep_bools, :]) assert out_gctoo.data_df.size > 0, "Subsetting yielded an empty gct!" logger.info(("Initial GCToo with {} rows and {} columns subsetted down to " + "{} rows and {} columns.").format( gctoo.data_df.shape[0], gctoo.data_df.shape[1], out_gctoo.data_df.shape[0], out_gctoo.data_df.shape[1])) return out_gctoo
[ "def", "subset_gctoo", "(", "gctoo", ",", "row_bool", "=", "None", ",", "col_bool", "=", "None", ",", "rid", "=", "None", ",", "cid", "=", "None", ",", "ridx", "=", "None", ",", "cidx", "=", "None", ",", "exclude_rid", "=", "None", ",", "exclude_cid", "=", "None", ")", ":", "assert", "sum", "(", "[", "(", "rid", "is", "not", "None", ")", ",", "(", "row_bool", "is", "not", "None", ")", ",", "(", "ridx", "is", "not", "None", ")", "]", ")", "<=", "1", ",", "(", "\"Only one of rid, row_bool, and ridx can be provided.\"", ")", "assert", "sum", "(", "[", "(", "cid", "is", "not", "None", ")", ",", "(", "col_bool", "is", "not", "None", ")", ",", "(", "cidx", "is", "not", "None", ")", "]", ")", "<=", "1", ",", "(", "\"Only one of cid, col_bool, and cidx can be provided.\"", ")", "# Figure out what rows and columns to keep", "rows_to_keep", "=", "get_rows_to_keep", "(", "gctoo", ",", "rid", ",", "row_bool", ",", "ridx", ",", "exclude_rid", ")", "cols_to_keep", "=", "get_cols_to_keep", "(", "gctoo", ",", "cid", ",", "col_bool", ",", "cidx", ",", "exclude_cid", ")", "# Convert labels to boolean array to preserve order", "rows_to_keep_bools", "=", "gctoo", ".", "data_df", ".", "index", ".", "isin", "(", "rows_to_keep", ")", "cols_to_keep_bools", "=", "gctoo", ".", "data_df", ".", "columns", ".", "isin", "(", "cols_to_keep", ")", "# Make the output gct", "out_gctoo", "=", "GCToo", ".", "GCToo", "(", "src", "=", "gctoo", ".", "src", ",", "version", "=", "gctoo", ".", "version", ",", "data_df", "=", "gctoo", ".", "data_df", ".", "loc", "[", "rows_to_keep_bools", ",", "cols_to_keep_bools", "]", ",", "row_metadata_df", "=", "gctoo", ".", "row_metadata_df", ".", "loc", "[", "rows_to_keep_bools", ",", ":", "]", ",", "col_metadata_df", "=", "gctoo", ".", "col_metadata_df", ".", "loc", "[", "cols_to_keep_bools", ",", ":", "]", ")", "assert", "out_gctoo", ".", "data_df", ".", "size", ">", "0", ",", "\"Subsetting yielded an empty gct!\"", "logger", ".", "info", "(", "(", "\"Initial GCToo with {} rows and {} columns subsetted down to \"", "+", "\"{} rows and {} columns.\"", ")", ".", "format", "(", "gctoo", ".", "data_df", ".", "shape", "[", "0", "]", ",", "gctoo", ".", "data_df", ".", "shape", "[", "1", "]", ",", "out_gctoo", ".", "data_df", ".", "shape", "[", "0", "]", ",", "out_gctoo", ".", "data_df", ".", "shape", "[", "1", "]", ")", ")", "return", "out_gctoo" ]
Extract a subset of data from a GCToo object in a variety of ways. The order of rows and columns will be preserved. Args: gctoo (GCToo object) row_bool (list of bools): length must equal gctoo.data_df.shape[0] col_bool (list of bools): length must equal gctoo.data_df.shape[1] rid (list of strings): rids to include cid (list of strings): cids to include ridx (list of integers): row integer ids to include cidx (list of integers): col integer ids to include exclude_rid (list of strings): rids to exclude exclude_cid (list of strings): cids to exclude Returns: out_gctoo (GCToo object): gctoo after subsetting
[ "Extract", "a", "subset", "of", "data", "from", "a", "GCToo", "object", "in", "a", "variety", "of", "ways", ".", "The", "order", "of", "rows", "and", "columns", "will", "be", "preserved", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/subset_gctoo.py#L19-L65
train
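A sketch of subset_gctoo on a small in-memory GCToo; the ids and metadata fields are invented, and GCToo is built directly just as the function itself does for its output.

import pandas as pd
from cmapPy.pandasGEXpress import GCToo, subset_gctoo

data = pd.DataFrame([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
                    index=["rid1", "rid2"], columns=["cid1", "cid2", "cid3"])
row_meta = pd.DataFrame({"pr_gene_symbol": ["A", "B"]}, index=["rid1", "rid2"])
col_meta = pd.DataFrame({"pert_id": ["x", "y", "z"]}, index=["cid1", "cid2", "cid3"])
g = GCToo.GCToo(data_df=data, row_metadata_df=row_meta, col_metadata_df=col_meta)

# keep one row by rid, keep all columns except cid3
small = subset_gctoo.subset_gctoo(g, rid=["rid1"], exclude_cid=["cid3"])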
cmap/cmapPy
cmapPy/pandasGEXpress/subset_gctoo.py
get_rows_to_keep
def get_rows_to_keep(gctoo, rid=None, row_bool=None, ridx=None, exclude_rid=None): """ Figure out based on the possible row inputs which rows to keep. Args: gctoo (GCToo object): rid (list of strings): row_bool (boolean array): ridx (list of integers): exclude_rid (list of strings): Returns: rows_to_keep (list of strings): row ids to be kept """ # Use rid if provided if rid is not None: assert type(rid) == list, "rid must be a list. rid: {}".format(rid) rows_to_keep = [gctoo_row for gctoo_row in gctoo.data_df.index if gctoo_row in rid] # Tell user if some rids not found num_missing_rids = len(rid) - len(rows_to_keep) if num_missing_rids != 0: logger.info("{} rids were not found in the GCT.".format(num_missing_rids)) # Use row_bool if provided elif row_bool is not None: assert len(row_bool) == gctoo.data_df.shape[0], ( "row_bool must have length equal to gctoo.data_df.shape[0]. " + "len(row_bool): {}, gctoo.data_df.shape[0]: {}".format( len(row_bool), gctoo.data_df.shape[0])) rows_to_keep = gctoo.data_df.index[row_bool].values # Use ridx if provided elif ridx is not None: assert type(ridx[0]) is int, ( "ridx must be a list of integers. ridx[0]: {}, " + "type(ridx[0]): {}").format(ridx[0], type(ridx[0])) assert max(ridx) <= gctoo.data_df.shape[0], ( "ridx contains an integer larger than the number of rows in " + "the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}").format( max(ridx), gctoo.data_df.shape[0]) rows_to_keep = gctoo.data_df.index[ridx].values # If rid, row_bool, and ridx are all None, return all rows else: rows_to_keep = gctoo.data_df.index.values # Use exclude_rid if provided if exclude_rid is not None: # Keep only those rows that are not in exclude_rid rows_to_keep = [row_to_keep for row_to_keep in rows_to_keep if row_to_keep not in exclude_rid] return rows_to_keep
python
def get_rows_to_keep(gctoo, rid=None, row_bool=None, ridx=None, exclude_rid=None): """ Figure out based on the possible row inputs which rows to keep. Args: gctoo (GCToo object): rid (list of strings): row_bool (boolean array): ridx (list of integers): exclude_rid (list of strings): Returns: rows_to_keep (list of strings): row ids to be kept """ # Use rid if provided if rid is not None: assert type(rid) == list, "rid must be a list. rid: {}".format(rid) rows_to_keep = [gctoo_row for gctoo_row in gctoo.data_df.index if gctoo_row in rid] # Tell user if some rids not found num_missing_rids = len(rid) - len(rows_to_keep) if num_missing_rids != 0: logger.info("{} rids were not found in the GCT.".format(num_missing_rids)) # Use row_bool if provided elif row_bool is not None: assert len(row_bool) == gctoo.data_df.shape[0], ( "row_bool must have length equal to gctoo.data_df.shape[0]. " + "len(row_bool): {}, gctoo.data_df.shape[0]: {}".format( len(row_bool), gctoo.data_df.shape[0])) rows_to_keep = gctoo.data_df.index[row_bool].values # Use ridx if provided elif ridx is not None: assert type(ridx[0]) is int, ( "ridx must be a list of integers. ridx[0]: {}, " + "type(ridx[0]): {}").format(ridx[0], type(ridx[0])) assert max(ridx) <= gctoo.data_df.shape[0], ( "ridx contains an integer larger than the number of rows in " + "the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}").format( max(ridx), gctoo.data_df.shape[0]) rows_to_keep = gctoo.data_df.index[ridx].values # If rid, row_bool, and ridx are all None, return all rows else: rows_to_keep = gctoo.data_df.index.values # Use exclude_rid if provided if exclude_rid is not None: # Keep only those rows that are not in exclude_rid rows_to_keep = [row_to_keep for row_to_keep in rows_to_keep if row_to_keep not in exclude_rid] return rows_to_keep
[ "def", "get_rows_to_keep", "(", "gctoo", ",", "rid", "=", "None", ",", "row_bool", "=", "None", ",", "ridx", "=", "None", ",", "exclude_rid", "=", "None", ")", ":", "# Use rid if provided", "if", "rid", "is", "not", "None", ":", "assert", "type", "(", "rid", ")", "==", "list", ",", "\"rid must be a list. rid: {}\"", ".", "format", "(", "rid", ")", "rows_to_keep", "=", "[", "gctoo_row", "for", "gctoo_row", "in", "gctoo", ".", "data_df", ".", "index", "if", "gctoo_row", "in", "rid", "]", "# Tell user if some rids not found", "num_missing_rids", "=", "len", "(", "rid", ")", "-", "len", "(", "rows_to_keep", ")", "if", "num_missing_rids", "!=", "0", ":", "logger", ".", "info", "(", "\"{} rids were not found in the GCT.\"", ".", "format", "(", "num_missing_rids", ")", ")", "# Use row_bool if provided", "elif", "row_bool", "is", "not", "None", ":", "assert", "len", "(", "row_bool", ")", "==", "gctoo", ".", "data_df", ".", "shape", "[", "0", "]", ",", "(", "\"row_bool must have length equal to gctoo.data_df.shape[0]. \"", "+", "\"len(row_bool): {}, gctoo.data_df.shape[0]: {}\"", ".", "format", "(", "len", "(", "row_bool", ")", ",", "gctoo", ".", "data_df", ".", "shape", "[", "0", "]", ")", ")", "rows_to_keep", "=", "gctoo", ".", "data_df", ".", "index", "[", "row_bool", "]", ".", "values", "# Use ridx if provided", "elif", "ridx", "is", "not", "None", ":", "assert", "type", "(", "ridx", "[", "0", "]", ")", "is", "int", ",", "(", "\"ridx must be a list of integers. ridx[0]: {}, \"", "+", "\"type(ridx[0]): {}\"", ")", ".", "format", "(", "ridx", "[", "0", "]", ",", "type", "(", "ridx", "[", "0", "]", ")", ")", "assert", "max", "(", "ridx", ")", "<=", "gctoo", ".", "data_df", ".", "shape", "[", "0", "]", ",", "(", "\"ridx contains an integer larger than the number of rows in \"", "+", "\"the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}\"", ")", ".", "format", "(", "max", "(", "ridx", ")", ",", "gctoo", ".", "data_df", ".", "shape", "[", "0", "]", ")", "rows_to_keep", "=", "gctoo", ".", "data_df", ".", "index", "[", "ridx", "]", ".", "values", "# If rid, row_bool, and ridx are all None, return all rows", "else", ":", "rows_to_keep", "=", "gctoo", ".", "data_df", ".", "index", ".", "values", "# Use exclude_rid if provided", "if", "exclude_rid", "is", "not", "None", ":", "# Keep only those rows that are not in exclude_rid", "rows_to_keep", "=", "[", "row_to_keep", "for", "row_to_keep", "in", "rows_to_keep", "if", "row_to_keep", "not", "in", "exclude_rid", "]", "return", "rows_to_keep" ]
Figure out based on the possible row inputs which rows to keep. Args: gctoo (GCToo object): rid (list of strings): row_bool (boolean array): ridx (list of integers): exclude_rid (list of strings): Returns: rows_to_keep (list of strings): row ids to be kept
[ "Figure", "out", "based", "on", "the", "possible", "row", "inputs", "which", "rows", "to", "keep", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/subset_gctoo.py#L68-L126
train
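A sketch of get_rows_to_keep using the ridx path; my_data.gctx is a hypothetical file with at least three rows, and parse.parse from cmapPy.pandasGEXpress.parse is assumed as the loader.

from cmapPy.pandasGEXpress import parse, subset_gctoo

g = parse.parse("my_data.gctx")  # hypothetical input file

# first three rows by integer position, minus any excluded rid
rows = subset_gctoo.get_rows_to_keep(g, ridx=[0, 1, 2], exclude_rid=["unwanted_probe"])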
cmap/cmapPy
cmapPy/pandasGEXpress/subset_gctoo.py
get_cols_to_keep
def get_cols_to_keep(gctoo, cid=None, col_bool=None, cidx=None, exclude_cid=None): """ Figure out based on the possible columns inputs which columns to keep. Args: gctoo (GCToo object): cid (list of strings): col_bool (boolean array): cidx (list of integers): exclude_cid (list of strings): Returns: cols_to_keep (list of strings): col ids to be kept """ # Use cid if provided if cid is not None: assert type(cid) == list, "cid must be a list. cid: {}".format(cid) cols_to_keep = [gctoo_col for gctoo_col in gctoo.data_df.columns if gctoo_col in cid] # Tell user if some cids not found num_missing_cids = len(cid) - len(cols_to_keep) if num_missing_cids != 0: logger.info("{} cids were not found in the GCT.".format(num_missing_cids)) # Use col_bool if provided elif col_bool is not None: assert len(col_bool) == gctoo.data_df.shape[1], ( "col_bool must have length equal to gctoo.data_df.shape[1]. " + "len(col_bool): {}, gctoo.data_df.shape[1]: {}".format( len(col_bool), gctoo.data_df.shape[1])) cols_to_keep = gctoo.data_df.columns[col_bool].values # Use cidx if provided elif cidx is not None: assert type(cidx[0]) is int, ( "cidx must be a list of integers. cidx[0]: {}, " + "type(cidx[0]): {}").format(cidx[0], type(cidx[0])) assert max(cidx) <= gctoo.data_df.shape[1], ( "cidx contains an integer larger than the number of columns in " + "the GCToo. max(cidx): {}, gctoo.data_df.shape[1]: {}").format( max(cidx), gctoo.data_df.shape[1]) cols_to_keep = gctoo.data_df.columns[cidx].values # If cid, col_bool, and cidx are all None, return all columns else: cols_to_keep = gctoo.data_df.columns.values # Use exclude_cid if provided if exclude_cid is not None: # Keep only those columns that are not in exclude_cid cols_to_keep = [col_to_keep for col_to_keep in cols_to_keep if col_to_keep not in exclude_cid] return cols_to_keep
python
def get_cols_to_keep(gctoo, cid=None, col_bool=None, cidx=None, exclude_cid=None): """ Figure out based on the possible columns inputs which columns to keep. Args: gctoo (GCToo object): cid (list of strings): col_bool (boolean array): cidx (list of integers): exclude_cid (list of strings): Returns: cols_to_keep (list of strings): col ids to be kept """ # Use cid if provided if cid is not None: assert type(cid) == list, "cid must be a list. cid: {}".format(cid) cols_to_keep = [gctoo_col for gctoo_col in gctoo.data_df.columns if gctoo_col in cid] # Tell user if some cids not found num_missing_cids = len(cid) - len(cols_to_keep) if num_missing_cids != 0: logger.info("{} cids were not found in the GCT.".format(num_missing_cids)) # Use col_bool if provided elif col_bool is not None: assert len(col_bool) == gctoo.data_df.shape[1], ( "col_bool must have length equal to gctoo.data_df.shape[1]. " + "len(col_bool): {}, gctoo.data_df.shape[1]: {}".format( len(col_bool), gctoo.data_df.shape[1])) cols_to_keep = gctoo.data_df.columns[col_bool].values # Use cidx if provided elif cidx is not None: assert type(cidx[0]) is int, ( "cidx must be a list of integers. cidx[0]: {}, " + "type(cidx[0]): {}").format(cidx[0], type(cidx[0])) assert max(cidx) <= gctoo.data_df.shape[1], ( "cidx contains an integer larger than the number of columns in " + "the GCToo. max(cidx): {}, gctoo.data_df.shape[1]: {}").format( max(cidx), gctoo.data_df.shape[1]) cols_to_keep = gctoo.data_df.columns[cidx].values # If cid, col_bool, and cidx are all None, return all columns else: cols_to_keep = gctoo.data_df.columns.values # Use exclude_cid if provided if exclude_cid is not None: # Keep only those columns that are not in exclude_cid cols_to_keep = [col_to_keep for col_to_keep in cols_to_keep if col_to_keep not in exclude_cid] return cols_to_keep
[ "def", "get_cols_to_keep", "(", "gctoo", ",", "cid", "=", "None", ",", "col_bool", "=", "None", ",", "cidx", "=", "None", ",", "exclude_cid", "=", "None", ")", ":", "# Use cid if provided", "if", "cid", "is", "not", "None", ":", "assert", "type", "(", "cid", ")", "==", "list", ",", "\"cid must be a list. cid: {}\"", ".", "format", "(", "cid", ")", "cols_to_keep", "=", "[", "gctoo_col", "for", "gctoo_col", "in", "gctoo", ".", "data_df", ".", "columns", "if", "gctoo_col", "in", "cid", "]", "# Tell user if some cids not found", "num_missing_cids", "=", "len", "(", "cid", ")", "-", "len", "(", "cols_to_keep", ")", "if", "num_missing_cids", "!=", "0", ":", "logger", ".", "info", "(", "\"{} cids were not found in the GCT.\"", ".", "format", "(", "num_missing_cids", ")", ")", "# Use col_bool if provided", "elif", "col_bool", "is", "not", "None", ":", "assert", "len", "(", "col_bool", ")", "==", "gctoo", ".", "data_df", ".", "shape", "[", "1", "]", ",", "(", "\"col_bool must have length equal to gctoo.data_df.shape[1]. \"", "+", "\"len(col_bool): {}, gctoo.data_df.shape[1]: {}\"", ".", "format", "(", "len", "(", "col_bool", ")", ",", "gctoo", ".", "data_df", ".", "shape", "[", "1", "]", ")", ")", "cols_to_keep", "=", "gctoo", ".", "data_df", ".", "columns", "[", "col_bool", "]", ".", "values", "# Use cidx if provided", "elif", "cidx", "is", "not", "None", ":", "assert", "type", "(", "cidx", "[", "0", "]", ")", "is", "int", ",", "(", "\"cidx must be a list of integers. cidx[0]: {}, \"", "+", "\"type(cidx[0]): {}\"", ")", ".", "format", "(", "cidx", "[", "0", "]", ",", "type", "(", "cidx", "[", "0", "]", ")", ")", "assert", "max", "(", "cidx", ")", "<=", "gctoo", ".", "data_df", ".", "shape", "[", "1", "]", ",", "(", "\"cidx contains an integer larger than the number of columns in \"", "+", "\"the GCToo. max(cidx): {}, gctoo.data_df.shape[1]: {}\"", ")", ".", "format", "(", "max", "(", "cidx", ")", ",", "gctoo", ".", "data_df", ".", "shape", "[", "1", "]", ")", "cols_to_keep", "=", "gctoo", ".", "data_df", ".", "columns", "[", "cidx", "]", ".", "values", "# If cid, col_bool, and cidx are all None, return all columns", "else", ":", "cols_to_keep", "=", "gctoo", ".", "data_df", ".", "columns", ".", "values", "# Use exclude_cid if provided", "if", "exclude_cid", "is", "not", "None", ":", "# Keep only those columns that are not in exclude_cid", "cols_to_keep", "=", "[", "col_to_keep", "for", "col_to_keep", "in", "cols_to_keep", "if", "col_to_keep", "not", "in", "exclude_cid", "]", "return", "cols_to_keep" ]
Figure out based on the possible columns inputs which columns to keep. Args: gctoo (GCToo object): cid (list of strings): col_bool (boolean array): cidx (list of integers): exclude_cid (list of strings): Returns: cols_to_keep (list of strings): col ids to be kept
[ "Figure", "out", "based", "on", "the", "possible", "columns", "inputs", "which", "columns", "to", "keep", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/subset_gctoo.py#L129-L188
train
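A sketch of get_cols_to_keep with only an exclusion list, under the same hypothetical file and loader assumption as above.

from cmapPy.pandasGEXpress import parse, subset_gctoo

g = parse.parse("my_data.gctx")  # hypothetical input file

# with cid, col_bool, and cidx all None, every column is kept except those excluded
cols = subset_gctoo.get_cols_to_keep(g, exclude_cid=["bad_sample"])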
cmap/cmapPy
cmapPy/set_io/grp.py
read
def read(in_path): """ Read a grp file at the path specified by in_path. Args: in_path (string): path to GRP file Returns: grp (list) """ assert os.path.exists(in_path), "The following GRP file can't be found. in_path: {}".format(in_path) with open(in_path, "r") as f: lines = f.readlines() # need the second conditional to ignore comment lines grp = [line.strip() for line in lines if line and not re.match("^#", line)] return grp
python
def read(in_path): """ Read a grp file at the path specified by in_path. Args: in_path (string): path to GRP file Returns: grp (list) """ assert os.path.exists(in_path), "The following GRP file can't be found. in_path: {}".format(in_path) with open(in_path, "r") as f: lines = f.readlines() # need the second conditional to ignore comment lines grp = [line.strip() for line in lines if line and not re.match("^#", line)] return grp
[ "def", "read", "(", "in_path", ")", ":", "assert", "os", ".", "path", ".", "exists", "(", "in_path", ")", ",", "\"The following GRP file can't be found. in_path: {}\"", ".", "format", "(", "in_path", ")", "with", "open", "(", "in_path", ",", "\"r\"", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "# need the second conditional to ignore comment lines", "grp", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "lines", "if", "line", "and", "not", "re", ".", "match", "(", "\"^#\"", ",", "line", ")", "]", "return", "grp" ]
Read a grp file at the path specified by in_path. Args: in_path (string): path to GRP file Returns: grp (list)
[ "Read", "a", "grp", "file", "at", "the", "path", "specified", "by", "in_path", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/set_io/grp.py#L16-L33
train
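A usage sketch for grp.read; the path is hypothetical.

from cmapPy.set_io import grp

# one set member per line; lines starting with "#" are skipped
members = grp.read("my_gene_set.grp")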
cmap/cmapPy
cmapPy/set_io/grp.py
write
def write(grp, out_path): """ Write a GRP to a text file. Args: grp (list): GRP object to write to new-line delimited text file out_path (string): output path Returns: None """ with open(out_path, "w") as f: for x in grp: f.write(str(x) + "\n")
python
def write(grp, out_path): """ Write a GRP to a text file. Args: grp (list): GRP object to write to new-line delimited text file out_path (string): output path Returns: None """ with open(out_path, "w") as f: for x in grp: f.write(str(x) + "\n")
[ "def", "write", "(", "grp", ",", "out_path", ")", ":", "with", "open", "(", "out_path", ",", "\"w\"", ")", "as", "f", ":", "for", "x", "in", "grp", ":", "f", ".", "write", "(", "str", "(", "x", ")", "+", "\"\\n\"", ")" ]
Write a GRP to a text file. Args: grp (list): GRP object to write to new-line delimited text file out_path (string): output path Returns: None
[ "Write", "a", "GRP", "to", "a", "text", "file", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/set_io/grp.py#L36-L49
train
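A usage sketch for grp.write; the member names and output path are invented.

from cmapPy.set_io import grp

grp.write(["TP53", "EGFR", "MYC"], "my_gene_set.grp")  # one entry per line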
cmap/cmapPy
cmapPy/pandasGEXpress/random_slice.py
make_specified_size_gctoo
def make_specified_size_gctoo(og_gctoo, num_entries, dim): """ Subsets a GCToo instance along either rows or columns to obtain a specified size. Input: - og_gctoo (GCToo): a GCToo instance - num_entries (int): the number of entries to keep - dim (str): the dimension along which to subset. Must be "row" or "col" Output: - new_gctoo (GCToo): the GCToo instance subsetted as specified. """ assert dim in ["row", "col"], "dim specified must be either 'row' or 'col'" dim_index = 0 if "row" == dim else 1 assert num_entries <= og_gctoo.data_df.shape[dim_index], ("number of entries must be smaller than dimension being " "subsetted - num_entries: {} dim: {} dim_index: {} og_gctoo.data_df.shape[dim_index]: {}".format( num_entries, dim, dim_index, og_gctoo.data_df.shape[dim_index])) if dim == "col": columns = [x for x in og_gctoo.data_df.columns.values] numpy.random.shuffle(columns) columns = columns[0:num_entries] rows = og_gctoo.data_df.index.values else: rows = [x for x in og_gctoo.data_df.index.values] numpy.random.shuffle(rows) rows = rows[0:num_entries] columns = og_gctoo.data_df.columns.values new_data_df = og_gctoo.data_df.loc[rows, columns] new_row_meta = og_gctoo.row_metadata_df.loc[rows] new_col_meta = og_gctoo.col_metadata_df.loc[columns] logger.debug( "after slice - new_col_meta.shape: {} new_row_meta.shape: {}".format(new_col_meta.shape, new_row_meta.shape)) # make & return new gctoo instance new_gctoo = GCToo.GCToo(data_df=new_data_df, row_metadata_df=new_row_meta, col_metadata_df=new_col_meta) return new_gctoo
python
def make_specified_size_gctoo(og_gctoo, num_entries, dim): """ Subsets a GCToo instance along either rows or columns to obtain a specified size. Input: - og_gctoo (GCToo): a GCToo instance - num_entries (int): the number of entries to keep - dim (str): the dimension along which to subset. Must be "row" or "col" Output: - new_gctoo (GCToo): the GCToo instance subsetted as specified. """ assert dim in ["row", "col"], "dim specified must be either 'row' or 'col'" dim_index = 0 if "row" == dim else 1 assert num_entries <= og_gctoo.data_df.shape[dim_index], ("number of entries must be smaller than dimension being " "subsetted - num_entries: {} dim: {} dim_index: {} og_gctoo.data_df.shape[dim_index]: {}".format( num_entries, dim, dim_index, og_gctoo.data_df.shape[dim_index])) if dim == "col": columns = [x for x in og_gctoo.data_df.columns.values] numpy.random.shuffle(columns) columns = columns[0:num_entries] rows = og_gctoo.data_df.index.values else: rows = [x for x in og_gctoo.data_df.index.values] numpy.random.shuffle(rows) rows = rows[0:num_entries] columns = og_gctoo.data_df.columns.values new_data_df = og_gctoo.data_df.loc[rows, columns] new_row_meta = og_gctoo.row_metadata_df.loc[rows] new_col_meta = og_gctoo.col_metadata_df.loc[columns] logger.debug( "after slice - new_col_meta.shape: {} new_row_meta.shape: {}".format(new_col_meta.shape, new_row_meta.shape)) # make & return new gctoo instance new_gctoo = GCToo.GCToo(data_df=new_data_df, row_metadata_df=new_row_meta, col_metadata_df=new_col_meta) return new_gctoo
[ "def", "make_specified_size_gctoo", "(", "og_gctoo", ",", "num_entries", ",", "dim", ")", ":", "assert", "dim", "in", "[", "\"row\"", ",", "\"col\"", "]", ",", "\"dim specified must be either 'row' or 'col'\"", "dim_index", "=", "0", "if", "\"row\"", "==", "dim", "else", "1", "assert", "num_entries", "<=", "og_gctoo", ".", "data_df", ".", "shape", "[", "dim_index", "]", ",", "(", "\"number of entries must be smaller than dimension being \"", "\"subsetted - num_entries: {} dim: {} dim_index: {} og_gctoo.data_df.shape[dim_index]: {}\"", ".", "format", "(", "num_entries", ",", "dim", ",", "dim_index", ",", "og_gctoo", ".", "data_df", ".", "shape", "[", "dim_index", "]", ")", ")", "if", "dim", "==", "\"col\"", ":", "columns", "=", "[", "x", "for", "x", "in", "og_gctoo", ".", "data_df", ".", "columns", ".", "values", "]", "numpy", ".", "random", ".", "shuffle", "(", "columns", ")", "columns", "=", "columns", "[", "0", ":", "num_entries", "]", "rows", "=", "og_gctoo", ".", "data_df", ".", "index", ".", "values", "else", ":", "rows", "=", "[", "x", "for", "x", "in", "og_gctoo", ".", "data_df", ".", "index", ".", "values", "]", "numpy", ".", "random", ".", "shuffle", "(", "rows", ")", "rows", "=", "rows", "[", "0", ":", "num_entries", "]", "columns", "=", "og_gctoo", ".", "data_df", ".", "columns", ".", "values", "new_data_df", "=", "og_gctoo", ".", "data_df", ".", "loc", "[", "rows", ",", "columns", "]", "new_row_meta", "=", "og_gctoo", ".", "row_metadata_df", ".", "loc", "[", "rows", "]", "new_col_meta", "=", "og_gctoo", ".", "col_metadata_df", ".", "loc", "[", "columns", "]", "logger", ".", "debug", "(", "\"after slice - new_col_meta.shape: {} new_row_meta.shape: {}\"", ".", "format", "(", "new_col_meta", ".", "shape", ",", "new_row_meta", ".", "shape", ")", ")", "# make & return new gctoo instance", "new_gctoo", "=", "GCToo", ".", "GCToo", "(", "data_df", "=", "new_data_df", ",", "row_metadata_df", "=", "new_row_meta", ",", "col_metadata_df", "=", "new_col_meta", ")", "return", "new_gctoo" ]
Subsets a GCToo instance along either rows or columns to obtain a specified size. Input: - og_gctoo (GCToo): a GCToo instance - num_entries (int): the number of entries to keep - dim (str): the dimension along which to subset. Must be "row" or "col" Output: - new_gctoo (GCToo): the GCToo instance subsetted as specified.
[ "Subsets", "a", "GCToo", "instance", "along", "either", "rows", "or", "columns", "to", "obtain", "a", "specified", "size", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/random_slice.py#L15-L55
train
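A minimal usage sketch for make_specified_size_gctoo above. The import paths are guessed from the file path in this record (cmapPy/pandasGEXpress/random_slice.py), the metadata column names are invented for illustration, and the GCToo construction simply mirrors the call made inside the function; none of this is confirmed by the record itself:

import numpy
import pandas as pd
from cmapPy.pandasGEXpress import GCToo, random_slice   # assumed import paths

# Build a small synthetic GCToo: 4 rows (rids) by 6 columns (cids).
rids = ["r%d" % i for i in range(4)]
cids = ["c%d" % i for i in range(6)]
data_df = pd.DataFrame(numpy.random.rand(4, 6), index=rids, columns=cids)
row_meta = pd.DataFrame({"row_field": ["a"] * 4}, index=rids)   # illustrative metadata
col_meta = pd.DataFrame({"col_field": ["b"] * 6}, index=cids)
gctoo = GCToo.GCToo(data_df=data_df, row_metadata_df=row_meta, col_metadata_df=col_meta)

# Keep 3 randomly chosen columns; every row is retained.
smaller = random_slice.make_specified_size_gctoo(gctoo, num_entries=3, dim="col")
print(smaller.data_df.shape)   # (4, 3)

Because the kept rows or columns are picked with numpy.random.shuffle, seeding numpy.random beforehand is the only way to make the subset reproducible.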
cmap/cmapPy
cmapPy/pandasGEXpress/write_gctx.py
write
def write(gctoo_object, out_file_name, convert_back_to_neg_666=True, gzip_compression_level=6, max_chunk_kb=1024, matrix_dtype=numpy.float32): """ Writes a GCToo instance to specified file. Input: - gctoo_object (GCToo): A GCToo instance. - out_file_name (str): file name to write gctoo_object to. - convert_back_to_neg_666 (bool): whether to convert np.NAN in metadata back to "-666" - gzip_compression_level (int, default=6): Compression level to use for metadata. - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - matrix_dtype (numpy dtype, default=numpy.float32): Storage data type for data matrix. """ # make sure out file has a .gctx suffix gctx_out_name = add_gctx_to_out_name(out_file_name) # open an hdf5 file to write to hdf5_out = h5py.File(gctx_out_name, "w") # write version write_version(hdf5_out) # write src write_src(hdf5_out, gctoo_object, gctx_out_name) # set chunk size for data matrix elem_per_kb = calculate_elem_per_kb(max_chunk_kb, matrix_dtype) chunk_size = set_data_matrix_chunk_size(gctoo_object.data_df.shape, max_chunk_kb, elem_per_kb) # write data matrix hdf5_out.create_dataset(data_matrix_node, data=gctoo_object.data_df.transpose().values, dtype=matrix_dtype) # write col metadata write_metadata(hdf5_out, "col", gctoo_object.col_metadata_df, convert_back_to_neg_666, gzip_compression=gzip_compression_level) # write row metadata write_metadata(hdf5_out, "row", gctoo_object.row_metadata_df, convert_back_to_neg_666, gzip_compression=gzip_compression_level) # close gctx file hdf5_out.close()
python
def write(gctoo_object, out_file_name, convert_back_to_neg_666=True, gzip_compression_level=6, max_chunk_kb=1024, matrix_dtype=numpy.float32): """ Writes a GCToo instance to specified file. Input: - gctoo_object (GCToo): A GCToo instance. - out_file_name (str): file name to write gctoo_object to. - convert_back_to_neg_666 (bool): whether to convert np.NAN in metadata back to "-666" - gzip_compression_level (int, default=6): Compression level to use for metadata. - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - matrix_dtype (numpy dtype, default=numpy.float32): Storage data type for data matrix. """ # make sure out file has a .gctx suffix gctx_out_name = add_gctx_to_out_name(out_file_name) # open an hdf5 file to write to hdf5_out = h5py.File(gctx_out_name, "w") # write version write_version(hdf5_out) # write src write_src(hdf5_out, gctoo_object, gctx_out_name) # set chunk size for data matrix elem_per_kb = calculate_elem_per_kb(max_chunk_kb, matrix_dtype) chunk_size = set_data_matrix_chunk_size(gctoo_object.data_df.shape, max_chunk_kb, elem_per_kb) # write data matrix hdf5_out.create_dataset(data_matrix_node, data=gctoo_object.data_df.transpose().values, dtype=matrix_dtype) # write col metadata write_metadata(hdf5_out, "col", gctoo_object.col_metadata_df, convert_back_to_neg_666, gzip_compression=gzip_compression_level) # write row metadata write_metadata(hdf5_out, "row", gctoo_object.row_metadata_df, convert_back_to_neg_666, gzip_compression=gzip_compression_level) # close gctx file hdf5_out.close()
[ "def", "write", "(", "gctoo_object", ",", "out_file_name", ",", "convert_back_to_neg_666", "=", "True", ",", "gzip_compression_level", "=", "6", ",", "max_chunk_kb", "=", "1024", ",", "matrix_dtype", "=", "numpy", ".", "float32", ")", ":", "# make sure out file has a .gctx suffix", "gctx_out_name", "=", "add_gctx_to_out_name", "(", "out_file_name", ")", "# open an hdf5 file to write to", "hdf5_out", "=", "h5py", ".", "File", "(", "gctx_out_name", ",", "\"w\"", ")", "# write version", "write_version", "(", "hdf5_out", ")", "# write src", "write_src", "(", "hdf5_out", ",", "gctoo_object", ",", "gctx_out_name", ")", "# set chunk size for data matrix", "elem_per_kb", "=", "calculate_elem_per_kb", "(", "max_chunk_kb", ",", "matrix_dtype", ")", "chunk_size", "=", "set_data_matrix_chunk_size", "(", "gctoo_object", ".", "data_df", ".", "shape", ",", "max_chunk_kb", ",", "elem_per_kb", ")", "# write data matrix", "hdf5_out", ".", "create_dataset", "(", "data_matrix_node", ",", "data", "=", "gctoo_object", ".", "data_df", ".", "transpose", "(", ")", ".", "values", ",", "dtype", "=", "matrix_dtype", ")", "# write col metadata", "write_metadata", "(", "hdf5_out", ",", "\"col\"", ",", "gctoo_object", ".", "col_metadata_df", ",", "convert_back_to_neg_666", ",", "gzip_compression", "=", "gzip_compression_level", ")", "# write row metadata", "write_metadata", "(", "hdf5_out", ",", "\"row\"", ",", "gctoo_object", ".", "row_metadata_df", ",", "convert_back_to_neg_666", ",", "gzip_compression", "=", "gzip_compression_level", ")", "# close gctx file", "hdf5_out", ".", "close", "(", ")" ]
Writes a GCToo instance to specified file. Input: - gctoo_object (GCToo): A GCToo instance. - out_file_name (str): file name to write gctoo_object to. - convert_back_to_neg_666 (bool): whether to convert np.NAN in metadata back to "-666" - gzip_compression_level (int, default=6): Compression level to use for metadata. - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - matrix_dtype (numpy dtype, default=numpy.float32): Storage data type for data matrix.
[ "Writes", "a", "GCToo", "instance", "to", "specified", "file", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/write_gctx.py#L19-L61
train
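A hedged sketch of calling write from this record. The module import path is assumed from the file path (cmapPy/pandasGEXpress/write_gctx.py), and gctoo stands for any GCToo instance, such as the synthetic one built after the previous record:

import numpy
from cmapPy.pandasGEXpress import write_gctx   # assumed import path

def save(gctoo, out_name="my_data"):
    # ".gctx" is appended automatically by add_gctx_to_out_name, so this writes my_data.gctx;
    # metadata is gzip-compressed at level 6 and the matrix stored as float32 by default.
    write_gctx.write(gctoo, out_name)
    # Overrides: double-precision matrix and a smaller 512 KB chunk budget.
    write_gctx.write(gctoo, out_name + "_f64", matrix_dtype=numpy.float64, max_chunk_kb=512)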
cmap/cmapPy
cmapPy/pandasGEXpress/write_gctx.py
write_src
def write_src(hdf5_out, gctoo_object, out_file_name): """ Writes src as attribute of gctx out file. Input: - hdf5_out (h5py): hdf5 file to write to - gctoo_object (GCToo): GCToo instance to be written to .gctx - out_file_name (str): name of hdf5 out file. """ if gctoo_object.src == None: hdf5_out.attrs[src_attr] = out_file_name else: hdf5_out.attrs[src_attr] = gctoo_object.src
python
def write_src(hdf5_out, gctoo_object, out_file_name): """ Writes src as attribute of gctx out file. Input: - hdf5_out (h5py): hdf5 file to write to - gctoo_object (GCToo): GCToo instance to be written to .gctx - out_file_name (str): name of hdf5 out file. """ if gctoo_object.src == None: hdf5_out.attrs[src_attr] = out_file_name else: hdf5_out.attrs[src_attr] = gctoo_object.src
[ "def", "write_src", "(", "hdf5_out", ",", "gctoo_object", ",", "out_file_name", ")", ":", "if", "gctoo_object", ".", "src", "==", "None", ":", "hdf5_out", ".", "attrs", "[", "src_attr", "]", "=", "out_file_name", "else", ":", "hdf5_out", ".", "attrs", "[", "src_attr", "]", "=", "gctoo_object", ".", "src" ]
Writes src as attribute of gctx out file. Input: - hdf5_out (h5py): hdf5 file to write to - gctoo_object (GCToo): GCToo instance to be written to .gctx - out_file_name (str): name of hdf5 out file.
[ "Writes", "src", "as", "attribute", "of", "gctx", "out", "file", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/write_gctx.py#L80-L92
train
cmap/cmapPy
cmapPy/pandasGEXpress/write_gctx.py
calculate_elem_per_kb
def calculate_elem_per_kb(max_chunk_kb, matrix_dtype): """ Calculates the number of elem per kb depending on the max chunk size set. Input: - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - matrix_dtype (numpy dtype, default=numpy.float32): Storage data type for data matrix. Currently needs to be np.float32 or np.float64 (TODO: figure out a better way to get bits from a numpy dtype). Returns: elem_per_kb (int), the number of elements per kb for matrix dtype specified. """ if matrix_dtype == numpy.float32: return (max_chunk_kb * 8)/32 elif matrix_dtype == numpy.float64: return (max_chunk_kb * 8)/64 else: msg = "Invalid matrix_dtype: {}; only numpy.float32 and numpy.float64 are currently supported".format(matrix_dtype) logger.error(msg) raise Exception("write_gctx.calculate_elem_per_kb " + msg)
python
def calculate_elem_per_kb(max_chunk_kb, matrix_dtype): """ Calculates the number of elem per kb depending on the max chunk size set. Input: - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - matrix_dtype (numpy dtype, default=numpy.float32): Storage data type for data matrix. Currently needs to be np.float32 or np.float64 (TODO: figure out a better way to get bits from a numpy dtype). Returns: elem_per_kb (int), the number of elements per kb for matrix dtype specified. """ if matrix_dtype == numpy.float32: return (max_chunk_kb * 8)/32 elif matrix_dtype == numpy.float64: return (max_chunk_kb * 8)/64 else: msg = "Invalid matrix_dtype: {}; only numpy.float32 and numpy.float64 are currently supported".format(matrix_dtype) logger.error(msg) raise Exception("write_gctx.calculate_elem_per_kb " + msg)
[ "def", "calculate_elem_per_kb", "(", "max_chunk_kb", ",", "matrix_dtype", ")", ":", "if", "matrix_dtype", "==", "numpy", ".", "float32", ":", "return", "(", "max_chunk_kb", "*", "8", ")", "/", "32", "elif", "matrix_dtype", "==", "numpy", ".", "float64", ":", "return", "(", "max_chunk_kb", "*", "8", ")", "/", "64", "else", ":", "msg", "=", "\"Invalid matrix_dtype: {}; only numpy.float32 and numpy.float64 are currently supported\"", ".", "format", "(", "matrix_dtype", ")", "logger", ".", "error", "(", "msg", ")", "raise", "Exception", "(", "\"write_gctx.calculate_elem_per_kb \"", "+", "msg", ")" ]
Calculates the number of elem per kb depending on the max chunk size set. Input: - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - matrix_dtype (numpy dtype, default=numpy.float32): Storage data type for data matrix. Currently needs to be np.float32 or np.float64 (TODO: figure out a better way to get bits from a numpy dtype). Returns: elem_per_kb (int), the number of elements per kb for matrix dtype specified.
[ "Calculates", "the", "number", "of", "elem", "per", "kb", "depending", "on", "the", "max", "chunk", "size", "set", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/write_gctx.py#L104-L123
train
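The arithmetic in calculate_elem_per_kb is easy to verify by hand; the numbers below follow directly from the code above (the import path is assumed from the file path):

import numpy
from cmapPy.pandasGEXpress import write_gctx   # assumed import path

# With the default max_chunk_kb=1024: 1024 KB * 8 = 8192 Kbit of chunk budget.
print(write_gctx.calculate_elem_per_kb(1024, numpy.float32))   # 8192 / 32 -> 256 (256.0 under Python 3)
print(write_gctx.calculate_elem_per_kb(1024, numpy.float64))   # 8192 / 64 -> 128

# Any other dtype raises Exception("write_gctx.calculate_elem_per_kb Invalid matrix_dtype: ...").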
cmap/cmapPy
cmapPy/pandasGEXpress/write_gctx.py
set_data_matrix_chunk_size
def set_data_matrix_chunk_size(df_shape, max_chunk_kb, elem_per_kb): """ Sets chunk size to use for writing data matrix. Note. Calculation used here is for compatibility with cmapM and cmapR. Input: - df_shape (tuple): shape of input data_df. - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - elem_per_kb (int): Number of elements per kb Returns: chunk size (tuple) to use for chunking the data matrix """ row_chunk_size = min(df_shape[0], 1000) col_chunk_size = min(((max_chunk_kb*elem_per_kb)//row_chunk_size), df_shape[1]) return (row_chunk_size, col_chunk_size)
python
def set_data_matrix_chunk_size(df_shape, max_chunk_kb, elem_per_kb): """ Sets chunk size to use for writing data matrix. Note. Calculation used here is for compatibility with cmapM and cmapR. Input: - df_shape (tuple): shape of input data_df. - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - elem_per_kb (int): Number of elements per kb Returns: chunk size (tuple) to use for chunking the data matrix """ row_chunk_size = min(df_shape[0], 1000) col_chunk_size = min(((max_chunk_kb*elem_per_kb)//row_chunk_size), df_shape[1]) return (row_chunk_size, col_chunk_size)
[ "def", "set_data_matrix_chunk_size", "(", "df_shape", ",", "max_chunk_kb", ",", "elem_per_kb", ")", ":", "row_chunk_size", "=", "min", "(", "df_shape", "[", "0", "]", ",", "1000", ")", "col_chunk_size", "=", "min", "(", "(", "(", "max_chunk_kb", "*", "elem_per_kb", ")", "//", "row_chunk_size", ")", ",", "df_shape", "[", "1", "]", ")", "return", "(", "row_chunk_size", ",", "col_chunk_size", ")" ]
Sets chunk size to use for writing data matrix. Note. Calculation used here is for compatibility with cmapM and cmapR. Input: - df_shape (tuple): shape of input data_df. - max_chunk_kb (int, default=1024): The maximum number of KB a given chunk will occupy - elem_per_kb (int): Number of elements per kb Returns: chunk size (tuple) to use for chunking the data matrix
[ "Sets", "chunk", "size", "to", "use", "for", "writing", "data", "matrix", ".", "Note", ".", "Calculation", "used", "here", "is", "for", "compatibility", "with", "cmapM", "and", "cmapR", "." ]
59d833b64fd2c3a494cdf67fe1eb11fc8008bf76
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/write_gctx.py#L126-L141
train
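Chaining the two helpers shows the HDF5 chunk shape that write will use. The 978 x 100000 matrix shape below is purely illustrative:

import numpy
from cmapPy.pandasGEXpress import write_gctx   # assumed import path

elem_per_kb = write_gctx.calculate_elem_per_kb(1024, numpy.float32)              # 256
chunks = write_gctx.set_data_matrix_chunk_size((978, 100000), 1024, elem_per_kb)
# row chunk = min(978, 1000) = 978
# col chunk = min((1024 * 256) // 978, 100000) = 268
print(chunks)   # (978, 268) -- the column value may come out as a float under Python 3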
danfairs/django-lazysignup
lazysignup/models.py
LazyUserManager.convert
def convert(self, form): """ Convert a lazy user to a non-lazy one. The form passed in is expected to be a ModelForm instance, bound to the user to be converted. The converted ``User`` object is returned. Raises a TypeError if the user is not lazy. """ if not is_lazy_user(form.instance): raise NotLazyError('You cannot convert a non-lazy user') user = form.save() # We need to remove the LazyUser instance assocated with the # newly-converted user self.filter(user=user).delete() converted.send(self, user=user) return user
python
def convert(self, form): """ Convert a lazy user to a non-lazy one. The form passed in is expected to be a ModelForm instance, bound to the user to be converted. The converted ``User`` object is returned. Raises a TypeError if the user is not lazy. """ if not is_lazy_user(form.instance): raise NotLazyError('You cannot convert a non-lazy user') user = form.save() # We need to remove the LazyUser instance assocated with the # newly-converted user self.filter(user=user).delete() converted.send(self, user=user) return user
[ "def", "convert", "(", "self", ",", "form", ")", ":", "if", "not", "is_lazy_user", "(", "form", ".", "instance", ")", ":", "raise", "NotLazyError", "(", "'You cannot convert a non-lazy user'", ")", "user", "=", "form", ".", "save", "(", ")", "# We need to remove the LazyUser instance assocated with the", "# newly-converted user", "self", ".", "filter", "(", "user", "=", "user", ")", ".", "delete", "(", ")", "converted", ".", "send", "(", "self", ",", "user", "=", "user", ")", "return", "user" ]
Convert a lazy user to a non-lazy one. The form passed in is expected to be a ModelForm instance, bound to the user to be converted. The converted ``User`` object is returned. Raises a TypeError if the user is not lazy.
[ "Convert", "a", "lazy", "user", "to", "a", "non", "-", "lazy", "one", ".", "The", "form", "passed", "in", "is", "expected", "to", "be", "a", "ModelForm", "instance", "bound", "to", "the", "user", "to", "be", "converted", "." ]
cfe77e12976d439e1a5aae4387531b2f0f835c6a
https://github.com/danfairs/django-lazysignup/blob/cfe77e12976d439e1a5aae4387531b2f0f835c6a/lazysignup/models.py#L47-L65
train
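A hedged sketch of converting a lazy user from a Django view. ConvertForm is a hypothetical ModelForm defined here only for illustration (lazysignup ships its own conversion form and view, which are not part of this record), the manager is assumed to be exposed as LazyUser.objects, and the URL name and template are placeholders:

from django import forms
from django.contrib.auth import get_user_model
from django.shortcuts import redirect, render
from lazysignup.models import LazyUser

class ConvertForm(forms.ModelForm):
    # Hypothetical conversion form; bind it to the lazy user instance being converted.
    class Meta:
        model = get_user_model()
        fields = ["username", "email"]

def convert_view(request):
    form = ConvertForm(request.POST or None, instance=request.user)
    if request.method == "POST" and form.is_valid():
        # Saves the form, deletes the LazyUser row for this user, and sends the
        # `converted` signal. Raises NotLazyError if request.user is not a lazy user.
        LazyUser.objects.convert(form)
        return redirect("home")                                   # placeholder URL name
    return render(request, "convert.html", {"form": form})        # placeholder template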
danfairs/django-lazysignup
lazysignup/models.py
LazyUserManager.generate_username
def generate_username(self, user_class): """ Generate a new username for a user """ m = getattr(user_class, 'generate_username', None) if m: return m() else: max_length = user_class._meta.get_field( self.username_field).max_length return uuid.uuid4().hex[:max_length]
python
def generate_username(self, user_class): """ Generate a new username for a user """ m = getattr(user_class, 'generate_username', None) if m: return m() else: max_length = user_class._meta.get_field( self.username_field).max_length return uuid.uuid4().hex[:max_length]
[ "def", "generate_username", "(", "self", ",", "user_class", ")", ":", "m", "=", "getattr", "(", "user_class", ",", "'generate_username'", ",", "None", ")", "if", "m", ":", "return", "m", "(", ")", "else", ":", "max_length", "=", "user_class", ".", "_meta", ".", "get_field", "(", "self", ".", "username_field", ")", ".", "max_length", "return", "uuid", ".", "uuid4", "(", ")", ".", "hex", "[", ":", "max_length", "]" ]
Generate a new username for a user
[ "Generate", "a", "new", "username", "for", "a", "user" ]
cfe77e12976d439e1a5aae4387531b2f0f835c6a
https://github.com/danfairs/django-lazysignup/blob/cfe77e12976d439e1a5aae4387531b2f0f835c6a/lazysignup/models.py#L67-L76
train
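A small standalone sketch of the fallback branch in generate_username; the max_length value used here is Django's current default for the username field (150; very old releases used 30), which is an assumption about the surrounding project rather than something stated in this record:

import uuid

# Fallback branch, reproduced on its own: uuid4().hex is 32 hex characters and is
# truncated to the username field's max_length, so the result is normally the full digest.
max_length = 150
username = uuid.uuid4().hex[:max_length]
print(len(username))   # 32

# First branch: if the user class itself defines generate_username (for example a
# @classmethod on a custom user model), that hook is called instead and its return
# value is used verbatim.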
danfairs/django-lazysignup
lazysignup/utils.py
is_lazy_user
def is_lazy_user(user): """ Return True if the passed user is a lazy user. """ # Anonymous users are not lazy. if user.is_anonymous: return False # Check the user backend. If the lazy signup backend # authenticated them, then the user is lazy. backend = getattr(user, 'backend', None) if backend == 'lazysignup.backends.LazySignupBackend': return True # Otherwise, we have to fall back to checking the database. from lazysignup.models import LazyUser return bool(LazyUser.objects.filter(user=user).count() > 0)
python
def is_lazy_user(user): """ Return True if the passed user is a lazy user. """ # Anonymous users are not lazy. if user.is_anonymous: return False # Check the user backend. If the lazy signup backend # authenticated them, then the user is lazy. backend = getattr(user, 'backend', None) if backend == 'lazysignup.backends.LazySignupBackend': return True # Otherwise, we have to fall back to checking the database. from lazysignup.models import LazyUser return bool(LazyUser.objects.filter(user=user).count() > 0)
[ "def", "is_lazy_user", "(", "user", ")", ":", "# Anonymous users are not lazy.", "if", "user", ".", "is_anonymous", ":", "return", "False", "# Check the user backend. If the lazy signup backend", "# authenticated them, then the user is lazy.", "backend", "=", "getattr", "(", "user", ",", "'backend'", ",", "None", ")", "if", "backend", "==", "'lazysignup.backends.LazySignupBackend'", ":", "return", "True", "# Otherwise, we have to fall back to checking the database.", "from", "lazysignup", ".", "models", "import", "LazyUser", "return", "bool", "(", "LazyUser", ".", "objects", ".", "filter", "(", "user", "=", "user", ")", ".", "count", "(", ")", ">", "0", ")" ]
Return True if the passed user is a lazy user.
[ "Return", "True", "if", "the", "passed", "user", "is", "a", "lazy", "user", "." ]
cfe77e12976d439e1a5aae4387531b2f0f835c6a
https://github.com/danfairs/django-lazysignup/blob/cfe77e12976d439e1a5aae4387531b2f0f835c6a/lazysignup/utils.py#L1-L16
train
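A typical use of is_lazy_user is deciding whether to prompt a visitor to finish signing up; the view and template names below are placeholders, while the import path comes from this record's file path (lazysignup/utils.py):

from django.shortcuts import render
from lazysignup.utils import is_lazy_user

def dashboard(request):
    context = {
        # True only for users created by lazysignup: either authenticated via
        # LazySignupBackend or present in the LazyUser table.
        "show_signup_prompt": is_lazy_user(request.user),
    }
    return render(request, "dashboard.html", context)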
bslatkin/dpxdt
dpxdt/server/work_queue.py
add
def add(queue_name, payload=None, content_type=None, source=None, task_id=None, build_id=None, release_id=None, run_id=None): """Adds a work item to a queue. Args: queue_name: Name of the queue to add the work item to. payload: Optional. Payload that describes the work to do as a string. If not a string and content_type is not provided, then this function assumes the payload is a JSON-able Python object. content_type: Optional. Content type of the payload. source: Optional. Who or what originally created the task. task_id: Optional. When supplied, only enqueue this task if a task with this ID does not already exist. If a task with this ID already exists, then this function will do nothing. build_id: Build ID to associate with this task. May be None. release_id: Release ID to associate with this task. May be None. run_id: Run ID to associate with this task. May be None. Returns: ID of the task that was added. """ if task_id: task = WorkQueue.query.filter_by(task_id=task_id).first() if task: return task.task_id else: task_id = uuid.uuid4().hex if payload and not content_type and not isinstance(payload, basestring): payload = json.dumps(payload) content_type = 'application/json' now = datetime.datetime.utcnow() task = WorkQueue( task_id=task_id, queue_name=queue_name, eta=now, source=source, build_id=build_id, release_id=release_id, run_id=run_id, payload=payload, content_type=content_type) db.session.add(task) return task.task_id
python
def add(queue_name, payload=None, content_type=None, source=None, task_id=None, build_id=None, release_id=None, run_id=None): """Adds a work item to a queue. Args: queue_name: Name of the queue to add the work item to. payload: Optional. Payload that describes the work to do as a string. If not a string and content_type is not provided, then this function assumes the payload is a JSON-able Python object. content_type: Optional. Content type of the payload. source: Optional. Who or what originally created the task. task_id: Optional. When supplied, only enqueue this task if a task with this ID does not already exist. If a task with this ID already exists, then this function will do nothing. build_id: Build ID to associate with this task. May be None. release_id: Release ID to associate with this task. May be None. run_id: Run ID to associate with this task. May be None. Returns: ID of the task that was added. """ if task_id: task = WorkQueue.query.filter_by(task_id=task_id).first() if task: return task.task_id else: task_id = uuid.uuid4().hex if payload and not content_type and not isinstance(payload, basestring): payload = json.dumps(payload) content_type = 'application/json' now = datetime.datetime.utcnow() task = WorkQueue( task_id=task_id, queue_name=queue_name, eta=now, source=source, build_id=build_id, release_id=release_id, run_id=run_id, payload=payload, content_type=content_type) db.session.add(task) return task.task_id
[ "def", "add", "(", "queue_name", ",", "payload", "=", "None", ",", "content_type", "=", "None", ",", "source", "=", "None", ",", "task_id", "=", "None", ",", "build_id", "=", "None", ",", "release_id", "=", "None", ",", "run_id", "=", "None", ")", ":", "if", "task_id", ":", "task", "=", "WorkQueue", ".", "query", ".", "filter_by", "(", "task_id", "=", "task_id", ")", ".", "first", "(", ")", "if", "task", ":", "return", "task", ".", "task_id", "else", ":", "task_id", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "if", "payload", "and", "not", "content_type", "and", "not", "isinstance", "(", "payload", ",", "basestring", ")", ":", "payload", "=", "json", ".", "dumps", "(", "payload", ")", "content_type", "=", "'application/json'", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "task", "=", "WorkQueue", "(", "task_id", "=", "task_id", ",", "queue_name", "=", "queue_name", ",", "eta", "=", "now", ",", "source", "=", "source", ",", "build_id", "=", "build_id", ",", "release_id", "=", "release_id", ",", "run_id", "=", "run_id", ",", "payload", "=", "payload", ",", "content_type", "=", "content_type", ")", "db", ".", "session", ".", "add", "(", "task", ")", "return", "task", ".", "task_id" ]
Adds a work item to a queue. Args: queue_name: Name of the queue to add the work item to. payload: Optional. Payload that describes the work to do as a string. If not a string and content_type is not provided, then this function assumes the payload is a JSON-able Python object. content_type: Optional. Content type of the payload. source: Optional. Who or what originally created the task. task_id: Optional. When supplied, only enqueue this task if a task with this ID does not already exist. If a task with this ID already exists, then this function will do nothing. build_id: Build ID to associate with this task. May be None. release_id: Release ID to associate with this task. May be None. run_id: Run ID to associate with this task. May be None. Returns: ID of the task that was added.
[ "Adds", "a", "work", "item", "to", "a", "queue", "." ]
9f860de1731021d99253670429e5f2157e1f6297
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue.py#L100-L145
train
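A hedged sketch of enqueuing work with add. The queue name, payload keys, and IDs are invented for illustration, the import paths are assumed from this record's file path (dpxdt/server/work_queue.py), and committing the SQLAlchemy session afterwards is presumed to be the caller's responsibility, since add only calls db.session.add:

from dpxdt.server import db, work_queue   # assumed import paths

task_id = work_queue.add(
    'capture-screenshot',                                   # hypothetical queue name
    payload={'url': 'http://example.com', 'width': 1024},   # JSON-able -> stored as application/json
    source='api',
    build_id=42, release_id=7, run_id=99)                   # optional associations
db.session.commit()

# Re-adding with the same task_id is a no-op that returns the existing ID,
# which makes enqueueing idempotent.
same_id = work_queue.add('capture-screenshot',
                         payload={'url': 'http://example.com', 'width': 1024},
                         task_id=task_id)
assert same_id == task_id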
bslatkin/dpxdt
dpxdt/server/work_queue.py
_task_to_dict
def _task_to_dict(task): """Converts a WorkQueue to a JSON-able dictionary.""" payload = task.payload if payload and task.content_type == 'application/json': payload = json.loads(payload) return dict( task_id=task.task_id, queue_name=task.queue_name, eta=_datetime_to_epoch_seconds(task.eta), source=task.source, created=_datetime_to_epoch_seconds(task.created), lease_attempts=task.lease_attempts, last_lease=_datetime_to_epoch_seconds(task.last_lease), payload=payload, content_type=task.content_type)
python
def _task_to_dict(task): """Converts a WorkQueue to a JSON-able dictionary.""" payload = task.payload if payload and task.content_type == 'application/json': payload = json.loads(payload) return dict( task_id=task.task_id, queue_name=task.queue_name, eta=_datetime_to_epoch_seconds(task.eta), source=task.source, created=_datetime_to_epoch_seconds(task.created), lease_attempts=task.lease_attempts, last_lease=_datetime_to_epoch_seconds(task.last_lease), payload=payload, content_type=task.content_type)
[ "def", "_task_to_dict", "(", "task", ")", ":", "payload", "=", "task", ".", "payload", "if", "payload", "and", "task", ".", "content_type", "==", "'application/json'", ":", "payload", "=", "json", ".", "loads", "(", "payload", ")", "return", "dict", "(", "task_id", "=", "task", ".", "task_id", ",", "queue_name", "=", "task", ".", "queue_name", ",", "eta", "=", "_datetime_to_epoch_seconds", "(", "task", ".", "eta", ")", ",", "source", "=", "task", ".", "source", ",", "created", "=", "_datetime_to_epoch_seconds", "(", "task", ".", "created", ")", ",", "lease_attempts", "=", "task", ".", "lease_attempts", ",", "last_lease", "=", "_datetime_to_epoch_seconds", "(", "task", ".", "last_lease", ")", ",", "payload", "=", "payload", ",", "content_type", "=", "task", ".", "content_type", ")" ]
Converts a WorkQueue to a JSON-able dictionary.
[ "Converts", "a", "WorkQueue", "to", "a", "JSON", "-", "able", "dictionary", "." ]
9f860de1731021d99253670429e5f2157e1f6297
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue.py#L155-L170
train
bslatkin/dpxdt
dpxdt/server/work_queue.py
lease
def lease(queue_name, owner, count=1, timeout_seconds=60): """Leases a work item from a queue, usually the oldest task available. Args: queue_name: Name of the queue to lease work from. owner: Who or what is leasing the task. count: Lease up to this many tasks. Return value will never have more than this many items present. timeout_seconds: Number of seconds to lock the task for before allowing another owner to lease it. Returns: List of dictionaries representing the task that was leased, or an empty list if no tasks are available to be leased. """ now = datetime.datetime.utcnow() query = ( WorkQueue.query .filter_by(queue_name=queue_name, status=WorkQueue.LIVE) .filter(WorkQueue.eta <= now) .order_by(WorkQueue.eta) .with_lockmode('update') .limit(count)) task_list = query.all() if not task_list: return None next_eta = now + datetime.timedelta(seconds=timeout_seconds) for task in task_list: task.eta = next_eta task.lease_attempts += 1 task.last_owner = owner task.last_lease = now task.heartbeat = None task.heartbeat_number = 0 db.session.add(task) return [_task_to_dict(task) for task in task_list]
python
def lease(queue_name, owner, count=1, timeout_seconds=60): """Leases a work item from a queue, usually the oldest task available. Args: queue_name: Name of the queue to lease work from. owner: Who or what is leasing the task. count: Lease up to this many tasks. Return value will never have more than this many items present. timeout_seconds: Number of seconds to lock the task for before allowing another owner to lease it. Returns: List of dictionaries representing the task that was leased, or an empty list if no tasks are available to be leased. """ now = datetime.datetime.utcnow() query = ( WorkQueue.query .filter_by(queue_name=queue_name, status=WorkQueue.LIVE) .filter(WorkQueue.eta <= now) .order_by(WorkQueue.eta) .with_lockmode('update') .limit(count)) task_list = query.all() if not task_list: return None next_eta = now + datetime.timedelta(seconds=timeout_seconds) for task in task_list: task.eta = next_eta task.lease_attempts += 1 task.last_owner = owner task.last_lease = now task.heartbeat = None task.heartbeat_number = 0 db.session.add(task) return [_task_to_dict(task) for task in task_list]
[ "def", "lease", "(", "queue_name", ",", "owner", ",", "count", "=", "1", ",", "timeout_seconds", "=", "60", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "query", "=", "(", "WorkQueue", ".", "query", ".", "filter_by", "(", "queue_name", "=", "queue_name", ",", "status", "=", "WorkQueue", ".", "LIVE", ")", ".", "filter", "(", "WorkQueue", ".", "eta", "<=", "now", ")", ".", "order_by", "(", "WorkQueue", ".", "eta", ")", ".", "with_lockmode", "(", "'update'", ")", ".", "limit", "(", "count", ")", ")", "task_list", "=", "query", ".", "all", "(", ")", "if", "not", "task_list", ":", "return", "None", "next_eta", "=", "now", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timeout_seconds", ")", "for", "task", "in", "task_list", ":", "task", ".", "eta", "=", "next_eta", "task", ".", "lease_attempts", "+=", "1", "task", ".", "last_owner", "=", "owner", "task", ".", "last_lease", "=", "now", "task", ".", "heartbeat", "=", "None", "task", ".", "heartbeat_number", "=", "0", "db", ".", "session", ".", "add", "(", "task", ")", "return", "[", "_task_to_dict", "(", "task", ")", "for", "task", "in", "task_list", "]" ]
Leases a work item from a queue, usually the oldest task available. Args: queue_name: Name of the queue to lease work from. owner: Who or what is leasing the task. count: Lease up to this many tasks. Return value will never have more than this many items present. timeout_seconds: Number of seconds to lock the task for before allowing another owner to lease it. Returns: List of dictionaries representing the task that was leased, or an empty list if no tasks are available to be leased.
[ "Leases", "a", "work", "item", "from", "a", "queue", "usually", "the", "oldest", "task", "available", "." ]
9f860de1731021d99253670429e5f2157e1f6297
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue.py#L177-L216
train
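A worker-side sketch around lease. Note that, despite its docstring, the code returns None rather than an empty list when nothing is leasable, so a falsy check covers both cases; completing or heart-beating a leased task is handled by other functions in this module that are not shown in this record, and the import paths are assumed:

import time
from dpxdt.server import db, work_queue   # assumed import paths

def worker_loop(queue_name='capture-screenshot', owner='worker-1'):
    while True:
        tasks = work_queue.lease(queue_name, owner, count=5, timeout_seconds=60)
        db.session.commit()                 # persist the new eta/owner on the leased rows
        if not tasks:                       # None (or empty) -> nothing ready yet
            time.sleep(5)
            continue
        for task in tasks:
            payload = task['payload']       # already decoded if it was application/json
            # ... do the work, then mark the task done before its 60-second lease expires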
bslatkin/dpxdt
dpxdt/server/work_queue.py
_get_task_with_policy
def _get_task_with_policy(queue_name, task_id, owner): """Fetches the specified task and enforces ownership policy. Args: queue_name: Name of the queue the work item is on. task_id: ID of the task that is finished. owner: Who or what has the current lease on the task. Returns: The valid WorkQueue task that is currently owned. Raises: TaskDoesNotExistError if the task does not exist. LeaseExpiredError if the lease is no longer active. NotOwnerError if the specified owner no longer owns the task. """ now = datetime.datetime.utcnow() task = ( WorkQueue.query .filter_by(queue_name=queue_name, task_id=task_id) .with_lockmode('update') .first()) if not task: raise TaskDoesNotExistError('task_id=%r' % task_id) # Lease delta should be positive, meaning it has not yet expired! lease_delta = now - task.eta if lease_delta > datetime.timedelta(0): db.session.rollback() raise LeaseExpiredError('queue=%r, task_id=%r expired %s' % ( task.queue_name, task_id, lease_delta)) if task.last_owner != owner: db.session.rollback() raise NotOwnerError('queue=%r, task_id=%r, owner=%r' % ( task.queue_name, task_id, task.last_owner)) return task
python
def _get_task_with_policy(queue_name, task_id, owner): """Fetches the specified task and enforces ownership policy. Args: queue_name: Name of the queue the work item is on. task_id: ID of the task that is finished. owner: Who or what has the current lease on the task. Returns: The valid WorkQueue task that is currently owned. Raises: TaskDoesNotExistError if the task does not exist. LeaseExpiredError if the lease is no longer active. NotOwnerError if the specified owner no longer owns the task. """ now = datetime.datetime.utcnow() task = ( WorkQueue.query .filter_by(queue_name=queue_name, task_id=task_id) .with_lockmode('update') .first()) if not task: raise TaskDoesNotExistError('task_id=%r' % task_id) # Lease delta should be positive, meaning it has not yet expired! lease_delta = now - task.eta if lease_delta > datetime.timedelta(0): db.session.rollback() raise LeaseExpiredError('queue=%r, task_id=%r expired %s' % ( task.queue_name, task_id, lease_delta)) if task.last_owner != owner: db.session.rollback() raise NotOwnerError('queue=%r, task_id=%r, owner=%r' % ( task.queue_name, task_id, task.last_owner)) return task
[ "def", "_get_task_with_policy", "(", "queue_name", ",", "task_id", ",", "owner", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "task", "=", "(", "WorkQueue", ".", "query", ".", "filter_by", "(", "queue_name", "=", "queue_name", ",", "task_id", "=", "task_id", ")", ".", "with_lockmode", "(", "'update'", ")", ".", "first", "(", ")", ")", "if", "not", "task", ":", "raise", "TaskDoesNotExistError", "(", "'task_id=%r'", "%", "task_id", ")", "# Lease delta should be positive, meaning it has not yet expired!", "lease_delta", "=", "now", "-", "task", ".", "eta", "if", "lease_delta", ">", "datetime", ".", "timedelta", "(", "0", ")", ":", "db", ".", "session", ".", "rollback", "(", ")", "raise", "LeaseExpiredError", "(", "'queue=%r, task_id=%r expired %s'", "%", "(", "task", ".", "queue_name", ",", "task_id", ",", "lease_delta", ")", ")", "if", "task", ".", "last_owner", "!=", "owner", ":", "db", ".", "session", ".", "rollback", "(", ")", "raise", "NotOwnerError", "(", "'queue=%r, task_id=%r, owner=%r'", "%", "(", "task", ".", "queue_name", ",", "task_id", ",", "task", ".", "last_owner", ")", ")", "return", "task" ]
Fetches the specified task and enforces ownership policy. Args: queue_name: Name of the queue the work item is on. task_id: ID of the task that is finished. owner: Who or what has the current lease on the task. Returns: The valid WorkQueue task that is currently owned. Raises: TaskDoesNotExistError if the task does not exist. LeaseExpiredError if the lease is no longer active. NotOwnerError if the specified owner no longer owns the task.
[ "Fetches", "the", "specified", "task", "and", "enforces", "ownership", "policy", "." ]
9f860de1731021d99253670429e5f2157e1f6297
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/work_queue.py#L219-L256
train
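The ownership checks in _get_task_with_policy are what the module's finish/heartbeat paths rely on; a hedged sketch of that pattern, assuming the three exception classes are importable from the same module (they are referenced unqualified in the code above, but their definitions are not part of this record):

from dpxdt.server.work_queue import (      # assumed import locations
    _get_task_with_policy, TaskDoesNotExistError, LeaseExpiredError, NotOwnerError)

def still_owned(queue_name, task_id, owner):
    try:
        task = _get_task_with_policy(queue_name, task_id, owner)
    except TaskDoesNotExistError:
        return False   # never enqueued, or already deleted
    except (LeaseExpiredError, NotOwnerError):
        return False   # the session was rolled back; another worker owns the task now
    # Safe to mutate `task` here: the row was selected FOR UPDATE and the lease is ours.
    return task is not None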