code | docstring | text |
---|---|---|
def _cleanup(lst):
'''
Return a list of non-empty dictionaries.
'''
clean = []
for ele in lst:
if ele and isinstance(ele, dict):
clean.append(ele)
return clean | Return a list of non-empty dictionaries. | Below is the instruction that describes the task:
### Input:
Return a list of non-empty dictionaries.
### Response:
def _cleanup(lst):
'''
Return a list of non-empty dictionaries.
'''
clean = []
for ele in lst:
if ele and isinstance(ele, dict):
clean.append(ele)
return clean |
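A brief usage sketch for the `_cleanup` helper above; the input list is hypothetical, and only truthy `dict` entries survive the filter:

```python
# Hypothetical mixed input: empty dicts, None, and a non-dict are dropped.
records = [{'a': 1}, {}, None, 'not-a-dict', {'b': 2}]

assert _cleanup(records) == [{'a': 1}, {'b': 2}]
```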
def render(self, name, color=True, just=None, **kwargs):
"""
Render the selected prompt.
Parameters
----------
name : str
Which prompt to render. One of 'in', 'in2', 'out', 'rewrite'
color : bool
If True (default), include ANSI escape sequences for a coloured prompt.
just : bool
If True, justify the prompt to the width of the last prompt. The
default is stored in self.justify.
**kwargs :
Additional arguments will be passed to the string formatting operation,
so they can override the values that would otherwise fill in the
template.
Returns
-------
A string containing the rendered prompt.
"""
res = self._render(name, color=color, **kwargs)
# Handle justification of prompt
invis_chars = self.invisible_chars[name] if color else 0
self.txtwidth = _lenlastline(res) - invis_chars
just = self.justify if (just is None) else just
# If the prompt spans more than one line, don't try to justify it:
if just and name != 'in' and ('\n' not in res) and ('\r' not in res):
res = res.rjust(self.width + invis_chars)
self.width = _lenlastline(res) - invis_chars
return res | Render the selected prompt.
Parameters
----------
name : str
Which prompt to render. One of 'in', 'in2', 'out', 'rewrite'
color : bool
If True (default), include ANSI escape sequences for a coloured prompt.
just : bool
If True, justify the prompt to the width of the last prompt. The
default is stored in self.justify.
**kwargs :
Additional arguments will be passed to the string formatting operation,
so they can override the values that would otherwise fill in the
template.
Returns
-------
A string containing the rendered prompt. | Below is the instruction that describes the task:
### Input:
Render the selected prompt.
Parameters
----------
name : str
Which prompt to render. One of 'in', 'in2', 'out', 'rewrite'
color : bool
If True (default), include ANSI escape sequences for a coloured prompt.
just : bool
If True, justify the prompt to the width of the last prompt. The
default is stored in self.justify.
**kwargs :
Additional arguments will be passed to the string formatting operation,
so they can override the values that would otherwise fill in the
template.
Returns
-------
A string containing the rendered prompt.
### Response:
def render(self, name, color=True, just=None, **kwargs):
"""
Render the selected prompt.
Parameters
----------
name : str
Which prompt to render. One of 'in', 'in2', 'out', 'rewrite'
color : bool
If True (default), include ANSI escape sequences for a coloured prompt.
just : bool
If True, justify the prompt to the width of the last prompt. The
default is stored in self.justify.
**kwargs :
Additional arguments will be passed to the string formatting operation,
so they can override the values that would otherwise fill in the
template.
Returns
-------
A string containing the rendered prompt.
"""
res = self._render(name, color=color, **kwargs)
# Handle justification of prompt
invis_chars = self.invisible_chars[name] if color else 0
self.txtwidth = _lenlastline(res) - invis_chars
just = self.justify if (just is None) else just
# If the prompt spans more than one line, don't try to justify it:
if just and name != 'in' and ('\n' not in res) and ('\r' not in res):
res = res.rjust(self.width + invis_chars)
self.width = _lenlastline(res) - invis_chars
return res |
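The justification step in `render` boils down to `str.rjust` against the width recorded for the previous prompt; a standalone sketch of that one step, assuming no ANSI escapes so that `invis_chars == 0`:

```python
# Width recorded when the previous prompt was rendered.
previous_width = len('In [10]: ')

# A shorter follow-up prompt is right-justified so the columns line up.
res = 'Out[1]: '.rjust(previous_width)
print(repr(res))  # ' Out[1]: '
```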
def _set_data(self):
"""
This method will be called to set Series data
"""
if getattr(self, 'data', False) and not getattr(self, '_x', False) and not getattr(self, '_y', False):
_x = XVariable()
_y = YVariable()
_x.contribute_to_class(self, 'X', self.data)
_y.contribute_to_class(self, 'Y', self.data)
self['data'] = zip(self._x.points, self._y.points)
else:
for axis in ('_x', '_y'):
axis_obj = getattr(self, axis, False)
if not axis_obj:
raise exception.MissingAxisException("%s missing" % axis)
if not getattr(axis_obj, 'points', False):
raise exception.MissingDataException()
self['data'] = zip(self._x.points, self._y.points) | This method will be called to set Series data | Below is the instruction that describes the task:
### Input:
This method will be called to set Series data
### Response:
def _set_data(self):
"""
This method will be called to set Series data
"""
if getattr(self, 'data', False) and not getattr(self, '_x', False) and not getattr(self, '_y', False):
_x = XVariable()
_y = YVariable()
_x.contribute_to_class(self, 'X', self.data)
_y.contribute_to_class(self, 'Y', self.data)
self['data'] = zip(self._x.points, self._y.points)
else:
for axis in ('_x', '_y'):
axis_obj = getattr(self, axis, False)
if not axis_obj:
raise exception.MissingAxisException("%s missing" % axis)
if not getattr(axis_obj, 'points', False):
raise exception.MissingDataException()
self['data'] = zip(self._x.points, self._y.points) |
def seed_aws_organization(ctx, owner):
"""Seeds SWAG from an AWS organization."""
swag = create_swag_from_ctx(ctx)
accounts = swag.get_all()
_ids = [result.get('id') for result in accounts]
client = boto3.client('organizations')
paginator = client.get_paginator('list_accounts')
response_iterator = paginator.paginate()
count = 0
for response in response_iterator:
for account in response['Accounts']:
if account['Id'] in _ids:
click.echo(click.style(
'Ignoring Duplicate Account. AccountId: {} already exists in SWAG'.format(account['Id']), fg='yellow')
)
continue
if account['Status'] == 'SUSPENDED':
status = 'deprecated'
else:
status = 'created'
data = {
'id': account['Id'],
'name': account['Name'],
'description': 'Account imported from AWS organization.',
'email': account['Email'],
'owner': owner,
'provider': 'aws',
'contacts': [],
'sensitive': False,
'status': [{'region': 'all', 'status': status}]
}
click.echo(click.style(
'Seeded Account. AccountName: {}'.format(data['name']), fg='green')
)
count += 1
swag.create(data, dry_run=ctx.dry_run)
click.echo('Seeded {} accounts to SWAG.'.format(count)) | Seeds SWAG from an AWS organization. | Below is the instruction that describes the task:
### Input:
Seeds SWAG from an AWS organization.
### Response:
def seed_aws_organization(ctx, owner):
"""Seeds SWAG from an AWS organization."""
swag = create_swag_from_ctx(ctx)
accounts = swag.get_all()
_ids = [result.get('id') for result in accounts]
client = boto3.client('organizations')
paginator = client.get_paginator('list_accounts')
response_iterator = paginator.paginate()
count = 0
for response in response_iterator:
for account in response['Accounts']:
if account['Id'] in _ids:
click.echo(click.style(
'Ignoring Duplicate Account. AccountId: {} already exists in SWAG'.format(account['Id']), fg='yellow')
)
continue
if account['Status'] == 'SUSPENDED':
status = 'deprecated'
else:
status = 'created'
data = {
'id': account['Id'],
'name': account['Name'],
'description': 'Account imported from AWS organization.',
'email': account['Email'],
'owner': owner,
'provider': 'aws',
'contacts': [],
'sensitive': False,
'status': [{'region': 'all', 'status': status}]
}
click.echo(click.style(
'Seeded Account. AccountName: {}'.format(data['name']), fg='green')
)
count += 1
swag.create(data, dry_run=ctx.dry_run)
click.echo('Seeded {} accounts to SWAG.'.format(count)) |
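For context, the pagination pattern used in `seed_aws_organization` can be exercised on its own. This sketch assumes AWS credentials with `organizations:ListAccounts` permission are configured:

```python
import boto3

client = boto3.client('organizations')
paginator = client.get_paginator('list_accounts')

# Walk every page of the organization's accounts, as the command above does.
for page in paginator.paginate():
    for account in page['Accounts']:
        print(account['Id'], account['Name'], account['Status'])
```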
def timezone(client, location, timestamp=None, language=None):
"""Get time zone for a location on the earth, as well as that location's
time offset from UTC.
:param location: The latitude/longitude value representing the location to
look up.
:type location: string, dict, list, or tuple
:param timestamp: Timestamp specifies the desired time as seconds since
midnight, January 1, 1970 UTC. The Time Zone API uses the timestamp to
determine whether or not Daylight Savings should be applied. Times
before 1970 can be expressed as negative values. Optional. Defaults to
``datetime.utcnow()``.
:type timestamp: int or datetime.datetime
:param language: The language in which to return results.
:type language: string
:rtype: dict
"""
params = {
"location": convert.latlng(location),
"timestamp": convert.time(timestamp or datetime.utcnow())
}
if language:
params["language"] = language
return client._request( "/maps/api/timezone/json", params) | Get time zone for a location on the earth, as well as that location's
time offset from UTC.
:param location: The latitude/longitude value representing the location to
look up.
:type location: string, dict, list, or tuple
:param timestamp: Timestamp specifies the desired time as seconds since
midnight, January 1, 1970 UTC. The Time Zone API uses the timestamp to
determine whether or not Daylight Savings should be applied. Times
before 1970 can be expressed as negative values. Optional. Defaults to
``datetime.utcnow()``.
:type timestamp: int or datetime.datetime
:param language: The language in which to return results.
:type language: string
:rtype: dict | Below is the instruction that describes the task:
### Input:
Get time zone for a location on the earth, as well as that location's
time offset from UTC.
:param location: The latitude/longitude value representing the location to
look up.
:type location: string, dict, list, or tuple
:param timestamp: Timestamp specifies the desired time as seconds since
midnight, January 1, 1970 UTC. The Time Zone API uses the timestamp to
determine whether or not Daylight Savings should be applied. Times
before 1970 can be expressed as negative values. Optional. Defaults to
``datetime.utcnow()``.
:type timestamp: int or datetime.datetime
:param language: The language in which to return results.
:type language: string
:rtype: dict
### Response:
def timezone(client, location, timestamp=None, language=None):
"""Get time zone for a location on the earth, as well as that location's
time offset from UTC.
:param location: The latitude/longitude value representing the location to
look up.
:type location: string, dict, list, or tuple
:param timestamp: Timestamp specifies the desired time as seconds since
midnight, January 1, 1970 UTC. The Time Zone API uses the timestamp to
determine whether or not Daylight Savings should be applied. Times
before 1970 can be expressed as negative values. Optional. Defaults to
``datetime.utcnow()``.
:type timestamp: int or datetime.datetime
:param language: The language in which to return results.
:type language: string
:rtype: dict
"""
params = {
"location": convert.latlng(location),
"timestamp": convert.time(timestamp or datetime.utcnow())
}
if language:
params["language"] = language
return client._request( "/maps/api/timezone/json", params) |
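This helper matches the module-level `timezone` function of the `googlemaps` Python client (an assumption based on its signature); if so, it is normally reached through `googlemaps.Client`, where it is bound as a method. The API key below is a placeholder:

```python
from datetime import datetime

import googlemaps  # assumes the googlemaps client library is installed

gmaps = googlemaps.Client(key='YOUR-API-KEY')  # hypothetical key
result = gmaps.timezone((39.603481, -104.865402), timestamp=datetime(2016, 7, 1))
print(result['timeZoneId'], result['rawOffset'], result['dstOffset'])
```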
def analyse(self, traj, network, current_subrun, subrun_list, network_dict):
"""Extracts monitor data and plots.
Data extraction is done if all subruns have been completed,
i.e. `len(subrun_list)==0`
First, extracts results from the monitors and stores them into `traj`.
Next, uses the extracted data for plots.
:param traj:
Trajectory container
Adds:
Data from monitors
:param network: The BRIAN network
:param current_subrun: BrianParameter
:param subrun_list: List of coming subruns
:param network_dict: Dictionary of items shared among all components
"""
if len(subrun_list)==0:
traj.f_add_result(Brian2MonitorResult, 'monitors.spikes_e', self.spike_monitor,
comment = 'The spiketimes of the excitatory population')
traj.f_add_result(Brian2MonitorResult, 'monitors.V', self.V_monitor,
comment = 'Membrane voltage of four neurons from 2 clusters')
traj.f_add_result(Brian2MonitorResult, 'monitors.I_syn_e', self.I_syn_e_monitor,
comment = 'I_syn_e of four neurons from 2 clusters')
traj.f_add_result(Brian2MonitorResult, 'monitors.I_syn_i', self.I_syn_i_monitor,
comment = 'I_syn_i of four neurons from 2 clusters')
print('Plotting')
if traj.parameters.analysis.make_plots:
self._print_graphs(traj) | Extracts monitor data and plots.
Data extraction is done if all subruns have been completed,
i.e. `len(subrun_list)==0`
First, extracts results from the monitors and stores them into `traj`.
Next, uses the extracted data for plots.
:param traj:
Trajectory container
Adds:
Data from monitors
:param network: The BRIAN network
:param current_subrun: BrianParameter
:param subrun_list: List of coming subruns
:param network_dict: Dictionary of items shared among all components | Below is the instruction that describes the task:
### Input:
Extracts monitor data and plots.
Data extraction is done if all subruns have been completed,
i.e. `len(subrun_list)==0`
First, extracts results from the monitors and stores them into `traj`.
Next, uses the extracted data for plots.
:param traj:
Trajectory container
Adds:
Data from monitors
:param network: The BRIAN network
:param current_subrun: BrianParameter
:param subrun_list: List of coming subruns
:param network_dict: Dictionary of items shared among all components
### Response:
def analyse(self, traj, network, current_subrun, subrun_list, network_dict):
"""Extracts monitor data and plots.
Data extraction is done if all subruns have been completed,
i.e. `len(subrun_list)==0`
First, extracts results from the monitors and stores them into `traj`.
Next, uses the extracted data for plots.
:param traj:
Trajectory container
Adds:
Data from monitors
:param network: The BRIAN network
:param current_subrun: BrianParameter
:param subrun_list: List of coming subruns
:param network_dict: Dictionary of items shared among all components
"""
if len(subrun_list)==0:
traj.f_add_result(Brian2MonitorResult, 'monitors.spikes_e', self.spike_monitor,
comment = 'The spiketimes of the excitatory population')
traj.f_add_result(Brian2MonitorResult, 'monitors.V', self.V_monitor,
comment = 'Membrane voltage of four neurons from 2 clusters')
traj.f_add_result(Brian2MonitorResult, 'monitors.I_syn_e', self.I_syn_e_monitor,
comment = 'I_syn_e of four neurons from 2 clusters')
traj.f_add_result(Brian2MonitorResult, 'monitors.I_syn_i', self.I_syn_i_monitor,
comment = 'I_syn_i of four neurons from 2 clusters')
print('Plotting')
if traj.parameters.analysis.make_plots:
self._print_graphs(traj) |
def _checkSetpointValue( setpointvalue, maxvalue ):
"""Check that the given setpointvalue is valid.
Args:
* setpointvalue (numerical): The setpoint value to be checked. Must be positive.
* maxvalue (numerical): Upper limit for setpoint value. Must be positive.
Raises:
TypeError, ValueError
"""
if maxvalue is None:
raise TypeError('The maxvalue (for the setpoint) must not be None!')
minimalmodbus._checkNumerical(setpointvalue, minvalue=0, maxvalue=maxvalue, description='setpoint value') | Check that the given setpointvalue is valid.
Args:
* setpointvalue (numerical): The setpoint value to be checked. Must be positive.
* maxvalue (numerical): Upper limit for setpoint value. Must be positive.
Raises:
TypeError, ValueError | Below is the instruction that describes the task:
### Input:
Check that the given setpointvalue is valid.
Args:
* setpointvalue (numerical): The setpoint value to be checked. Must be positive.
* maxvalue (numerical): Upper limit for setpoint value. Must be positive.
Raises:
TypeError, ValueError
### Response:
def _checkSetpointValue( setpointvalue, maxvalue ):
"""Check that the given setpointvalue is valid.
Args:
* setpointvalue (numerical): The setpoint value to be checked. Must be positive.
* maxvalue (numerical): Upper limit for setpoint value. Must be positive.
Raises:
TypeError, ValueError
"""
if maxvalue is None:
raise TypeError('The maxvalue (for the setpoint) must not be None!')
minimalmodbus._checkNumerical(setpointvalue, minvalue=0, maxvalue=maxvalue, description='setpoint value') |
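A short sketch of how the guard above behaves, assuming `minimalmodbus` is installed and `_checkSetpointValue` is defined as shown:

```python
_checkSetpointValue(50.0, maxvalue=100.0)   # valid: returns None silently

try:
    _checkSetpointValue(50.0, maxvalue=None)
except TypeError as err:
    print(err)   # The maxvalue (for the setpoint) must not be None!

try:
    _checkSetpointValue(150.0, maxvalue=100.0)
except ValueError as err:
    print(err)   # raised by minimalmodbus._checkNumerical for out-of-range values
```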
def init_config(self, app):
"""Initialize configuration.
:param app: An instance of :class:`~flask.Flask`.
"""
_vars = ['BASE_TEMPLATE', 'COVER_TEMPLATE', 'SETTINGS_TEMPLATE']
# Sets RequireJS config and SASS binary as well if not already set.
for k in dir(config):
if k.startswith('THEME_') or k in [
'REQUIREJS_CONFIG', 'SASS_BIN'] + _vars:
app.config.setdefault(k, getattr(config, k))
# Set THEME_<name>_TEMPLATE from <name>_TEMPLATE variables if not
# already set.
for varname in _vars:
theme_varname = 'THEME_{}'.format(varname)
if app.config[theme_varname] is None:
app.config[theme_varname] = app.config[varname]
app.config.setdefault(
'ADMIN_BASE_TEMPLATE', config.ADMIN_BASE_TEMPLATE) | Initialize configuration.
:param app: An instance of :class:`~flask.Flask`. | Below is the instruction that describes the task:
### Input:
Initialize configuration.
:param app: An instance of :class:`~flask.Flask`.
### Response:
def init_config(self, app):
"""Initialize configuration.
:param app: An instance of :class:`~flask.Flask`.
"""
_vars = ['BASE_TEMPLATE', 'COVER_TEMPLATE', 'SETTINGS_TEMPLATE']
# Sets RequireJS config and SASS binary as well if not already set.
for k in dir(config):
if k.startswith('THEME_') or k in [
'REQUIREJS_CONFIG', 'SASS_BIN'] + _vars:
app.config.setdefault(k, getattr(config, k))
# Set THEME_<name>_TEMPLATE from <name>_TEMPLATE variables if not
# already set.
for varname in _vars:
theme_varname = 'THEME_{}'.format(varname)
if app.config[theme_varname] is None:
app.config[theme_varname] = app.config[varname]
app.config.setdefault(
'ADMIN_BASE_TEMPLATE', config.ADMIN_BASE_TEMPLATE) |
def print_stats(self, stream=None):
"""
Log annotated garbage objects to console or file.
:param stream: open file, uses sys.stdout if not given
"""
if not stream: # pragma: no cover
stream = sys.stdout
self.metadata.sort(key=lambda x: -x.size)
stream.write('%-10s %8s %-12s %-46s\n' % ('id', 'size', 'type', 'representation'))
for g in self.metadata:
stream.write('0x%08x %8d %-12s %-46s\n' % (g.id, g.size, trunc(g.type, 12),
trunc(g.str, 46)))
stream.write('Garbage: %8d collected objects (%s in cycles): %12s\n' % \
(self.count, self.num_in_cycles, pp(self.total_size))) | Log annotated garbage objects to console or file.
:param stream: open file, uses sys.stdout if not given | Below is the instruction that describes the task:
### Input:
Log annotated garbage objects to console or file.
:param stream: open file, uses sys.stdout if not given
### Response:
def print_stats(self, stream=None):
"""
Log annotated garbage objects to console or file.
:param stream: open file, uses sys.stdout if not given
"""
if not stream: # pragma: no cover
stream = sys.stdout
self.metadata.sort(key=lambda x: -x.size)
stream.write('%-10s %8s %-12s %-46s\n' % ('id', 'size', 'type', 'representation'))
for g in self.metadata:
stream.write('0x%08x %8d %-12s %-46s\n' % (g.id, g.size, trunc(g.type, 12),
trunc(g.str, 46)))
stream.write('Garbage: %8d collected objects (%s in cycles): %12s\n' % \
(self.count, self.num_in_cycles, pp(self.total_size))) |
def delete_archives(self, *archives):
'''
Delete archives
:return:
'''
# Remove paths
_archives = []
for archive in archives:
_archives.append(os.path.basename(archive))
archives = _archives[:]
ret = {'files': {}, 'errors': {}}
for archive in self.archives():
arc_dir = os.path.dirname(archive)
archive = os.path.basename(archive)
if archives and archive in archives or not archives:
archive = os.path.join(arc_dir, archive)
try:
os.unlink(archive)
ret['files'][archive] = 'removed'
except Exception as err:
ret['errors'][archive] = str(err)
ret['files'][archive] = 'left'
return ret | Delete archives
:return: | Below is the instruction that describes the task:
### Input:
Delete archives
:return:
### Response:
def delete_archives(self, *archives):
'''
Delete archives
:return:
'''
# Remove paths
_archives = []
for archive in archives:
_archives.append(os.path.basename(archive))
archives = _archives[:]
ret = {'files': {}, 'errors': {}}
for archive in self.archives():
arc_dir = os.path.dirname(archive)
archive = os.path.basename(archive)
if archives and archive in archives or not archives:
archive = os.path.join(arc_dir, archive)
try:
os.unlink(archive)
ret['files'][archive] = 'removed'
except Exception as err:
ret['errors'][archive] = str(err)
ret['files'][archive] = 'left'
return ret |
def multenterbox(msg="Fill in values for the fields.", title=" ",
fields=(), values=()):
r"""
Show screen with multiple data entry fields.
If there are fewer values than names, the list of values is padded with
empty strings until the number of values is the same as the number
of names.
If there are more values than names, the list of values
is truncated so that there are as many values as names.
Returns a list of the values of the fields,
or None if the user cancels the operation.
Here is some example code, that shows how values returned from
multenterbox can be checked for validity before they are accepted::
msg = "Enter your personal information"
title = "Credit Card Application"
fieldNames = ["Name","Street Address","City","State","ZipCode"]
fieldValues = [] # we start with blanks for the values
fieldValues = multenterbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues is None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg += ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "":
break # no problems found
fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues)
print("Reply was: %s" % str(fieldValues))
:param str msg: the msg to be displayed.
:param str title: the window title
:param list fields: a list of fieldnames.
:param list values: a list of field values
:return: String
"""
return bb.__multfillablebox(msg, title, fields, values, None) | r"""
Show screen with multiple data entry fields.
If there are fewer values than names, the list of values is padded with
empty strings until the number of values is the same as the number
of names.
If there are more values than names, the list of values
is truncated so that there are as many values as names.
Returns a list of the values of the fields,
or None if the user cancels the operation.
Here is some example code, that shows how values returned from
multenterbox can be checked for validity before they are accepted::
msg = "Enter your personal information"
title = "Credit Card Application"
fieldNames = ["Name","Street Address","City","State","ZipCode"]
fieldValues = [] # we start with blanks for the values
fieldValues = multenterbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues is None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg += ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "":
break # no problems found
fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues)
print("Reply was: %s" % str(fieldValues))
:param str msg: the msg to be displayed.
:param str title: the window title
:param list fields: a list of fieldnames.
:param list values: a list of field values
:return: String | Below is the instruction that describes the task:
### Input:
r"""
Show screen with multiple data entry fields.
If there are fewer values than names, the list of values is padded with
empty strings until the number of values is the same as the number
of names.
If there are more values than names, the list of values
is truncated so that there are as many values as names.
Returns a list of the values of the fields,
or None if the user cancels the operation.
Here is some example code, that shows how values returned from
multenterbox can be checked for validity before they are accepted::
msg = "Enter your personal information"
title = "Credit Card Application"
fieldNames = ["Name","Street Address","City","State","ZipCode"]
fieldValues = [] # we start with blanks for the values
fieldValues = multenterbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues is None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg += ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "":
break # no problems found
fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues)
print("Reply was: %s" % str(fieldValues))
:param str msg: the msg to be displayed.
:param str title: the window title
:param list fields: a list of fieldnames.
:param list values: a list of field values
:return: String
### Response:
def multenterbox(msg="Fill in values for the fields.", title=" ",
fields=(), values=()):
r"""
Show screen with multiple data entry fields.
If there are fewer values than names, the list of values is padded with
empty strings until the number of values is the same as the number
of names.
If there are more values than names, the list of values
is truncated so that there are as many values as names.
Returns a list of the values of the fields,
or None if the user cancels the operation.
Here is some example code, that shows how values returned from
multenterbox can be checked for validity before they are accepted::
msg = "Enter your personal information"
title = "Credit Card Application"
fieldNames = ["Name","Street Address","City","State","ZipCode"]
fieldValues = [] # we start with blanks for the values
fieldValues = multenterbox(msg,title, fieldNames)
# make sure that none of the fields was left blank
while 1:
if fieldValues is None: break
errmsg = ""
for i in range(len(fieldNames)):
if fieldValues[i].strip() == "":
errmsg += ('"%s" is a required field.\n\n' % fieldNames[i])
if errmsg == "":
break # no problems found
fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues)
print("Reply was: %s" % str(fieldValues))
:param str msg: the msg to be displayed.
:param str title: the window title
:param list fields: a list of fieldnames.
:param list values: a list of field values
:return: String
"""
return bb.__multfillablebox(msg, title, fields, values, None) |
def _forwardImplementation(self, inbuf, outbuf):
""" Proportional probability method.
"""
assert self.module
propensities = self.module.getActionValues(0)
summedProps = sum(propensities)
probabilities = propensities / summedProps
action = eventGenerator(probabilities)
# action = drawIndex(probabilities)
outbuf[:] = scipy.array([action]) | Proportional probability method. | Below is the instruction that describes the task:
### Input:
Proportional probability method.
### Response:
def _forwardImplementation(self, inbuf, outbuf):
""" Proportional probability method.
"""
assert self.module
propensities = self.module.getActionValues(0)
summedProps = sum(propensities)
probabilities = propensities / summedProps
action = eventGenerator(probabilities)
# action = drawIndex(probabilities)
outbuf[:] = scipy.array([action]) |
def query(url, **kwargs):
'''
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt '*' http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
For more information about the ``http.query`` module, refer to the
:ref:`HTTP Tutorial <tutorial-http>`.
'''
opts = __opts__.copy()
if 'opts' in kwargs:
opts.update(kwargs['opts'])
del kwargs['opts']
return salt.utils.http.query(url=url, opts=opts, **kwargs) | Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt '*' http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
For more information about the ``http.query`` module, refer to the
:ref:`HTTP Tutorial <tutorial-http>`. | Below is the instruction that describes the task:
### Input:
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt '*' http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
For more information about the ``http.query`` module, refer to the
:ref:`HTTP Tutorial <tutorial-http>`.
### Response:
def query(url, **kwargs):
'''
Query a resource, and decode the return data
Passes through all the parameters described in the
:py:func:`utils.http.query function <salt.utils.http.query>`:
.. autofunction:: salt.utils.http.query
CLI Example:
.. code-block:: bash
salt '*' http.query http://somelink.com/
salt '*' http.query http://somelink.com/ method=POST \
params='key1=val1&key2=val2'
salt '*' http.query http://somelink.com/ method=POST \
data='<xml>somecontent</xml>'
For more information about the ``http.query`` module, refer to the
:ref:`HTTP Tutorial <tutorial-http>`.
'''
opts = __opts__.copy()
if 'opts' in kwargs:
opts.update(kwargs['opts'])
del kwargs['opts']
return salt.utils.http.query(url=url, opts=opts, **kwargs) |
def pipe_subelement(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator extracts select sub-elements from a feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {'path': {'value': <element path>}}
Yields
------
_OUTPUT : items
"""
path = DotDict(conf).get('path', **kwargs)
for item in _INPUT:
element = DotDict(item).get(path, **kwargs)
for i in utils.gen_items(element):
yield {'content': i}
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break | An operator extracts select sub-elements from a feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {'path': {'value': <element path>}}
Yields
------
_OUTPUT : items | Below is the instruction that describes the task:
### Input:
An operator extracts select sub-elements from a feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {'path': {'value': <element path>}}
Yields
------
_OUTPUT : items
### Response:
def pipe_subelement(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator extracts select sub-elements from a feed. Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {'path': {'value': <element path>}}
Yields
------
_OUTPUT : items
"""
path = DotDict(conf).get('path', **kwargs)
for item in _INPUT:
element = DotDict(item).get(path, **kwargs)
for i in utils.gen_items(element):
yield {'content': i}
if item.get('forever'):
# _INPUT is pipeforever and not a loop,
# so we just yield our item once
break |
def get_target_forums_for_moved_topics(self, user):
""" Returns a list of forums in which the considered user can add topics that have been
moved from another forum.
"""
return [f for f in self._get_forums_for_user(user, ['can_move_topics', ]) if f.is_forum] | Returns a list of forums in which the considered user can add topics that have been
moved from another forum. | Below is the instruction that describes the task:
### Input:
Returns a list of forums in which the considered user can add topics that have been
moved from another forum.
### Response:
def get_target_forums_for_moved_topics(self, user):
""" Returns a list of forums in which the considered user can add topics that have been
moved from another forum.
"""
return [f for f in self._get_forums_for_user(user, ['can_move_topics', ]) if f.is_forum] |
def import_split(import_name):
""" takes a dotted string path and returns the components:
import_split('path') == 'path', None, None
import_split('path.part.object') == 'path.part', 'object', None
import_split('path.part:object') == 'path.part', 'object', None
import_split('path.part:object.attribute')
== 'path.part', 'object', 'attribute'
"""
obj = None
attr = None
if ':' in import_name:
module, obj = import_name.split(':', 1)
if '.' in obj:
obj, attr = obj.rsplit('.', 1)
elif '.' in import_name:
module, obj = import_name.rsplit('.', 1)
else:
module = import_name
return module, obj, attr | takes a dotted string path and returns the components:
import_split('path') == 'path', None, None
import_split('path.part.object') == 'path.part', 'object', None
import_split('path.part:object') == 'path.part', 'object', None
import_split('path.part:object.attribute')
== 'path.part', 'object', 'attribute' | Below is the instruction that describes the task:
### Input:
takes a dotted string path and returns the components:
import_split('path') == 'path', None, None
import_split('path.part.object') == 'path.part', 'object', None
import_split('path.part:object') == 'path.part', 'object', None
import_split('path.part:object.attribute')
== 'path.part', 'object', 'attribute'
### Response:
def import_split(import_name):
""" takes a dotted string path and returns the components:
import_split('path') == 'path', None, None
import_split('path.part.object') == 'path.part', 'object', None
import_split('path.part:object') == 'path.part', 'object', None
import_split('path.part:object.attribute')
== 'path.part', 'object', 'attribute'
"""
obj = None
attr = None
if ':' in import_name:
module, obj = import_name.split(':', 1)
if '.' in obj:
obj, attr = obj.rsplit('.', 1)
elif '.' in import_name:
module, obj = import_name.rsplit('.', 1)
else:
module = import_name
return module, obj, attr |
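The docstring examples above translate directly into assertions; note that the function returns a tuple:

```python
assert import_split('path') == ('path', None, None)
assert import_split('path.part.object') == ('path.part', 'object', None)
assert import_split('path.part:object') == ('path.part', 'object', None)
assert import_split('path.part:object.attribute') == ('path.part', 'object', 'attribute')
```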
def makescacoldesc(columnname, value,
datamanagertype='',
datamanagergroup='',
options=0, maxlen=0, comment='',
valuetype='', keywords={}):
"""Create description of a scalar column.
A description for a scalar column can be created from a name for
the column and a data value, which is used only to determine the
type of the column. Note that a dict value is also possible.
It is possible to create the column description in more detail
by giving the data manager name, group, option, and comment as well.
The data manager type tells which data manager (storage manager)
is used to store the columns. The data manager type and group are
explained in more detail in the `casacore Tables
<../../casacore/doc/html/group__Tables__module.html>`_ documentation.
It returns a dict with fields `name` and `desc` which can thereafter be used
to build a table description using function :func:`maketabdesc`.
`columnname`
Name of column
`value`
Example data value used to determine the column's data type.
It is only used if argument `valuetype` is not given.
`datamanagertype`
Type of data manager which can be one of StandardStMan (default)
or IncrementalStMan. The latter one can save disk space if many subsequent
cells in the column will have the same value.
`datamanagergroup`
Data manager group. Only for the expert user.
`options`
Options. Need not be filled in.
`maxlen`
Maximum length of string values in a column.
Default 0 means unlimited.
`comment`
Comment: informational for user.
`valuetype`
A string giving the column's data type. Possible data types are
bool (or boolean), uchar (or byte), short, int (or integer), uint,
float, double, complex, dcomplex, and string.
'keywords'
A dict defining initial keywords for the column.
For example::
scd1 = makescacoldesc("col2", "")
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
td = maketabdesc([scd1, scd2])
This creates a table description consisting of an integer column `col1`,
and a string column `col2`. `col1` uses the IncrementalStMan storage manager,
while `col2` uses the default storage manager StandardStMan.
"""
vtype = valuetype
if vtype == '':
vtype = _value_type_name(value)
rec2 = {'valueType': vtype,
'dataManagerType': datamanagertype,
'dataManagerGroup': datamanagergroup,
'option': options,
'maxlen': maxlen,
'comment': comment,
'keywords': keywords}
return {'name': columnname,
'desc': rec2} | Create description of a scalar column.
A description for a scalar column can be created from a name for
the column and a data value, which is used only to determine the
type of the column. Note that a dict value is also possible.
It is possible to create the column description in more detail
by giving the data manager name, group, option, and comment as well.
The data manager type tells which data manager (storage manager)
is used to store the columns. The data manager type and group are
explained in more detail in the `casacore Tables
<../../casacore/doc/html/group__Tables__module.html>`_ documentation.
It returns a dict with fields `name` and `desc` which can thereafter be used
to build a table description using function :func:`maketabdesc`.
`columnname`
Name of column
`value`
Example data value used to determine the column's data type.
It is only used if argument `valuetype` is not given.
`datamanagertype`
Type of data manager which can be one of StandardStMan (default)
or IncrementalStMan. The latter one can save disk space if many subsequent
cells in the column will have the same value.
`datamanagergroup`
Data manager group. Only for the expert user.
`options`
Options. Need not be filled in.
`maxlen`
Maximum length of string values in a column.
Default 0 means unlimited.
`comment`
Comment: informational for user.
`valuetype`
A string giving the column's data type. Possible data types are
bool (or boolean), uchar (or byte), short, int (or integer), uint,
float, double, complex, dcomplex, and string.
'keywords'
A dict defining initial keywords for the column.
For example::
scd1 = makescacoldesc("col2", "")
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
td = maketabdesc([scd1, scd2])
This creates a table description consisting of an integer column `col1`,
and a string column `col2`. `col1` uses the IncrementalStMan storage manager,
while `col2` uses the default storage manager StandardStMan. | Below is the instruction that describes the task:
### Input:
Create description of a scalar column.
A description for a scalar column can be created from a name for
the column and a data value, which is used only to determine the
type of the column. Note that a dict value is also possible.
It is possible to create the column description in more detail
by giving the data manager name, group, option, and comment as well.
The data manager type tells which data manager (storage manager)
is used to store the columns. The data manager type and group are
explained in more detail in the `casacore Tables
<../../casacore/doc/html/group__Tables__module.html>`_ documentation.
It returns a dict with fields `name` and `desc` which can thereafter be used
to build a table description using function :func:`maketabdesc`.
`columnname`
Name of column
`value`
Example data value used to determine the column's data type.
It is only used if argument `valuetype` is not given.
`datamanagertype`
Type of data manager which can be one of StandardStMan (default)
or IncrementalStMan. The latter one can save disk space if many subsequent
cells in the column will have the same value.
`datamanagergroup`
Data manager group. Only for the expert user.
`options`
Options. Need not be filled in.
`maxlen`
Maximum length of string values in a column.
Default 0 means unlimited.
`comment`
Comment: informational for user.
`valuetype`
A string giving the column's data type. Possible data types are
bool (or boolean), uchar (or byte), short, int (or integer), uint,
float, double, complex, dcomplex, and string.
'keywords'
A dict defining initial keywords for the column.
For example::
scd1 = makescacoldesc("col2", "")
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
td = maketabdesc([scd1, scd2])
This creates a table description consisting of an integer column `col1`,
and a string column `col2`. `col1` uses the IncrementalStMan storage manager,
while `col2` uses the default storage manager StandardStMan.
### Response:
def makescacoldesc(columnname, value,
datamanagertype='',
datamanagergroup='',
options=0, maxlen=0, comment='',
valuetype='', keywords={}):
"""Create description of a scalar column.
A description for a scalar column can be created from a name for
the column and a data value, which is used only to determine the
type of the column. Note that a dict value is also possible.
It is possible to create the column description in more detail
by giving the data manager name, group, option, and comment as well.
The data manager type tells which data manager (storage manager)
is used to store the columns. The data manager type and group are
explained in more detail in the `casacore Tables
<../../casacore/doc/html/group__Tables__module.html>`_ documentation.
It returns a dict with fields `name` and `desc` which can thereafter be used
to build a table description using function :func:`maketabdesc`.
`columnname`
Name of column
`value`
Example data value used to determine the column's data type.
It is only used if argument `valuetype` is not given.
`datamanagertype`
Type of data manager which can be one of StandardStMan (default)
or IncrementalStMan. The latter one can save disk space if many subsequent
cells in the column will have the same value.
`datamanagergroup`
Data manager group. Only for the expert user.
`options`
Options. Need not be filled in.
`maxlen`
Maximum length of string values in a column.
Default 0 means unlimited.
`comment`
Comment: informational for user.
`valuetype`
A string giving the column's data type. Possible data types are
bool (or boolean), uchar (or byte), short, int (or integer), uint,
float, double, complex, dcomplex, and string.
'keywords'
A dict defining initial keywords for the column.
For example::
scd1 = makescacoldesc("col2", "")
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
td = maketabdesc([scd1, scd2])
This creates a table description consisting of an integer column `col1`,
and a string column `col2`. `col1` uses the IncrementalStMan storage manager,
while `col2` uses the default storage manager StandardStMan.
"""
vtype = valuetype
if vtype == '':
vtype = _value_type_name(value)
rec2 = {'valueType': vtype,
'dataManagerType': datamanagertype,
'dataManagerGroup': datamanagergroup,
'option': options,
'maxlen': maxlen,
'comment': comment,
'keywords': keywords}
return {'name': columnname,
'desc': rec2} |
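The docstring's own example, with its stray closing parenthesis removed, runs as-is once `python-casacore` is available; a minimal sketch:

```python
from casacore.tables import makescacoldesc, maketabdesc  # assumes python-casacore is installed

scd1 = makescacoldesc("col2", "")                      # string column
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")   # integer column
td = maketabdesc([scd1, scd2])                         # table description covering both columns
```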
def from_elements(cls, items=None):
"""Create a :class:`Dict` of constants from a live dictionary.
:param items: The items to store in the node.
:type items: dict
:returns: The created dictionary node.
:rtype: Dict
"""
node = cls()
if items is None:
node.items = []
else:
node.items = [
(const_factory(k), const_factory(v) if _is_const(v) else v)
for k, v in items.items()
# The keys need to be constants
if _is_const(k)
]
return node | Create a :class:`Dict` of constants from a live dictionary.
:param items: The items to store in the node.
:type items: dict
:returns: The created dictionary node.
:rtype: Dict | Below is the instruction that describes the task:
### Input:
Create a :class:`Dict` of constants from a live dictionary.
:param items: The items to store in the node.
:type items: dict
:returns: The created dictionary node.
:rtype: Dict
### Response:
def from_elements(cls, items=None):
"""Create a :class:`Dict` of constants from a live dictionary.
:param items: The items to store in the node.
:type items: dict
:returns: The created dictionary node.
:rtype: Dict
"""
node = cls()
if items is None:
node.items = []
else:
node.items = [
(const_factory(k), const_factory(v) if _is_const(v) else v)
for k, v in items.items()
# The keys need to be constants
if _is_const(k)
]
return node |
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds', 'nanoseconds']
hasnans = self._hasnans
if hasnans:
def f(x):
if isna(x):
return [np.nan] * len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([f(x) for x in self], columns=columns)
if not hasnans:
result = result.astype('int64')
return result | Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame | Below is the instruction that describes the task:
### Input:
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
### Response:
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds', 'nanoseconds']
hasnans = self._hasnans
if hasnans:
def f(x):
if isna(x):
return [np.nan] * len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([f(x) for x in self], columns=columns)
if not hasnans:
result = result.astype('int64')
return result |
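A quick pandas example of the accessor described above, applied to a small `TimedeltaIndex`:

```python
import pandas as pd

tdi = pd.to_timedelta(['1 days 02:30:45', '0 days 03:10:00'])
print(tdi.components)
#    days  hours  minutes  seconds  milliseconds  microseconds  nanoseconds
# 0     1      2       30       45             0             0            0
# 1     0      3       10        0             0             0            0
```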
def idxmin(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
axis : int, default 0
For compatibility with DataFrame.idxmin. Redundant for application
on Series.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with NumPy.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
"""
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
i = nanops.nanargmin(com.values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i] | Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
axis : int, default 0
For compatibility with DataFrame.idxmin. Redundant for application
on Series.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with NumPy.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan | Below is the instruction that describes the task:
### Input:
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
axis : int, default 0
For compatibility with DataFrame.idxmin. Redundant for application
on Series.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with NumPy.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
### Response:
def idxmin(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
axis : int, default 0
For compatibility with DataFrame.idxmin. Redundant for application
on Series.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with NumPy.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
"""
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
i = nanops.nanargmin(com.values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i] |
def __field_to_subfields(self, field):
"""Fully describes data represented by field, including the nested case.
In the case that the field is not a message field, we have no fields nested
within a message definition, so we can simply return that field. However, in
the nested case, we can't simply describe the data with one field or even
with one chain of fields.
For example, if we have a message field
m_field = messages.MessageField(RefClass, 1)
which references a class with two fields:
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.IntegerField(2)
then we would need to include both one and two to represent all the
data contained.
Calling __field_to_subfields(m_field) would return:
[
[<MessageField "m_field">, <StringField "one">],
[<MessageField "m_field">, <StringField "two">],
]
If the second field was instead a message field
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.MessageField(OtherRefClass, 2)
referencing another class with two fields
class OtherRefClass(messages.Message):
three = messages.BooleanField(1)
four = messages.FloatField(2)
then we would need to recurse one level deeper for two.
With this change, calling __field_to_subfields(m_field) would return:
[
[<MessageField "m_field">, <StringField "one">],
[<MessageField "m_field">, <StringField "two">, <StringField "three">],
[<MessageField "m_field">, <StringField "two">, <StringField "four">],
]
Args:
field: An instance of a subclass of messages.Field.
Returns:
A list of lists, where each sublist is a list of fields.
"""
# Termination condition
if not isinstance(field, messages.MessageField):
return [[field]]
result = []
for subfield in sorted(field.message_type.all_fields(),
key=lambda f: f.number):
subfield_results = self.__field_to_subfields(subfield)
for subfields_list in subfield_results:
subfields_list.insert(0, field)
result.append(subfields_list)
return result | Fully describes data represented by field, including the nested case.
In the case that the field is not a message field, we have no fields nested
within a message definition, so we can simply return that field. However, in
the nested case, we can't simply describe the data with one field or even
with one chain of fields.
For example, if we have a message field
m_field = messages.MessageField(RefClass, 1)
which references a class with two fields:
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.IntegerField(2)
then we would need to include both one and two to represent all the
data contained.
Calling __field_to_subfields(m_field) would return:
[
[<MessageField "m_field">, <StringField "one">],
[<MessageField "m_field">, <StringField "two">],
]
If the second field was instead a message field
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.MessageField(OtherRefClass, 2)
referencing another class with two fields
class OtherRefClass(messages.Message):
three = messages.BooleanField(1)
four = messages.FloatField(2)
then we would need to recurse one level deeper for two.
With this change, calling __field_to_subfields(m_field) would return:
[
[<MessageField "m_field">, <StringField "one">],
[<MessageField "m_field">, <StringField "two">, <StringField "three">],
[<MessageField "m_field">, <StringField "two">, <StringField "four">],
]
Args:
field: An instance of a subclass of messages.Field.
Returns:
A list of lists, where each sublist is a list of fields. | Below is the instruction that describes the task:
### Input:
Fully describes data represented by field, including the nested case.
In the case that the field is not a message field, we have no fields nested
within a message definition, so we can simply return that field. However, in
the nested case, we can't simply describe the data with one field or even
with one chain of fields.
For example, if we have a message field
m_field = messages.MessageField(RefClass, 1)
which references a class with two fields:
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.IntegerField(2)
then we would need to include both one and two to represent all the
data contained.
Calling __field_to_subfields(m_field) would return:
[
[<MessageField "m_field">, <StringField "one">],
[<MessageField "m_field">, <StringField "two">],
]
If the second field was instead a message field
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.MessageField(OtherRefClass, 2)
referencing another class with two fields
class OtherRefClass(messages.Message):
three = messages.BooleanField(1)
four = messages.FloatField(2)
then we would need to recurse one level deeper for two.
With this change, calling __field_to_subfields(m_field) would return:
[
[<MessageField "m_field">, <StringField "one">],
[<MessageField "m_field">, <StringField "two">, <StringField "three">],
[<MessageField "m_field">, <StringField "two">, <StringField "four">],
]
Args:
field: An instance of a subclass of messages.Field.
Returns:
A list of lists, where each sublist is a list of fields.
### Response:
def __field_to_subfields(self, field):
"""Fully describes data represented by field, including the nested case.
In the case that the field is not a message field, we have no fields nested
within a message definition, so we can simply return that field. However, in
the nested case, we can't simply describe the data with one field or even
with one chain of fields.
For example, if we have a message field
m_field = messages.MessageField(RefClass, 1)
which references a class with two fields:
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.IntegerField(2)
then we would need to include both one and two to represent all the
data contained.
Calling __field_to_subfields(m_field) would return:
[
[<MessageField "m_field">, <StringField "one">],
[<MessageField "m_field">, <StringField "two">],
]
If the second field was instead a message field
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.MessageField(OtherRefClass, 2)
referencing another class with two fields
class OtherRefClass(messages.Message):
three = messages.BooleanField(1)
four = messages.FloatField(2)
then we would need to recurse one level deeper for two.
With this change, calling __field_to_subfields(m_field) would return:
[
[<MessageField "m_field">, <StringField "one">],
[<MessageField "m_field">, <StringField "two">, <StringField "three">],
[<MessageField "m_field">, <StringField "two">, <StringField "four">],
]
Args:
field: An instance of a subclass of messages.Field.
Returns:
A list of lists, where each sublist is a list of fields.
"""
# Termination condition
if not isinstance(field, messages.MessageField):
return [[field]]
result = []
for subfield in sorted(field.message_type.all_fields(),
key=lambda f: f.number):
subfield_results = self.__field_to_subfields(subfield)
for subfields_list in subfield_results:
subfields_list.insert(0, field)
result.append(subfields_list)
return result |
def list(self, **params):
"""
Retrieve text messages
Returns Text Messages, according to the parameters provided
:calls: ``get /text_messages``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attribute-style access, which represent a collection of TextMessages.
:rtype: list
"""
_, _, text_messages = self.http_client.get("/text_messages", params=params)
return text_messages | Retrieve text messages
Returns Text Messages, according to the parameters provided
:calls: ``get /text_messages``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attribute-style access, which represent a collection of TextMessages.
:rtype: list | Below is the instruction that describes the task:
### Input:
Retrieve text messages
Returns Text Messages, according to the parameters provided
:calls: ``get /text_messages``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attribute-style access, which represent a collection of TextMessages.
:rtype: list
### Response:
def list(self, **params):
"""
Retrieve text messages
Returns Text Messages, according to the parameters provided
:calls: ``get /text_messages``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attriubte-style access, which represent collection of TextMessages.
:rtype: list
"""
_, _, text_messages = self.http_client.get("/text_messages", params=params)
return text_messages |
def add_point_region(self, y: float, x: float) -> Graphic:
"""Add a point graphic to the data item.
:param x: The x coordinate, in relative units [0.0, 1.0]
:param y: The y coordinate, in relative units [0.0, 1.0]
:return: The :py:class:`nion.swift.Facade.Graphic` object that was added.
.. versionadded:: 1.0
Scriptable: Yes
"""
graphic = Graphics.PointGraphic()
graphic.position = Geometry.FloatPoint(y, x)
self.__display_item.add_graphic(graphic)
return Graphic(graphic) | Add a point graphic to the data item.
:param x: The x coordinate, in relative units [0.0, 1.0]
:param y: The y coordinate, in relative units [0.0, 1.0]
:return: The :py:class:`nion.swift.Facade.Graphic` object that was added.
.. versionadded:: 1.0
Scriptable: Yes | Below is the instruction that describes the task:
### Input:
Add a point graphic to the data item.
:param x: The x coordinate, in relative units [0.0, 1.0]
:param y: The y coordinate, in relative units [0.0, 1.0]
:return: The :py:class:`nion.swift.Facade.Graphic` object that was added.
.. versionadded:: 1.0
Scriptable: Yes
### Response:
def add_point_region(self, y: float, x: float) -> Graphic:
"""Add a point graphic to the data item.
:param x: The x coordinate, in relative units [0.0, 1.0]
:param y: The y coordinate, in relative units [0.0, 1.0]
:return: The :py:class:`nion.swift.Facade.Graphic` object that was added.
.. versionadded:: 1.0
Scriptable: Yes
"""
graphic = Graphics.PointGraphic()
graphic.position = Geometry.FloatPoint(y, x)
self.__display_item.add_graphic(graphic)
return Graphic(graphic) |
def getTJstr(text, glyphs, simple, ordering):
""" Return a PDF string enclosed in [] brackets, suitable for the PDF TJ
operator.
Notes:
The input string is converted to either 2 or 4 hex digits per character.
Args:
simple: no glyphs: 2-chars, use char codes as the glyph
glyphs: 2-chars, use glyphs instead of char codes (Symbol,
ZapfDingbats)
not simple: ordering < 0: 4-chars, use glyphs not char codes
ordering >=0: a CJK font! 4 chars, use char codes as glyphs
"""
if text.startswith("[<") and text.endswith(">]"): # already done
return text
if not bool(text):
return "[<>]"
if simple:
if glyphs is None: # simple and not Symbol / ZapfDingbats
otxt = "".join([hex(ord(c))[2:].rjust(2, "0") if ord(c)<256 else "b7" for c in text])
else: # Symbol or ZapfDingbats
otxt = "".join([hex(glyphs[ord(c)][0])[2:].rjust(2, "0") if ord(c)<256 else "b7" for c in text])
return "[<" + otxt + ">]"
if ordering < 0: # not a CJK font: use the glyphs
otxt = "".join([hex(glyphs[ord(c)][0])[2:].rjust(4, "0") for c in text])
else: # CJK: use char codes, no glyphs
otxt = "".join([hex(ord(c))[2:].rjust(4, "0") for c in text])
return "[<" + otxt + ">]" | Return a PDF string enclosed in [] brackets, suitable for the PDF TJ
operator.
Notes:
The input string is converted to either 2 or 4 hex digits per character.
Args:
simple: no glyphs: 2-chars, use char codes as the glyph
glyphs: 2-chars, use glyphs instead of char codes (Symbol,
ZapfDingbats)
not simple: ordering < 0: 4-chars, use glyphs not char codes
ordering >=0: a CJK font! 4 chars, use char codes as glyphs | Below is the instruction that describes the task:
### Input:
Return a PDF string enclosed in [] brackets, suitable for the PDF TJ
operator.
Notes:
The input string is converted to either 2 or 4 hex digits per character.
Args:
simple: no glyphs: 2-chars, use char codes as the glyph
glyphs: 2-chars, use glyphs instead of char codes (Symbol,
ZapfDingbats)
not simple: ordering < 0: 4-chars, use glyphs not char codes
ordering >=0: a CJK font! 4 chars, use char codes as glyphs
### Response:
def getTJstr(text, glyphs, simple, ordering):
""" Return a PDF string enclosed in [] brackets, suitable for the PDF TJ
operator.
Notes:
The input string is converted to either 2 or 4 hex digits per character.
Args:
simple: no glyphs: 2-chars, use char codes as the glyph
glyphs: 2-chars, use glyphs instead of char codes (Symbol,
ZapfDingbats)
not simple: ordering < 0: 4-chars, use glyphs not char codes
ordering >=0: a CJK font! 4 chars, use char codes as glyphs
"""
if text.startswith("[<") and text.endswith(">]"): # already done
return text
if not bool(text):
return "[<>]"
if simple:
if glyphs is None: # simple and not Symbol / ZapfDingbats
otxt = "".join([hex(ord(c))[2:].rjust(2, "0") if ord(c)<256 else "b7" for c in text])
else: # Symbol or ZapfDingbats
otxt = "".join([hex(glyphs[ord(c)][0])[2:].rjust(2, "0") if ord(c)<256 else "b7" for c in text])
return "[<" + otxt + ">]"
if ordering < 0: # not a CJK font: use the glyphs
otxt = "".join([hex(glyphs[ord(c)][0])[2:].rjust(4, "0") for c in text])
else: # CJK: use char codes, no glyphs
otxt = "".join([hex(ord(c))[2:].rjust(4, "0") for c in text])
return "[<" + otxt + ">]" |
def clone(self, screen, scene):
"""
Create a clone of this Dialog into a new Screen.
:param screen: The new Screen object to clone into.
:param scene: The new Scene object to clone into.
"""
# Only clone the object if the function is safe to do so.
if self._on_close is None or isfunction(self._on_close):
scene.add_effect(PopUpDialog(screen, self._text, self._buttons, self._on_close)) | Create a clone of this Dialog into a new Screen.
:param screen: The new Screen object to clone into.
:param scene: The new Scene object to clone into. | Below is the instruction that describes the task:
### Input:
Create a clone of this Dialog into a new Screen.
:param screen: The new Screen object to clone into.
:param scene: The new Scene object to clone into.
### Response:
def clone(self, screen, scene):
"""
Create a clone of this Dialog into a new Screen.
:param screen: The new Screen object to clone into.
:param scene: The new Scene object to clone into.
"""
# Only clone the object if the function is safe to do so.
if self._on_close is None or isfunction(self._on_close):
scene.add_effect(PopUpDialog(screen, self._text, self._buttons, self._on_close)) |
def set_base_headers(self, hdr):
"""Set metadata in FITS headers."""
hdr['NUMXVER'] = (__version__, 'Numina package version')
hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name')
hdr['NUMRVER'] = (self.__version__, 'Numina recipe version')
return hdr | Set metadata in FITS headers. | Below is the instruction that describes the task:
### Input:
Set metadata in FITS headers.
### Response:
def set_base_headers(self, hdr):
"""Set metadata in FITS headers."""
hdr['NUMXVER'] = (__version__, 'Numina package version')
hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name')
hdr['NUMRVER'] = (self.__version__, 'Numina recipe version')
return hdr |
def clear(self):
"""Clear all work items from the session.
This removes any associated results as well.
"""
with self._conn:
self._conn.execute('DELETE FROM results')
self._conn.execute('DELETE FROM work_items') | Clear all work items from the session.
This removes any associated results as well. | Below is the instruction that describes the task:
### Input:
Clear all work items from the session.
This removes any associated results as well.
### Response:
def clear(self):
"""Clear all work items from the session.
This removes any associated results as well.
"""
with self._conn:
self._conn.execute('DELETE FROM results')
self._conn.execute('DELETE FROM work_items') |
def p_intermfluent_def(self, p):
'''intermfluent_def : IDENT LPAREN param_list RPAREN COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI
| IDENT COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI'''
if len(p) == 16:
p[0] = PVariable(name=p[1], fluent_type='interm-fluent', range_type=p[9], param_types=p[3], level=p[13])
else:
p[0] = PVariable(name=p[1], fluent_type='interm-fluent', range_type=p[6], level=p[10]) | intermfluent_def : IDENT LPAREN param_list RPAREN COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI
| IDENT COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI | Below is the instruction that describes the task:
### Input:
intermfluent_def : IDENT LPAREN param_list RPAREN COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI
| IDENT COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI
### Response:
def p_intermfluent_def(self, p):
'''intermfluent_def : IDENT LPAREN param_list RPAREN COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI
| IDENT COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI'''
if len(p) == 16:
p[0] = PVariable(name=p[1], fluent_type='interm-fluent', range_type=p[9], param_types=p[3], level=p[13])
else:
p[0] = PVariable(name=p[1], fluent_type='interm-fluent', range_type=p[6], level=p[10]) |
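For orientation, the two alternatives of this grammar rule correspond to RDDL declarations shaped like the ones below (the fluent names are made up for illustration):

# parameterized form: IDENT LPAREN param_list RPAREN COLON LCURLY ... RCURLY SEMI
#   pressure(?z) : { interm-fluent, real, level = 1 };
# parameterless form: IDENT COLON LCURLY ... RCURLY SEMI
#   total-demand : { interm-fluent, int, level = 2 };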
def authenticate_device(self, api_token, device_token, email=None,
user_url=None, override=False, fetch=True):
"""Set credentials for Device authentication.
Args:
api_token (str): Token issued to your Application through the Gem
Developer Console.
device_token (str): Physical device identifier. You will receive this
from a user.devices.create call or from users.create.
email (str, optional): User's email address, required if user_url is
not provided.
user_url (str, optional): User's Gem url.
override (boolean, optional): Replace existing Application credentials.
fetch (boolean, optional): Return the authenticated User.
Returns:
A User object if `fetch` is True.
"""
if (self.context.has_auth_params('Gem-Device') and not override):
raise OverrideError('Gem-Device')
if (not api_token or
not device_token or
(not email and not user_url) or
not self.context.authorize('Gem-Device',
api_token=api_token,
user_email=email,
user_url=user_url,
device_token=device_token)):
raise AuthUsageError(self.context, 'Gem-Device')
if fetch:
user = self.user(email) if email else self.user()
return user.refresh()
else:
return True | Set credentials for Device authentication.
Args:
api_token (str): Token issued to your Application through the Gem
Developer Console.
device_token (str): Physical device identifier. You will receive this
from a user.devices.create call or from users.create.
email (str, optional): User's email address, required if user_url is
not provided.
user_url (str, optional): User's Gem url.
override (boolean, optional): Replace existing Application credentials.
fetch (boolean, optional): Return the authenticated User.
Returns:
A User object if `fetch` is True. | Below is the instruction that describes the task:
### Input:
Set credentials for Device authentication.
Args:
api_token (str): Token issued to your Application through the Gem
Developer Console.
device_token (str): Physical device identifier. You will receive this
from a user.devices.create call or from users.create.
email (str, optional): User's email address, required if user_url is
not provided.
user_url (str, optional): User's Gem url.
override (boolean, optional): Replace existing Application credentials.
fetch (boolean, optional): Return the authenticated User.
Returns:
A User object if `fetch` is True.
### Response:
def authenticate_device(self, api_token, device_token, email=None,
user_url=None, override=False, fetch=True):
"""Set credentials for Device authentication.
Args:
api_token (str): Token issued to your Application through the Gem
Developer Console.
device_token (str): Physical device identifier. You will receive this
from a user.devices.create call or from users.create.
email (str, optional): User's email address, required if user_url is
not provided.
user_url (str, optional): User's Gem url.
override (boolean, optional): Replace existing Application credentials.
fetch (boolean, optional): Return the authenticated User.
Returns:
A User object if `fetch` is True.
"""
if (self.context.has_auth_params('Gem-Device') and not override):
raise OverrideError('Gem-Device')
if (not api_token or
not device_token or
(not email and not user_url) or
not self.context.authorize('Gem-Device',
api_token=api_token,
user_email=email,
user_url=user_url,
device_token=device_token)):
raise AuthUsageError(self.context, 'Gem-Device')
if fetch:
user = self.user(email) if email else self.user()
return user.refresh()
else:
return True |
def circular_shift(X):
"""Shifts circularly the X squre matrix in order to get a
time-lag matrix."""
N = X.shape[0]
L = np.zeros(X.shape)
for i in range(N):
L[i, :] = np.asarray([X[(i + j) % N, j] for j in range(N)])
return L | Circularly shifts the X square matrix in order to get a
time-lag matrix. | Below is the instruction that describes the task:
### Input:
Circularly shifts the X square matrix in order to get a
time-lag matrix.
### Response:
def circular_shift(X):
"""Shifts circularly the X squre matrix in order to get a
time-lag matrix."""
N = X.shape[0]
L = np.zeros(X.shape)
for i in range(N):
L[i, :] = np.asarray([X[(i + j) % N, j] for j in range(N)])
return L |
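A small worked example of the function above, assuming numpy is available:

import numpy as np

X = np.array([[1, 2],
              [3, 4]])
# row i of the lag matrix reads X along a diagonal that wraps around modulo N
print(circular_shift(X))
# [[1. 4.]
#  [3. 2.]]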
def after_serving(self, func: Callable) -> Callable:
"""Add a after serving function.
This will allow the function provided to be called once after
anything is served (after last byte is sent).
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.after_serving
def func():
...
Arguments:
func: The function itself.
"""
handler = ensure_coroutine(func)
self.after_serving_funcs.append(handler)
return func | Add an after serving function.
This will allow the function provided to be called once after
anything is served (after the last byte is sent).
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.after_serving
def func():
...
Arguments:
func: The function itself. | Below is the instruction that describes the task:
### Input:
Add an after serving function.
This will allow the function provided to be called once after
anything is served (after the last byte is sent).
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.after_serving
def func():
...
Arguments:
func: The function itself.
### Response:
def after_serving(self, func: Callable) -> Callable:
"""Add a after serving function.
This will allow the function provided to be called once after
anything is served (after last byte is sent).
This is designed to be used as a decorator. An example usage,
.. code-block:: python
@app.after_serving
def func():
...
Arguments:
func: The function itself.
"""
handler = ensure_coroutine(func)
self.after_serving_funcs.append(handler)
return func |
def file_size(self):
""":return: size of file we manager"""
if self._file_size is None:
if isinstance(self._path_or_fd, string_types()):
self._file_size = os.stat(self._path_or_fd).st_size
else:
self._file_size = os.fstat(self._path_or_fd).st_size
# END handle path type
# END update file size
return self._file_size | :return: size of the file we manage | Below is the instruction that describes the task:
### Input:
:return: size of the file we manage
### Response:
def file_size(self):
""":return: size of file we manager"""
if self._file_size is None:
if isinstance(self._path_or_fd, string_types()):
self._file_size = os.stat(self._path_or_fd).st_size
else:
self._file_size = os.fstat(self._path_or_fd).st_size
# END handle path type
# END update file size
return self._file_size |
def iterkeys(obj):
"Get key iterator from dictionary for Python 2 and 3"
return iter(obj.keys()) if sys.version_info.major == 3 else obj.iterkeys() | Get key iterator from dictionary for Python 2 and 3 | Below is the instruction that describes the task:
### Input:
Get key iterator from dictionary for Python 2 and 3
### Response:
def iterkeys(obj):
"Get key iterator from dictionary for Python 2 and 3"
return iter(obj.keys()) if sys.version_info.major == 3 else obj.iterkeys() |
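A minimal check of the helper above (the sys import that iterkeys relies on is included so the snippet stands alone):

import sys

d = {"alpha": 1, "beta": 2}
print(sorted(iterkeys(d)))   # ['alpha', 'beta'] on both Python 2 and 3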
def dump(self):
"""Save analytics report to a temporary file.
Returns:
str: path to the temporary file that contains the analytics report.
"""
import tempfile
with tempfile.NamedTemporaryFile(delete=False, mode="w") as fobj:
json.dump(self.info, fobj)
return fobj.name | Save analytics report to a temporary file.
Returns:
str: path to the temporary file that contains the analytics report. | Below is the instruction that describes the task:
### Input:
Save analytics report to a temporary file.
Returns:
str: path to the temporary file that contains the analytics report.
### Response:
def dump(self):
"""Save analytics report to a temporary file.
Returns:
str: path to the temporary file that contains the analytics report.
"""
import tempfile
with tempfile.NamedTemporaryFile(delete=False, mode="w") as fobj:
json.dump(self.info, fobj)
return fobj.name |
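The same idea as a standalone sketch; the plain dict standing in for self.info is an assumption about its shape:

import json
import tempfile

info = {"cmd": "repro", "exit_code": 0}      # stand-in for self.info
with tempfile.NamedTemporaryFile(delete=False, mode="w") as fobj:
    json.dump(info, fobj)
print(fobj.name)   # path of the temporary JSON report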
def flush(self):
"""
Wait until history is read but no more than 10 cycles
in case a browser session is closed.
"""
i = 0
while self._frame_data.is_dirty and i < 10:
i += 1
time.sleep(0.1) | Wait until history is read but no more than 10 cycles
in case a browser session is closed. | Below is the instruction that describes the task:
### Input:
Wait until history is read but no more than 10 cycles
in case a browser session is closed.
### Response:
def flush(self):
"""
Wait until history is read but no more than 10 cycles
in case a browser session is closed.
"""
i = 0
while self._frame_data.is_dirty and i < 10:
i += 1
time.sleep(0.1) |
def _clean_suffix(string, suffix):
"""
If string endswith the suffix, remove it. Else leave it alone.
"""
suffix_len = len(suffix)
if len(string) < suffix_len:
# the string param was shorter than the suffix
raise ValueError("A suffix can not be bigger than string argument.")
if string.endswith(suffix):
# return from the beginning up to
# but not including the first letter
# in the suffix
return string[0:-suffix_len]
else:
# leave unharmed
return string | If string endswith the suffix, remove it. Else leave it alone. | Below is the instruction that describes the task:
### Input:
If string endswith the suffix, remove it. Else leave it alone.
### Response:
def _clean_suffix(string, suffix):
"""
If string endswith the suffix, remove it. Else leave it alone.
"""
suffix_len = len(suffix)
if len(string) < suffix_len:
# the string param was shorter than the suffix
raise ValueError("A suffix can not be bigger than string argument.")
if string.endswith(suffix):
# return from the beginning up to
# but not including the first letter
# in the suffix
return string[0:-suffix_len]
else:
# leave unharmed
return string |
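Two quick cases, assuming the _clean_suffix above is in scope:

print(_clean_suffix("report_final.csv", ".csv"))   # report_final
print(_clean_suffix("report_final.csv", ".txt"))   # report_final.csv  (left unharmed)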
async def list_batches(self, request):
"""Fetches list of batches from validator, optionally filtered by id.
Request:
query:
- head: The id of the block to use as the head of the chain
- id: Comma separated list of batch ids to include in results
Response:
data: JSON array of fully expanded Batch objects
head: The head used for this query (most recent if unspecified)
link: The link to this exact query, including head block
paging: Paging info and nav, like total resources and a next link
"""
paging_controls = self._get_paging_controls(request)
validator_query = client_batch_pb2.ClientBatchListRequest(
head_id=self._get_head_id(request),
batch_ids=self._get_filter_ids(request),
sorting=self._get_sorting_message(request, "default"),
paging=self._make_paging_message(paging_controls))
response = await self._query_validator(
Message.CLIENT_BATCH_LIST_REQUEST,
client_batch_pb2.ClientBatchListResponse,
validator_query)
return self._wrap_paginated_response(
request=request,
response=response,
controls=paging_controls,
data=[self._expand_batch(b) for b in response['batches']]) | Fetches list of batches from validator, optionally filtered by id.
Request:
query:
- head: The id of the block to use as the head of the chain
- id: Comma separated list of batch ids to include in results
Response:
data: JSON array of fully expanded Batch objects
head: The head used for this query (most recent if unspecified)
link: The link to this exact query, including head block
paging: Paging info and nav, like total resources and a next link | Below is the instruction that describes the task:
### Input:
Fetches list of batches from validator, optionally filtered by id.
Request:
query:
- head: The id of the block to use as the head of the chain
- id: Comma separated list of batch ids to include in results
Response:
data: JSON array of fully expanded Batch objects
head: The head used for this query (most recent if unspecified)
link: The link to this exact query, including head block
paging: Paging info and nav, like total resources and a next link
### Response:
async def list_batches(self, request):
"""Fetches list of batches from validator, optionally filtered by id.
Request:
query:
- head: The id of the block to use as the head of the chain
- id: Comma separated list of batch ids to include in results
Response:
data: JSON array of fully expanded Batch objects
head: The head used for this query (most recent if unspecified)
link: The link to this exact query, including head block
paging: Paging info and nav, like total resources and a next link
"""
paging_controls = self._get_paging_controls(request)
validator_query = client_batch_pb2.ClientBatchListRequest(
head_id=self._get_head_id(request),
batch_ids=self._get_filter_ids(request),
sorting=self._get_sorting_message(request, "default"),
paging=self._make_paging_message(paging_controls))
response = await self._query_validator(
Message.CLIENT_BATCH_LIST_REQUEST,
client_batch_pb2.ClientBatchListResponse,
validator_query)
return self._wrap_paginated_response(
request=request,
response=response,
controls=paging_controls,
data=[self._expand_batch(b) for b in response['batches']]) |
def supported(aln):
"""Get only the supported consensus residues in each column.
Meaning:
- Omit majority-gap columns
- Omit columns where no residue type appears more than once
- In case of a tie, return all the top-scoring residue types
(no prioritization)
Returns a *list* -- not a string! -- where elements are strings of the
consensus character(s), potentially a gap ('-') or multiple chars ('KR').
"""
def col_consensus(columns):
"""Calculate the consensus chars for an iterable of columns."""
for col in columns:
if (# Majority gap chars
(col.count('-') >= len(col)/2) or
# Lowercase cols mean "don't include in consensus"
all(c.islower() for c in col if c not in '.-')
):
yield '-'
continue
# Validation - copied from consensus() above
if any(c.islower() for c in col):
logging.warn('Mixed lowercase and uppercase letters in a '
'column: ' + ''.join(col))
col = map(str.upper, col)
# Calculate the consensus character
most_common = Counter(
[c for c in col if c not in '-']
).most_common()
if not most_common:
# XXX ever reached?
logging.warn("Column is all gaps! How did that happen?")
if most_common[0][1] == 1:
# No char has frequency > 1; no consensus char
yield '-'
elif (len(most_common) > 1 and
most_common[0][1] == most_common[1][1]):
# Tie for most-common residue type
ties = [x[0] for x in most_common
if x[1] == most_common[0][1]]
yield ''.join(ties)
else:
yield most_common[0][0]
return list(col_consensus(zip(*aln))) | Get only the supported consensus residues in each column.
Meaning:
- Omit majority-gap columns
- Omit columns where no residue type appears more than once
- In case of a tie, return all the top-scoring residue types
(no prioritization)
Returns a *list* -- not a string! -- where elements are strings of the
consensus character(s), potentially a gap ('-') or multiple chars ('KR'). | Below is the instruction that describes the task:
### Input:
Get only the supported consensus residues in each column.
Meaning:
- Omit majority-gap columns
- Omit columns where no residue type appears more than once
- In case of a tie, return all the top-scoring residue types
(no prioritization)
Returns a *list* -- not a string! -- where elements are strings of the
consensus character(s), potentially a gap ('-') or multiple chars ('KR').
### Response:
def supported(aln):
"""Get only the supported consensus residues in each column.
Meaning:
- Omit majority-gap columns
- Omit columns where no residue type appears more than once
- In case of a tie, return all the top-scoring residue types
(no prioritization)
Returns a *list* -- not a string! -- where elements are strings of the
consensus character(s), potentially a gap ('-') or multiple chars ('KR').
"""
def col_consensus(columns):
"""Calculate the consensus chars for an iterable of columns."""
for col in columns:
if (# Majority gap chars
(col.count('-') >= len(col)/2) or
# Lowercase cols mean "don't include in consensus"
all(c.islower() for c in col if c not in '.-')
):
yield '-'
continue
# Validation - copied from consensus() above
if any(c.islower() for c in col):
logging.warn('Mixed lowercase and uppercase letters in a '
'column: ' + ''.join(col))
col = map(str.upper, col)
# Calculate the consensus character
most_common = Counter(
[c for c in col if c not in '-']
).most_common()
if not most_common:
# XXX ever reached?
logging.warn("Column is all gaps! How did that happen?")
if most_common[0][1] == 1:
# No char has frequency > 1; no consensus char
yield '-'
elif (len(most_common) > 1 and
most_common[0][1] == most_common[1][1]):
# Tie for most-common residue type
ties = [x[0] for x in most_common
if x[1] == most_common[0][1]]
yield ''.join(ties)
else:
yield most_common[0][0]
return list(col_consensus(zip(*aln))) |
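A small alignment run through the function above, assuming supported and its Counter/logging imports are in scope:

aln = ["AAC-",
       "AGC-",
       "ATT-"]
print(supported(aln))
# ['A', '-', 'C', '-']  -- column 2 has no repeated residue, column 4 is majority-gap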
def get_not_num(self, seq, num=0):
'''Find the index of the first non-num element'''
ind = next((i for i, x in enumerate(seq) if x != num), None)
if ind is None:
return self.board_size
else:
return ind | Find the index of the first non-num element | Below is the instruction that describes the task:
### Input:
Find the index of the first non-num element
### Response:
def get_not_num(self, seq, num=0):
'''Find the index of the first non-num element'''
ind = next((i for i, x in enumerate(seq) if x != num), None)
if ind is None:
return self.board_size
else:
return ind |
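The core idiom, lifted out of the class for a quick look (the board_size fallback is omitted):

seq = [0, 0, 3, 0]
ind = next((i for i, x in enumerate(seq) if x != 0), None)
print(ind)   # 2 -- index of the first element that differs from num (here 0)
# an all-num sequence yields None, which the method maps to self.board_size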
def OSLibpath(self):
"""
Microsoft Windows SDK Libraries Paths
"""
ref = os.path.join(self.si.WindowsSdkDir, 'References')
libpath = []
if self.vc_ver <= 9.0:
libpath += self.OSLibraries
if self.vc_ver >= 11.0:
libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')]
if self.vc_ver >= 14.0:
libpath += [
ref,
os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'),
os.path.join(
ref,
'Windows.Foundation.UniversalApiContract',
'1.0.0.0',
),
os.path.join(
ref,
'Windows.Foundation.FoundationContract',
'1.0.0.0',
),
os.path.join(
ref,
'Windows.Networking.Connectivity.WwanContract',
'1.0.0.0',
),
os.path.join(
self.si.WindowsSdkDir,
'ExtensionSDKs',
'Microsoft.VCLibs',
'%0.1f' % self.vc_ver,
'References',
'CommonConfiguration',
'neutral',
),
]
return libpath | Microsoft Windows SDK Libraries Paths | Below is the instruction that describes the task:
### Input:
Microsoft Windows SDK Libraries Paths
### Response:
def OSLibpath(self):
"""
Microsoft Windows SDK Libraries Paths
"""
ref = os.path.join(self.si.WindowsSdkDir, 'References')
libpath = []
if self.vc_ver <= 9.0:
libpath += self.OSLibraries
if self.vc_ver >= 11.0:
libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')]
if self.vc_ver >= 14.0:
libpath += [
ref,
os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'),
os.path.join(
ref,
'Windows.Foundation.UniversalApiContract',
'1.0.0.0',
),
os.path.join(
ref,
'Windows.Foundation.FoundationContract',
'1.0.0.0',
),
os.path.join(
ref,
'Windows.Networking.Connectivity.WwanContract',
'1.0.0.0',
),
os.path.join(
self.si.WindowsSdkDir,
'ExtensionSDKs',
'Microsoft.VCLibs',
'%0.1f' % self.vc_ver,
'References',
'CommonConfiguration',
'neutral',
),
]
return libpath |
def start(self):
""" Initialize websockets, say hello, and start listening for events
"""
self.connect()
if not self.isAlive():
super(WAMPClient,self).start()
self.hello()
return self | Initialize websockets, say hello, and start listening for events | Below is the the instruction that describes the task:
### Input:
Initialize websockets, say hello, and start listening for events
### Response:
def start(self):
""" Initialize websockets, say hello, and start listening for events
"""
self.connect()
if not self.isAlive():
super(WAMPClient,self).start()
self.hello()
return self |
def set_global_provenance(wf: Workflow, registry: Registry):
"""Compute a global provenance key for the entire workflow
before evaluation. This key can be used to store and retrieve
results in a database. The key computed in this stage is different
from the (local) provenance key that can be computed for a node
if all its arguments are known.
In cases where a result derives from other results that were
computed in child workflows, we can prevent the workflow system
from reevaluating the results at each step to find that we already
had the end-result somewhere. This is where the global prov-key
comes in.
Each node is assigned a `prov` attribute. If all arguments for this
node are known, this key will be the same as the local prov-key.
If some of the arguments are still empty, we add the global prov-keys
of the dependent nodes to the hash.
In this algorithm we traverse from the bottom of the DAG to the top
and back using a stack. This allows us to compute the keys for each
node without modifying the node other than setting the `prov` attribute
with the resulting key."""
stack = [wf.root]
while stack:
i = stack.pop()
n = wf.nodes[i]
if n.prov:
continue
if is_node_ready(n):
job_msg = registry.deep_encode(n)
n.prov = prov_key(job_msg)
continue
deps = wf.inverse_links[i]
todo = [j for j in deps if not wf.nodes[j].prov]
if not todo:
link_dict = dict(links(wf, i, deps))
link_prov = registry.deep_encode(
[link_dict[arg] for arg in empty_args(n)])
job_msg = registry.deep_encode(n)
n.prov = prov_key(job_msg, link_prov)
continue
stack.append(i)
stack.extend(deps) | Compute a global provenance key for the entire workflow
before evaluation. This key can be used to store and retrieve
results in a database. The key computed in this stage is different
from the (local) provenance key that can be computed for a node
if all its arguments are known.
In cases where a result derives from other results that were
computed in child workflows, we can prevent the workflow system
from reevaluating the results at each step to find that we already
had the end-result somewhere. This is where the global prov-key
comes in.
Each node is assigned a `prov` attribute. If all arguments for this
node are known, this key will be the same as the local prov-key.
If some of the arguments are still empty, we add the global prov-keys
of the dependent nodes to the hash.
In this algorithm we traverse from the bottom of the DAG to the top
and back using a stack. This allows us to compute the keys for each
node without modifying the node other than setting the `prov` attribute
with the resulting key. | Below is the instruction that describes the task:
### Input:
Compute a global provenance key for the entire workflow
before evaluation. This key can be used to store and retrieve
results in a database. The key computed in this stage is different
from the (local) provenance key that can be computed for a node
if all its arguments are known.
In cases where a result derives from other results that were
computed in child workflows, we can prevent the workflow system
from reevaluating the results at each step to find that we already
had the end-result somewhere. This is where the global prov-key
comes in.
Each node is assigned a `prov` attribute. If all arguments for this
node are known, this key will be the same as the local prov-key.
If some of the arguments are still empty, we add the global prov-keys
of the dependent nodes to the hash.
In this algorithm we traverse from the bottom of the DAG to the top
and back using a stack. This allows us to compute the keys for each
node without modifying the node other than setting the `prov` attribute
with the resulting key.
### Response:
def set_global_provenance(wf: Workflow, registry: Registry):
"""Compute a global provenance key for the entire workflow
before evaluation. This key can be used to store and retrieve
results in a database. The key computed in this stage is different
from the (local) provenance key that can be computed for a node
if all its arguments are known.
In cases where a result derives from other results that were
computed in child workflows, we can prevent the workflow system
from reevaluating the results at each step to find that we already
had the end-result somewhere. This is where the global prov-key
comes in.
Each node is assigned a `prov` attribute. If all arguments for this
node are known, this key will be the same as the local prov-key.
If some of the arguments are still empty, we add the global prov-keys
of the dependent nodes to the hash.
In this algorithm we traverse from the bottom of the DAG to the top
and back using a stack. This allows us to compute the keys for each
node without modifying the node other than setting the `prov` attribute
with the resulting key."""
stack = [wf.root]
while stack:
i = stack.pop()
n = wf.nodes[i]
if n.prov:
continue
if is_node_ready(n):
job_msg = registry.deep_encode(n)
n.prov = prov_key(job_msg)
continue
deps = wf.inverse_links[i]
todo = [j for j in deps if not wf.nodes[j].prov]
if not todo:
link_dict = dict(links(wf, i, deps))
link_prov = registry.deep_encode(
[link_dict[arg] for arg in empty_args(n)])
job_msg = registry.deep_encode(n)
n.prov = prov_key(job_msg, link_prov)
continue
stack.append(i)
stack.extend(deps) |
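A toy illustration of the key idea only; hashlib stands in for whatever prov_key actually does in noodles, so the digest scheme below is an assumption:

import hashlib

def prov_key_sketch(job_msg: bytes, link_prov: bytes = b"") -> str:
    # a node whose arguments are all known hashes just its own encoding;
    # otherwise the global keys of its still-pending inputs are folded in as well
    return hashlib.sha256(job_msg + link_prov).hexdigest()

ready = prov_key_sketch(b"encoded-node")
pending = prov_key_sketch(b"encoded-node", b"dep-key-1" + b"dep-key-2")
print(ready != pending)   # True: the two situations get distinct provenance keys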
def before(self, callback: Union[Callable, str]) -> "Control":
"""Register a control method that reacts before the trigger method is called.
Parameters:
callback:
The control method. If given as a callable, then that function will be
used as the callback. If given as a string, then the control will look
up a method with that name when reacting (useful when subclassing).
"""
if isinstance(callback, Control):
callback = callback._before
self._before = callback
return self | Register a control method that reacts before the trigger method is called.
Parameters:
callback:
The control method. If given as a callable, then that function will be
used as the callback. If given as a string, then the control will look
up a method with that name when reacting (useful when subclassing). | Below is the instruction that describes the task:
### Input:
Register a control method that reacts before the trigger method is called.
Parameters:
callback:
The control method. If given as a callable, then that function will be
used as the callback. If given as a string, then the control will look
up a method with that name when reacting (useful when subclassing).
### Response:
def before(self, callback: Union[Callable, str]) -> "Control":
"""Register a control method that reacts before the trigger method is called.
Parameters:
callback:
The control method. If given as a callable, then that function will be
used as the callback. If given as a string, then the control will look
up a method with that name when reacting (useful when subclassing).
"""
if isinstance(callback, Control):
callback = callback._before
self._before = callback
return self |
def persistent_popen_align3(data, samples, chunk):
""" notes """
## data are already chunked, read in the whole thing
with open(chunk, 'rb') as infile:
clusts = infile.read().split("//\n//\n")[:-1]
## snames to ensure sorted order
samples.sort(key=lambda x: x.name)
snames = [sample.name for sample in samples]
## make a tmparr to store metadata (this can get huge, consider using h5)
maxlen = data._hackersonly["max_fragment_length"] + 20
indels = np.zeros((len(samples), len(clusts), maxlen), dtype=np.bool_)
duples = np.zeros(len(clusts), dtype=np.bool_)
## create a persistent shell for running muscle in.
proc = sps.Popen(["bash"],
stdin=sps.PIPE,
stdout=sps.PIPE,
universal_newlines=True)
## iterate over clusters until finished
allstack = []
#istack = []
for ldx in xrange(len(clusts)):
## new alignment string for read1s and read2s
aligned = []
istack = []
lines = clusts[ldx].strip().split("\n")
names = lines[::2]
seqs = lines[1::2]
align1 = ""
align2 = ""
## we don't allow seeds with no hits to make it here, currently
#if len(names) == 1:
# aligned.append(clusts[ldx].replace(">", "").strip())
## find duplicates and skip aligning but keep it for downstream.
if len(names) != len(set([x.rsplit("_", 1)[0] for x in names])):
duples[ldx] = 1
istack = ["{}\n{}".format(i[1:], j) for i, j in zip(names, seqs)]
#aligned.append(clusts[ldx].replace(">", "").strip())
else:
## append counter to names because muscle doesn't retain order
names = [">{};*{}".format(j[1:], i) for i, j in enumerate(names)]
try:
## try to split names on nnnn splitter
clust1, clust2 = zip(*[i.split("nnnn") for i in seqs])
## make back into strings
cl1 = "\n".join(itertools.chain(*zip(names, clust1)))
cl2 = "\n".join(itertools.chain(*zip(names, clust2)))
## store allele (lowercase) info
shape = (len(seqs), max([len(i) for i in seqs]))
arrseqs = np.zeros(shape, dtype="S1")
for row in range(arrseqs.shape[0]):
seqsrow = seqs[row]
arrseqs[row, :len(seqsrow)] = list(seqsrow)
amask = np.char.islower(arrseqs)
save_alleles = np.any(amask)
## send align1 to the bash shell
## TODO: check for pipe-overflow here and use files for i/o
cmd1 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl1, ipyrad.bins.muscle, "//")
print(cmd1, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align1 += line
## send align2 to the bash shell
## TODO: check for pipe-overflow here and use files for i/o
cmd2 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl2, ipyrad.bins.muscle, "//")
print(cmd2, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align2 += line
## join the aligned read1 and read2 and ensure name order match
la1 = align1[1:].split("\n>")
la2 = align2[1:].split("\n>")
dalign1 = dict([i.split("\n", 1) for i in la1])
dalign2 = dict([i.split("\n", 1) for i in la2])
keys = sorted(dalign1.keys(), key=DEREP)
keys2 = sorted(dalign2.keys(), key=DEREP)
## Make sure R1 and R2 actually exist for each sample. If not
## bail out of this cluster.
if not len(keys) == len(keys2):
LOGGER.error("R1 and R2 results differ in length: "\
+ "\nR1 - {}\nR2 - {}".format(keys, keys2))
continue
## impute allele (lowercase) info back into alignments
for kidx, key in enumerate(keys):
concatseq = dalign1[key].replace("\n", "")+\
"nnnn"+dalign2[key].replace("\n", "")
## impute alleles
if save_alleles:
newmask = np.zeros(len(concatseq), dtype=np.bool_)
## check for indels and impute to amask
indidx = np.where(np.array(list(concatseq)) == "-")[0]
if indidx.size:
allrows = np.arange(amask.shape[1])
mask = np.ones(allrows.shape[0], dtype=np.bool_)
for idx in indidx:
if idx < mask.shape[0]:
mask[idx] = False
not_idx = allrows[mask == 1]
## fill in new data into all other spots
newmask[not_idx] = amask[kidx, :not_idx.shape[0]]
else:
newmask = amask[kidx]
## lower the alleles
concatarr = np.array(list(concatseq))
concatarr[newmask] = np.char.lower(concatarr[newmask])
concatseq = concatarr.tostring()
#LOGGER.info(concatseq)
## fill list with aligned data
aligned.append("{}\n{}".format(key, concatseq))
## put into a dict for writing to file
#aligned = []
#for key in keys:
# aligned.append("\n".join(
# [key,
# dalign1[key].replace("\n", "")+"nnnn"+\
# dalign2[key].replace("\n", "")]))
except IndexError as inst:
LOGGER.debug("Error in PE - ldx: {}".format())
LOGGER.debug("Vars: {}".format(dict(globals(), **locals())))
raise
except ValueError:
## make back into strings
cl1 = "\n".join(["\n".join(i) for i in zip(names, seqs)])
## store allele (lowercase) info
shape = (len(seqs), max([len(i) for i in seqs]))
arrseqs = np.zeros(shape, dtype="S1")
for row in range(arrseqs.shape[0]):
seqsrow = seqs[row]
arrseqs[row, :len(seqsrow)] = list(seqsrow)
amask = np.char.islower(arrseqs)
save_alleles = np.any(amask)
## send align1 to the bash shell (TODO: check for pipe-overflow)
cmd1 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl1, ipyrad.bins.muscle, "//")
print(cmd1, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align1 += line
## ensure name order match
la1 = align1[1:].split("\n>")
dalign1 = dict([i.split("\n", 1) for i in la1])
keys = sorted(dalign1.keys(), key=DEREP)
## put into dict for writing to file
for kidx, key in enumerate(keys):
concatseq = dalign1[key].replace("\n", "")
## impute alleles
if save_alleles:
newmask = np.zeros(len(concatseq), dtype=np.bool_)
## check for indels and impute to amask
indidx = np.where(np.array(list(concatseq)) == "-")[0]
if indidx.size:
allrows = np.arange(amask.shape[1])
mask = np.ones(allrows.shape[0], dtype=np.bool_)
for idx in indidx:
if idx < mask.shape[0]:
mask[idx] = False
not_idx = allrows[mask == 1]
## fill in new data into all other spots
newmask[not_idx] = amask[kidx, :not_idx.shape[0]]
else:
newmask = amask[kidx]
## lower the alleles
concatarr = np.array(list(concatseq))
concatarr[newmask] = np.char.lower(concatarr[newmask])
concatseq = concatarr.tostring()
## fill list with aligned data
aligned.append("{}\n{}".format(key, concatseq))
## put aligned locus in list
#aligned.append("\n".join(inner_aligned))
## enforce maxlen on aligned seqs
aseqs = np.vstack([list(i.split("\n")[1]) for i in aligned])
LOGGER.info("\naseqs here: %s", aseqs)
## index names by snames order
sidxs = [snames.index(key.rsplit("_", 1)[0]) for key in keys]
thislen = min(maxlen, aseqs.shape[1])
for idx in xrange(aseqs.shape[0]):
## enter into stack
newn = aligned[idx].split(";", 1)[0]
#newn = key[idx].split(";", 1)[0]
istack.append("{}\n{}".format(newn, aseqs[idx, :thislen].tostring()))
## name index in sorted list (indels order)
sidx = sidxs[idx]
indels[sidx, ldx, :thislen] = aseqs[idx, :thislen] == "-"
if istack:
allstack.append("\n".join(istack))
#LOGGER.debug("\n\nSTACK (%s)\n%s\n", duples[ldx], "\n".join(istack))
## cleanup
proc.stdout.close()
if proc.stderr:
proc.stderr.close()
proc.stdin.close()
proc.wait()
#LOGGER.info("\n\nALLSTACK %s\n", "\n".join(i) for i in allstack[:5]])
## write to file after
odx = chunk.rsplit("_")[-1]
alignfile = os.path.join(data.tmpdir, "align_{}.fa".format(odx))
with open(alignfile, 'wb') as outfile:
outfile.write("\n//\n//\n".join(allstack)+"\n")
os.remove(chunk)
## save indels array to tmp dir
ifile = os.path.join(data.tmpdir, "indels_{}.tmp.npy".format(odx))
np.save(ifile, indels)
dfile = os.path.join(data.tmpdir, "duples_{}.tmp.npy".format(odx))
np.save(dfile, duples) | notes | Below is the instruction that describes the task:
### Input:
notes
### Response:
def persistent_popen_align3(data, samples, chunk):
""" notes """
## data are already chunked, read in the whole thing
with open(chunk, 'rb') as infile:
clusts = infile.read().split("//\n//\n")[:-1]
## snames to ensure sorted order
samples.sort(key=lambda x: x.name)
snames = [sample.name for sample in samples]
## make a tmparr to store metadata (this can get huge, consider using h5)
maxlen = data._hackersonly["max_fragment_length"] + 20
indels = np.zeros((len(samples), len(clusts), maxlen), dtype=np.bool_)
duples = np.zeros(len(clusts), dtype=np.bool_)
## create a persistent shell for running muscle in.
proc = sps.Popen(["bash"],
stdin=sps.PIPE,
stdout=sps.PIPE,
universal_newlines=True)
## iterate over clusters until finished
allstack = []
#istack = []
for ldx in xrange(len(clusts)):
## new alignment string for read1s and read2s
aligned = []
istack = []
lines = clusts[ldx].strip().split("\n")
names = lines[::2]
seqs = lines[1::2]
align1 = ""
align2 = ""
## we don't allow seeds with no hits to make it here, currently
#if len(names) == 1:
# aligned.append(clusts[ldx].replace(">", "").strip())
## find duplicates and skip aligning but keep it for downstream.
if len(names) != len(set([x.rsplit("_", 1)[0] for x in names])):
duples[ldx] = 1
istack = ["{}\n{}".format(i[1:], j) for i, j in zip(names, seqs)]
#aligned.append(clusts[ldx].replace(">", "").strip())
else:
## append counter to names because muscle doesn't retain order
names = [">{};*{}".format(j[1:], i) for i, j in enumerate(names)]
try:
## try to split names on nnnn splitter
clust1, clust2 = zip(*[i.split("nnnn") for i in seqs])
## make back into strings
cl1 = "\n".join(itertools.chain(*zip(names, clust1)))
cl2 = "\n".join(itertools.chain(*zip(names, clust2)))
## store allele (lowercase) info
shape = (len(seqs), max([len(i) for i in seqs]))
arrseqs = np.zeros(shape, dtype="S1")
for row in range(arrseqs.shape[0]):
seqsrow = seqs[row]
arrseqs[row, :len(seqsrow)] = list(seqsrow)
amask = np.char.islower(arrseqs)
save_alleles = np.any(amask)
## send align1 to the bash shell
## TODO: check for pipe-overflow here and use files for i/o
cmd1 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl1, ipyrad.bins.muscle, "//")
print(cmd1, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align1 += line
## send align2 to the bash shell
## TODO: check for pipe-overflow here and use files for i/o
cmd2 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl2, ipyrad.bins.muscle, "//")
print(cmd2, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align2 += line
## join the aligned read1 and read2 and ensure name order match
la1 = align1[1:].split("\n>")
la2 = align2[1:].split("\n>")
dalign1 = dict([i.split("\n", 1) for i in la1])
dalign2 = dict([i.split("\n", 1) for i in la2])
keys = sorted(dalign1.keys(), key=DEREP)
keys2 = sorted(dalign2.keys(), key=DEREP)
## Make sure R1 and R2 actually exist for each sample. If not
## bail out of this cluster.
if not len(keys) == len(keys2):
LOGGER.error("R1 and R2 results differ in length: "\
+ "\nR1 - {}\nR2 - {}".format(keys, keys2))
continue
## impute allele (lowercase) info back into alignments
for kidx, key in enumerate(keys):
concatseq = dalign1[key].replace("\n", "")+\
"nnnn"+dalign2[key].replace("\n", "")
## impute alleles
if save_alleles:
newmask = np.zeros(len(concatseq), dtype=np.bool_)
## check for indels and impute to amask
indidx = np.where(np.array(list(concatseq)) == "-")[0]
if indidx.size:
allrows = np.arange(amask.shape[1])
mask = np.ones(allrows.shape[0], dtype=np.bool_)
for idx in indidx:
if idx < mask.shape[0]:
mask[idx] = False
not_idx = allrows[mask == 1]
## fill in new data into all other spots
newmask[not_idx] = amask[kidx, :not_idx.shape[0]]
else:
newmask = amask[kidx]
## lower the alleles
concatarr = np.array(list(concatseq))
concatarr[newmask] = np.char.lower(concatarr[newmask])
concatseq = concatarr.tostring()
#LOGGER.info(concatseq)
## fill list with aligned data
aligned.append("{}\n{}".format(key, concatseq))
## put into a dict for writing to file
#aligned = []
#for key in keys:
# aligned.append("\n".join(
# [key,
# dalign1[key].replace("\n", "")+"nnnn"+\
# dalign2[key].replace("\n", "")]))
except IndexError as inst:
LOGGER.debug("Error in PE - ldx: {}".format())
LOGGER.debug("Vars: {}".format(dict(globals(), **locals())))
raise
except ValueError:
## make back into strings
cl1 = "\n".join(["\n".join(i) for i in zip(names, seqs)])
## store allele (lowercase) info
shape = (len(seqs), max([len(i) for i in seqs]))
arrseqs = np.zeros(shape, dtype="S1")
for row in range(arrseqs.shape[0]):
seqsrow = seqs[row]
arrseqs[row, :len(seqsrow)] = list(seqsrow)
amask = np.char.islower(arrseqs)
save_alleles = np.any(amask)
## send align1 to the bash shell (TODO: check for pipe-overflow)
cmd1 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(cl1, ipyrad.bins.muscle, "//")
print(cmd1, file=proc.stdin)
## read the stdout by line until splitter is reached
for line in iter(proc.stdout.readline, "//\n"):
align1 += line
## ensure name order match
la1 = align1[1:].split("\n>")
dalign1 = dict([i.split("\n", 1) for i in la1])
keys = sorted(dalign1.keys(), key=DEREP)
## put into dict for writing to file
for kidx, key in enumerate(keys):
concatseq = dalign1[key].replace("\n", "")
## impute alleles
if save_alleles:
newmask = np.zeros(len(concatseq), dtype=np.bool_)
## check for indels and impute to amask
indidx = np.where(np.array(list(concatseq)) == "-")[0]
if indidx.size:
allrows = np.arange(amask.shape[1])
mask = np.ones(allrows.shape[0], dtype=np.bool_)
for idx in indidx:
if idx < mask.shape[0]:
mask[idx] = False
not_idx = allrows[mask == 1]
## fill in new data into all other spots
newmask[not_idx] = amask[kidx, :not_idx.shape[0]]
else:
newmask = amask[kidx]
## lower the alleles
concatarr = np.array(list(concatseq))
concatarr[newmask] = np.char.lower(concatarr[newmask])
concatseq = concatarr.tostring()
## fill list with aligned data
aligned.append("{}\n{}".format(key, concatseq))
## put aligned locus in list
#aligned.append("\n".join(inner_aligned))
## enforce maxlen on aligned seqs
aseqs = np.vstack([list(i.split("\n")[1]) for i in aligned])
LOGGER.info("\naseqs here: %s", aseqs)
## index names by snames order
sidxs = [snames.index(key.rsplit("_", 1)[0]) for key in keys]
thislen = min(maxlen, aseqs.shape[1])
for idx in xrange(aseqs.shape[0]):
## enter into stack
newn = aligned[idx].split(";", 1)[0]
#newn = key[idx].split(";", 1)[0]
istack.append("{}\n{}".format(newn, aseqs[idx, :thislen].tostring()))
## name index in sorted list (indels order)
sidx = sidxs[idx]
indels[sidx, ldx, :thislen] = aseqs[idx, :thislen] == "-"
if istack:
allstack.append("\n".join(istack))
#LOGGER.debug("\n\nSTACK (%s)\n%s\n", duples[ldx], "\n".join(istack))
## cleanup
proc.stdout.close()
if proc.stderr:
proc.stderr.close()
proc.stdin.close()
proc.wait()
#LOGGER.info("\n\nALLSTACK %s\n", "\n".join(i) for i in allstack[:5]])
## write to file after
odx = chunk.rsplit("_")[-1]
alignfile = os.path.join(data.tmpdir, "align_{}.fa".format(odx))
with open(alignfile, 'wb') as outfile:
outfile.write("\n//\n//\n".join(allstack)+"\n")
os.remove(chunk)
## save indels array to tmp dir
ifile = os.path.join(data.tmpdir, "indels_{}.tmp.npy".format(odx))
np.save(ifile, indels)
dfile = os.path.join(data.tmpdir, "duples_{}.tmp.npy".format(odx))
np.save(dfile, duples) |
def _did_receive_response(self, connection):
""" Receive a response from the connection """
if connection.has_timeouted:
bambou_logger.info("NURESTConnection has timeout.")
return
has_callbacks = connection.has_callbacks()
should_post = not has_callbacks
if connection.handle_response_for_connection(should_post=should_post) and has_callbacks:
callback = connection.callbacks['local']
callback(connection) | Receive a response from the connection | Below is the instruction that describes the task:
### Input:
Receive a response from the connection
### Response:
def _did_receive_response(self, connection):
""" Receive a response from the connection """
if connection.has_timeouted:
bambou_logger.info("NURESTConnection has timeout.")
return
has_callbacks = connection.has_callbacks()
should_post = not has_callbacks
if connection.handle_response_for_connection(should_post=should_post) and has_callbacks:
callback = connection.callbacks['local']
callback(connection) |
def report_response(response,
request_headers=True, request_body=True,
response_headers=False, response_body=False,
redirection=False):
"""
Generate a response report
:param response: a ``requests.models.Response`` object
:param request_headers: whether to include the request headers
:param request_body: whether to include the request body
:param response_headers: whether to include the response headers
:param response_body: whether to include the response body
:param redirection: whether to include redirect responses
:return: str
"""
# https://docs.python.org/3/library/string.html#formatstrings
url = 'Url: [{method}]{url} {status} {elapsed:.2f}ms'.format(
method=response.request.method, url=response.url,
status=response.status_code, elapsed=response.elapsed.total_seconds() * 1000
)
pieces = [url]
if request_headers:
request_headers = 'Request headers: {request_headers}'.format(request_headers=response.request.headers)
pieces.append(request_headers)
if request_body:
request_body = 'Request body: {request_body}'.format(request_body=response.request.body)
pieces.append(request_body)
if response_headers:
response_headers = 'Response headers: {response_headers}'.format(response_headers=response.headers)
pieces.append(response_headers)
if response_body:
response_body = 'Response body: {response_body}'.format(response_body=response.text)
pieces.append(response_body)
reporter = '\n'.join(pieces)
if redirection and response.history:
for h in response.history[::-1]:
redirect_reporter = report_response(
h,
request_headers, request_body,
response_headers, response_body,
redirection=False
)
reporter = '\n'.join([redirect_reporter, ' Redirect ↓ '.center(72, '-'), reporter])
return reporter | Generate a response report
:param response: a ``requests.models.Response`` object
:param request_headers: whether to include the request headers
:param request_body: whether to include the request body
:param response_headers: whether to include the response headers
:param response_body: whether to include the response body
:param redirection: whether to include redirect responses
:return: str | Below is the instruction that describes the task:
### Input:
Generate a response report
:param response: a ``requests.models.Response`` object
:param request_headers: whether to include the request headers
:param request_body: whether to include the request body
:param response_headers: whether to include the response headers
:param response_body: whether to include the response body
:param redirection: whether to include redirect responses
:return: str
### Response:
def report_response(response,
request_headers=True, request_body=True,
response_headers=False, response_body=False,
redirection=False):
"""
Generate a response report
:param response: a ``requests.models.Response`` object
:param request_headers: whether to include the request headers
:param request_body: whether to include the request body
:param response_headers: whether to include the response headers
:param response_body: whether to include the response body
:param redirection: whether to include redirect responses
:return: str
"""
# https://docs.python.org/3/library/string.html#formatstrings
url = 'Url: [{method}]{url} {status} {elapsed:.2f}ms'.format(
method=response.request.method, url=response.url,
status=response.status_code, elapsed=response.elapsed.total_seconds() * 1000
)
pieces = [url]
if request_headers:
request_headers = 'Request headers: {request_headers}'.format(request_headers=response.request.headers)
pieces.append(request_headers)
if request_body:
request_body = 'Request body: {request_body}'.format(request_body=response.request.body)
pieces.append(request_body)
if response_headers:
response_headers = 'Response headers: {response_headers}'.format(response_headers=response.headers)
pieces.append(response_headers)
if response_body:
response_body = 'Response body: {response_body}'.format(response_body=response.text)
pieces.append(response_body)
reporter = '\n'.join(pieces)
if redirection and response.history:
for h in response.history[::-1]:
redirect_reporter = report_response(
h,
request_headers, request_body,
response_headers, response_body,
redirection=False
)
reporter = '\n'.join([redirect_reporter, ' Redirect ↓ '.center(72, '-'), reporter])
return reporter |
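For reference, a minimal usage sketch of the `report_response` helper above (assumes the `requests` package is installed and that the URL, shown only for illustration, is reachable):
import requests
resp = requests.get('https://httpbin.org/redirect/1')  # any URL works; this one exercises the redirection branch
print(report_response(resp, response_headers=True, redirection=True))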
def RepackTemplates(self,
repack_configs,
templates,
output_dir,
config=None,
sign=False,
signed_template=False):
"""Call repacker in a subprocess."""
pool = multiprocessing.Pool(processes=10)
results = []
bulk_sign_installers = False
for repack_config in repack_configs:
for template in templates:
repack_args = ["grr_client_build"]
if config:
repack_args.extend(["--config", config])
repack_args.extend([
"--secondary_configs", repack_config, "repack", "--template",
template, "--output_dir",
self.GetOutputDir(output_dir, repack_config)
])
# We only sign exes and rpms at the moment. The others will raise if we
# try to ask for signing.
passwd = None
if sign:
if template.endswith(".exe.zip"):
# This is for osslsigncode only.
if platform.system() != "Windows":
passwd = self.GetWindowsPassphrase()
repack_args.append("--sign")
else:
bulk_sign_installers = True
if signed_template:
repack_args.append("--signed_template")
elif template.endswith(".rpm.zip"):
bulk_sign_installers = True
print("Calling %s" % " ".join(repack_args))
results.append(
pool.apply_async(SpawnProcess, (repack_args,), dict(passwd=passwd)))
# Also build debug if it's windows.
if template.endswith(".exe.zip"):
debug_args = []
debug_args.extend(repack_args)
debug_args.append("--debug_build")
print("Calling %s" % " ".join(debug_args))
results.append(
pool.apply_async(SpawnProcess, (debug_args,),
dict(passwd=passwd)))
try:
pool.close()
# Workaround to handle keyboard kills
# http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
# get will raise if the child raises.
for result_obj in results:
result_obj.get(9999)
pool.join()
except KeyboardInterrupt:
print("parent received control-c")
pool.terminate()
except ErrorDuringRepacking:
pool.terminate()
raise
if bulk_sign_installers:
to_sign = {}
for root, _, files in os.walk(output_dir):
for f in files:
if f.endswith(".exe"):
to_sign.setdefault("windows", []).append(os.path.join(root, f))
elif f.endswith(".rpm"):
to_sign.setdefault("rpm", []).append(os.path.join(root, f))
if to_sign.get("windows"):
signer = repacking.TemplateRepacker().GetSigner([
"ClientBuilder Context",
"Platform:%s" % platform.system(), "Target:Windows"
])
signer.SignFiles(to_sign.get("windows"))
if to_sign.get("rpm"):
signer = repacking.TemplateRepacker().GetSigner([
"ClientBuilder Context",
"Platform:%s" % platform.system(), "Target:Linux", "Target:LinuxRpm"
])
signer.AddSignatureToRPMs(to_sign.get("rpm")) | Call repacker in a subprocess. | Below is the instruction that describes the task:
### Input:
Call repacker in a subprocess.
### Response:
def RepackTemplates(self,
repack_configs,
templates,
output_dir,
config=None,
sign=False,
signed_template=False):
"""Call repacker in a subprocess."""
pool = multiprocessing.Pool(processes=10)
results = []
bulk_sign_installers = False
for repack_config in repack_configs:
for template in templates:
repack_args = ["grr_client_build"]
if config:
repack_args.extend(["--config", config])
repack_args.extend([
"--secondary_configs", repack_config, "repack", "--template",
template, "--output_dir",
self.GetOutputDir(output_dir, repack_config)
])
# We only sign exes and rpms at the moment. The others will raise if we
# try to ask for signing.
passwd = None
if sign:
if template.endswith(".exe.zip"):
# This is for osslsigncode only.
if platform.system() != "Windows":
passwd = self.GetWindowsPassphrase()
repack_args.append("--sign")
else:
bulk_sign_installers = True
if signed_template:
repack_args.append("--signed_template")
elif template.endswith(".rpm.zip"):
bulk_sign_installers = True
print("Calling %s" % " ".join(repack_args))
results.append(
pool.apply_async(SpawnProcess, (repack_args,), dict(passwd=passwd)))
# Also build debug if it's windows.
if template.endswith(".exe.zip"):
debug_args = []
debug_args.extend(repack_args)
debug_args.append("--debug_build")
print("Calling %s" % " ".join(debug_args))
results.append(
pool.apply_async(SpawnProcess, (debug_args,),
dict(passwd=passwd)))
try:
pool.close()
# Workaround to handle keyboard kills
# http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
# get will raise if the child raises.
for result_obj in results:
result_obj.get(9999)
pool.join()
except KeyboardInterrupt:
print("parent received control-c")
pool.terminate()
except ErrorDuringRepacking:
pool.terminate()
raise
if bulk_sign_installers:
to_sign = {}
for root, _, files in os.walk(output_dir):
for f in files:
if f.endswith(".exe"):
to_sign.setdefault("windows", []).append(os.path.join(root, f))
elif f.endswith(".rpm"):
to_sign.setdefault("rpm", []).append(os.path.join(root, f))
if to_sign.get("windows"):
signer = repacking.TemplateRepacker().GetSigner([
"ClientBuilder Context",
"Platform:%s" % platform.system(), "Target:Windows"
])
signer.SignFiles(to_sign.get("windows"))
if to_sign.get("rpm"):
signer = repacking.TemplateRepacker().GetSigner([
"ClientBuilder Context",
"Platform:%s" % platform.system(), "Target:Linux", "Target:LinuxRpm"
])
signer.AddSignatureToRPMs(to_sign.get("rpm")) |
def on_batch_end(self, last_target, train, **kwargs):
"Update the metrics if not `train`"
if train: return
bs = last_target.size(0)
for name in self.names:
self.metrics[name] += bs * self.learn.loss_func.metrics[name].detach().cpu()
self.nums += bs | Update the metrics if not `train` | Below is the instruction that describes the task:
### Input:
Update the metrics if not `train`
### Response:
def on_batch_end(self, last_target, train, **kwargs):
"Update the metrics if not `train`"
if train: return
bs = last_target.size(0)
for name in self.names:
self.metrics[name] += bs * self.learn.loss_func.metrics[name].detach().cpu()
self.nums += bs |
def delete_menu(self, menu):
""" Delete the specified menu
:param menu:
:type menu:
:returns:
:rtype:
:raises:
"""
if menu.parent is None:
del self.menus[menu.name()]
menu._delete() | Delete the specified menu
:param menu:
:type menu:
:returns:
:rtype:
:raises: | Below is the instruction that describes the task:
### Input:
Delete the specified menu
:param menu:
:type menu:
:returns:
:rtype:
:raises:
### Response:
def delete_menu(self, menu):
""" Delete the specified menu
:param menu:
:type menu:
:returns:
:rtype:
:raises:
"""
if menu.parent is None:
del self.menus[menu.name()]
menu._delete() |
def add_config(parser):
""" add config """
# the default config path
default_config_path = config.get_heron_conf_dir()
parser.add_argument(
'--config-path',
metavar='(a string; path to cluster config; default: "' + default_config_path + '")',
default=os.path.join(config.get_heron_dir(), default_config_path))
return parser | add config | Below is the instruction that describes the task:
### Input:
add config
### Response:
def add_config(parser):
""" add config """
# the default config path
default_config_path = config.get_heron_conf_dir()
parser.add_argument(
'--config-path',
metavar='(a string; path to cluster config; default: "' + default_config_path + '")',
default=os.path.join(config.get_heron_dir(), default_config_path))
return parser |
def makeResetPacket(ID, param):
"""
Resets a servo to one of 3 reset states:
XL320_RESET_ALL = 0xFF
XL320_RESET_ALL_BUT_ID = 0x01
XL320_RESET_ALL_BUT_ID_BAUD_RATE = 0x02
"""
if param not in [0x01, 0x02, 0xff]:
raise Exception('Packet.makeResetPacket invalid parameter {}'.format(param))
# pkt = makePacket(ID, xl320.XL320_RESET, None, [param])
pkt = makePacket(ID, xl320.XL320_RESET, None, [1])
return pkt | Resets a servo to one of 3 reset states:
XL320_RESET_ALL = 0xFF
XL320_RESET_ALL_BUT_ID = 0x01
XL320_RESET_ALL_BUT_ID_BAUD_RATE = 0x02 | Below is the instruction that describes the task:
### Input:
Resets a servo to one of 3 reset states:
XL320_RESET_ALL = 0xFF
XL320_RESET_ALL_BUT_ID = 0x01
XL320_RESET_ALL_BUT_ID_BAUD_RATE = 0x02
### Response:
def makeResetPacket(ID, param):
"""
Resets a servo to one of 3 reset states:
XL320_RESET_ALL = 0xFF
XL320_RESET_ALL_BUT_ID = 0x01
XL320_RESET_ALL_BUT_ID_BAUD_RATE = 0x02
"""
if param not in [0x01, 0x02, 0xff]:
raise Exception('Packet.makeResetPacket invalid parameter {}'.format(param))
# pkt = makePacket(ID, xl320.XL320_RESET, None, [param])
pkt = makePacket(ID, xl320.XL320_RESET, None, [1])
return pkt |
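A brief usage sketch for `makeResetPacket` (assumes the surrounding module's `makePacket` helper and the `xl320` constants module are importable; note that the body above currently hard-codes the parameter list to `[1]`, as its commented-out line shows):
pkt = makeResetPacket(1, 0xFF)       # request a factory reset of the servo with ID 1
# makeResetPacket(1, 0x05) would raise, since 0x05 is not one of the three allowed reset codes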
def _update_kube_events(self, instance, pods_list, event_items):
"""
Process kube events and send ddog events
The namespace filtering is done here instead of KubeEventRetriever
to avoid interfering with service discovery
"""
node_ip, node_name = self.kubeutil.get_node_info()
self.log.debug('Processing events on {} [{}]'.format(node_name, node_ip))
k8s_namespaces = instance.get('namespaces', DEFAULT_NAMESPACES)
if not isinstance(k8s_namespaces, list):
self.log.warning('Configuration key "namespaces" is not a list: fallback to the default value')
k8s_namespaces = DEFAULT_NAMESPACES
# handle old config value
if 'namespace' in instance and instance.get('namespace') not in (None, 'default'):
self.log.warning('''The 'namespace' parameter is deprecated and will stop being supported starting '''
'''from 5.13. Please use 'namespaces' and/or 'namespace_name_regexp' instead.''')
k8s_namespaces.append(instance.get('namespace'))
if self.k8s_namespace_regexp:
namespaces_endpoint = '{}/namespaces'.format(self.kubeutil.kubernetes_api_url)
self.log.debug('Kubernetes API endpoint to query namespaces: %s' % namespaces_endpoint)
namespaces = self.kubeutil.retrieve_json_auth(namespaces_endpoint).json()
for namespace in namespaces.get('items', []):
name = namespace.get('metadata', {}).get('name', None)
if name and self.k8s_namespace_regexp.match(name):
k8s_namespaces.append(name)
k8s_namespaces = set(k8s_namespaces)
for event in event_items:
event_ts = calendar.timegm(time.strptime(event.get('lastTimestamp'), '%Y-%m-%dT%H:%M:%SZ'))
involved_obj = event.get('involvedObject', {})
# filter events by white listed namespaces (empty namespace belong to the 'default' one)
if involved_obj.get('namespace', 'default') not in k8s_namespaces:
continue
tags = self.kubeutil.extract_event_tags(event)
tags.extend(instance.get('tags', []))
title = '{} {} on {}'.format(involved_obj.get('name'), event.get('reason'), node_name)
message = event.get('message')
source = event.get('source')
k8s_event_type = event.get('type')
alert_type = K8S_ALERT_MAP.get(k8s_event_type, 'info')
if source:
message += '\nSource: {} {}\n'.format(source.get('component', ''), source.get('host', ''))
msg_body = "%%%\n{}\n```\n{}\n```\n%%%".format(title, message)
dd_event = {
'timestamp': event_ts,
'host': node_ip,
'event_type': EVENT_TYPE,
'msg_title': title,
'msg_text': msg_body,
'source_type_name': EVENT_TYPE,
'alert_type': alert_type,
'event_object': 'kubernetes:{}'.format(involved_obj.get('name')),
'tags': tags,
}
self.event(dd_event) | Process kube events and send ddog events
The namespace filtering is done here instead of KubeEventRetriever
to avoid interfering with service discovery | Below is the instruction that describes the task:
### Input:
Process kube events and send ddog events
The namespace filtering is done here instead of KubeEventRetriever
to avoid interfering with service discovery
### Response:
def _update_kube_events(self, instance, pods_list, event_items):
"""
Process kube events and send ddog events
The namespace filtering is done here instead of KubeEventRetriever
to avoid interfering with service discovery
"""
node_ip, node_name = self.kubeutil.get_node_info()
self.log.debug('Processing events on {} [{}]'.format(node_name, node_ip))
k8s_namespaces = instance.get('namespaces', DEFAULT_NAMESPACES)
if not isinstance(k8s_namespaces, list):
self.log.warning('Configuration key "namespaces" is not a list: fallback to the default value')
k8s_namespaces = DEFAULT_NAMESPACES
# handle old config value
if 'namespace' in instance and instance.get('namespace') not in (None, 'default'):
self.log.warning('''The 'namespace' parameter is deprecated and will stop being supported starting '''
'''from 5.13. Please use 'namespaces' and/or 'namespace_name_regexp' instead.''')
k8s_namespaces.append(instance.get('namespace'))
if self.k8s_namespace_regexp:
namespaces_endpoint = '{}/namespaces'.format(self.kubeutil.kubernetes_api_url)
self.log.debug('Kubernetes API endpoint to query namespaces: %s' % namespaces_endpoint)
namespaces = self.kubeutil.retrieve_json_auth(namespaces_endpoint).json()
for namespace in namespaces.get('items', []):
name = namespace.get('metadata', {}).get('name', None)
if name and self.k8s_namespace_regexp.match(name):
k8s_namespaces.append(name)
k8s_namespaces = set(k8s_namespaces)
for event in event_items:
event_ts = calendar.timegm(time.strptime(event.get('lastTimestamp'), '%Y-%m-%dT%H:%M:%SZ'))
involved_obj = event.get('involvedObject', {})
# filter events by white listed namespaces (empty namespace belong to the 'default' one)
if involved_obj.get('namespace', 'default') not in k8s_namespaces:
continue
tags = self.kubeutil.extract_event_tags(event)
tags.extend(instance.get('tags', []))
title = '{} {} on {}'.format(involved_obj.get('name'), event.get('reason'), node_name)
message = event.get('message')
source = event.get('source')
k8s_event_type = event.get('type')
alert_type = K8S_ALERT_MAP.get(k8s_event_type, 'info')
if source:
message += '\nSource: {} {}\n'.format(source.get('component', ''), source.get('host', ''))
msg_body = "%%%\n{}\n```\n{}\n```\n%%%".format(title, message)
dd_event = {
'timestamp': event_ts,
'host': node_ip,
'event_type': EVENT_TYPE,
'msg_title': title,
'msg_text': msg_body,
'source_type_name': EVENT_TYPE,
'alert_type': alert_type,
'event_object': 'kubernetes:{}'.format(involved_obj.get('name')),
'tags': tags,
}
self.event(dd_event) |
def send(device_id, description, **kwargs):
"""
Site: http://parse.com
API: https://www.parse.com/docs/push_guide#scheduled/REST
Desc: Best app for system administrators
"""
headers = {
"X-Parse-Application-Id": settings.PARSE_APP_ID,
"X-Parse-REST-API-Key": settings.PARSE_API_KEY,
"User-Agent": "DBMail/%s" % get_version(),
"Content-type": "application/json",
}
data = {
"where": {
"user_id": device_id,
},
"data": {
"alert": description,
"title": kwargs.pop("event")
}
}
_data = kwargs.pop('data', None)
if _data is not None:
data.update(_data)
http = HTTPSConnection(kwargs.pop("api_url", "api.parse.com"))
http.request(
"POST", "/1/push",
headers=headers,
body=dumps(data))
response = http.getresponse()
if response.status != 200:
raise ParseComError(response.reason)
body = loads(response.read())
if body['error']:
raise ParseComError(body['error'])
return True | Site: http://parse.com
API: https://www.parse.com/docs/push_guide#scheduled/REST
Desc: Best app for system administrators | Below is the instruction that describes the task:
### Input:
Site: http://parse.com
API: https://www.parse.com/docs/push_guide#scheduled/REST
Desc: Best app for system administrators
### Response:
def send(device_id, description, **kwargs):
"""
Site: http://parse.com
API: https://www.parse.com/docs/push_guide#scheduled/REST
Desc: Best app for system administrators
"""
headers = {
"X-Parse-Application-Id": settings.PARSE_APP_ID,
"X-Parse-REST-API-Key": settings.PARSE_API_KEY,
"User-Agent": "DBMail/%s" % get_version(),
"Content-type": "application/json",
}
data = {
"where": {
"user_id": device_id,
},
"data": {
"alert": description,
"title": kwargs.pop("event")
}
}
_data = kwargs.pop('data', None)
if _data is not None:
data.update(_data)
http = HTTPSConnection(kwargs.pop("api_url", "api.parse.com"))
http.request(
"POST", "/1/push",
headers=headers,
body=dumps(data))
response = http.getresponse()
if response.status != 200:
raise ParseComError(response.reason)
body = loads(response.read())
if body['error']:
raise ParseComError(body['error'])
return True |
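A hedged call sketch for `send` (assumes Django settings define PARSE_APP_ID and PARSE_API_KEY and that the device id below is registered with Parse; the values are illustrative):
ok = send('user-42', 'Root partition is above 95% capacity', event='Disk alert')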
def add_group(self, groupname, statements):
"""
Adds a group
@type groupname: bytes
@type statements: str
"""
msg = OmapiMessage.open(b"group")
msg.message.append(("create", struct.pack("!I", 1)))
msg.obj.append(("name", groupname))
msg.obj.append(("statements", statements))
response = self.query_server(msg)
if response.opcode != OMAPI_OP_UPDATE:
raise OmapiError("add group failed") | Adds a group
@type groupname: bytes
@type statements: str | Below is the instruction that describes the task:
### Input:
Adds a group
@type groupname: bytes
@type statements: str
### Response:
def add_group(self, groupname, statements):
"""
Adds a group
@type groupname: bytes
@type statements: str
"""
msg = OmapiMessage.open(b"group")
msg.message.append(("create", struct.pack("!I", 1)))
msg.obj.append(("name", groupname))
msg.obj.append(("statements", statements))
response = self.query_server(msg)
if response.opcode != OMAPI_OP_UPDATE:
raise OmapiError("add group failed") |
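A usage sketch for `add_group` (assumes `client` is an already-connected instance of the OMAPI wrapper class this method belongs to; the group name and statement text are illustrative):
client.add_group(b"static-hosts", "option domain-name-servers 10.0.0.1;")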
def _register_plotter(cls, identifier, module, plotter_name,
plotter_cls=None):
"""
Register a plotter in the :class:`Project` class to easily access it
Parameters
----------
identifier: str
Name of the attribute that is used to filter for the instances
belonging to this plotter
module: str
The module from where to import the `plotter_name`
plotter_name: str
The name of the plotter class in `module`
plotter_cls: type
The imported class of `plotter_name`. If None, it will be imported
when it is needed
"""
if plotter_cls is not None: # plotter has already been imported
def get_x(self):
return self(plotter_cls)
else:
def get_x(self):
return self(getattr(import_module(module), plotter_name))
setattr(cls, identifier, property(get_x, doc=(
"List of data arrays that are plotted by :class:`%s.%s`"
" plotters") % (module, plotter_name)))
cls._registered_plotters[identifier] = (module, plotter_name) | Register a plotter in the :class:`Project` class to easily access it
Parameters
----------
identifier: str
Name of the attribute that is used to filter for the instances
belonging to this plotter
module: str
The module from where to import the `plotter_name`
plotter_name: str
The name of the plotter class in `module`
plotter_cls: type
The imported class of `plotter_name`. If None, it will be imported
when it is needed | Below is the instruction that describes the task:
### Input:
Register a plotter in the :class:`Project` class to easily access it
Parameters
----------
identifier: str
Name of the attribute that is used to filter for the instances
belonging to this plotter
module: str
The module from where to import the `plotter_name`
plotter_name: str
The name of the plotter class in `module`
plotter_cls: type
The imported class of `plotter_name`. If None, it will be imported
when it is needed
### Response:
def _register_plotter(cls, identifier, module, plotter_name,
plotter_cls=None):
"""
Register a plotter in the :class:`Project` class to easily access it
Parameters
----------
identifier: str
Name of the attribute that is used to filter for the instances
belonging to this plotter
module: str
The module from where to import the `plotter_name`
plotter_name: str
The name of the plotter class in `module`
plotter_cls: type
The imported class of `plotter_name`. If None, it will be imported
when it is needed
"""
if plotter_cls is not None: # plotter has already been imported
def get_x(self):
return self(plotter_cls)
else:
def get_x(self):
return self(getattr(import_module(module), plotter_name))
setattr(cls, identifier, property(get_x, doc=(
"List of data arrays that are plotted by :class:`%s.%s`"
" plotters") % (module, plotter_name)))
cls._registered_plotters[identifier] = (module, plotter_name) |
def replace(self, text=None):
"""
Replaces the selected occurrence.
:param text: The replacement text. If it is None, the lineEditReplace's
text is used instead.
:return True if the text could be replaced properly, False if there is
no more occurrences to replace.
"""
if text is None or isinstance(text, bool):
text = self.lineEditReplace.text()
current_occurences = self._current_occurrence()
occurrences = self.get_occurences()
if current_occurences == -1:
self.select_next()
current_occurences = self._current_occurrence()
try:
# prevent search request due to editor textChanged
try:
self.editor.textChanged.disconnect(self.request_search)
except (RuntimeError, TypeError):
# already disconnected
pass
occ = occurrences[current_occurences]
cursor = self.editor.textCursor()
cursor.setPosition(occ[0])
cursor.setPosition(occ[1], cursor.KeepAnchor)
len_to_replace = len(cursor.selectedText())
len_replacement = len(text)
offset = len_replacement - len_to_replace
cursor.insertText(text)
self.editor.setTextCursor(cursor)
self._remove_occurrence(current_occurences, offset)
current_occurences -= 1
self._set_current_occurrence(current_occurences)
self.select_next()
self.cpt_occurences = len(self.get_occurences())
self._update_label_matches()
self._update_buttons()
return True
except IndexError:
return False
finally:
self.editor.textChanged.connect(self.request_search) | Replaces the selected occurrence.
:param text: The replacement text. If it is None, the lineEditReplace's
text is used instead.
:return True if the text could be replaced properly, False if there is
no more occurrences to replace. | Below is the instruction that describes the task:
### Input:
Replaces the selected occurrence.
:param text: The replacement text. If it is None, the lineEditReplace's
text is used instead.
:return True if the text could be replaced properly, False if there is
no more occurrences to replace.
### Response:
def replace(self, text=None):
"""
Replaces the selected occurrence.
:param text: The replacement text. If it is None, the lineEditReplace's
text is used instead.
:return True if the text could be replaced properly, False if there is
no more occurrences to replace.
"""
if text is None or isinstance(text, bool):
text = self.lineEditReplace.text()
current_occurences = self._current_occurrence()
occurrences = self.get_occurences()
if current_occurences == -1:
self.select_next()
current_occurences = self._current_occurrence()
try:
# prevent search request due to editor textChanged
try:
self.editor.textChanged.disconnect(self.request_search)
except (RuntimeError, TypeError):
# already disconnected
pass
occ = occurrences[current_occurences]
cursor = self.editor.textCursor()
cursor.setPosition(occ[0])
cursor.setPosition(occ[1], cursor.KeepAnchor)
len_to_replace = len(cursor.selectedText())
len_replacement = len(text)
offset = len_replacement - len_to_replace
cursor.insertText(text)
self.editor.setTextCursor(cursor)
self._remove_occurrence(current_occurences, offset)
current_occurences -= 1
self._set_current_occurrence(current_occurences)
self.select_next()
self.cpt_occurences = len(self.get_occurences())
self._update_label_matches()
self._update_buttons()
return True
except IndexError:
return False
finally:
self.editor.textChanged.connect(self.request_search) |
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not is_scalar(key):
raise InvalidIndexError
k = com.values_from_object(key)
loc = self.get_loc(k)
new_values = com.values_from_object(series)[loc]
return new_values | we always want to get an index value, never a value | Below is the instruction that describes the task:
### Input:
we always want to get an index value, never a value
### Response:
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not is_scalar(key):
raise InvalidIndexError
k = com.values_from_object(key)
loc = self.get_loc(k)
new_values = com.values_from_object(series)[loc]
return new_values |
def get_nodes():
""" Returns all nodes in a list of dicts format
"""
cfg_file = "/etc/nago/nago.ini"
config = ConfigParser.ConfigParser()
config.read(cfg_file)
result = {}
for section in config.sections():
if section in ['main']:
continue
token = section
node = Node(token)
for key, value in config.items(token):
node[key] = value
result[token] = node
return result | Returns all nodes in a list of dicts format | Below is the instruction that describes the task:
### Input:
Returns all nodes in a list of dicts format
### Response:
def get_nodes():
""" Returns all nodes in a list of dicts format
"""
cfg_file = "/etc/nago/nago.ini"
config = ConfigParser.ConfigParser()
config.read(cfg_file)
result = {}
for section in config.sections():
if section in ['main']:
continue
token = section
node = Node(token)
for key, value in config.items(token):
node[key] = value
result[token] = node
return result |
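A sketch of how `get_nodes` is driven by /etc/nago/nago.ini (every section other than [main] is treated as a node token; the keys shown are illustrative, and dict-style item access on Node is assumed from the setter used above):
# /etc/nago/nago.ini
# [main]
# ...global settings, skipped...
# [node-token-abc123]
# host_name = monitor01.example.com
nodes = get_nodes()
for token, node in nodes.items():
    print(token, node['host_name'])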
def get_imports(self, module, return_fqn=False):
"""return set of imported modules that are in self
:param module: PyModule
:return: (set - str) of path names
"""
# print('####', module.fqn)
# print(self.by_name.keys(), '\n\n')
imports = set()
raw_imports = ast_imports(module.path)
for import_entry in raw_imports:
# join 'from' and 'import' part of import statement
full = ".".join(s for s in import_entry[:2] if s)
import_level = import_entry[3]
if import_level:
# intra package imports
intra = '.'.join(module.fqn[:-import_level] + [full])
imported = self._get_imported_module(intra)
else:
imported = self._get_imported_module(full)
if imported:
if return_fqn:
imports.add('.'.join(imported.fqn))
else:
imports.add(imported.path)
return imports | return set of imported modules that are in self
:param module: PyModule
:return: (set - str) of path names | Below is the instruction that describes the task:
### Input:
return set of imported modules that are in self
:param module: PyModule
:return: (set - str) of path names
### Response:
def get_imports(self, module, return_fqn=False):
"""return set of imported modules that are in self
:param module: PyModule
:return: (set - str) of path names
"""
# print('####', module.fqn)
# print(self.by_name.keys(), '\n\n')
imports = set()
raw_imports = ast_imports(module.path)
for import_entry in raw_imports:
# join 'from' and 'import' part of import statement
full = ".".join(s for s in import_entry[:2] if s)
import_level = import_entry[3]
if import_level:
# intra package imports
intra = '.'.join(module.fqn[:-import_level] + [full])
imported = self._get_imported_module(intra)
else:
imported = self._get_imported_module(full)
if imported:
if return_fqn:
imports.add('.'.join(imported.fqn))
else:
imports.add(imported.path)
return imports |
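A hedged call sketch for `get_imports` (assumes `dep_graph` is an instance of the surrounding class with its modules registered, and `module` is one of its PyModule objects):
paths = dep_graph.get_imports(module)                    # set of file paths
names = dep_graph.get_imports(module, return_fqn=True)   # set of dotted module names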
def lru_cache(maxsize=128, typed=False):
"""
Least-recently-used cache decorator, which is a backport of the same
function in Python >= 3.2.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
# Early detection of an erroneous call to @lru_cache without any arguments
# resulting in the inner function being passed to maxsize instead of an
# integer or None.
if maxsize is not None and not isinstance(maxsize, int):
raise TypeError('Expected maxsize to be an integer or None')
# Constants shared by all lru cache instances:
sentinel = object() # unique object used to signal cache misses
make_key = _make_key # build a key from the function arguments
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
def decorating_function(user_function):
cache = {}
hits = [0]
misses = [0]
full = [False]
cache_get = cache.get # bound method to lookup a key or return None
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
r = [root]
if maxsize == 0:
def wrapper(*args, **kwds):
# No caching -- just a statistics update after a successful call
result = user_function(*args, **kwds)
misses[0] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# Simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, sentinel)
if result is not sentinel:
hits[0] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
misses[0] += 1
return result
else:
def wrapper(*args, **kwds):
# Size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed)
with lock:
link = cache_get(key)
if link is not None:
# Move the link to the front of the circular queue
link_prev, link_next, _key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = r[0][PREV]
last[NEXT] = r[0][PREV] = link
link[PREV] = last
link[NEXT] = r[0]
hits[0] += 1
return result
result = user_function(*args, **kwds)
with lock:
if key in cache:
# Getting here means that this same key was added to the
# cache while the lock was released. Since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif full[0]:
# Use the old root to store the new key and result.
oldroot = r[0]
oldroot[KEY] = key
oldroot[RESULT] = result
# Empty the oldest link and make it the new root.
# Keep a reference to the old key and old result to
# prevent their ref counts from going to zero during the
# update. That will prevent potentially arbitrary object
# clean-up code (i.e. __del__) from running while we're
# still adjusting the links.
r[0] = oldroot[NEXT]
oldkey = r[0][KEY]
oldresult = r[0][RESULT]
r[0][KEY] = r[0][RESULT] = None
# Now update the cache dictionary.
del cache[oldkey]
# Save the potentially reentrant cache[key] assignment
# for last, after the root and links have been put in
# a consistent state.
cache[key] = oldroot
else:
# Put result in a new link at the front of the queue.
last = r[0][PREV]
link = [last, r[0], key, result]
last[NEXT] = r[0][PREV] = cache[key] = link
full[0] = (len(cache) >= maxsize)
misses[0] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(hits[0], misses[0], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root[:] = [root, root, None, None]
r[0] = root
hits[0] = 0
misses[0] = 0
full[0] = False
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function | Least-recently-used cache decorator, which is a backport of the same
function in Python >= 3.2.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used | Below is the instruction that describes the task:
### Input:
Least-recently-used cache decorator, which is a backport of the same
function in Python >= 3.2.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
### Response:
def lru_cache(maxsize=128, typed=False):
"""
Least-recently-used cache decorator, which is a backport of the same
function in Python >= 3.2.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
# Early detection of an erroneous call to @lru_cache without any arguments
# resulting in the inner function being passed to maxsize instead of an
# integer or None.
if maxsize is not None and not isinstance(maxsize, int):
raise TypeError('Expected maxsize to be an integer or None')
# Constants shared by all lru cache instances:
sentinel = object() # unique object used to signal cache misses
make_key = _make_key # build a key from the function arguments
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
def decorating_function(user_function):
cache = {}
hits = [0]
misses = [0]
full = [False]
cache_get = cache.get # bound method to lookup a key or return None
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
r = [root]
if maxsize == 0:
def wrapper(*args, **kwds):
# No caching -- just a statistics update after a successful call
result = user_function(*args, **kwds)
misses[0] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# Simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, sentinel)
if result is not sentinel:
hits[0] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
misses[0] += 1
return result
else:
def wrapper(*args, **kwds):
# Size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed)
with lock:
link = cache_get(key)
if link is not None:
# Move the link to the front of the circular queue
link_prev, link_next, _key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = r[0][PREV]
last[NEXT] = r[0][PREV] = link
link[PREV] = last
link[NEXT] = r[0]
hits[0] += 1
return result
result = user_function(*args, **kwds)
with lock:
if key in cache:
# Getting here means that this same key was added to the
# cache while the lock was released. Since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif full[0]:
# Use the old root to store the new key and result.
oldroot = r[0]
oldroot[KEY] = key
oldroot[RESULT] = result
# Empty the oldest link and make it the new root.
# Keep a reference to the old key and old result to
# prevent their ref counts from going to zero during the
# update. That will prevent potentially arbitrary object
# clean-up code (i.e. __del__) from running while we're
# still adjusting the links.
r[0] = oldroot[NEXT]
oldkey = r[0][KEY]
oldresult = r[0][RESULT]
r[0][KEY] = r[0][RESULT] = None
# Now update the cache dictionary.
del cache[oldkey]
# Save the potentially reentrant cache[key] assignment
# for last, after the root and links have been put in
# a consistent state.
cache[key] = oldroot
else:
# Put result in a new link at the front of the queue.
last = r[0][PREV]
link = [last, r[0], key, result]
last[NEXT] = r[0][PREV] = cache[key] = link
full[0] = (len(cache) >= maxsize)
misses[0] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(hits[0], misses[0], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root[:] = [root, root, None, None]
r[0] = root
hits[0] = 0
misses[0] = 0
full[0] = False
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function |
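A quick usage sketch of the backported decorator defined above (assumes the module-level helpers it references, `_make_key`, `_CacheInfo` and `RLock`, are present as in the original module):
@lru_cache(maxsize=64)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)
print(fib(30))           # 832040, computed with memoisation
print(fib.cache_info())  # hits=28, misses=31, maxsize=64, currsize=31
fib.cache_clear()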
def fix_flags(self, flags):
"""Fixes standard TensorBoard CLI flags to parser."""
FlagsError = base_plugin.FlagsError
if flags.version_tb:
pass
elif flags.inspect:
if flags.logdir and flags.event_file:
raise FlagsError(
'Must specify either --logdir or --event_file, but not both.')
if not (flags.logdir or flags.event_file):
raise FlagsError('Must specify either --logdir or --event_file.')
elif not flags.db and not flags.logdir:
raise FlagsError('A logdir or db must be specified. '
'For example `tensorboard --logdir mylogdir` '
'or `tensorboard --db sqlite:~/.tensorboard.db`. '
'Run `tensorboard --helpfull` for details and examples.')
if flags.path_prefix.endswith('/'):
flags.path_prefix = flags.path_prefix[:-1] | Fixes standard TensorBoard CLI flags to parser. | Below is the instruction that describes the task:
### Input:
Fixes standard TensorBoard CLI flags to parser.
### Response:
def fix_flags(self, flags):
"""Fixes standard TensorBoard CLI flags to parser."""
FlagsError = base_plugin.FlagsError
if flags.version_tb:
pass
elif flags.inspect:
if flags.logdir and flags.event_file:
raise FlagsError(
'Must specify either --logdir or --event_file, but not both.')
if not (flags.logdir or flags.event_file):
raise FlagsError('Must specify either --logdir or --event_file.')
elif not flags.db and not flags.logdir:
raise FlagsError('A logdir or db must be specified. '
'For example `tensorboard --logdir mylogdir` '
'or `tensorboard --db sqlite:~/.tensorboard.db`. '
'Run `tensorboard --helpfull` for details and examples.')
if flags.path_prefix.endswith('/'):
flags.path_prefix = flags.path_prefix[:-1] |
def generate(inputfilename, outputfilename='', dump=0, **flags):
"""Generate a grammar, given an input filename (X.g)
and an output filename (defaulting to X.py)."""
if not outputfilename:
if inputfilename[-2:] == '.g':
outputfilename = inputfilename[:-2] + '.py'
else:
raise Exception("Missing output filename")
print 'Input Grammar:', inputfilename
print 'Output File:', outputfilename
DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers
preparser, postparser = None, None # Code before and after the parser desc
# Read the entire file
s = open(inputfilename, 'r').read()
# See if there's a separation between the pre-parser and parser
f = find(s, DIVIDER)
if f >= 0:
preparser, s = s[:f] + '\n\n', s[f + len(DIVIDER):]
# See if there's a separation between the parser and post-parser
f = find(s, DIVIDER)
if f >= 0:
s, postparser = s[:f], '\n\n' + s[f + len(DIVIDER):]
# Create the parser and scanner
p = ParserDescription(ParserDescriptionScanner(s))
if not p:
return
# Now parse the file
t = wrap_error_reporter(p, 'Parser')
if not t:
return # Error
if preparser is not None:
t.preparser = preparser
if postparser is not None:
t.postparser = postparser
# Check the options
for f in t.options.keys():
for opt, _, _ in yapps_options:
if f == opt:
break
else:
print 'Warning: unrecognized option', f
# Add command line options to the set
for f in flags.keys():
t.options[f] = flags[f]
# Generate the output
if dump:
t.dump_information()
else:
t.output = open(outputfilename, 'w')
t.generate_output() | Generate a grammar, given an input filename (X.g)
and an output filename (defaulting to X.py). | Below is the instruction that describes the task:
### Input:
Generate a grammar, given an input filename (X.g)
and an output filename (defaulting to X.py).
### Response:
def generate(inputfilename, outputfilename='', dump=0, **flags):
"""Generate a grammar, given an input filename (X.g)
and an output filename (defaulting to X.py)."""
if not outputfilename:
if inputfilename[-2:] == '.g':
outputfilename = inputfilename[:-2] + '.py'
else:
raise Exception("Missing output filename")
print 'Input Grammar:', inputfilename
print 'Output File:', outputfilename
DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers
preparser, postparser = None, None # Code before and after the parser desc
# Read the entire file
s = open(inputfilename, 'r').read()
# See if there's a separation between the pre-parser and parser
f = find(s, DIVIDER)
if f >= 0:
preparser, s = s[:f] + '\n\n', s[f + len(DIVIDER):]
# See if there's a separation between the parser and post-parser
f = find(s, DIVIDER)
if f >= 0:
s, postparser = s[:f], '\n\n' + s[f + len(DIVIDER):]
# Create the parser and scanner
p = ParserDescription(ParserDescriptionScanner(s))
if not p:
return
# Now parse the file
t = wrap_error_reporter(p, 'Parser')
if not t:
return # Error
if preparser is not None:
t.preparser = preparser
if postparser is not None:
t.postparser = postparser
# Check the options
for f in t.options.keys():
for opt, _, _ in yapps_options:
if f == opt:
break
else:
print 'Warning: unrecognized option', f
# Add command line options to the set
for f in flags.keys():
t.options[f] = flags[f]
# Generate the output
if dump:
t.dump_information()
else:
t.output = open(outputfilename, 'w')
t.generate_output() |
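A minimal invocation sketch for `generate` (the code above is Python 2, as the print statements show; 'calc.g' is an illustrative grammar file):
generate('calc.g')                        # writes calc.py next to the grammar
generate('calc.g', 'parser.py', dump=1)   # parse the grammar and dump its information instead of writing output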
def bond_initialize_canonical_averages(
canonical_statistics, **kwargs
):
"""
Initialize the canonical averages from a single-run cluster statistics
Parameters
----------
canonical_statistics : 1-D structured ndarray
Typically contains the canonical statistics for a range of values
of the occupation probability ``p``.
The dtype is the result of `canonical_statistics_dtype`.
Returns
-------
ret : structured ndarray
The dtype is the result of `canonical_averages_dtype`.
ret['number_of_runs'] : 1-D ndarray of int
Equals ``1`` (initial run).
ret['percolation_probability_mean'] : 1-D array of float
Equals ``canonical_statistics['percolation_probability']``
(if ``percolation_probability`` is present)
ret['percolation_probability_m2'] : 1-D array of float
Each entry is ``0.0``
ret['max_cluster_size_mean'] : 1-D array of float
Equals ``canonical_statistics['max_cluster_size']``
ret['max_cluster_size_m2'] : 1-D array of float
Each entry is ``0.0``
ret['moments_mean'] : 2-D array of float
Equals ``canonical_statistics['moments']``
ret['moments_m2'] : 2-D array of float
Each entry is ``0.0``
See Also
--------
canonical_averages_dtype
bond_canonical_statistics
"""
# initialize return array
spanning_cluster = (
'percolation_probability' in canonical_statistics.dtype.names
)
# array should have the same size as the input array
ret = np.empty_like(
canonical_statistics,
dtype=canonical_averages_dtype(spanning_cluster=spanning_cluster),
)
ret['number_of_runs'] = 1
# initialize percolation probability mean and sum of squared differences
if spanning_cluster:
ret['percolation_probability_mean'] = (
canonical_statistics['percolation_probability']
)
ret['percolation_probability_m2'] = 0.0
# initialize maximum cluster size mean and sum of squared differences
ret['max_cluster_size_mean'] = (
canonical_statistics['max_cluster_size']
)
ret['max_cluster_size_m2'] = 0.0
# initialize moments means and sums of squared differences
ret['moments_mean'] = canonical_statistics['moments']
ret['moments_m2'] = 0.0
return ret | Initialize the canonical averages from a single-run cluster statistics
Parameters
----------
canonical_statistics : 1-D structured ndarray
Typically contains the canonical statistics for a range of values
of the occupation probability ``p``.
The dtype is the result of `canonical_statistics_dtype`.
Returns
-------
ret : structured ndarray
The dtype is the result of `canonical_averages_dtype`.
ret['number_of_runs'] : 1-D ndarray of int
Equals ``1`` (initial run).
ret['percolation_probability_mean'] : 1-D array of float
Equals ``canonical_statistics['percolation_probability']``
(if ``percolation_probability`` is present)
ret['percolation_probability_m2'] : 1-D array of float
Each entry is ``0.0``
ret['max_cluster_size_mean'] : 1-D array of float
Equals ``canonical_statistics['max_cluster_size']``
ret['max_cluster_size_m2'] : 1-D array of float
Each entry is ``0.0``
ret['moments_mean'] : 2-D array of float
Equals ``canonical_statistics['moments']``
ret['moments_m2'] : 2-D array of float
Each entry is ``0.0``
See Also
--------
canonical_averages_dtype
bond_canonical_statistics | Below is the instruction that describes the task:
### Input:
Initialize the canonical averages from a single-run cluster statistics
Parameters
----------
canonical_statistics : 1-D structured ndarray
Typically contains the canonical statistics for a range of values
of the occupation probability ``p``.
The dtype is the result of `canonical_statistics_dtype`.
Returns
-------
ret : structured ndarray
The dtype is the result of `canonical_averages_dtype`.
ret['number_of_runs'] : 1-D ndarray of int
Equals ``1`` (initial run).
ret['percolation_probability_mean'] : 1-D array of float
Equals ``canonical_statistics['percolation_probability']``
(if ``percolation_probability`` is present)
ret['percolation_probability_m2'] : 1-D array of float
Each entry is ``0.0``
ret['max_cluster_size_mean'] : 1-D array of float
Equals ``canonical_statistics['max_cluster_size']``
ret['max_cluster_size_m2'] : 1-D array of float
Each entry is ``0.0``
ret['moments_mean'] : 2-D array of float
Equals ``canonical_statistics['moments']``
ret['moments_m2'] : 2-D array of float
Each entry is ``0.0``
See Also
--------
canonical_averages_dtype
bond_canonical_statistics
### Response:
def bond_initialize_canonical_averages(
canonical_statistics, **kwargs
):
"""
Initialize the canonical averages from a single-run cluster statistics
Parameters
----------
canonical_statistics : 1-D structured ndarray
Typically contains the canonical statistics for a range of values
of the occupation probability ``p``.
The dtype is the result of `canonical_statistics_dtype`.
Returns
-------
ret : structured ndarray
The dtype is the result of `canonical_averages_dtype`.
ret['number_of_runs'] : 1-D ndarray of int
Equals ``1`` (initial run).
ret['percolation_probability_mean'] : 1-D array of float
Equals ``canonical_statistics['percolation_probability']``
(if ``percolation_probability`` is present)
ret['percolation_probability_m2'] : 1-D array of float
Each entry is ``0.0``
ret['max_cluster_size_mean'] : 1-D array of float
Equals ``canonical_statistics['max_cluster_size']``
ret['max_cluster_size_m2'] : 1-D array of float
Each entry is ``0.0``
ret['moments_mean'] : 2-D array of float
Equals ``canonical_statistics['moments']``
ret['moments_m2'] : 2-D array of float
Each entry is ``0.0``
See Also
--------
canonical_averages_dtype
bond_canonical_statistics
"""
# initialize return array
spanning_cluster = (
'percolation_probability' in canonical_statistics.dtype.names
)
# array should have the same size as the input array
ret = np.empty_like(
canonical_statistics,
dtype=canonical_averages_dtype(spanning_cluster=spanning_cluster),
)
ret['number_of_runs'] = 1
# initialize percolation probability mean and sum of squared differences
if spanning_cluster:
ret['percolation_probability_mean'] = (
canonical_statistics['percolation_probability']
)
ret['percolation_probability_m2'] = 0.0
# initialize maximum cluster size mean and sum of squared differences
ret['max_cluster_size_mean'] = (
canonical_statistics['max_cluster_size']
)
ret['max_cluster_size_m2'] = 0.0
# initialize moments means and sums of squared differences
ret['moments_mean'] = canonical_statistics['moments']
ret['moments_m2'] = 0.0
return ret |
def get_grid(self):
"""
Standardize the layout of the table into grids
"""
mentions, lines = _split_text_n_lines(self.elems)
# Sort mentions in reading order where y values are snapped to half
# height-sized grid
mentions.sort(key=lambda m: (m.yc_grid, m.xc))
grid = Grid(mentions, lines, self)
return grid | Standardize the layout of the table into grids | Below is the instruction that describes the task:
### Input:
Standardize the layout of the table into grids
### Response:
def get_grid(self):
"""
Standardize the layout of the table into grids
"""
mentions, lines = _split_text_n_lines(self.elems)
# Sort mentions in reading order where y values are snapped to half
# height-sized grid
mentions.sort(key=lambda m: (m.yc_grid, m.xc))
grid = Grid(mentions, lines, self)
return grid |
def _identify_dict(core):
"""Specification for a dictionary."""
if not core:
return {}, 1, (), int
core = core.copy()
key = sorted(core.keys(), key=chaospy.poly.base.sort_key)[0]
shape = numpy.array(core[key]).shape
dtype = numpy.array(core[key]).dtype
dim = len(key)
return core, dim, shape, dtype | Specification for a dictionary. | Below is the instruction that describes the task:
### Input:
Specification for a dictionary.
### Response:
def _identify_dict(core):
"""Specification for a dictionary."""
if not core:
return {}, 1, (), int
core = core.copy()
key = sorted(core.keys(), key=chaospy.poly.base.sort_key)[0]
shape = numpy.array(core[key]).shape
dtype = numpy.array(core[key]).dtype
dim = len(key)
return core, dim, shape, dtype |
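An illustrative call of `_identify_dict` (the non-empty case needs `chaospy` importable, since key sorting goes through `chaospy.poly.base.sort_key`; the key/value pair is arbitrary):
print(_identify_dict({}))   # -> ({}, 1, (), int)
core, dim, shape, dtype = _identify_dict({(1, 0): [1.0, 2.0, 3.0]})
# dim == 2 (length of the exponent key), shape == (3,), dtype == float64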
def load(self, record_key, secret_key=''):
'''
a method to retrieve byte data of appdata record
:param record_key: string with name of record
:param secret_key: [optional] string used to decrypt data
:return: byte data for record body
'''
title = '%s.load' % self.__class__.__name__
# validate inputs
input_fields = {
'record_key': record_key,
'secret_key': secret_key
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct file path
file_path = '/%s' % record_key
# request file data
try:
metadata, response = self.dropbox.files_download(file_path)
except Exception as err:
if str(err).find("LookupError('not_found'") > -1:
raise Exception('%s(record_key=%s) does not exist.' % (title, record_key))
else:
raise DropboxConnectionError(title)
record_data = response.content
# decrypt (if necessary)
if secret_key:
from labpack.encryption import cryptolab
record_data = cryptolab.decrypt(record_data, secret_key)
return record_data | a method to retrieve byte data of appdata record
:param record_key: string with name of record
:param secret_key: [optional] string used to decrypt data
:return: byte data for record body | Below is the instruction that describes the task:
### Input:
a method to retrieve byte data of appdata record
:param record_key: string with name of record
:param secret_key: [optional] string used to decrypt data
:return: byte data for record body
### Response:
def load(self, record_key, secret_key=''):
'''
a method to retrieve byte data of appdata record
:param record_key: string with name of record
:param secret_key: [optional] string used to decrypt data
:return: byte data for record body
'''
title = '%s.load' % self.__class__.__name__
# validate inputs
input_fields = {
'record_key': record_key,
'secret_key': secret_key
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct file path
file_path = '/%s' % record_key
# request file data
try:
metadata, response = self.dropbox.files_download(file_path)
except Exception as err:
if str(err).find("LookupError('not_found'") > -1:
raise Exception('%s(record_key=%s) does not exist.' % (title, record_key))
else:
raise DropboxConnectionError(title)
record_data = response.content
# decrypt (if necessary)
if secret_key:
from labpack.encryption import cryptolab
record_data = cryptolab.decrypt(record_data, secret_key)
return record_data |
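A hedged retrieval sketch for `load` (assumes `client` is an authorised instance of the labpack Dropbox storage class this method belongs to and that the records were saved earlier; the keys and passphrase are illustrative):
raw_bytes = client.load('settings/config.json')
secret_bytes = client.load('settings/secrets.json', secret_key='my-passphrase')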
def broken_seqs(ol,break_points):
'''
ol = initRange(0,20,1)
ol
break_points = [1,6,14,9]
secs = broken_seqs(ol,break_points)
forEach(secs,print)
'''
bps = list(break_points)
length = ol.__len__()
rgs = rangize(bps,length)
rslt = []
for i in range(0,rgs.__len__()):
si,ei = rgs[i]
sec = ol[si:ei]
rslt.append(sec)
return(rslt) | ol = initRange(0,20,1)
ol
break_points = [1,6,14,9]
secs = broken_seqs(ol,break_points)
forEach(secs,print) | Below is the instruction that describes the task:
### Input:
ol = initRange(0,20,1)
ol
break_points = [1,6,14,9]
secs = broken_seqs(ol,break_points)
forEach(secs,print)
### Response:
def broken_seqs(ol,break_points):
'''
ol = initRange(0,20,1)
ol
break_points = [1,6,14,9]
secs = broken_seqs(ol,break_points)
forEach(secs,print)
'''
bps = list(break_points)
length = ol.__len__()
rgs = rangize(bps,length)
rslt = []
for i in range(0,rgs.__len__()):
si,ei = rgs[i]
sec = ol[si:ei]
rslt.append(sec)
return(rslt) |
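The docstring example restated with plain built-ins (the `rangize` helper, not shown here, is what turns the break points into (start, end) slice pairs):
ol = list(range(0, 20))
secs = broken_seqs(ol, [1, 6, 14, 9])
for sec in secs:
    print(sec)   # each sec is a contiguous slice of ol delimited by consecutive break points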
def MakeType(name, base_classes,
namespace):
"""A compatibility wrapper for the `type` built-in function.
In Python 2 `type` (used as a type constructor) requires the name argument to
be a `bytes` object whereas in Python 3 it is required to be an `unicode`
object. Since class name is human readable text rather than arbitrary stream
of bytes, the Python 3 behaviour is considered to be the sane one.
Once support for Python 2 is dropped all invocations of this call can be
replaced with the `type` built-in.
Args:
name: A name of the type to create.
base_classes: A tuple of base classes that the returned type is supposed to
derive from.
namespace: A dictionary of methods and fields that the returned type is
supposed to contain.
Returns:
A new type with specified parameters.
"""
precondition.AssertType(name, str)
if PY2:
name = name.encode("ascii")
return type(name, base_classes, namespace) | A compatibility wrapper for the `type` built-in function.
In Python 2 `type` (used as a type constructor) requires the name argument to
be a `bytes` object whereas in Python 3 it is required to be an `unicode`
object. Since class name is human readable text rather than arbitrary stream
of bytes, the Python 3 behaviour is considered to be the sane one.
Once support for Python 2 is dropped all invocations of this call can be
replaced with the `type` built-in.
Args:
name: A name of the type to create.
base_classes: A tuple of base classes that the returned type is supposed to
derive from.
namespace: A dictionary of methods and fields that the returned type is
supposed to contain.
Returns:
A new type with specified parameters. | Below is the instruction that describes the task:
### Input:
A compatibility wrapper for the `type` built-in function.
In Python 2 `type` (used as a type constructor) requires the name argument to
be a `bytes` object whereas in Python 3 it is required to be an `unicode`
object. Since class name is human readable text rather than arbitrary stream
of bytes, the Python 3 behaviour is considered to be the sane one.
Once support for Python 2 is dropped all invocations of this call can be
replaced with the `type` built-in.
Args:
name: A name of the type to create.
base_classes: A tuple of base classes that the returned type is supposed to
derive from.
namespace: A dictionary of methods and fields that the returned type is
supposed to contain.
Returns:
A new type with specified parameters.
### Response:
def MakeType(name, base_classes,
namespace):
"""A compatibility wrapper for the `type` built-in function.
In Python 2 `type` (used as a type constructor) requires the name argument to
be a `bytes` object whereas in Python 3 it is required to be an `unicode`
object. Since class name is human readable text rather than arbitrary stream
of bytes, the Python 3 behaviour is considered to be the sane one.
Once support for Python 2 is dropped all invocations of this call can be
replaced with the `type` built-in.
Args:
name: A name of the type to create.
base_classes: A tuple of base classes that the returned type is supposed to
derive from.
namespace: A dictionary of methods and fields that the returned type is
supposed to contain.
Returns:
A new type with specified parameters.
"""
precondition.AssertType(name, str)
if PY2:
name = name.encode("ascii")
return type(name, base_classes, namespace) |
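A small usage sketch for `MakeType` (Python 3 shown, so the plain name literal is already unicode):
Point = MakeType("Point", (object,), {
    "__init__": lambda self, x, y: setattr(self, "x", x) or setattr(self, "y", y),
    "norm": lambda self: (self.x ** 2 + self.y ** 2) ** 0.5,
})
print(Point(3, 4).norm())   # 5.0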
def OS_filter(x,h,N,mode=0):
"""
Overlap and save transform domain FIR filtering.
This function implements the classical overlap and save method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> n = arange(0,100)
>>> x = cos(2*pi*0.05*n)
    >>> h = ones(10)
    >>> N = 32
>>> y = OS_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = OS_filter(x,h,N,1)
"""
P = len(h)
# zero pad start of x so first frame can recover first true samples of x
x = np.hstack((np.zeros(P-1),x))
L = N - P + 1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad end of x to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(int(Nframe*N))
# create an instrumentation matrix to observe the overlap and save behavior
y_mat = np.zeros((Nframe,int(Nframe*N)))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:k*L+N]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk)) # imag part should be zero
y[k*L+P-1:k*L+N] = yk[P-1:]
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[P-1:Nx], y_mat[:,P-1:Nx]
else:
return y[P-1:Nx] | Overlap and save transform domain FIR filtering.
This function implements the classical overlap and save method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> n = arange(0,100)
>>> x = cos(2*pi*0.05*n)
>>> h = ones(10)
>>> N = 32
>>> y = OS_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = OS_filter(x,h,N,1) | Below is the the instruction that describes the task:
### Input:
Overlap and save transform domain FIR filtering.
This function implements the classical overlap and save method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> n = arange(0,100)
>>> x = cos(2*pi*0.05*n)
>>> b = ones(10)
>>> y = OS_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = OS_filter(x,h,N,1)
### Response:
def OS_filter(x,h,N,mode=0):
"""
Overlap and save transform domain FIR filtering.
This function implements the classical overlap and save method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> n = arange(0,100)
>>> x = cos(2*pi*0.05*n)
    >>> h = ones(10)
    >>> N = 32
>>> y = OS_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = OS_filter(x,h,N,1)
"""
P = len(h)
# zero pad start of x so first frame can recover first true samples of x
x = np.hstack((np.zeros(P-1),x))
L = N - P + 1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad end of x to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(int(Nframe*N))
# create an instrumentation matrix to observe the overlap and save behavior
y_mat = np.zeros((Nframe,int(Nframe*N)))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:k*L+N]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk)) # imag part should be zero
y[k*L+P-1:k*L+N] = yk[P-1:]
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[P-1:Nx], y_mat[:,P-1:Nx]
else:
return y[P-1:Nx] |
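A hedged cross-check of the function above: for a length-P FIR filter, the overlap-and-save output should equal direct FIR filtering over the first len(x) samples. It assumes OS_filter is importable from its module (which already provides np and fft) and that SciPy is available; the filter h and FFT size N are arbitrary valid choices.

import numpy as np
from scipy import signal

n = np.arange(0, 100)
x = np.cos(2 * np.pi * 0.05 * n)
h = np.ones(10) / 10        # simple moving-average FIR filter, P = 10
N = 32                      # FFT size, any power of two larger than P works

y_os = OS_filter(x, h, N)           # overlap-and-save result
y_ref = signal.lfilter(h, 1, x)     # direct FIR filtering for reference
print(np.allclose(y_os, y_ref))     # expected: True, up to FFT round-off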
def feeling_lucky(cls, obj):
"""Tries to convert given object to an UTC timestamp is ms, based
on its type.
"""
if isinstance(obj, six.string_types):
return cls.from_str(obj)
elif isinstance(obj, six.integer_types) and obj <= MAX_POSIX_TIMESTAMP:
return cls.from_posix_timestamp(obj)
elif isinstance(obj, datetime):
return cls.from_datetime(obj)
else:
raise ValueError(
u"Don't know how to get timestamp from '{}'".format(obj)
        ) | Tries to convert a given object to a UTC timestamp in ms, based
on its type. | Below is the the instruction that describes the task:
### Input:
Tries to convert a given object to a UTC timestamp in ms, based
on its type.
### Response:
def feeling_lucky(cls, obj):
"""Tries to convert given object to an UTC timestamp is ms, based
on its type.
"""
if isinstance(obj, six.string_types):
return cls.from_str(obj)
elif isinstance(obj, six.integer_types) and obj <= MAX_POSIX_TIMESTAMP:
return cls.from_posix_timestamp(obj)
elif isinstance(obj, datetime):
return cls.from_datetime(obj)
else:
raise ValueError(
u"Don't know how to get timestamp from '{}'".format(obj)
) |
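An illustrative call pattern for the dispatcher above. The class name Timestamp is an assumption (any class exposing feeling_lucky plus the from_str/from_posix_timestamp/from_datetime constructors would do), and the sketch assumes MAX_POSIX_TIMESTAMP admits present-day epoch seconds.

from datetime import datetime

Timestamp.feeling_lucky("2020-01-02T03:04:05Z")         # routed to cls.from_str
Timestamp.feeling_lucky(1577934245)                     # routed to cls.from_posix_timestamp
Timestamp.feeling_lucky(datetime(2020, 1, 2, 3, 4, 5))  # routed to cls.from_datetime

try:
    Timestamp.feeling_lucky(3.14)                       # unsupported type
except ValueError as exc:
    print(exc)    # "Don't know how to get timestamp from '3.14'"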
def make_argument_subquery(arg):
"""
Decide when a Join argument needs to be wrapped in a subquery
"""
return Subquery.create(arg) if isinstance(arg, (GroupBy, Projection)) or arg.restriction else arg | Decide when a Join argument needs to be wrapped in a subquery | Below is the the instruction that describes the task:
### Input:
Decide when a Join argument needs to be wrapped in a subquery
### Response:
def make_argument_subquery(arg):
"""
Decide when a Join argument needs to be wrapped in a subquery
"""
return Subquery.create(arg) if isinstance(arg, (GroupBy, Projection)) or arg.restriction else arg |
def highlight_cell_surroundings(self, target_y, target_x):
"""
highlights the cells around a target to make it simpler
to see on a grid. Currently assumes the target is within
the boundary by 1 on all sides
"""
#print('SELF_WORLD', self.world)
#print('target_y, target_x, self.world.grd.grid_height, self.world.grd.grid_width ', target_y, target_x, self.#world.grd.grid_height, self.world.grd.grid_width )
#exit(0)
if target_y < 1:
print("target too close to top")
if target_y > self.world.grd.grid_height - 1:
print("target too close to bottom")
if target_x < 1:
print("target too close to left")
        if target_x > self.world.grd.grid_width - 1:
print("target too close to right")
#valid_cells = ['\\', '-', '|', '/']
self.world.grd.set_tile(target_y - 1, target_x - 1, '\\')
self.world.grd.set_tile(target_y - 0, target_x - 1, '-')
self.world.grd.set_tile(target_y + 1, target_x - 1, '/')
self.world.grd.set_tile(target_y - 1, target_x - 0, '|')
self.world.grd.set_tile(target_y + 1, target_x - 0, '|')
self.world.grd.set_tile(target_y - 1, target_x + 1, '/')
self.world.grd.set_tile(target_y - 0, target_x + 1, '-')
self.world.grd.set_tile(target_y + 1, target_x + 1, '\\') | highlights the cells around a target to make it simpler
to see on a grid. Currently assumes the target is within
the boundary by 1 on all sides | Below is the the instruction that describes the task:
### Input:
highlights the cells around a target to make it simpler
to see on a grid. Currently assumes the target is within
the boundary by 1 on all sides
### Response:
def highlight_cell_surroundings(self, target_y, target_x):
"""
highlights the cells around a target to make it simpler
to see on a grid. Currently assumes the target is within
the boundary by 1 on all sides
"""
#print('SELF_WORLD', self.world)
#print('target_y, target_x, self.world.grd.grid_height, self.world.grd.grid_width ', target_y, target_x, self.#world.grd.grid_height, self.world.grd.grid_width )
#exit(0)
if target_y < 1:
print("target too close to top")
if target_y > self.world.grd.grid_height - 1:
print("target too close to bottom")
if target_x < 1:
print("target too close to left")
        if target_x > self.world.grd.grid_width - 1:
print("target too close to right")
#valid_cells = ['\\', '-', '|', '/']
self.world.grd.set_tile(target_y - 1, target_x - 1, '\\')
self.world.grd.set_tile(target_y - 0, target_x - 1, '-')
self.world.grd.set_tile(target_y + 1, target_x - 1, '/')
self.world.grd.set_tile(target_y - 1, target_x - 0, '|')
self.world.grd.set_tile(target_y + 1, target_x - 0, '|')
self.world.grd.set_tile(target_y - 1, target_x + 1, '/')
self.world.grd.set_tile(target_y - 0, target_x + 1, '-')
self.world.grd.set_tile(target_y + 1, target_x + 1, '\\') |
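A standalone sketch of the surround pattern the method draws, on a plain list-of-lists grid; the tiny set_tile helper below is hypothetical and only stands in for self.world.grd.set_tile.

grid = [["." for _ in range(7)] for _ in range(5)]

def set_tile(y, x, char):
    grid[y][x] = char

ty, tx = 2, 3                      # target cell, at least one cell from every edge
set_tile(ty - 1, tx - 1, "\\")
set_tile(ty - 0, tx - 1, "-")
set_tile(ty + 1, tx - 1, "/")
set_tile(ty - 1, tx - 0, "|")
set_tile(ty + 1, tx - 0, "|")
set_tile(ty - 1, tx + 1, "/")
set_tile(ty - 0, tx + 1, "-")
set_tile(ty + 1, tx + 1, "\\")
grid[ty][tx] = "T"                 # mark the target itself for the printout

print("\n".join("".join(row) for row in grid))
# .......
# ..\|/..
# ..-T-..
# ../|\..
# .......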
def get_objective_hierarchy_design_session(self, proxy):
"""Gets the session for designing objective hierarchies.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveHierarchyDesignSession) - an
``ObjectiveHierarchyDesignSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_objective_hierarchy_design()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy_design()`` is ``true``.*
"""
if not self.supports_objective_hierarchy_design():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ObjectiveHierarchyDesignSession(proxy=proxy, runtime=self._runtime) | Gets the session for designing objective hierarchies.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveHierarchyDesignSession) - an
``ObjectiveHierarchyDesignSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_objective_hierarchy_design()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy_design()`` is ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets the session for designing objective hierarchies.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveHierarchyDesignSession) - an
``ObjectiveHierarchyDesignSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_objective_hierarchy_design()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy_design()`` is ``true``.*
### Response:
def get_objective_hierarchy_design_session(self, proxy):
"""Gets the session for designing objective hierarchies.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.learning.ObjectiveHierarchyDesignSession) - an
``ObjectiveHierarchyDesignSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_objective_hierarchy_design()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_objective_hierarchy_design()`` is ``true``.*
"""
if not self.supports_objective_hierarchy_design():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ObjectiveHierarchyDesignSession(proxy=proxy, runtime=self._runtime) |
def get_error(exc):
"""
Return the appropriate HTTP status code according to the Exception/Error.
"""
if isinstance(exc, HTTPError):
# Returning the HTTP Error code coming from requests module
return exc.response.status_code, text(exc.response.content)
if isinstance(exc, Timeout):
# A timeout is a 408, and it's not a HTTPError (why? dunno).
return 408, exc
if isinstance(exc, Http404):
# 404 is 404
return 404, exc
if isinstance(exc, PermissionDenied):
# Permission denied is 403
return 403, exc
if isinstance(exc, SuspiciousOperation):
# Shouldn't happen, but you never know
return 400, exc
# The default error code is 500
return 500, exc | Return the appropriate HTTP status code according to the Exception/Error. | Below is the the instruction that describes the task:
### Input:
Return the appropriate HTTP status code according to the Exception/Error.
### Response:
def get_error(exc):
"""
Return the appropriate HTTP status code according to the Exception/Error.
"""
if isinstance(exc, HTTPError):
# Returning the HTTP Error code coming from requests module
return exc.response.status_code, text(exc.response.content)
if isinstance(exc, Timeout):
# A timeout is a 408, and it's not a HTTPError (why? dunno).
return 408, exc
if isinstance(exc, Http404):
# 404 is 404
return 404, exc
if isinstance(exc, PermissionDenied):
# Permission denied is 403
return 403, exc
if isinstance(exc, SuspiciousOperation):
# Shouldn't happen, but you never know
return 400, exc
# The default error code is 500
return 500, exc |
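Illustrative calls for the mapping above, assuming get_error is importable alongside the Django and requests exception classes it inspects; the HTTPError branch is omitted because it needs a real response object.

from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.http import Http404
from requests.exceptions import Timeout

print(get_error(Timeout("upstream took too long")))   # (408, Timeout(...))
print(get_error(Http404("missing page")))             # (404, Http404(...))
print(get_error(PermissionDenied("not allowed")))     # (403, PermissionDenied(...))
print(get_error(SuspiciousOperation("odd request")))  # (400, SuspiciousOperation(...))
print(get_error(RuntimeError("anything else")))       # (500, RuntimeError(...))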
def get_top_segmentations(table, n):
"""
Parameters
----------
table : matrix of probabilities
Each cell (i, j) of `table` gives the probability that i and j are in
the same symbol.
n : int
Number of best segmentations which get returned
"""
stroke_count = list(range(len(table)))
topf = TopFinder(n)
for curr_segmentation in all_segmentations(stroke_count):
curr_seg_score = score_segmentation(curr_segmentation, table)
topf.push(curr_segmentation, curr_seg_score)
for el, score in topf:
yield [normalize_segmentation(el), score] | Parameters
----------
table : matrix of probabilities
Each cell (i, j) of `table` gives the probability that i and j are in
the same symbol.
n : int
Number of best segmentations which get returned | Below is the the instruction that describes the task:
### Input:
Parameters
----------
table : matrix of probabilities
Each cell (i, j) of `table` gives the probability that i and j are in
the same symbol.
n : int
Number of best segmentations which get returned
### Response:
def get_top_segmentations(table, n):
"""
Parameters
----------
table : matrix of probabilities
Each cell (i, j) of `table` gives the probability that i and j are in
the same symbol.
n : int
Number of best segmentations which get returned
"""
stroke_count = list(range(len(table)))
topf = TopFinder(n)
for curr_segmentation in all_segmentations(stroke_count):
curr_seg_score = score_segmentation(curr_segmentation, table)
topf.push(curr_segmentation, curr_seg_score)
for el, score in topf:
yield [normalize_segmentation(el), score] |
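A hypothetical call with a tiny 3-stroke probability table. It assumes the generator above is importable together with the module-level helpers it relies on (all_segmentations, score_segmentation, TopFinder, normalize_segmentation); the exact scores depend on score_segmentation.

# Cell (i, j) holds the probability that strokes i and j belong to the same symbol.
table = [
    [1.0, 0.9, 0.1],
    [0.9, 1.0, 0.2],
    [0.1, 0.2, 1.0],
]

for segmentation, score in get_top_segmentations(table, n=2):
    print(score, segmentation)
# Expected: the two best-scoring groupings, e.g. something like
# [[0, 1], [2]] ranked ahead of [[0], [1], [2]].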
def get_sequence_rule_enablers_by_search(self, sequence_rule_enabler_query, sequence_rule_enabler_search):
"""Pass through to provider SequenceRuleEnablerSearchSession.get_sequence_rule_enablers_by_search"""
# Implemented from azosid template for -
# osid.resource.ResourceSearchSession.get_resources_by_search_template
if not self._can('search'):
raise PermissionDenied()
return self._provider_session.get_sequence_rule_enablers_by_search(sequence_rule_enabler_query, sequence_rule_enabler_search) | Pass through to provider SequenceRuleEnablerSearchSession.get_sequence_rule_enablers_by_search | Below is the the instruction that describes the task:
### Input:
Pass through to provider SequenceRuleEnablerSearchSession.get_sequence_rule_enablers_by_search
### Response:
def get_sequence_rule_enablers_by_search(self, sequence_rule_enabler_query, sequence_rule_enabler_search):
"""Pass through to provider SequenceRuleEnablerSearchSession.get_sequence_rule_enablers_by_search"""
# Implemented from azosid template for -
# osid.resource.ResourceSearchSession.get_resources_by_search_template
if not self._can('search'):
raise PermissionDenied()
return self._provider_session.get_sequence_rule_enablers_by_search(sequence_rule_enabler_query, sequence_rule_enabler_search) |
def cookies(self):
"""
Retrieve the cookies header from all the users who visited.
"""
return (self.get_query()
.select(PageView.ip, PageView.headers['Cookie'])
.where(PageView.headers['Cookie'].is_null(False))
.tuples()) | Retrieve the cookies header from all the users who visited. | Below is the the instruction that describes the task:
### Input:
Retrieve the cookies header from all the users who visited.
### Response:
def cookies(self):
"""
Retrieve the cookies header from all the users who visited.
"""
return (self.get_query()
.select(PageView.ip, PageView.headers['Cookie'])
.where(PageView.headers['Cookie'].is_null(False))
.tuples()) |
def m2i(self, pkt, m):
"""
Try to parse one of the TLS subprotocols (ccs, alert, handshake or
application_data). This is used inside a loop managed by .getfield().
"""
cls = Raw
if pkt.type == 22:
if len(m) >= 1:
msgtype = orb(m[0])
cls = _tls_handshake_cls.get(msgtype, Raw)
elif pkt.type == 20:
cls = TLSChangeCipherSpec
elif pkt.type == 21:
cls = TLSAlert
elif pkt.type == 23:
cls = TLSApplicationData
if cls is Raw:
return Raw(m)
else:
try:
return cls(m, tls_session=pkt.tls_session)
except Exception:
if conf.debug_dissector:
raise
return Raw(m) | Try to parse one of the TLS subprotocols (ccs, alert, handshake or
application_data). This is used inside a loop managed by .getfield(). | Below is the the instruction that describes the task:
### Input:
Try to parse one of the TLS subprotocols (ccs, alert, handshake or
application_data). This is used inside a loop managed by .getfield().
### Response:
def m2i(self, pkt, m):
"""
Try to parse one of the TLS subprotocols (ccs, alert, handshake or
application_data). This is used inside a loop managed by .getfield().
"""
cls = Raw
if pkt.type == 22:
if len(m) >= 1:
msgtype = orb(m[0])
cls = _tls_handshake_cls.get(msgtype, Raw)
elif pkt.type == 20:
cls = TLSChangeCipherSpec
elif pkt.type == 21:
cls = TLSAlert
elif pkt.type == 23:
cls = TLSApplicationData
if cls is Raw:
return Raw(m)
else:
try:
return cls(m, tls_session=pkt.tls_session)
except Exception:
if conf.debug_dissector:
raise
return Raw(m) |
def as_dictlist(self):
""" Returns a dictlist with values
[
{
"row": "row_a",
"col": "col_a",
"value": 1,
}
]
"""
data = []
for row_i, row in enumerate(self.row_index):
for col_i, col in enumerate(self.col_index):
value = self.values_by_row[row_i][col_i]
data.append({
"row": row,
"col": col,
"value": value,
})
return data | Returns a dictlist with values
[
{
"row": "row_a",
"col": "col_a",
"value": 1,
}
] | Below is the the instruction that describes the task:
### Input:
Returns a dictlist with values
[
{
"row": "row_a",
"col": "col_a",
"value": 1,
}
]
### Response:
def as_dictlist(self):
""" Returns a dictlist with values
[
{
"row": "row_a",
"col": "col_a",
"value": 1,
}
]
"""
data = []
for row_i, row in enumerate(self.row_index):
for col_i, col in enumerate(self.col_index):
value = self.values_by_row[row_i][col_i]
data.append({
"row": row,
"col": col,
"value": value,
})
return data |
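A standalone sketch of the same flattening logic, with the three attributes the method reads (row_index, col_index, values_by_row) faked as plain lists; the names mirror the method body above.

row_index = ["row_a", "row_b"]
col_index = ["col_a", "col_b"]
values_by_row = [[1, 2],
                 [3, 4]]

data = []
for row_i, row in enumerate(row_index):
    for col_i, col in enumerate(col_index):
        data.append({"row": row, "col": col, "value": values_by_row[row_i][col_i]})

print(data)
# [{'row': 'row_a', 'col': 'col_a', 'value': 1}, {'row': 'row_a', 'col': 'col_b', 'value': 2},
#  {'row': 'row_b', 'col': 'col_a', 'value': 3}, {'row': 'row_b', 'col': 'col_b', 'value': 4}]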
def sapm_effective_irradiance(self, poa_direct, poa_diffuse,
airmass_absolute, aoi,
reference_irradiance=1000):
"""
Use the :py:func:`sapm_effective_irradiance` function, the input
parameters, and ``self.module_parameters`` to calculate
effective irradiance.
Parameters
----------
poa_direct : numeric
The direct irradiance incident upon the module.
poa_diffuse : numeric
The diffuse irradiance incident on module.
airmass_absolute : numeric
Absolute airmass.
aoi : numeric
Angle of incidence in degrees.
reference_irradiance : numeric, default 1000
Reference irradiance by which to divide the input irradiance.
Returns
-------
effective_irradiance : numeric
The SAPM effective irradiance.
"""
return sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi,
self.module_parameters, reference_irradiance=reference_irradiance) | Use the :py:func:`sapm_effective_irradiance` function, the input
parameters, and ``self.module_parameters`` to calculate
effective irradiance.
Parameters
----------
poa_direct : numeric
The direct irradiance incident upon the module.
poa_diffuse : numeric
The diffuse irradiance incident on module.
airmass_absolute : numeric
Absolute airmass.
aoi : numeric
Angle of incidence in degrees.
reference_irradiance : numeric, default 1000
Reference irradiance by which to divide the input irradiance.
Returns
-------
effective_irradiance : numeric
The SAPM effective irradiance. | Below is the the instruction that describes the task:
### Input:
Use the :py:func:`sapm_effective_irradiance` function, the input
parameters, and ``self.module_parameters`` to calculate
effective irradiance.
Parameters
----------
poa_direct : numeric
The direct irradiance incident upon the module.
poa_diffuse : numeric
The diffuse irradiance incident on module.
airmass_absolute : numeric
Absolute airmass.
aoi : numeric
Angle of incidence in degrees.
reference_irradiance : numeric, default 1000
Reference irradiance by which to divide the input irradiance.
Returns
-------
effective_irradiance : numeric
The SAPM effective irradiance.
### Response:
def sapm_effective_irradiance(self, poa_direct, poa_diffuse,
airmass_absolute, aoi,
reference_irradiance=1000):
"""
Use the :py:func:`sapm_effective_irradiance` function, the input
parameters, and ``self.module_parameters`` to calculate
effective irradiance.
Parameters
----------
poa_direct : numeric
The direct irradiance incident upon the module.
poa_diffuse : numeric
The diffuse irradiance incident on module.
airmass_absolute : numeric
Absolute airmass.
aoi : numeric
Angle of incidence in degrees.
reference_irradiance : numeric, default 1000
Reference irradiance by which to divide the input irradiance.
Returns
-------
effective_irradiance : numeric
The SAPM effective irradiance.
"""
return sapm_effective_irradiance(
poa_direct, poa_diffuse, airmass_absolute, aoi,
self.module_parameters, reference_irradiance=reference_irradiance) |
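A hedged call pattern for the method above. It assumes system is a pvlib PVSystem whose module_parameters already carry the SAPM spectral and incidence-angle coefficients (A0..A4, B0..B5, FD); the irradiance numbers are placeholders.

effective = system.sapm_effective_irradiance(
    poa_direct=800.0,        # W/m^2 of direct plane-of-array irradiance
    poa_diffuse=100.0,       # W/m^2 of diffuse plane-of-array irradiance
    airmass_absolute=1.2,
    aoi=30.0,                # angle of incidence in degrees
)
# Because of the division by reference_irradiance (1000 W/m^2 by default),
# 'effective' is expressed in suns and feeds into the SAPM electrical model.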
def print_portfolio_info(returns, avg_rets, weights):
"""
Print information on expected portfolio performance.
"""
ret = (weights * avg_rets).sum()
std = (weights * returns).sum(1).std()
sharpe = ret / std
print("Optimal weights:\n{}\n".format(weights))
print("Expected return: {}".format(ret))
print("Expected variance: {}".format(std**2))
print("Expected Sharpe: {}".format(sharpe)) | Print information on expected portfolio performance. | Below is the the instruction that describes the task:
### Input:
Print information on expected portfolio performance.
### Response:
def print_portfolio_info(returns, avg_rets, weights):
"""
Print information on expected portfolio performance.
"""
ret = (weights * avg_rets).sum()
std = (weights * returns).sum(1).std()
sharpe = ret / std
print("Optimal weights:\n{}\n".format(weights))
print("Expected return: {}".format(ret))
print("Expected variance: {}".format(std**2))
print("Expected Sharpe: {}".format(sharpe)) |
def fcm_send_message(
registration_id,
title=None,
body=None,
icon=None,
data=None,
sound=None,
badge=None,
low_priority=False,
condition=None,
time_to_live=None,
click_action=None,
collapse_key=None,
delay_while_idle=False,
restricted_package_name=None,
dry_run=False,
color=None,
tag=None,
body_loc_key=None,
body_loc_args=None,
title_loc_key=None,
title_loc_args=None,
content_available=None,
extra_kwargs={},
api_key=None,
json_encoder=None,
**kwargs):
"""
Copied from https://github.com/olucurious/PyFCM/blob/master/pyfcm/fcm.py:
Send push notification to a single device
Args:
registration_id (str): FCM device registration IDs.
body (str): Message string to display in the notification tray
data (dict): Data message payload to send alone or with the notification
message
sound (str): The sound file name to play. Specify "Default" for device
default sound.
Keyword Args:
collapse_key (str, optional): Identifier for a group of messages
that can be collapsed so that only the last message gets sent
when delivery can be resumed. Defaults to ``None``.
delay_while_idle (bool, optional): If ``True`` indicates that the
message should not be sent until the device becomes active.
time_to_live (int, optional): How long (in seconds) the message
should be kept in FCM storage if the device is offline. The
maximum time to live supported is 4 weeks. Defaults to ``None``
which uses the FCM default of 4 weeks.
low_priority (boolean, optional): Whether to send notification with
the low priority flag. Defaults to ``False``.
restricted_package_name (str, optional): Package name of the
application where the registration IDs must match in order to
receive the message. Defaults to ``None``.
dry_run (bool, optional): If ``True`` no message will be sent but
request will be tested.
Returns:
:tuple:`multicast_id(long), success(int), failure(int),
canonical_ids(int), results(list)`:
Response from FCM server.
Raises:
AuthenticationError: If :attr:`api_key` is not set or provided or there
is an error authenticating the sender.
FCMServerError: Internal server error or timeout error on Firebase cloud
messaging server
InvalidDataError: Invalid data provided
InternalPackageError: JSON parsing error, mostly from changes in the
response of FCM, create a new github issue to resolve it.
"""
if api_key is None:
api_key = SETTINGS.get("FCM_SERVER_KEY")
push_service = FCMNotification(api_key=api_key, json_encoder=json_encoder)
result = push_service.notify_single_device(
registration_id=registration_id,
message_title=title,
message_body=body,
message_icon=icon,
data_message=data,
sound=sound,
badge=badge,
collapse_key=collapse_key,
low_priority=low_priority,
condition=condition,
time_to_live=time_to_live,
click_action=click_action,
delay_while_idle=delay_while_idle,
restricted_package_name=restricted_package_name,
dry_run=dry_run,
color=color,
tag=tag,
body_loc_key=body_loc_key,
body_loc_args=body_loc_args,
title_loc_key=title_loc_key,
title_loc_args=title_loc_args,
content_available=content_available,
extra_kwargs=extra_kwargs,
**kwargs
)
# do not raise errors, pyfcm will raise exceptions if response status will
# be anything but 200
return result | Copied from https://github.com/olucurious/PyFCM/blob/master/pyfcm/fcm.py:
Send push notification to a single device
Args:
registration_id (str): FCM device registration IDs.
body (str): Message string to display in the notification tray
data (dict): Data message payload to send alone or with the notification
message
sound (str): The sound file name to play. Specify "Default" for device
default sound.
Keyword Args:
collapse_key (str, optional): Identifier for a group of messages
that can be collapsed so that only the last message gets sent
when delivery can be resumed. Defaults to ``None``.
delay_while_idle (bool, optional): If ``True`` indicates that the
message should not be sent until the device becomes active.
time_to_live (int, optional): How long (in seconds) the message
should be kept in FCM storage if the device is offline. The
maximum time to live supported is 4 weeks. Defaults to ``None``
which uses the FCM default of 4 weeks.
low_priority (boolean, optional): Whether to send notification with
the low priority flag. Defaults to ``False``.
restricted_package_name (str, optional): Package name of the
application where the registration IDs must match in order to
receive the message. Defaults to ``None``.
dry_run (bool, optional): If ``True`` no message will be sent but
request will be tested.
Returns:
:tuple:`multicast_id(long), success(int), failure(int),
canonical_ids(int), results(list)`:
Response from FCM server.
Raises:
AuthenticationError: If :attr:`api_key` is not set or provided or there
is an error authenticating the sender.
FCMServerError: Internal server error or timeout error on Firebase cloud
messaging server
InvalidDataError: Invalid data provided
InternalPackageError: JSON parsing error, mostly from changes in the
response of FCM, create a new github issue to resolve it. | Below is the the instruction that describes the task:
### Input:
Copied from https://github.com/olucurious/PyFCM/blob/master/pyfcm/fcm.py:
Send push notification to a single device
Args:
registration_id (str): FCM device registration IDs.
body (str): Message string to display in the notification tray
data (dict): Data message payload to send alone or with the notification
message
sound (str): The sound file name to play. Specify "Default" for device
default sound.
Keyword Args:
collapse_key (str, optional): Identifier for a group of messages
that can be collapsed so that only the last message gets sent
when delivery can be resumed. Defaults to ``None``.
delay_while_idle (bool, optional): If ``True`` indicates that the
message should not be sent until the device becomes active.
time_to_live (int, optional): How long (in seconds) the message
should be kept in FCM storage if the device is offline. The
maximum time to live supported is 4 weeks. Defaults to ``None``
which uses the FCM default of 4 weeks.
low_priority (boolean, optional): Whether to send notification with
the low priority flag. Defaults to ``False``.
restricted_package_name (str, optional): Package name of the
application where the registration IDs must match in order to
receive the message. Defaults to ``None``.
dry_run (bool, optional): If ``True`` no message will be sent but
request will be tested.
Returns:
:tuple:`multicast_id(long), success(int), failure(int),
canonical_ids(int), results(list)`:
Response from FCM server.
Raises:
AuthenticationError: If :attr:`api_key` is not set or provided or there
is an error authenticating the sender.
FCMServerError: Internal server error or timeout error on Firebase cloud
messaging server
InvalidDataError: Invalid data provided
InternalPackageError: JSON parsing error, mostly from changes in the
response of FCM, create a new github issue to resolve it.
### Response:
def fcm_send_message(
registration_id,
title=None,
body=None,
icon=None,
data=None,
sound=None,
badge=None,
low_priority=False,
condition=None,
time_to_live=None,
click_action=None,
collapse_key=None,
delay_while_idle=False,
restricted_package_name=None,
dry_run=False,
color=None,
tag=None,
body_loc_key=None,
body_loc_args=None,
title_loc_key=None,
title_loc_args=None,
content_available=None,
extra_kwargs={},
api_key=None,
json_encoder=None,
**kwargs):
"""
Copied from https://github.com/olucurious/PyFCM/blob/master/pyfcm/fcm.py:
Send push notification to a single device
Args:
registration_id (str): FCM device registration IDs.
body (str): Message string to display in the notification tray
data (dict): Data message payload to send alone or with the notification
message
sound (str): The sound file name to play. Specify "Default" for device
default sound.
Keyword Args:
collapse_key (str, optional): Identifier for a group of messages
that can be collapsed so that only the last message gets sent
when delivery can be resumed. Defaults to ``None``.
delay_while_idle (bool, optional): If ``True`` indicates that the
message should not be sent until the device becomes active.
time_to_live (int, optional): How long (in seconds) the message
should be kept in FCM storage if the device is offline. The
maximum time to live supported is 4 weeks. Defaults to ``None``
which uses the FCM default of 4 weeks.
low_priority (boolean, optional): Whether to send notification with
the low priority flag. Defaults to ``False``.
restricted_package_name (str, optional): Package name of the
application where the registration IDs must match in order to
receive the message. Defaults to ``None``.
dry_run (bool, optional): If ``True`` no message will be sent but
request will be tested.
Returns:
:tuple:`multicast_id(long), success(int), failure(int),
canonical_ids(int), results(list)`:
Response from FCM server.
Raises:
AuthenticationError: If :attr:`api_key` is not set or provided or there
is an error authenticating the sender.
FCMServerError: Internal server error or timeout error on Firebase cloud
messaging server
InvalidDataError: Invalid data provided
InternalPackageError: JSON parsing error, mostly from changes in the
response of FCM, create a new github issue to resolve it.
"""
if api_key is None:
api_key = SETTINGS.get("FCM_SERVER_KEY")
push_service = FCMNotification(api_key=api_key, json_encoder=json_encoder)
result = push_service.notify_single_device(
registration_id=registration_id,
message_title=title,
message_body=body,
message_icon=icon,
data_message=data,
sound=sound,
badge=badge,
collapse_key=collapse_key,
low_priority=low_priority,
condition=condition,
time_to_live=time_to_live,
click_action=click_action,
delay_while_idle=delay_while_idle,
restricted_package_name=restricted_package_name,
dry_run=dry_run,
color=color,
tag=tag,
body_loc_key=body_loc_key,
body_loc_args=body_loc_args,
title_loc_key=title_loc_key,
title_loc_args=title_loc_args,
content_available=content_available,
extra_kwargs=extra_kwargs,
**kwargs
)
# do not raise errors, pyfcm will raise exceptions if response status will
# be anything but 200
return result |
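An illustrative call of the wrapper above; the registration token and server key are placeholders, and every keyword shown is one of the wrapper's own parameters.

result = fcm_send_message(
    registration_id="device-registration-token",  # placeholder device token
    title="Build finished",
    body="Pipeline #42 completed successfully.",
    data={"build_id": "42"},
    sound="default",
    api_key="AAAA...server-key...",                # placeholder FCM server key
)
# 'result' is whatever pyfcm's notify_single_device() returned; anything other
# than an HTTP 200 from FCM is raised as an exception by pyfcm itself.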
def parameter(self, parameter_id):
"""Return the specified global parameter (the entire object, not just the value)"""
for parametergroup, parameters in self.parameters: #pylint: disable=unused-variable
for parameter in parameters:
if parameter.id == parameter_id:
return parameter
raise KeyError("No such parameter exists: " + parameter_id ) | Return the specified global parameter (the entire object, not just the value) | Below is the the instruction that describes the task:
### Input:
Return the specified global parameter (the entire object, not just the value)
### Response:
def parameter(self, parameter_id):
"""Return the specified global parameter (the entire object, not just the value)"""
for parametergroup, parameters in self.parameters: #pylint: disable=unused-variable
for parameter in parameters:
if parameter.id == parameter_id:
return parameter
raise KeyError("No such parameter exists: " + parameter_id ) |
def close_connection(self): # pylint: disable=C0103
"""
Closes serial port connection.
:return: Nothing
"""
if self.port:
self.stop()
self.logger.debug("Close port '%s'" % self.comport,
extra={'type': '<->'})
self.port.close()
self.port = False | Closes serial port connection.
:return: Nothing | Below is the the instruction that describes the task:
### Input:
Closes serial port connection.
:return: Nothing
### Response:
def close_connection(self): # pylint: disable=C0103
"""
Closes serial port connection.
:return: Nothing
"""
if self.port:
self.stop()
self.logger.debug("Close port '%s'" % self.comport,
extra={'type': '<->'})
self.port.close()
self.port = False |
def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs):
""" Analogous to :meth:`plot_series` but will plot residuals. """
nf = len(self.f_cb(*self.pre_process(xres[0], params)))
xerr = np.empty((xres.shape[0], nf))
new_params = np.array(params)
for idx, row in enumerate(xres):
new_params[varied_idx] = varied_data[idx]
xerr[idx, :] = self.f_cb(*self.pre_process(row, params))
return self.plot_series(xerr, varied_data, varied_idx, **kwargs) | Analogous to :meth:`plot_series` but will plot residuals. | Below is the the instruction that describes the task:
### Input:
Analogous to :meth:`plot_series` but will plot residuals.
### Response:
def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs):
""" Analogous to :meth:`plot_series` but will plot residuals. """
nf = len(self.f_cb(*self.pre_process(xres[0], params)))
xerr = np.empty((xres.shape[0], nf))
new_params = np.array(params)
for idx, row in enumerate(xres):
new_params[varied_idx] = varied_data[idx]
xerr[idx, :] = self.f_cb(*self.pre_process(row, params))
return self.plot_series(xerr, varied_data, varied_idx, **kwargs) |
def signal_optimiser(d, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0,
weights=None, ind=None, mode='minimise'):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
    and the amplitude is below (or above, depending on `mode`) the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
        thresholds. Can be 'kde_first_max', 'kde_max', 'median',
        'bayes_mvs' or 'mean', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
    threshold_mult : float or tuple
A multiplier applied to the calculated threshold
before use. If a tuple, the first value is applied
to the mean threshold, and the second is applied to
the standard deviation threshold. Reduce this to make
data selection more stringent.
x_bias : float
If non-zero, a bias is applied to the calculated statistics
to prefer the beginning (if > 0) or end (if < 0) of the
signal. Should be between zero and 1.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
ind : boolean array
A boolean array the same length as the data. Where
false, data will not be included.
mode : str
Whether to 'minimise' or 'maximise' the concentration
of the elements.
Returns
-------
dict, str : optimisation result, error message
"""
errmsg = ''
if isinstance(analytes, str):
analytes = [analytes]
if ind is None:
ind = np.full(len(d.Time), True)
# initial catch
if not any(ind) or (np.diff(bool_2_indices(ind)).max() < min_points):
        errmsg = 'Optimisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
msmeans, msstds = calculate_optimisation_stats(d, analytes, min_points, weights, ind, x_bias)
# second catch
    if all(np.isnan(msmeans).flat) or all(np.isnan(msstds).flat):
        errmsg = 'Optimisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
# define thresholds
valid = ['kde_first_max', 'kde_max', 'median', 'bayes_mvs', 'mean']
n_under = 0
i = np.argwhere(np.array(valid) == threshold_mode)[0, 0]
o_threshold_mode = threshold_mode
while (n_under <= 0) & (i < len(valid)):
if threshold_mode == 'median':
# median - OK, but best?
std_threshold = np.nanmedian(msstds)
mean_threshold = np.nanmedian(msmeans)
elif threshold_mode == 'mean':
# mean
std_threshold = np.nanmean(msstds)
mean_threshold = np.nanmean(msmeans)
elif threshold_mode == 'kde_max':
# maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
mean_threshold = xm[np.argmax(mdf)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
std_threshold = xr[np.argmax(rdf)]
elif threshold_mode == 'kde_first_max':
# first local maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
inds = np.argwhere(np.r_[False, mdf[1:] > mdf[:-1]] &
np.r_[mdf[:-1] > mdf[1:], False] &
(mdf > 0.25 * mdf.max()))
mean_threshold = xm[np.min(inds)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
inds = np.argwhere(np.r_[False, rdf[1:] > rdf[:-1]] &
np.r_[rdf[:-1] > rdf[1:], False] &
(rdf > 0.25 * rdf.max()))
std_threshold = xr[np.min(inds)]
elif threshold_mode == 'bayes_mvs':
# bayesian mvs.
bm, _, bs = bayes_mvs(msstds[~np.isnan(msstds)])
std_threshold = bm.statistic
bm, _, bs = bayes_mvs(msmeans[~np.isnan(msmeans)])
mean_threshold = bm.statistic
elif callable(threshold_mode):
std_threshold = threshold_mode(msstds[~np.isnan(msstds)].flatten())
mean_threshold = threshold_mode(msmeans[~np.isnan(msmeans)].flatten())
else:
try:
mean_threshold, std_threshold = threshold_mode
except:
raise ValueError('\nthreshold_mode must be one of:\n ' + ', '.join(valid) + ',\na custom function, or a \n(mean_threshold, std_threshold) tuple.')
# apply threshold_mult
if isinstance(threshold_mult, (int, float)):
std_threshold *= threshold_mult
mean_threshold *= threshold_mult
elif len(threshold_mult) == 2:
mean_threshold *= threshold_mult[0]
std_threshold *= threshold_mult[1]
else:
raise ValueError('\nthreshold_mult must be a float, int or tuple of length 2.')
rind = (msstds < std_threshold)
if mode == 'minimise':
mind = (msmeans < mean_threshold)
else:
mind = (msmeans > mean_threshold)
ind = rind & mind
n_under = ind.sum()
if n_under == 0:
i += 1
if i <= len(valid) - 1:
threshold_mode = valid[i]
else:
                errmsg = 'Optimisation failed. None of the threshold_mode options worked. Try reducing min_points.'
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
if i > 0:
errmsg = "optimisation failed using threshold_mode='{:}', falling back to '{:}'".format(o_threshold_mode, threshold_mode)
# identify max number of points within thresholds
passing = np.argwhere(ind)
opt_n_points = passing[:, 0].max()
opt_centre = passing[passing[:, 0] == opt_n_points, 1].min()
opt_n_points += min_points
# centres, npoints = np.meshgrid(np.arange(msmeans.shape[1]),
# np.arange(min_points, min_points + msmeans.shape[0]))
# opt_n_points = npoints[ind].max()
# plus/minus one point to allow some freedom to shift selection window.
# cind = ind & (npoints == opt_n_points)
# opt_centre = centres[cind].min()
if opt_n_points % 2 == 0:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2)
else:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2 + 1)
filt = np.zeros(d.Time.shape, dtype=bool)
filt[lims[0]:lims[1]] = True
return Bunch({'means': msmeans,
'stds': msstds,
'mean_threshold': mean_threshold,
'std_threshold': std_threshold,
'lims': lims,
'filt': filt,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': opt_centre,
'opt_n_points': opt_n_points,
'weights': weights,
'optimisation_success': True,
'errmsg': errmsg}), errmsg | Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is below (or above, depending on `mode`) the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
    thresholds. Can be 'kde_first_max', 'kde_max', 'median',
    'bayes_mvs' or 'mean', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
threshold_mult : float or tuple
A multiplier applied to the calculated threshold
before use. If a tuple, the first value is applied
to the mean threshold, and the second is applied to
the standard deviation threshold. Reduce this to make
data selection more stringent.
x_bias : float
If non-zero, a bias is applied to the calculated statistics
to prefer the beginning (if > 0) or end (if < 0) of the
signal. Should be between zero and 1.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
ind : boolean array
A boolean array the same length as the data. Where
false, data will not be included.
mode : str
Whether to 'minimise' or 'maximise' the concentration
of the elements.
Returns
-------
dict, str : optimisation result, error message | Below is the the instruction that describes the task:
### Input:
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is below (or above, depending on `mode`) the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
    thresholds. Can be 'kde_first_max', 'kde_max', 'median',
    'bayes_mvs' or 'mean', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
threshold_mult : float or tuple
A multiplier applied to the calculated threshold
before use. If a tuple, the first value is applied
to the mean threshold, and the second is applied to
the standard deviation threshold. Reduce this to make
data selection more stringent.
x_bias : float
If non-zero, a bias is applied to the calculated statistics
to prefer the beginning (if > 0) or end (if < 0) of the
signal. Should be between zero and 1.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
ind : boolean array
A boolean array the same length as the data. Where
false, data will not be included.
mode : str
Whether to 'minimise' or 'maximise' the concentration
of the elements.
Returns
-------
dict, str : optimisation result, error message
### Response:
def signal_optimiser(d, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0,
weights=None, ind=None, mode='minimise'):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
and the amplitude is either below the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
        thresholds. Can be 'kde_first_max', 'kde_max', 'median',
        'bayes_mvs' or 'mean', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
    threshold_mult : float or tuple
A multiplier applied to the calculated threshold
before use. If a tuple, the first value is applied
to the mean threshold, and the second is applied to
the standard deviation threshold. Reduce this to make
data selection more stringent.
x_bias : float
If non-zero, a bias is applied to the calculated statistics
to prefer the beginning (if > 0) or end (if < 0) of the
signal. Should be between zero and 1.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
ind : boolean array
A boolean array the same length as the data. Where
false, data will not be included.
mode : str
Whether to 'minimise' or 'maximise' the concentration
of the elements.
Returns
-------
dict, str : optimisation result, error message
"""
errmsg = ''
if isinstance(analytes, str):
analytes = [analytes]
if ind is None:
ind = np.full(len(d.Time), True)
# initial catch
if not any(ind) or (np.diff(bool_2_indices(ind)).max() < min_points):
        errmsg = 'Optimisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
msmeans, msstds = calculate_optimisation_stats(d, analytes, min_points, weights, ind, x_bias)
# second catch
    if all(np.isnan(msmeans).flat) or all(np.isnan(msstds).flat):
        errmsg = 'Optimisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points)
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
# define thresholds
valid = ['kde_first_max', 'kde_max', 'median', 'bayes_mvs', 'mean']
n_under = 0
i = np.argwhere(np.array(valid) == threshold_mode)[0, 0]
o_threshold_mode = threshold_mode
while (n_under <= 0) & (i < len(valid)):
if threshold_mode == 'median':
# median - OK, but best?
std_threshold = np.nanmedian(msstds)
mean_threshold = np.nanmedian(msmeans)
elif threshold_mode == 'mean':
# mean
std_threshold = np.nanmean(msstds)
mean_threshold = np.nanmean(msmeans)
elif threshold_mode == 'kde_max':
# maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
mean_threshold = xm[np.argmax(mdf)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
std_threshold = xr[np.argmax(rdf)]
elif threshold_mode == 'kde_first_max':
# first local maximum of gaussian kernel density estimator
mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat)
xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100)
mdf = mkd.pdf(xm)
inds = np.argwhere(np.r_[False, mdf[1:] > mdf[:-1]] &
np.r_[mdf[:-1] > mdf[1:], False] &
(mdf > 0.25 * mdf.max()))
mean_threshold = xm[np.min(inds)]
rkd = gaussian_kde(msstds[~np.isnan(msstds)])
xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100)
rdf = rkd.pdf(xr)
inds = np.argwhere(np.r_[False, rdf[1:] > rdf[:-1]] &
np.r_[rdf[:-1] > rdf[1:], False] &
(rdf > 0.25 * rdf.max()))
std_threshold = xr[np.min(inds)]
elif threshold_mode == 'bayes_mvs':
# bayesian mvs.
bm, _, bs = bayes_mvs(msstds[~np.isnan(msstds)])
std_threshold = bm.statistic
bm, _, bs = bayes_mvs(msmeans[~np.isnan(msmeans)])
mean_threshold = bm.statistic
elif callable(threshold_mode):
std_threshold = threshold_mode(msstds[~np.isnan(msstds)].flatten())
mean_threshold = threshold_mode(msmeans[~np.isnan(msmeans)].flatten())
else:
try:
mean_threshold, std_threshold = threshold_mode
except:
raise ValueError('\nthreshold_mode must be one of:\n ' + ', '.join(valid) + ',\na custom function, or a \n(mean_threshold, std_threshold) tuple.')
# apply threshold_mult
if isinstance(threshold_mult, (int, float)):
std_threshold *= threshold_mult
mean_threshold *= threshold_mult
elif len(threshold_mult) == 2:
mean_threshold *= threshold_mult[0]
std_threshold *= threshold_mult[1]
else:
raise ValueError('\nthreshold_mult must be a float, int or tuple of length 2.')
rind = (msstds < std_threshold)
if mode == 'minimise':
mind = (msmeans < mean_threshold)
else:
mind = (msmeans > mean_threshold)
ind = rind & mind
n_under = ind.sum()
if n_under == 0:
i += 1
if i <= len(valid) - 1:
threshold_mode = valid[i]
else:
                errmsg = 'Optimisation failed. None of the threshold_mode options worked. Try reducing min_points.'
return Bunch({'means': np.nan,
'stds': np.nan,
'mean_threshold': np.nan,
'std_threshold': np.nan,
'lims': np.nan,
'filt': ind,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': np.nan,
'opt_n_points': np.nan,
'weights': weights,
'optimisation_success': False,
'errmsg': errmsg}), errmsg
if i > 0:
errmsg = "optimisation failed using threshold_mode='{:}', falling back to '{:}'".format(o_threshold_mode, threshold_mode)
# identify max number of points within thresholds
passing = np.argwhere(ind)
opt_n_points = passing[:, 0].max()
opt_centre = passing[passing[:, 0] == opt_n_points, 1].min()
opt_n_points += min_points
# centres, npoints = np.meshgrid(np.arange(msmeans.shape[1]),
# np.arange(min_points, min_points + msmeans.shape[0]))
# opt_n_points = npoints[ind].max()
# plus/minus one point to allow some freedom to shift selection window.
# cind = ind & (npoints == opt_n_points)
# opt_centre = centres[cind].min()
if opt_n_points % 2 == 0:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2)
else:
lims = (opt_centre - opt_n_points // 2,
opt_centre + opt_n_points // 2 + 1)
filt = np.zeros(d.Time.shape, dtype=bool)
filt[lims[0]:lims[1]] = True
return Bunch({'means': msmeans,
'stds': msstds,
'mean_threshold': mean_threshold,
'std_threshold': std_threshold,
'lims': lims,
'filt': filt,
'threshold_mode': threshold_mode,
'min_points': min_points,
'analytes': analytes,
'opt_centre': opt_centre,
'opt_n_points': opt_n_points,
'weights': weights,
'optimisation_success': True,
'errmsg': errmsg}), errmsg |
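The 'kde_first_max' branch above picks the first strict local maximum of a Gaussian kernel density estimate that reaches at least a quarter of the global peak. Below is a minimal standalone sketch of that idea using only NumPy and SciPy on toy bimodal data; the names are illustrative and this is not the library's own API.

```python
# Minimal sketch of the 'kde_first_max' idea: estimate a density over the
# values, then take the first strict local maximum that reaches at least
# 25% of the global peak. Toy data; names are illustrative.
import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
values = np.concatenate([rng.normal(0.5, 0.1, 500),
                         rng.normal(2.0, 0.3, 200)])   # bimodal toy signal

kde = gaussian_kde(values)
x = np.linspace(*np.percentile(values, (1, 99)), 100)
density = kde.pdf(x)

is_peak = (np.r_[False, density[1:] > density[:-1]] &
           np.r_[density[:-1] > density[1:], False] &
           (density > 0.25 * density.max()))
threshold = x[np.argwhere(is_peak).min()]
print(threshold)   # close to 0.5 for this toy distribution
```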
def pad_batch(features, batch_multiple):
"""Pad batch dim of features to nearest multiple of batch_multiple."""
feature = list(features.items())[0][1]
batch_size = tf.shape(feature)[0]
mod = batch_size % batch_multiple
has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)
batch_padding = batch_multiple * has_mod - mod
padded_features = {}
for k, feature in features.items():
rank = len(feature.shape)
paddings = [[0, 0] for _ in range(rank)]
paddings[0][1] = batch_padding
padded_feature = tf.pad(feature, paddings)
padded_features[k] = padded_feature
return padded_features | Pad batch dim of features to nearest multiple of batch_multiple. | Below is the the instruction that describes the task:
### Input:
Pad batch dim of features to nearest multiple of batch_multiple.
### Response:
def pad_batch(features, batch_multiple):
"""Pad batch dim of features to nearest multiple of batch_multiple."""
feature = list(features.items())[0][1]
batch_size = tf.shape(feature)[0]
mod = batch_size % batch_multiple
has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32)
batch_padding = batch_multiple * has_mod - mod
padded_features = {}
for k, feature in features.items():
rank = len(feature.shape)
paddings = [[0, 0] for _ in range(rank)]
paddings[0][1] = batch_padding
padded_feature = tf.pad(feature, paddings)
padded_features[k] = padded_feature
return padded_features |
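To make the padding arithmetic in pad_batch concrete without requiring TensorFlow, here is a NumPy re-implementation of the same logic. `pad_batch_np` is a hypothetical stand-in name, not part of the original module.

```python
# NumPy re-implementation of the same batch-padding arithmetic, shown only to
# make the behaviour concrete without a TensorFlow dependency.
import numpy as np

def pad_batch_np(features, batch_multiple):
    """Zero-pad the leading (batch) axis of every array up to the next multiple."""
    batch_size = next(iter(features.values())).shape[0]
    batch_padding = (batch_multiple - batch_size % batch_multiple) % batch_multiple
    padded_features = {}
    for k, feature in features.items():
        paddings = [(0, 0)] * feature.ndim
        paddings[0] = (0, batch_padding)
        padded_features[k] = np.pad(feature, paddings)
    return padded_features

batch = {"inputs": np.ones((5, 3)), "targets": np.ones(5)}
padded = pad_batch_np(batch, batch_multiple=4)
print(padded["inputs"].shape, padded["targets"].shape)   # (8, 3) (8,)
```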
def deep_reload_hook(m):
"""Replacement for reload()."""
if not isinstance(m, ModuleType):
raise TypeError("reload() argument must be module")
name = m.__name__
if name not in sys.modules:
raise ImportError("reload(): module %.200s not in sys.modules" % name)
global modules_reloading
try:
return modules_reloading[name]
except:
modules_reloading[name] = m
dot = name.rfind('.')
if dot < 0:
subname = name
path = None
else:
try:
parent = sys.modules[name[:dot]]
except KeyError:
modules_reloading.clear()
raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot])
subname = name[dot+1:]
path = getattr(parent, "__path__", None)
try:
# This appears to be necessary on Python 3, because imp.find_module()
# tries to import standard libraries (like io) itself, and we don't
# want them to be processed by our deep_import_hook.
with replace_import_hook(original_import):
fp, filename, stuff = imp.find_module(subname, path)
finally:
modules_reloading.clear()
try:
newm = imp.load_module(name, fp, filename, stuff)
except:
# load_module probably removed name from modules because of
# the error. Put back the original module object.
sys.modules[name] = m
raise
finally:
if fp: fp.close()
modules_reloading.clear()
return newm | Replacement for reload(). | Below is the the instruction that describes the task:
### Input:
Replacement for reload().
### Response:
def deep_reload_hook(m):
"""Replacement for reload()."""
if not isinstance(m, ModuleType):
raise TypeError("reload() argument must be module")
name = m.__name__
if name not in sys.modules:
raise ImportError("reload(): module %.200s not in sys.modules" % name)
global modules_reloading
try:
return modules_reloading[name]
except:
modules_reloading[name] = m
dot = name.rfind('.')
if dot < 0:
subname = name
path = None
else:
try:
parent = sys.modules[name[:dot]]
except KeyError:
modules_reloading.clear()
raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot])
subname = name[dot+1:]
path = getattr(parent, "__path__", None)
try:
# This appears to be necessary on Python 3, because imp.find_module()
# tries to import standard libraries (like io) itself, and we don't
# want them to be processed by our deep_import_hook.
with replace_import_hook(original_import):
fp, filename, stuff = imp.find_module(subname, path)
finally:
modules_reloading.clear()
try:
newm = imp.load_module(name, fp, filename, stuff)
except:
# load_module probably removed name from modules because of
# the error. Put back the original module object.
sys.modules[name] = m
raise
finally:
if fp: fp.close()
modules_reloading.clear()
return newm |
def check_password(raw_password, enc_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
algo, salt, hsh = enc_password.split('$')
return enc_password == encrypt_password(raw_password, algorithm=algo,
salt=salt) | Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes. | Below is the the instruction that describes the task:
### Input:
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
### Response:
def check_password(raw_password, enc_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
algo, salt, hsh = enc_password.split('$')
return enc_password == encrypt_password(raw_password, algorithm=algo,
salt=salt) |
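check_password relies on the stored value using an 'algorithm$salt$hash' layout. encrypt_password is not shown in this record, so the sketch below pairs the function above with a hypothetical salted-SHA-1 stand-in, purely so the round trip can actually run.

```python
# The stored value is expected to look like 'algorithm$salt$hash'. Since
# encrypt_password is not part of this snippet, the salted-SHA-1 version below
# is a hypothetical stand-in, used only so the round trip with check_password
# (defined above) is runnable.
import hashlib

def encrypt_password(raw_password, algorithm="sha1", salt="0123ab"):
    hsh = hashlib.new(algorithm, (salt + raw_password).encode()).hexdigest()
    return "%s$%s$%s" % (algorithm, salt, hsh)

stored = encrypt_password("hunter2", salt="1a2b3c")
print(stored.split("$")[0])                # 'sha1'
print(check_password("hunter2", stored))   # True with the stand-in above
print(check_password("wrong", stored))     # False
```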
def readinto(self, data):
"""Read data from the ring buffer into a user-provided buffer.
This advances the read index after reading;
calling :meth:`advance_read_index` is *not* necessary.
:param data: The memory where the data should be stored.
:type data: CData pointer or buffer
:returns: The number of elements read, which may be less than
the size of *data*.
:rtype: int
"""
try:
data = self._ffi.from_buffer(data)
except TypeError:
pass # input is not a buffer
size, rest = divmod(self._ffi.sizeof(data), self.elementsize)
if rest:
raise ValueError('data size must be multiple of elementsize')
return self._lib.PaUtil_ReadRingBuffer(self._ptr, data, size) | Read data from the ring buffer into a user-provided buffer.
This advances the read index after reading;
calling :meth:`advance_read_index` is *not* necessary.
:param data: The memory where the data should be stored.
:type data: CData pointer or buffer
:returns: The number of elements read, which may be less than
the size of *data*.
:rtype: int | Below is the the instruction that describes the task:
### Input:
Read data from the ring buffer into a user-provided buffer.
This advances the read index after reading;
calling :meth:`advance_read_index` is *not* necessary.
:param data: The memory where the data should be stored.
:type data: CData pointer or buffer
:returns: The number of elements read, which may be less than
the size of *data*.
:rtype: int
### Response:
def readinto(self, data):
"""Read data from the ring buffer into a user-provided buffer.
This advances the read index after reading;
calling :meth:`advance_read_index` is *not* necessary.
:param data: The memory where the data should be stored.
:type data: CData pointer or buffer
:returns: The number of elements read, which may be less than
the size of *data*.
:rtype: int
"""
try:
data = self._ffi.from_buffer(data)
except TypeError:
pass # input is not a buffer
size, rest = divmod(self._ffi.sizeof(data), self.elementsize)
if rest:
raise ValueError('data size must be multiple of elementsize')
return self._lib.PaUtil_ReadRingBuffer(self._ptr, data, size) |
def _parse_uri(uri_as_string):
"""
Parse the given URI from a string.
Supported URI schemes are:
* file
* hdfs
* http
* https
* s3
* s3a
* s3n
* s3u
* webhdfs
    s3, s3a and s3n are treated the same way. s3u is s3 but without SSL.
Valid URI examples::
* s3://my_bucket/my_key
* s3://my_key:my_secret@my_bucket/my_key
* s3://my_key:my_secret@my_server:my_port@my_bucket/my_key
* hdfs:///path/file
* hdfs://path/file
* webhdfs://host:port/path/file
* ./local/path/file
* ~/local/path/file
* local/path/file
* ./local/path/file.gz
* file:///home/user/file
* file:///home/user/file.bz2
* [ssh|scp|sftp]://username@host//path/file
* [ssh|scp|sftp]://username@host/path/file
"""
if os.name == 'nt':
# urlsplit doesn't work on Windows -- it parses the drive as the scheme...
if '://' not in uri_as_string:
# no protocol given => assume a local file
uri_as_string = 'file://' + uri_as_string
parsed_uri = _my_urlsplit(uri_as_string)
if parsed_uri.scheme == "hdfs":
return _parse_uri_hdfs(parsed_uri)
elif parsed_uri.scheme == "webhdfs":
return _parse_uri_webhdfs(parsed_uri)
elif parsed_uri.scheme in smart_open_s3.SUPPORTED_SCHEMES:
return _parse_uri_s3x(parsed_uri)
elif parsed_uri.scheme == 'file':
return _parse_uri_file(parsed_uri.netloc + parsed_uri.path)
elif parsed_uri.scheme in ('', None):
return _parse_uri_file(uri_as_string)
elif parsed_uri.scheme.startswith('http'):
return Uri(scheme=parsed_uri.scheme, uri_path=uri_as_string)
elif parsed_uri.scheme in smart_open_ssh.SCHEMES:
return _parse_uri_ssh(parsed_uri)
else:
raise NotImplementedError(
"unknown URI scheme %r in %r" % (parsed_uri.scheme, uri_as_string)
) | Parse the given URI from a string.
Supported URI schemes are:
* file
* hdfs
* http
* https
* s3
* s3a
* s3n
* s3u
* webhdfs
    s3, s3a and s3n are treated the same way. s3u is s3 but without SSL.
Valid URI examples::
* s3://my_bucket/my_key
* s3://my_key:my_secret@my_bucket/my_key
* s3://my_key:my_secret@my_server:my_port@my_bucket/my_key
* hdfs:///path/file
* hdfs://path/file
* webhdfs://host:port/path/file
* ./local/path/file
* ~/local/path/file
* local/path/file
* ./local/path/file.gz
* file:///home/user/file
* file:///home/user/file.bz2
* [ssh|scp|sftp]://username@host//path/file
* [ssh|scp|sftp]://username@host/path/file | Below is the the instruction that describes the task:
### Input:
Parse the given URI from a string.
Supported URI schemes are:
* file
* hdfs
* http
* https
* s3
* s3a
* s3n
* s3u
* webhdfs
    s3, s3a and s3n are treated the same way. s3u is s3 but without SSL.
Valid URI examples::
* s3://my_bucket/my_key
* s3://my_key:my_secret@my_bucket/my_key
* s3://my_key:my_secret@my_server:my_port@my_bucket/my_key
* hdfs:///path/file
* hdfs://path/file
* webhdfs://host:port/path/file
* ./local/path/file
* ~/local/path/file
* local/path/file
* ./local/path/file.gz
* file:///home/user/file
* file:///home/user/file.bz2
* [ssh|scp|sftp]://username@host//path/file
* [ssh|scp|sftp]://username@host/path/file
### Response:
def _parse_uri(uri_as_string):
"""
Parse the given URI from a string.
Supported URI schemes are:
* file
* hdfs
* http
* https
* s3
* s3a
* s3n
* s3u
* webhdfs
    s3, s3a and s3n are treated the same way. s3u is s3 but without SSL.
Valid URI examples::
* s3://my_bucket/my_key
* s3://my_key:my_secret@my_bucket/my_key
* s3://my_key:my_secret@my_server:my_port@my_bucket/my_key
* hdfs:///path/file
* hdfs://path/file
* webhdfs://host:port/path/file
* ./local/path/file
* ~/local/path/file
* local/path/file
* ./local/path/file.gz
* file:///home/user/file
* file:///home/user/file.bz2
* [ssh|scp|sftp]://username@host//path/file
* [ssh|scp|sftp]://username@host/path/file
"""
if os.name == 'nt':
# urlsplit doesn't work on Windows -- it parses the drive as the scheme...
if '://' not in uri_as_string:
# no protocol given => assume a local file
uri_as_string = 'file://' + uri_as_string
parsed_uri = _my_urlsplit(uri_as_string)
if parsed_uri.scheme == "hdfs":
return _parse_uri_hdfs(parsed_uri)
elif parsed_uri.scheme == "webhdfs":
return _parse_uri_webhdfs(parsed_uri)
elif parsed_uri.scheme in smart_open_s3.SUPPORTED_SCHEMES:
return _parse_uri_s3x(parsed_uri)
elif parsed_uri.scheme == 'file':
return _parse_uri_file(parsed_uri.netloc + parsed_uri.path)
elif parsed_uri.scheme in ('', None):
return _parse_uri_file(uri_as_string)
elif parsed_uri.scheme.startswith('http'):
return Uri(scheme=parsed_uri.scheme, uri_path=uri_as_string)
elif parsed_uri.scheme in smart_open_ssh.SCHEMES:
return _parse_uri_ssh(parsed_uri)
else:
raise NotImplementedError(
"unknown URI scheme %r in %r" % (parsed_uri.scheme, uri_as_string)
) |
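The dispatch above keys off the scheme produced by URL splitting. _my_urlsplit is not reproduced here, so this quick illustration uses the standard library's urlsplit, which yields the same components for these simple cases.

```python
# The scheme-based dispatch above works on the pieces produced by URL
# splitting. _my_urlsplit is not reproduced here; the standard library's
# urlsplit gives the same components for these simple cases.
from urllib.parse import urlsplit

for uri in ("s3://my_key:my_secret@my_bucket/my_key",
            "hdfs:///path/file",
            "./local/path/file.gz"):
    parts = urlsplit(uri)
    print(repr(parts.scheme), repr(parts.netloc), repr(parts.path))
# 's3' 'my_key:my_secret@my_bucket' '/my_key'
# 'hdfs' '' '/path/file'
# '' '' './local/path/file.gz'
```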
def open(self):
"""
Calls SetupDiGetClassDevs to obtain a handle to an opaque device
information set that describes the device interfaces supported by all
the USB collections currently installed in the system. The
application should specify DIGCF.PRESENT and DIGCF.INTERFACEDEVICE
in the Flags parameter passed to SetupDiGetClassDevs.
"""
self.h_info = SetupDiGetClassDevs(byref(self.guid), None, None,
(DIGCF.PRESENT | DIGCF.DEVICEINTERFACE) )
return self.h_info | Calls SetupDiGetClassDevs to obtain a handle to an opaque device
information set that describes the device interfaces supported by all
the USB collections currently installed in the system. The
application should specify DIGCF.PRESENT and DIGCF.INTERFACEDEVICE
in the Flags parameter passed to SetupDiGetClassDevs. | Below is the the instruction that describes the task:
### Input:
Calls SetupDiGetClassDevs to obtain a handle to an opaque device
information set that describes the device interfaces supported by all
the USB collections currently installed in the system. The
application should specify DIGCF.PRESENT and DIGCF.INTERFACEDEVICE
in the Flags parameter passed to SetupDiGetClassDevs.
### Response:
def open(self):
"""
Calls SetupDiGetClassDevs to obtain a handle to an opaque device
information set that describes the device interfaces supported by all
the USB collections currently installed in the system. The
application should specify DIGCF.PRESENT and DIGCF.INTERFACEDEVICE
in the Flags parameter passed to SetupDiGetClassDevs.
"""
self.h_info = SetupDiGetClassDevs(byref(self.guid), None, None,
(DIGCF.PRESENT | DIGCF.DEVICEINTERFACE) )
return self.h_info |
def start(self, origin):
"""
Start this Tracer.
Return a Python function suitable for use with sys.settrace().
"""
self.start_time = time.time()
self.pause_until = None
self.data.update(self._get_struct(origin, 'origin'))
self.data_stack.append(self.data)
sys.settrace(self._trace)
return self._trace | Start this Tracer.
Return a Python function suitable for use with sys.settrace(). | Below is the the instruction that describes the task:
### Input:
Start this Tracer.
Return a Python function suitable for use with sys.settrace().
### Response:
def start(self, origin):
"""
Start this Tracer.
Return a Python function suitable for use with sys.settrace().
"""
self.start_time = time.time()
self.pause_until = None
self.data.update(self._get_struct(origin, 'origin'))
self.data_stack.append(self.data)
sys.settrace(self._trace)
return self._trace |
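Tracer.start hands self._trace to sys.settrace; the contract is that the installed callable receives (frame, event, arg) and returns the function to use for per-line tracing of that frame. The toy tracer below illustrates that contract only; it is not the Tracer class itself.

```python
# Standalone illustration of the sys.settrace contract that Tracer.start uses.
import sys

line_counts = {}

def toy_trace(frame, event, arg):
    if event == "line":
        key = (frame.f_code.co_name, frame.f_lineno)
        line_counts[key] = line_counts.get(key, 0) + 1
    return toy_trace   # keep tracing newly entered frames line by line

def work():
    total = 0
    for i in range(3):
        total += i
    return total

sys.settrace(toy_trace)
work()
sys.settrace(None)
print(sorted(line_counts.items()))   # per-line hit counts inside work()
```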
def insert_seperator_results(results):
"""Given a sequence of BenchmarkResults,
return a new sequence where a "seperator" BenchmarkResult has been placed
between differing benchmarks to provide a visual difference."""
sepbench = BenchmarkResult(*[' ' * w for w in COLUMN_WIDTHS])
last_bm = None
for r in results:
if last_bm is None:
last_bm = r.benchmark
elif last_bm != r.benchmark:
yield sepbench
last_bm = r.benchmark
yield r | Given a sequence of BenchmarkResults,
return a new sequence where a "seperator" BenchmarkResult has been placed
between differing benchmarks to provide a visual difference. | Below is the the instruction that describes the task:
### Input:
Given a sequence of BenchmarkResults,
return a new sequence where a "seperator" BenchmarkResult has been placed
between differing benchmarks to provide a visual difference.
### Response:
def insert_seperator_results(results):
"""Given a sequence of BenchmarkResults,
return a new sequence where a "seperator" BenchmarkResult has been placed
between differing benchmarks to provide a visual difference."""
sepbench = BenchmarkResult(*[' ' * w for w in COLUMN_WIDTHS])
last_bm = None
for r in results:
if last_bm is None:
last_bm = r.benchmark
elif last_bm != r.benchmark:
yield sepbench
last_bm = r.benchmark
yield r |
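The generator yields a blank row whenever the benchmark name changes. Here is a self-contained sketch of the same pattern; BenchmarkResult and COLUMN_WIDTHS are not defined in this record, so a tiny namedtuple and hard-coded widths stand in for them.

```python
# Self-contained sketch of the same "blank row between groups" pattern.
from collections import namedtuple

Row = namedtuple("Row", "benchmark runtime")
WIDTHS = (12, 8)

def with_separators(rows):
    blank = Row(*[" " * w for w in WIDTHS])
    last = None
    for row in rows:
        if last is not None and last != row.benchmark:
            yield blank
        last = row.benchmark
        yield row

rows = [Row("sort", "1.2s"), Row("sort", "1.1s"), Row("hash", "0.4s")]
for r in with_separators(rows):
    print(repr(r.benchmark), r.runtime)   # a blank row appears before 'hash'
```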
def getCoeff(self, name, light=None, date=None):
'''
        try to get the calibration for the right light source, but
        use another if it does not exist
'''
d = self.coeffs[name]
try:
c = d[light]
except KeyError:
try:
k, i = next(iter(d.items()))
if light is not None:
print(
'no calibration found for [%s] - using [%s] instead' % (light, k))
except StopIteration:
return None
c = i
except TypeError:
# coeff not dependent on light source
c = d
        return _getFromDate(c, date) | try to get the calibration for the right light source, but
        use another if it does not exist | Below is the the instruction that describes the task:
### Input:
        try to get the calibration for the right light source, but
        use another if it does not exist
### Response:
def getCoeff(self, name, light=None, date=None):
'''
        try to get the calibration for the right light source, but
        use another if it does not exist
'''
d = self.coeffs[name]
try:
c = d[light]
except KeyError:
try:
k, i = next(iter(d.items()))
if light is not None:
print(
'no calibration found for [%s] - using [%s] instead' % (light, k))
except StopIteration:
return None
c = i
except TypeError:
# coeff not dependent on light source
c = d
return _getFromDate(c, date) |
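getCoeff prefers the requested light source, falls back to whichever calibration exists, and tolerates coefficients that are not keyed by light at all. Below is a standalone sketch of that fallback pattern with toy values; _getFromDate and the surrounding class are not reproduced.

```python
# Standalone sketch of the fallback lookup above: prefer the requested light
# source, fall back to whichever calibration exists, and accept entries that
# are not keyed by light at all. Toy values only.
def lookup_coeff(coeffs, name, light=None):
    entry = coeffs[name]
    if not isinstance(entry, dict):
        return entry                        # coeff not dependent on light source
    if light in entry:
        return entry[light]
    fallback, value = next(iter(entry.items()))
    print("no calibration found for [%s] - using [%s] instead" % (light, fallback))
    return value

coeffs = {"gain": {"halogen": 1.02, "led": 0.97}, "offset": 0.1}
print(lookup_coeff(coeffs, "gain", light="xenon"))   # falls back to 'halogen'
print(lookup_coeff(coeffs, "offset"))                # 0.1, not light-dependent
```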
def read(self):
"""
Read buffer out as a single stream.
.. warning::
Avoid using this function!
**Why?** This is a *convenience* function; it doesn't encourage good
memory management.
All memory required for a mesh is duplicated, and returned as a
single :class:`str`. So at best, using this function will double
the memory required for a single model.
**Instead:** Wherever possible, please use :meth:`buffer_iter`.
"""
buffer = BytesIO()
for chunk in self.buffer_iter():
log.debug('buffer.write(%r)', chunk)
buffer.write(chunk)
buffer.seek(0)
return buffer.read() | Read buffer out as a single stream.
.. warning::
Avoid using this function!
**Why?** This is a *convenience* function; it doesn't encourage good
memory management.
All memory required for a mesh is duplicated, and returned as a
single :class:`str`. So at best, using this function will double
the memory required for a single model.
**Instead:** Wherever possible, please use :meth:`buffer_iter`. | Below is the the instruction that describes the task:
### Input:
Read buffer out as a single stream.
.. warning::
Avoid using this function!
**Why?** This is a *convenience* function; it doesn't encourage good
memory management.
All memory required for a mesh is duplicated, and returned as a
single :class:`str`. So at best, using this function will double
the memory required for a single model.
**Instead:** Wherever possible, please use :meth:`buffer_iter`.
### Response:
def read(self):
"""
Read buffer out as a single stream.
.. warning::
Avoid using this function!
**Why?** This is a *convenience* function; it doesn't encourage good
memory management.
All memory required for a mesh is duplicated, and returned as a
single :class:`str`. So at best, using this function will double
the memory required for a single model.
**Instead:** Wherever possible, please use :meth:`buffer_iter`.
"""
buffer = BytesIO()
for chunk in self.buffer_iter():
log.debug('buffer.write(%r)', chunk)
buffer.write(chunk)
buffer.seek(0)
return buffer.read() |
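A small sketch of the memory point the docstring makes: a chunk iterator can be consumed and forwarded one piece at a time, whereas a read()-style call materialises the whole payload first. The chunk generator below is a stand-in, not the mesh writer.

```python
# Sketch of the memory trade-off described above; the chunk source is a toy.
from io import BytesIO

def chunk_iter():
    for i in range(4):
        yield b"chunk-%d;" % i

# streaming style: each chunk can be written out and released immediately
sink = BytesIO()
for chunk in chunk_iter():
    sink.write(chunk)

# read() style: everything is joined into a single bytes object up front
whole = b"".join(chunk_iter())
print(sink.getvalue() == whole)   # True; same bytes, different peak memory
```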
def _update_pwm(self):
"""Update the pwm values of the driver regarding the current state."""
if self._is_on:
values = self._get_pwm_values()
else:
values = [0] * len(self._driver.pins)
self._driver.set_pwm(values) | Update the pwm values of the driver regarding the current state. | Below is the the instruction that describes the task:
### Input:
Update the pwm values of the driver regarding the current state.
### Response:
def _update_pwm(self):
"""Update the pwm values of the driver regarding the current state."""
if self._is_on:
values = self._get_pwm_values()
else:
values = [0] * len(self._driver.pins)
self._driver.set_pwm(values) |
def eotvos(target, k, temperature='pore.temperature',
critical_temperature='pore.critical_temperature',
molar_density='pore.molar_density'):
r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
k : float
Constant parameter specific to fluid
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
molar_density : string
The dictionary key containing the molar density values (K)
TODO: Needs description, and improve definition of k
"""
Tc = target[critical_temperature]
T = target[temperature]
Vm = 1/target[molar_density]
value = k*(Tc-T)/(Vm**(2/3))
return value | r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
k : float
Constant parameter specific to fluid
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
molar_density : string
The dictionary key containing the molar density values (K)
TODO: Needs description, and improve definition of k | Below is the the instruction that describes the task:
### Input:
r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
k : float
Constant parameter specific to fluid
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
molar_density : string
The dictionary key containing the molar density values (K)
TODO: Needs description, and improve definition of k
### Response:
def eotvos(target, k, temperature='pore.temperature',
critical_temperature='pore.critical_temperature',
molar_density='pore.molar_density'):
r"""
Missing description
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
k : float
Constant parameter specific to fluid
temperature : string
The dictionary key containing the temperature values (K)
critical_temperature : string
The dictionary key containing the critical temperature values (K)
molar_density : string
The dictionary key containing the molar density values (K)
TODO: Needs description, and improve definition of k
"""
Tc = target[critical_temperature]
T = target[temperature]
Vm = 1/target[molar_density]
value = k*(Tc-T)/(Vm**(2/3))
return value |
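A worked number for the relation above, sigma = k*(Tc - T)/Vm**(2/3), using the commonly quoted Eotvos constant and approximate literature values for benzene. This is only an illustration of the formula, not output of the model function.

```python
# Worked example of sigma = k*(Tc - T)/Vm**(2/3); values are approximate.
k = 2.1e-7     # J / (K * mol**(2/3)), commonly quoted Eotvos constant
Tc = 562.0     # K, critical temperature of benzene (approx.)
T = 298.15     # K
Vm = 8.9e-5    # m**3 / mol, molar volume of liquid benzene (approx.)

sigma = k * (Tc - T) / Vm ** (2 / 3)
print(round(sigma * 1e3, 1), "mN/m")   # ~27.8 mN/m vs. ~28 mN/m measured at 25 C
```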
def arrow_table_from_vaex_df(ds, column_names=None, selection=None, strings=True, virtual=False):
"""Implementation of Dataset.to_arrow_table"""
names = []
arrays = []
for name, array in ds.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual):
names.append(name)
arrays.append(arrow_array_from_numpy_array(array))
return pyarrow.Table.from_arrays(arrays, names) | Implementation of Dataset.to_arrow_table | Below is the the instruction that describes the task:
### Input:
Implementation of Dataset.to_arrow_table
### Response:
def arrow_table_from_vaex_df(ds, column_names=None, selection=None, strings=True, virtual=False):
"""Implementation of Dataset.to_arrow_table"""
names = []
arrays = []
for name, array in ds.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual):
names.append(name)
arrays.append(arrow_array_from_numpy_array(array))
return pyarrow.Table.from_arrays(arrays, names) |
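The conversion ends in pyarrow.Table.from_arrays, which takes parallel lists of arrays and column names. A tiny usage example, assuming pyarrow and numpy are installed:

```python
# Tiny illustration of the pyarrow call the conversion above ends with.
import numpy as np
import pyarrow

arrays = [pyarrow.array(np.arange(3)), pyarrow.array(["a", "b", "c"])]
table = pyarrow.Table.from_arrays(arrays, names=["x", "label"])
print(table.num_rows, table.column_names)   # 3 ['x', 'label']
```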
def image_predict_proba(self, X):
"""
Predicts class probabilities for the entire image.
Parameters:
-----------
X: array, shape = [n_samples, n_pixels_x, n_pixels_y, n_bands]
Array of training images
y: array, shape = [n_samples] or [n_samples, n_pixels_x, n_pixels_y, n_classes]
Target probabilities
"""
self._check_image(X)
probabilities = self.pixel_classifier.image_predict_proba(X)
patches, _ = self._to_patches(probabilities)
row_steps = self._image_size[0] // self.patch_size[0]
col_steps = self._image_size[1] // self.patch_size[1]
ps = self.patch_size[0] * self.patch_size[1]
# how can this be optimised?
for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):
patches[k, i, j, 0] = np.sum(patches[k, i, j, 0]) / ps
patches[k, i, j, 1] = np.sum(patches[k, i, j, 1]) / ps
return probabilities | Predicts class probabilities for the entire image.
Parameters:
-----------
X: array, shape = [n_samples, n_pixels_x, n_pixels_y, n_bands]
Array of training images
y: array, shape = [n_samples] or [n_samples, n_pixels_x, n_pixels_y, n_classes]
Target probabilities | Below is the the instruction that describes the task:
### Input:
Predicts class probabilities for the entire image.
Parameters:
-----------
X: array, shape = [n_samples, n_pixels_x, n_pixels_y, n_bands]
Array of training images
y: array, shape = [n_samples] or [n_samples, n_pixels_x, n_pixels_y, n_classes]
Target probabilities
### Response:
def image_predict_proba(self, X):
"""
Predicts class probabilities for the entire image.
Parameters:
-----------
X: array, shape = [n_samples, n_pixels_x, n_pixels_y, n_bands]
Array of training images
y: array, shape = [n_samples] or [n_samples, n_pixels_x, n_pixels_y, n_classes]
Target probabilities
"""
self._check_image(X)
probabilities = self.pixel_classifier.image_predict_proba(X)
patches, _ = self._to_patches(probabilities)
row_steps = self._image_size[0] // self.patch_size[0]
col_steps = self._image_size[1] // self.patch_size[1]
ps = self.patch_size[0] * self.patch_size[1]
# how can this be optimised?
for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):
patches[k, i, j, 0] = np.sum(patches[k, i, j, 0]) / ps
patches[k, i, j, 1] = np.sum(patches[k, i, j, 1]) / ps
return probabilities |
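One way to address the "how can this be optimised?" comment above: non-overlapping patch means can be computed with a reshape instead of a Python loop. This is a standalone NumPy sketch with illustrative shapes (it assumes the image size divides evenly by the patch size), not the classifier's API.

```python
# Vectorised non-overlapping patch means via reshape; illustrative shapes only.
import numpy as np

def patch_means(probabilities, patch_size):
    """Average each channel over non-overlapping patch_size blocks."""
    n, h, w, c = probabilities.shape
    ph, pw = patch_size
    blocks = probabilities.reshape(n, h // ph, ph, w // pw, pw, c)
    return blocks.mean(axis=(2, 4))          # -> (n, h//ph, w//pw, c)

probs = np.random.rand(2, 8, 8, 2)
print(patch_means(probs, (4, 4)).shape)      # (2, 2, 2, 2)
```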