code | docstring | text
---|---|---|
def views_preview_count(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#preview-count"
api_path = "/api/v2/views/preview/count.json"
return self.call(api_path, method="POST", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/views#preview-count | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/views#preview-count
### Response:
def views_preview_count(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#preview-count"
api_path = "/api/v2/views/preview/count.json"
return self.call(api_path, method="POST", data=data, **kwargs) |
async def _on_state_update(self, state_update):
"""Receive a StateUpdate and fan out to Conversations.
Args:
state_update: hangouts_pb2.StateUpdate instance
"""
# The state update will include some type of notification:
notification_type = state_update.WhichOneof('state_update')
# If conversation fields have been updated, the state update will have
# a conversation containing changed fields. Handle updating the
# conversation from this delta:
if state_update.HasField('conversation'):
try:
await self._handle_conversation_delta(
state_update.conversation
)
except exceptions.NetworkError:
logger.warning(
'Discarding %s for %s: Failed to fetch conversation',
notification_type.replace('_', ' '),
state_update.conversation.conversation_id.id
)
return
if notification_type == 'typing_notification':
await self._handle_set_typing_notification(
state_update.typing_notification
)
elif notification_type == 'watermark_notification':
await self._handle_watermark_notification(
state_update.watermark_notification
)
elif notification_type == 'event_notification':
await self._on_event(
state_update.event_notification.event
) | Receive a StateUpdate and fan out to Conversations.
Args:
state_update: hangouts_pb2.StateUpdate instance | Below is the instruction that describes the task:
### Input:
Receive a StateUpdate and fan out to Conversations.
Args:
state_update: hangouts_pb2.StateUpdate instance
### Response:
async def _on_state_update(self, state_update):
"""Receive a StateUpdate and fan out to Conversations.
Args:
state_update: hangouts_pb2.StateUpdate instance
"""
# The state update will include some type of notification:
notification_type = state_update.WhichOneof('state_update')
# If conversation fields have been updated, the state update will have
# a conversation containing changed fields. Handle updating the
# conversation from this delta:
if state_update.HasField('conversation'):
try:
await self._handle_conversation_delta(
state_update.conversation
)
except exceptions.NetworkError:
logger.warning(
'Discarding %s for %s: Failed to fetch conversation',
notification_type.replace('_', ' '),
state_update.conversation.conversation_id.id
)
return
if notification_type == 'typing_notification':
await self._handle_set_typing_notification(
state_update.typing_notification
)
elif notification_type == 'watermark_notification':
await self._handle_watermark_notification(
state_update.watermark_notification
)
elif notification_type == 'event_notification':
await self._on_event(
state_update.event_notification.event
) |
def getPlugItObject(hproPk):
"""Return the plugit object and the baseURI to use if not in standalone mode"""
from hprojects.models import HostedProject
try:
hproject = HostedProject.objects.get(pk=hproPk)
except (HostedProject.DoesNotExist, ValueError):
try:
hproject = HostedProject.objects.get(plugItCustomUrlKey=hproPk)
except HostedProject.DoesNotExist:
raise Http404
if hproject.plugItURI == '' and not hproject.runURI:
raise Http404
plugIt = PlugIt(hproject.plugItURI)
# Test if we should use custom key
if hasattr(hproject, 'plugItCustomUrlKey') and hproject.plugItCustomUrlKey:
baseURI = reverse('plugIt.views.main', args=(hproject.plugItCustomUrlKey, ''))
else:
baseURI = reverse('plugIt.views.main', args=(hproject.pk, ''))
return (plugIt, baseURI, hproject) | Return the plugit object and the baseURI to use if not in standalone mode | Below is the instruction that describes the task:
### Input:
Return the plugit object and the baseURI to use if not in standalone mode
### Response:
def getPlugItObject(hproPk):
"""Return the plugit object and the baseURI to use if not in standalone mode"""
from hprojects.models import HostedProject
try:
hproject = HostedProject.objects.get(pk=hproPk)
except (HostedProject.DoesNotExist, ValueError):
try:
hproject = HostedProject.objects.get(plugItCustomUrlKey=hproPk)
except HostedProject.DoesNotExist:
raise Http404
if hproject.plugItURI == '' and not hproject.runURI:
raise Http404
plugIt = PlugIt(hproject.plugItURI)
# Test if we should use custom key
if hasattr(hproject, 'plugItCustomUrlKey') and hproject.plugItCustomUrlKey:
baseURI = reverse('plugIt.views.main', args=(hproject.plugItCustomUrlKey, ''))
else:
baseURI = reverse('plugIt.views.main', args=(hproject.pk, ''))
return (plugIt, baseURI, hproject) |
def attach_usage_plan_to_apis(plan_id, apis, region=None, key=None, keyid=None, profile=None):
'''
Attaches given usage plan to each of the apis provided in a list of apiId and stage values
.. versionadded:: 2017.7.0
apis
a list of dictionaries, where each dictionary contains the following:
apiId
a string, which is the id of the created API in AWS ApiGateway
stage
a string, which is the stage that the created API is deployed to.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]'
'''
return _update_usage_plan_apis(plan_id, apis, 'add', region=region, key=key, keyid=keyid, profile=profile) | Attaches given usage plan to each of the apis provided in a list of apiId and stage values
.. versionadded:: 2017.7.0
apis
a list of dictionaries, where each dictionary contains the following:
apiId
a string, which is the id of the created API in AWS ApiGateway
stage
a string, which is the stage that the created API is deployed to.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]' | Below is the instruction that describes the task:
### Input:
Attaches given usage plan to each of the apis provided in a list of apiId and stage values
.. versionadded:: 2017.7.0
apis
a list of dictionaries, where each dictionary contains the following:
apiId
a string, which is the id of the created API in AWS ApiGateway
stage
a string, which is the stage that the created API is deployed to.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]'
### Response:
def attach_usage_plan_to_apis(plan_id, apis, region=None, key=None, keyid=None, profile=None):
'''
Attaches given usage plan to each of the apis provided in a list of apiId and stage values
.. versionadded:: 2017.7.0
apis
a list of dictionaries, where each dictionary contains the following:
apiId
a string, which is the id of the created API in AWS ApiGateway
stage
a string, which is the stage that the created API is deployed to.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]'
'''
return _update_usage_plan_apis(plan_id, apis, 'add', region=region, key=key, keyid=keyid, profile=profile) |
def load_config_json(conf_file):
"""Banana?"""
try:
with open(conf_file) as _:
try:
json_conf = json.load(_)
except ValueError as ze_error:
error('invalid-config',
'The provided configuration file %s is not valid json.\n'
'The exact error was %s.\n'
'This often happens because of missing or extra commas, '
'but it may be something else, please fix it!\n' %
(conf_file, str(ze_error)))
except FileNotFoundError:
json_conf = {}
except IOError as _err:
error('setup-issue',
'Passed config file %s could not be opened (%s)' %
(conf_file, _err))
return json_conf | Banana? | Below is the instruction that describes the task:
### Input:
Banana?
### Response:
def load_config_json(conf_file):
"""Banana?"""
try:
with open(conf_file) as _:
try:
json_conf = json.load(_)
except ValueError as ze_error:
error('invalid-config',
'The provided configuration file %s is not valid json.\n'
'The exact error was %s.\n'
'This often happens because of missing or extra commas, '
'but it may be something else, please fix it!\n' %
(conf_file, str(ze_error)))
except FileNotFoundError:
json_conf = {}
except IOError as _err:
error('setup-issue',
'Passed config file %s could not be opened (%s)' %
(conf_file, _err))
return json_conf |
def _get_vispy_app_dir():
"""Helper to get the default directory for storing vispy data"""
# Define default user directory
user_dir = os.path.expanduser('~')
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = path1 or path2
elif sys.platform.startswith('darwin'):
path = os.path.join(user_dir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = user_dir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None): # See application_dir() function
prefix = os.path.abspath(os.path.dirname(sys.path[0]))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath):
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
appname = '.vispy' if path == user_dir else 'vispy'
path = os.path.join(path, appname)
return path | Helper to get the default directory for storing vispy data | Below is the instruction that describes the task:
### Input:
Helper to get the default directory for storing vispy data
### Response:
def _get_vispy_app_dir():
"""Helper to get the default directory for storing vispy data"""
# Define default user directory
user_dir = os.path.expanduser('~')
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = path1 or path2
elif sys.platform.startswith('darwin'):
path = os.path.join(user_dir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = user_dir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None): # See application_dir() function
prefix = os.path.abspath(os.path.dirname(sys.path[0]))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath):
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
appname = '.vispy' if path == user_dir else 'vispy'
path = os.path.join(path, appname)
return path |
def init(force):
"""Initialize registered aliases and mappings."""
click.secho('Creating indexes...', fg='green', bold=True, file=sys.stderr)
with click.progressbar(
current_search.create(ignore=[400] if force else None),
length=current_search.number_of_indexes) as bar:
for name, response in bar:
bar.label = name
click.secho('Putting templates...', fg='green', bold=True, file=sys.stderr)
with click.progressbar(
current_search.put_templates(ignore=[400] if force else None),
length=len(current_search.templates.keys())) as bar:
for response in bar:
bar.label = response | Initialize registered aliases and mappings. | Below is the instruction that describes the task:
### Input:
Initialize registered aliases and mappings.
### Response:
def init(force):
"""Initialize registered aliases and mappings."""
click.secho('Creating indexes...', fg='green', bold=True, file=sys.stderr)
with click.progressbar(
current_search.create(ignore=[400] if force else None),
length=current_search.number_of_indexes) as bar:
for name, response in bar:
bar.label = name
click.secho('Putting templates...', fg='green', bold=True, file=sys.stderr)
with click.progressbar(
current_search.put_templates(ignore=[400] if force else None),
length=len(current_search.templates.keys())) as bar:
for response in bar:
bar.label = response |
def delete(cls, uuid):
"""Delete a workflow."""
to_delete = Workflow.query.get(uuid)
db.session.delete(to_delete) | Delete a workflow. | Below is the instruction that describes the task:
### Input:
Delete a workflow.
### Response:
def delete(cls, uuid):
"""Delete a workflow."""
to_delete = Workflow.query.get(uuid)
db.session.delete(to_delete) |
def disasters_sim(early_mean=early_mean,
late_mean=late_mean,
switchpoint=switchpoint):
"""Coal mining disasters sampled from the posterior predictive distribution"""
return concatenate((pm.rpoisson(early_mean, size=switchpoint), pm.rpoisson(
late_mean, size=n - switchpoint))) | Coal mining disasters sampled from the posterior predictive distribution | Below is the instruction that describes the task:
### Input:
Coal mining disasters sampled from the posterior predictive distribution
### Response:
def disasters_sim(early_mean=early_mean,
late_mean=late_mean,
switchpoint=switchpoint):
"""Coal mining disasters sampled from the posterior predictive distribution"""
return concatenate((pm.rpoisson(early_mean, size=switchpoint), pm.rpoisson(
late_mean, size=n - switchpoint))) |
def set_pin(self, newPin: str, oldPin: str = None) -> dict:
""" sets a new pin for the home
Args:
newPin(str): the new pin
oldPin(str): optional, if there is currently a pin active it must be given here.
Otherwise it will not be possible to set the new pin
Returns:
the result of the call
"""
if newPin is None:
newPin = ""
data = {"pin": newPin}
if oldPin:
self._connection.headers["PIN"] = str(oldPin)
result = self._restCall("home/setPin", body=json.dumps(data))
if oldPin:
del self._connection.headers["PIN"]
return result | sets a new pin for the home
Args:
newPin(str): the new pin
oldPin(str): optional, if there is currently a pin active it must be given here.
Otherwise it will not be possible to set the new pin
Returns:
the result of the call | Below is the instruction that describes the task:
### Input:
sets a new pin for the home
Args:
newPin(str): the new pin
oldPin(str): optional, if there is currently a pin active it must be given here.
Otherwise it will not be possible to set the new pin
Returns:
the result of the call
### Response:
def set_pin(self, newPin: str, oldPin: str = None) -> dict:
""" sets a new pin for the home
Args:
newPin(str): the new pin
oldPin(str): optional, if there is currently a pin active it must be given here.
Otherwise it will not be possible to set the new pin
Returns:
the result of the call
"""
if newPin is None:
newPin = ""
data = {"pin": newPin}
if oldPin:
self._connection.headers["PIN"] = str(oldPin)
result = self._restCall("home/setPin", body=json.dumps(data))
if oldPin:
del self._connection.headers["PIN"]
return result |
def evaluate(self, x, y, flux, x_0, y_0):
"""
Evaluate the `GriddedPSFModel` for the input parameters.
"""
# NOTE: this is needed because the PSF photometry routines input
# length-1 values instead of scalars. TODO: fix the photometry
# routines.
if not np.isscalar(x_0):
x_0 = x_0[0]
if not np.isscalar(y_0):
y_0 = y_0[0]
if (x_0 < self._xgrid_min or x_0 > self._xgrid_max or
y_0 < self._ygrid_min or y_0 > self._ygrid_max):
# position is outside of the grid, so simply use the
# closest reference PSF
self._ref_indices = np.argsort(np.hypot(self._grid_xpos - x_0,
self._grid_ypos - y_0))[0]
self._psf_interp = self.data[self._ref_indices, :, :]
else:
# find the four bounding reference PSFs and interpolate
self._ref_indices = self._find_bounding_points(x_0, y_0)
xyref = np.array(self.grid_xypos)[self._ref_indices]
psfs = self.data[self._ref_indices, :, :]
self._psf_interp = self._bilinear_interp(xyref, psfs, x_0, y_0)
# now evaluate the PSF at the (x_0, y_0) subpixel position on
# the input (x, y) values
psfmodel = FittableImageModel(self._psf_interp,
oversampling=self.oversampling)
return psfmodel.evaluate(x, y, flux, x_0, y_0) | Evaluate the `GriddedPSFModel` for the input parameters. | Below is the instruction that describes the task:
### Input:
Evaluate the `GriddedPSFModel` for the input parameters.
### Response:
def evaluate(self, x, y, flux, x_0, y_0):
"""
Evaluate the `GriddedPSFModel` for the input parameters.
"""
# NOTE: this is needed because the PSF photometry routines input
# length-1 values instead of scalars. TODO: fix the photometry
# routines.
if not np.isscalar(x_0):
x_0 = x_0[0]
if not np.isscalar(y_0):
y_0 = y_0[0]
if (x_0 < self._xgrid_min or x_0 > self._xgrid_max or
y_0 < self._ygrid_min or y_0 > self._ygrid_max):
# position is outside of the grid, so simply use the
# closest reference PSF
self._ref_indices = np.argsort(np.hypot(self._grid_xpos - x_0,
self._grid_ypos - y_0))[0]
self._psf_interp = self.data[self._ref_indices, :, :]
else:
# find the four bounding reference PSFs and interpolate
self._ref_indices = self._find_bounding_points(x_0, y_0)
xyref = np.array(self.grid_xypos)[self._ref_indices]
psfs = self.data[self._ref_indices, :, :]
self._psf_interp = self._bilinear_interp(xyref, psfs, x_0, y_0)
# now evaluate the PSF at the (x_0, y_0) subpixel position on
# the input (x, y) values
psfmodel = FittableImageModel(self._psf_interp,
oversampling=self.oversampling)
return psfmodel.evaluate(x, y, flux, x_0, y_0) |
def get_sql_select_all_fields_by_key(
table: str,
fieldlist: Sequence[str],
keyname: str,
delims: Tuple[str, str] = ("", "")) -> str:
"""Returns SQL:
SELECT [all fields in the fieldlist] WHERE [keyname] = ?
"""
return (
"SELECT " +
",".join([delimit(x, delims) for x in fieldlist]) +
" FROM " + delimit(table, delims) +
" WHERE " + delimit(keyname, delims) + "=?"
) | Returns SQL:
SELECT [all fields in the fieldlist] WHERE [keyname] = ? | Below is the instruction that describes the task:
### Input:
Returns SQL:
SELECT [all fields in the fieldlist] WHERE [keyname] = ?
### Response:
def get_sql_select_all_fields_by_key(
table: str,
fieldlist: Sequence[str],
keyname: str,
delims: Tuple[str, str] = ("", "")) -> str:
"""Returns SQL:
SELECT [all fields in the fieldlist] WHERE [keyname] = ?
"""
return (
"SELECT " +
",".join([delimit(x, delims) for x in fieldlist]) +
" FROM " + delimit(table, delims) +
" WHERE " + delimit(keyname, delims) + "=?"
) |
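A minimal usage sketch of the query builder above. The `delimit()` helper is not shown in this row, so the stand-in below simply wraps an identifier in the given (left, right) pair, and the table and field names are made up for illustration.

```python
from typing import Sequence, Tuple

def delimit(x: str, delims: Tuple[str, str] = ("", "")) -> str:
    # Hypothetical stand-in for the delimit() helper referenced above.
    return delims[0] + x + delims[1]

def get_sql_select_all_fields_by_key(
        table: str,
        fieldlist: Sequence[str],
        keyname: str,
        delims: Tuple[str, str] = ("", "")) -> str:
    # Same construction as in the row above, repeated so the demo is runnable.
    return (
        "SELECT " +
        ",".join([delimit(x, delims) for x in fieldlist]) +
        " FROM " + delimit(table, delims) +
        " WHERE " + delimit(keyname, delims) + "=?"
    )

print(get_sql_select_all_fields_by_key("patient", ["forename", "surname"], "id",
                                       delims=("`", "`")))
# SELECT `forename`,`surname` FROM `patient` WHERE `id`=?
```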
def normalise(v, dimN=2):
r"""Normalise vectors, corresponding to slices along specified number
of initial spatial dimensions of an array, to have unit
:math:`\ell_2` norm. The remaining axes enumerate the distinct
vectors to be normalised.
Parameters
----------
v : array_like
Array with components to be normalised
dimN : int, optional (default 2)
Number of initial dimensions over which norm should be computed
Returns
-------
vnrm : ndarray
Normalised array
"""
axisN = tuple(range(0, dimN))
vn = np.sqrt(np.sum(v**2, axisN, keepdims=True))
vn[vn == 0] = 1.0
return np.asarray(v / vn, dtype=v.dtype) | r"""Normalise vectors, corresponding to slices along specified number
of initial spatial dimensions of an array, to have unit
:math:`\ell_2` norm. The remaining axes enumerate the distinct
vectors to be normalised.
Parameters
----------
v : array_like
Array with components to be normalised
dimN : int, optional (default 2)
Number of initial dimensions over which norm should be computed
Returns
-------
vnrm : ndarray
Normalised array | Below is the instruction that describes the task:
### Input:
r"""Normalise vectors, corresponding to slices along specified number
of initial spatial dimensions of an array, to have unit
:math:`\ell_2` norm. The remaining axes enumerate the distinct
vectors to be normalised.
Parameters
----------
v : array_like
Array with components to be normalised
dimN : int, optional (default 2)
Number of initial dimensions over which norm should be computed
Returns
-------
vnrm : ndarray
Normalised array
### Response:
def normalise(v, dimN=2):
r"""Normalise vectors, corresponding to slices along specified number
of initial spatial dimensions of an array, to have unit
:math:`\ell_2` norm. The remaining axes enumerate the distinct
vectors to be normalised.
Parameters
----------
v : array_like
Array with components to be normalised
dimN : int, optional (default 2)
Number of initial dimensions over which norm should be computed
Returns
-------
vnrm : ndarray
Normalised array
"""
axisN = tuple(range(0, dimN))
vn = np.sqrt(np.sum(v**2, axisN, keepdims=True))
vn[vn == 0] = 1.0
return np.asarray(v / vn, dtype=v.dtype) |
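A short, self-contained check of the behaviour described above: slices taken along the first `dimN` axes are rescaled to unit l2 norm, so summing the squared, normalised values over those axes gives 1 for every remaining index.

```python
import numpy as np

def normalise(v, dimN=2):
    # Same implementation as the row above, repeated so the demo runs standalone.
    axisN = tuple(range(0, dimN))
    vn = np.sqrt(np.sum(v**2, axisN, keepdims=True))
    vn[vn == 0] = 1.0
    return np.asarray(v / vn, dtype=v.dtype)

rng = np.random.default_rng(0)
v = rng.standard_normal((4, 4, 3))      # three 4x4 "vectors" stacked on the last axis
vnrm = normalise(v, dimN=2)
print(np.sum(vnrm**2, axis=(0, 1)))     # ~[1. 1. 1.]: each slice now has unit l2 norm
```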
def simhash(self, content):
"""
Select policies for simhash on the different types of content.
"""
if content is None:
self.hash = -1
return
if isinstance(content, str):
features = self.tokenizer_func(content, self.keyword_weight_pari)
self.hash = self.build_from_features(features)
elif isinstance(content, collections.Iterable):
self.hash = self.build_from_features(content)
elif isinstance(content, int):
self.hash = content
else:
raise Exception("Unsupported parameter type %s" % type(content)) | Select policies for simhash on the different types of content. | Below is the instruction that describes the task:
### Input:
Select policies for simhash on the different types of content.
### Response:
def simhash(self, content):
"""
Select policies for simhash on the different types of content.
"""
if content is None:
self.hash = -1
return
if isinstance(content, str):
features = self.tokenizer_func(content, self.keyword_weight_pari)
self.hash = self.build_from_features(features)
elif isinstance(content, collections.Iterable):
self.hash = self.build_from_features(content)
elif isinstance(content, int):
self.hash = content
else:
raise Exception("Unsupported parameter type %s" % type(content)) |
def Crowl_Louvar_UFL(atoms):
r'''Calculates upper flammability limit, using the Crowl-Louvar [1]_
correlation. Uses molecular formula only.
The upper flammability limit of a gas in air is:
.. math::
C_mH_xO_y + zO_2 \to mCO_2 + \frac{x}{2}H_2O
\text{UFL} = \frac{3.5}{4.76m + 1.19x - 2.38y + 1}
Parameters
----------
atoms : dict
Dictionary of atoms and atom counts
Returns
-------
UFL : float
Upper flammability limit, mole fraction
Notes
-----
Coefficient of 3.5 taken from [2]_
Examples
--------
Hexane, example from [1]_, lit. 7.5 %
>>> Crowl_Louvar_UFL({'H': 14, 'C': 6})
0.07572479446127219
References
----------
.. [1] Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:
Fundamentals with Applications. 2E. Upper Saddle River, N.J:
Prentice Hall, 2001.
.. [2] Jones, G. W. "Inflammation Limits and Their Practical Application
in Hazardous Industrial Operations." Chemical Reviews 22, no. 1
(February 1, 1938): 1-26. doi:10.1021/cr60071a001
'''
nC, nH, nO = 0, 0, 0
if 'C' in atoms and atoms['C']:
nC = atoms['C']
else:
return None
if 'H' in atoms:
nH = atoms['H']
if 'O' in atoms:
nO = atoms['O']
return 3.5/(4.76*nC + 1.19*nH - 2.38*nO + 1.) | r'''Calculates upper flammability limit, using the Crowl-Louvar [1]_
correlation. Uses molecular formula only.
The upper flammability limit of a gas in air is:
.. math::
C_mH_xO_y + zO_2 \to mCO_2 + \frac{x}{2}H_2O
\text{UFL} = \frac{3.5}{4.76m + 1.19x - 2.38y + 1}
Parameters
----------
atoms : dict
Dictionary of atoms and atom counts
Returns
-------
UFL : float
Upper flammability limit, mole fraction
Notes
-----
Coefficient of 3.5 taken from [2]_
Examples
--------
Hexane, example from [1]_, lit. 7.5 %
>>> Crowl_Louvar_UFL({'H': 14, 'C': 6})
0.07572479446127219
References
----------
.. [1] Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:
Fundamentals with Applications. 2E. Upper Saddle River, N.J:
Prentice Hall, 2001.
.. [2] Jones, G. W. "Inflammation Limits and Their Practical Application
in Hazardous Industrial Operations." Chemical Reviews 22, no. 1
(February 1, 1938): 1-26. doi:10.1021/cr60071a001 | Below is the instruction that describes the task:
### Input:
r'''Calculates upper flammability limit, using the Crowl-Louvar [1]_
correlation. Uses molecular formula only.
The upper flammability limit of a gas in air is:
.. math::
C_mH_xO_y + zO_2 \to mCO_2 + \frac{x}{2}H_2O
\text{UFL} = \frac{3.5}{4.76m + 1.19x - 2.38y + 1}
Parameters
----------
atoms : dict
Dictionary of atoms and atom counts
Returns
-------
UFL : float
Upper flammability limit, mole fraction
Notes
-----
Coefficient of 3.5 taken from [2]_
Examples
--------
Hexane, example from [1]_, lit. 7.5 %
>>> Crowl_Louvar_UFL({'H': 14, 'C': 6})
0.07572479446127219
References
----------
.. [1] Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:
Fundamentals with Applications. 2E. Upper Saddle River, N.J:
Prentice Hall, 2001.
.. [2] Jones, G. W. "Inflammation Limits and Their Practical Application
in Hazardous Industrial Operations." Chemical Reviews 22, no. 1
(February 1, 1938): 1-26. doi:10.1021/cr60071a001
### Response:
def Crowl_Louvar_UFL(atoms):
r'''Calculates upper flammability limit, using the Crowl-Louvar [1]_
correlation. Uses molecular formula only.
The upper flammability limit of a gas in air is:
.. math::
C_mH_xO_y + zO_2 \to mCO_2 + \frac{x}{2}H_2O
\text{UFL} = \frac{3.5}{4.76m + 1.19x - 2.38y + 1}
Parameters
----------
atoms : dict
Dictionary of atoms and atom counts
Returns
-------
UFL : float
Upper flammability limit, mole fraction
Notes
-----
Coefficient of 3.5 taken from [2]_
Examples
--------
Hexane, example from [1]_, lit. 7.5 %
>>> Crowl_Louvar_UFL({'H': 14, 'C': 6})
0.07572479446127219
References
----------
.. [1] Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:
Fundamentals with Applications. 2E. Upper Saddle River, N.J:
Prentice Hall, 2001.
.. [2] Jones, G. W. "Inflammation Limits and Their Practical Application
in Hazardous Industrial Operations." Chemical Reviews 22, no. 1
(February 1, 1938): 1-26. doi:10.1021/cr60071a001
'''
nC, nH, nO = 0, 0, 0
if 'C' in atoms and atoms['C']:
nC = atoms['C']
else:
return None
if 'H' in atoms:
nH = atoms['H']
if 'O' in atoms:
nO = atoms['O']
return 3.5/(4.76*nC + 1.19*nH - 2.38*nO + 1.) |
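The hexane doctest above can be reproduced by hand from the correlation: a quick, self-contained check plugging C6H14 (m = 6, x = 14, y = 0) into the formula.

```python
# UFL = 3.5 / (4.76*m + 1.19*x - 2.38*y + 1), with hexane's atom counts:
m, x, y = 6, 14, 0
print(3.5 / (4.76*m + 1.19*x - 2.38*y + 1.0))   # 0.07572479446127219, i.e. ~7.6 mol%
```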
def update_graderoster(graderoster, requestor):
"""
Updates the graderoster resource for the passed restclients.GradeRoster
model. A new restclients.GradeRoster is returned, representing the
document returned from the update request.
"""
label = graderoster.graderoster_label()
url = "{}/{}".format(graderoster_url, encode_section_label(label))
headers = {"Content-Type": "application/xhtml+xml",
"Connection": "keep-alive",
"X-UW-Act-as": requestor.uwnetid}
body = graderoster.xhtml()
response = SWS_GradeRoster_DAO().putURL(url, headers, body)
if response.status != 200:
root = etree.fromstring(response.data)
msg = root.find(".//*[@class='status_description']").text.strip()
raise DataFailureException(url, response.status, msg)
return GradeRoster(data=etree.fromstring(response.data.strip()),
section=graderoster.section,
instructor=graderoster.instructor) | Updates the graderoster resource for the passed restclients.GradeRoster
model. A new restclients.GradeRoster is returned, representing the
document returned from the update request. | Below is the instruction that describes the task:
### Input:
Updates the graderoster resource for the passed restclients.GradeRoster
model. A new restclients.GradeRoster is returned, representing the
document returned from the update request.
### Response:
def update_graderoster(graderoster, requestor):
"""
Updates the graderoster resource for the passed restclients.GradeRoster
model. A new restclients.GradeRoster is returned, representing the
document returned from the update request.
"""
label = graderoster.graderoster_label()
url = "{}/{}".format(graderoster_url, encode_section_label(label))
headers = {"Content-Type": "application/xhtml+xml",
"Connection": "keep-alive",
"X-UW-Act-as": requestor.uwnetid}
body = graderoster.xhtml()
response = SWS_GradeRoster_DAO().putURL(url, headers, body)
if response.status != 200:
root = etree.fromstring(response.data)
msg = root.find(".//*[@class='status_description']").text.strip()
raise DataFailureException(url, response.status, msg)
return GradeRoster(data=etree.fromstring(response.data.strip()),
section=graderoster.section,
instructor=graderoster.instructor) |
def _group_chunks_by_entities(self, chunks, entities):
"""Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks | Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`) | Below is the instruction that describes the task:
### Input:
Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
### Response:
def _group_chunks_by_entities(self, chunks, entities):
"""Groups chunks by entities retrieved from NL API Entity Analysis.
Args:
chunks (:obj:`budou.chunk.ChunkList`): List of chunks to be processed.
entities (:obj:`list` of :obj:`dict`): List of entities.
Returns:
A chunk list. (:obj:`budou.chunk.ChunkList`)
"""
for entity in entities:
chunks_to_concat = chunks.get_overlaps(
entity['beginOffset'], len(entity['content']))
if not chunks_to_concat:
continue
new_chunk_word = u''.join([chunk.word for chunk in chunks_to_concat])
new_chunk = Chunk(new_chunk_word)
chunks.swap(chunks_to_concat, new_chunk)
return chunks |
def points_are_in_a_straight_line( points, tolerance=1e-7 ):
"""
Check whether a set of points fall on a straight line.
Calculates the areas of triangles formed by triplets of the points.
Returns False if any of these areas are larger than the tolerance.
Args:
points (list(np.array)): list of Cartesian coordinates for each point.
tolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7.
Returns:
(bool): True if all points fall on a straight line (within the allowed tolerance).
"""
a = points[0]
b = points[1]
for c in points[2:]:
if area_of_a_triangle_in_cartesian_space( a, b, c ) > tolerance:
return False
return True | Check whether a set of points fall on a straight line.
Calculates the areas of triangles formed by triplets of the points.
Returns False if any of these areas are larger than the tolerance.
Args:
points (list(np.array)): list of Cartesian coordinates for each point.
tolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7.
Returns:
(bool): True if all points fall on a straight line (within the allowed tolerance). | Below is the instruction that describes the task:
### Input:
Check whether a set of points fall on a straight line.
Calculates the areas of triangles formed by triplets of the points.
Returns False if any of these areas are larger than the tolerance.
Args:
points (list(np.array)): list of Cartesian coordinates for each point.
tolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7.
Returns:
(bool): True if all points fall on a straight line (within the allowed tolerance).
### Response:
def points_are_in_a_straight_line( points, tolerance=1e-7 ):
"""
Check whether a set of points fall on a straight line.
Calculates the areas of triangles formed by triplets of the points.
Returns False if any of these areas are larger than the tolerance.
Args:
points (list(np.array)): list of Cartesian coordinates for each point.
tolerance (optional:float): the maximum triangle size for these points to be considered colinear. Default is 1e-7.
Returns:
(bool): True if all points fall on a straight line (within the allowed tolerance).
"""
a = points[0]
b = points[1]
for c in points[2:]:
if area_of_a_triangle_in_cartesian_space( a, b, c ) > tolerance:
return False
return True |
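A self-contained sketch of the colinearity check above. The `area_of_a_triangle_in_cartesian_space()` helper is not shown in this row; the version below (half the magnitude of the cross product of two edge vectors) is an assumed but standard way to compute that area.

```python
import numpy as np

def area_of_a_triangle_in_cartesian_space(a, b, c):
    # Assumed implementation: half the cross-product magnitude of two edge vectors.
    return 0.5 * np.linalg.norm(np.cross(b - a, c - a))

def points_are_in_a_straight_line(points, tolerance=1e-7):
    a, b = points[0], points[1]
    return all(area_of_a_triangle_in_cartesian_space(a, b, c) <= tolerance
               for c in points[2:])

line = [np.array([0.0, 0.0, 0.0]), np.array([1.0, 1.0, 1.0]), np.array([2.0, 2.0, 2.0])]
bent = line[:2] + [np.array([2.0, 2.0, 3.0])]
print(points_are_in_a_straight_line(line))   # True
print(points_are_in_a_straight_line(bent))   # False
```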
def sum_num_dicts(dicts, normalize=False):
"""Sums the given dicts into a single dict mapping each key to the sum
of its mappings in all given dicts.
Parameters
----------
dicts : list
A list of dict objects mapping each key to a numeric value.
normalize : bool, default False
Indicates whether to normalize all values by value sum.
Returns
-------
dict
A dict where each key is mapped to the sum of its mappings in all
given dicts.
Example
-------
>>> dict1 = {'a': 3, 'b': 2}
>>> dict2 = {'a':7, 'c': 8}
>>> result = sum_num_dicts([dict1, dict2])
>>> print(sorted(result.items()))
[('a', 10), ('b', 2), ('c', 8)]
>>> result = sum_num_dicts([dict1, dict2], normalize=True)
>>> print(sorted(result.items()))
[('a', 0.5), ('b', 0.1), ('c', 0.4)]
"""
sum_dict = {}
for dicti in dicts:
for key in dicti:
sum_dict[key] = sum_dict.get(key, 0) + dicti[key]
if normalize:
return norm_int_dict(sum_dict)
return sum_dict | Sums the given dicts into a single dict mapping each key to the sum
of its mappings in all given dicts.
Parameters
----------
dicts : list
A list of dict objects mapping each key to a numeric value.
normalize : bool, default False
Indicates whether to normalize all values by value sum.
Returns
-------
dict
A dict where each key is mapped to the sum of its mappings in all
given dicts.
Example
-------
>>> dict1 = {'a': 3, 'b': 2}
>>> dict2 = {'a':7, 'c': 8}
>>> result = sum_num_dicts([dict1, dict2])
>>> print(sorted(result.items()))
[('a', 10), ('b', 2), ('c', 8)]
>>> result = sum_num_dicts([dict1, dict2], normalize=True)
>>> print(sorted(result.items()))
[('a', 0.5), ('b', 0.1), ('c', 0.4)] | Below is the instruction that describes the task:
### Input:
Sums the given dicts into a single dict mapping each key to the sum
of its mappings in all given dicts.
Parameters
----------
dicts : list
A list of dict objects mapping each key to a numeric value.
normalize : bool, default False
Indicates whether to normalize all values by value sum.
Returns
-------
dict
A dict where each key is mapped to the sum of its mappings in all
given dicts.
Example
-------
>>> dict1 = {'a': 3, 'b': 2}
>>> dict2 = {'a':7, 'c': 8}
>>> result = sum_num_dicts([dict1, dict2])
>>> print(sorted(result.items()))
[('a', 10), ('b', 2), ('c', 8)]
>>> result = sum_num_dicts([dict1, dict2], normalize=True)
>>> print(sorted(result.items()))
[('a', 0.5), ('b', 0.1), ('c', 0.4)]
### Response:
def sum_num_dicts(dicts, normalize=False):
"""Sums the given dicts into a single dict mapping each key to the sum
of its mappings in all given dicts.
Parameters
----------
dicts : list
A list of dict objects mapping each key to a numeric value.
normalize : bool, default False
Indicates whether to normalize all values by value sum.
Returns
-------
dict
A dict where each key is mapped to the sum of its mappings in all
given dicts.
Example
-------
>>> dict1 = {'a': 3, 'b': 2}
>>> dict2 = {'a':7, 'c': 8}
>>> result = sum_num_dicts([dict1, dict2])
>>> print(sorted(result.items()))
[('a', 10), ('b', 2), ('c', 8)]
>>> result = sum_num_dicts([dict1, dict2], normalize=True)
>>> print(sorted(result.items()))
[('a', 0.5), ('b', 0.1), ('c', 0.4)]
"""
sum_dict = {}
for dicti in dicts:
for key in dicti:
sum_dict[key] = sum_dict.get(key, 0) + dicti[key]
if normalize:
return norm_int_dict(sum_dict)
return sum_dict |
def _lastWord(self, text):
"""Move backward to the start of the word at the end of a string.
Return the word
"""
for index, char in enumerate(text[::-1]):
if char.isspace() or \
char in ('(', ')'):
return text[len(text) - index :]
else:
return text | Move backward to the start of the word at the end of a string.
Return the word | Below is the the instruction that describes the task:
### Input:
Move backward to the start of the word at the end of a string.
Return the word
### Response:
def _lastWord(self, text):
"""Move backward to the start of the word at the end of a string.
Return the word
"""
for index, char in enumerate(text[::-1]):
if char.isspace() or \
char in ('(', ')'):
return text[len(text) - index :]
else:
return text |
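The method above never touches `self`, so it can be exercised as a plain function. A couple of calls make the behaviour concrete: it returns the trailing word after the last whitespace or parenthesis, or the whole string if there is none.

```python
def last_word(text):
    # Standalone adaptation of _lastWord() above, for demonstration only.
    for index, char in enumerate(text[::-1]):
        if char.isspace() or char in ('(', ')'):
            return text[len(text) - index:]
    else:
        return text

print(last_word("import os.pa"))   # -> 'os.pa'
print(last_word("foo(bar"))        # -> 'bar'
print(last_word("word"))           # -> 'word'
```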
def url_join(base, *args):
"""
Helper function to join an arbitrary number of url segments together.
"""
scheme, netloc, path, query, fragment = urlsplit(base)
path = path if len(path) else "/"
path = posixpath.join(path, *[('%s' % x) for x in args])
return urlunsplit([scheme, netloc, path, query, fragment]) | Helper function to join an arbitrary number of url segments together. | Below is the instruction that describes the task:
### Input:
Helper function to join an arbitrary number of url segments together.
### Response:
def url_join(base, *args):
"""
Helper function to join an arbitrary number of url segments together.
"""
scheme, netloc, path, query, fragment = urlsplit(base)
path = path if len(path) else "/"
path = posixpath.join(path, *[('%s' % x) for x in args])
return urlunsplit([scheme, netloc, path, query, fragment]) |
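A runnable demo of the helper above. The row does not show its imports, so the sketch assumes Python 3, where `urlsplit`/`urlunsplit` live in `urllib.parse`.

```python
import posixpath
from urllib.parse import urlsplit, urlunsplit

def url_join(base, *args):
    # Same logic as above: split the base URL, join the extra segments onto its
    # path with posixpath, then reassemble the URL.
    scheme, netloc, path, query, fragment = urlsplit(base)
    path = path if len(path) else "/"
    path = posixpath.join(path, *[('%s' % x) for x in args])
    return urlunsplit([scheme, netloc, path, query, fragment])

print(url_join("http://example.com/api", "v2", "items", 42))
# http://example.com/api/v2/items/42
```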
def select_page(self, limit, offset=0, **kwargs):
"""
:type limit: int
:param limit: The max row number for each page
:type offset: int
:param offset: The starting position of the page
:return:
"""
start = offset
while True:
result = self.select(limit=[start, limit], **kwargs)
start += limit
if result:
yield result
else:
break
if self.debug:
break | :type limit: int
:param limit: The max row number for each page
:type offset: int
:param offset: The starting position of the page
:return: | Below is the instruction that describes the task:
### Input:
:type limit: int
:param limit: The max row number for each page
:type offset: int
:param offset: The starting position of the page
:return:
### Response:
def select_page(self, limit, offset=0, **kwargs):
"""
:type limit: int
:param limit: The max row number for each page
:type offset: int
:param offset: The starting position of the page
:return:
"""
start = offset
while True:
result = self.select(limit=[start, limit], **kwargs)
start += limit
if result:
yield result
else:
break
if self.debug:
break |
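A minimal, self-contained sketch of the pagination pattern above. `FakeTable` and its `select()` method are hypothetical stand-ins for the real database wrapper; `select()` just slices an in-memory list the way a `LIMIT offset, count` clause would.

```python
class FakeTable:
    def __init__(self, rows):
        self.rows = rows
        self.debug = False

    def select(self, limit, **kwargs):
        # limit is [offset, count], mirroring a SQL "LIMIT offset, count" clause.
        offset, count = limit
        return self.rows[offset:offset + count]

    def select_page(self, limit, offset=0, **kwargs):
        # Same generator as above: keep fetching pages until an empty result comes back.
        start = offset
        while True:
            result = self.select(limit=[start, limit], **kwargs)
            start += limit
            if result:
                yield result
            else:
                break
            if self.debug:
                break

table = FakeTable(list(range(7)))
for page in table.select_page(limit=3):
    print(page)    # [0, 1, 2], then [3, 4, 5], then [6]
```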
def _create_page(cls, page, lang, auto_title, cms_app=None, parent=None, namespace=None,
site=None, set_home=False):
"""
Create a single page or titles
:param page: Page instance
:param lang: language code
:param auto_title: title text for the newly created title
:param cms_app: Apphook Class to be attached to the page
:param parent: parent page (None when creating the home page)
:param namespace: application instance name (as provided to the ApphookConfig)
:param set_home: mark as home page (on django CMS 3.5 only)
:return: draft copy of the created page
"""
from cms.api import create_page, create_title
from cms.utils.conf import get_templates
default_template = get_templates()[0][0]
if page is None:
page = create_page(
auto_title, language=lang, parent=parent, site=site,
template=default_template, in_navigation=True, published=True
)
page.application_urls = cms_app
page.application_namespace = namespace
page.save()
page.publish(lang)
elif lang not in page.get_languages():
create_title(
language=lang, title=auto_title, page=page
)
page.publish(lang)
if set_home:
page.set_as_homepage()
return page.get_draft_object() | Create a single page or titles
:param page: Page instance
:param lang: language code
:param auto_title: title text for the newly created title
:param cms_app: Apphook Class to be attached to the page
:param parent: parent page (None when creating the home page)
:param namespace: application instance name (as provided to the ApphookConfig)
:param set_home: mark as home page (on django CMS 3.5 only)
:return: draft copy of the created page | Below is the instruction that describes the task:
### Input:
Create a single page or titles
:param page: Page instance
:param lang: language code
:param auto_title: title text for the newly created title
:param cms_app: Apphook Class to be attached to the page
:param parent: parent page (None when creating the home page)
:param namespace: application instance name (as provided to the ApphookConfig)
:param set_home: mark as home page (on django CMS 3.5 only)
:return: draft copy of the created page
### Response:
def _create_page(cls, page, lang, auto_title, cms_app=None, parent=None, namespace=None,
site=None, set_home=False):
"""
Create a single page or titles
:param page: Page instance
:param lang: language code
:param auto_title: title text for the newly created title
:param cms_app: Apphook Class to be attached to the page
:param parent: parent page (None when creating the home page)
:param namespace: application instance name (as provided to the ApphookConfig)
:param set_home: mark as home page (on django CMS 3.5 only)
:return: draft copy of the created page
"""
from cms.api import create_page, create_title
from cms.utils.conf import get_templates
default_template = get_templates()[0][0]
if page is None:
page = create_page(
auto_title, language=lang, parent=parent, site=site,
template=default_template, in_navigation=True, published=True
)
page.application_urls = cms_app
page.application_namespace = namespace
page.save()
page.publish(lang)
elif lang not in page.get_languages():
create_title(
language=lang, title=auto_title, page=page
)
page.publish(lang)
if set_home:
page.set_as_homepage()
return page.get_draft_object() |
def _ExtractContentSettingsExceptions(self, exceptions_dict, parser_mediator):
"""Extracts site specific events.
Args:
exceptions_dict (dict): Permission exceptions data from Preferences file.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
"""
for permission in exceptions_dict:
if permission not in self._EXCEPTIONS_KEYS:
continue
exception_dict = exceptions_dict.get(permission, {})
for urls, url_dict in exception_dict.items():
last_used = url_dict.get('last_used', None)
if not last_used:
continue
# If secondary_url is '*', the permission applies to primary_url.
# If secondary_url is a valid URL, the permission applies to
# elements loaded from secondary_url being embedded in primary_url.
primary_url, secondary_url = urls.split(',')
event_data = ChromeContentSettingsExceptionsEventData()
event_data.permission = permission
event_data.primary_url = primary_url
event_data.secondary_url = secondary_url
timestamp = int(last_used * 1000000)
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts site specific events.
Args:
exceptions_dict (dict): Permission exceptions data from Preferences file.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs. | Below is the instruction that describes the task:
### Input:
Extracts site specific events.
Args:
exceptions_dict (dict): Permission exceptions data from Preferences file.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
### Response:
def _ExtractContentSettingsExceptions(self, exceptions_dict, parser_mediator):
"""Extracts site specific events.
Args:
exceptions_dict (dict): Permission exceptions data from Preferences file.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
"""
for permission in exceptions_dict:
if permission not in self._EXCEPTIONS_KEYS:
continue
exception_dict = exceptions_dict.get(permission, {})
for urls, url_dict in exception_dict.items():
last_used = url_dict.get('last_used', None)
if not last_used:
continue
# If secondary_url is '*', the permission applies to primary_url.
# If secondary_url is a valid URL, the permission applies to
# elements loaded from secondary_url being embedded in primary_url.
primary_url, secondary_url = urls.split(',')
event_data = ChromeContentSettingsExceptionsEventData()
event_data.permission = permission
event_data.primary_url = primary_url
event_data.secondary_url = secondary_url
timestamp = int(last_used * 1000000)
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data) |
def WhatMustIUnderstand(self):
'''Return a list of (uri,localname) tuples for all elements in the
header that have mustUnderstand set.
'''
return [ ( E.namespaceURI, E.localName )
for E in self.header_elements if _find_mu(E) == "1" ] | Return a list of (uri,localname) tuples for all elements in the
header that have mustUnderstand set. | Below is the instruction that describes the task:
### Input:
Return a list of (uri,localname) tuples for all elements in the
header that have mustUnderstand set.
### Response:
def WhatMustIUnderstand(self):
'''Return a list of (uri,localname) tuples for all elements in the
header that have mustUnderstand set.
'''
return [ ( E.namespaceURI, E.localName )
for E in self.header_elements if _find_mu(E) == "1" ] |
def get_convert_dict(fmt):
"""Retrieve parse definition from the format string `fmt`."""
convdef = {}
for literal_text, field_name, format_spec, conversion in formatter.parse(fmt):
if field_name is None:
continue
# XXX: Do I need to include 'conversion'?
convdef[field_name] = format_spec
return convdef | Retrieve parse definition from the format string `fmt`. | Below is the instruction that describes the task:
### Input:
Retrieve parse definition from the format string `fmt`.
### Response:
def get_convert_dict(fmt):
"""Retrieve parse definition from the format string `fmt`."""
convdef = {}
for literal_text, field_name, format_spec, conversion in formatter.parse(fmt):
if field_name is None:
continue
# XXX: Do I need to include 'conversion'?
convdef[field_name] = format_spec
return convdef |
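A quick demo of the parsing above. The module-level `formatter` is not shown in this row; it is assumed to be a `string.Formatter()` instance, whose `parse()` yields one `(literal_text, field_name, format_spec, conversion)` tuple per field.

```python
import string

formatter = string.Formatter()   # assumed module-level object used by the function

def get_convert_dict(fmt):
    convdef = {}
    for literal_text, field_name, format_spec, conversion in formatter.parse(fmt):
        if field_name is None:
            continue
        convdef[field_name] = format_spec
    return convdef

print(get_convert_dict("{platform}_{start_time:%Y%m%d_%H%M}.{ext}"))
# {'platform': '', 'start_time': '%Y%m%d_%H%M', 'ext': ''}
```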
def value(self):
"""
Take last known value as the value
"""
try:
value = self.lastValue
except IndexError:
value = "NaN"
except ValueError:
value = "NaN"
return value | Take last known value as the value | Below is the instruction that describes the task:
### Input:
Take last known value as the value
### Response:
def value(self):
"""
Take last known value as the value
"""
try:
value = self.lastValue
except IndexError:
value = "NaN"
except ValueError:
value = "NaN"
return value |
def is_correlated(self, threshold=0):
"""
Compare with a threshold to determine whether two timeseries correlate to each other.
:return: a CorrelationResult object if two time series correlate otherwise false.
"""
return self.correlation_result if self.correlation_result.coefficient >= threshold else False | Compare with a threshold to determine whether two timeseries correlate to each other.
:return: a CorrelationResult object if two time series correlate otherwise false. | Below is the instruction that describes the task:
### Input:
Compare with a threshold to determine whether two timeseries correlate to each other.
:return: a CorrelationResult object if two time series correlate otherwise false.
### Response:
def is_correlated(self, threshold=0):
"""
Compare with a threshold to determine whether two timeseries correlate to each other.
:return: a CorrelationResult object if two time series correlate otherwise false.
"""
return self.correlation_result if self.correlation_result.coefficient >= threshold else False |
def banner_print(msg, color='', width=60, file=sys.stdout, logger=_LOG):
"""Print the message as a banner with a fixed width.
Also logs the message (un-bannered) to the given logger at the debug level.
Args:
msg: The message to print.
color: Optional colorama color string to be applied to the message. You can
concatenate colorama color strings together in order to get any set of
effects you want.
width: Total width for the resulting banner.
file: A file object to which the banner text will be written. Intended for
use with CLI output file objects like sys.stdout.
logger: A logger to use, or None to disable logging.
Example:
>>> banner_print('Foo Bar Baz')
======================== Foo Bar Baz =======================
"""
if logger:
logger.debug(ANSI_ESC_RE.sub('', msg))
if CLI_QUIET:
return
lpad = int(math.ceil((width - _printed_len(msg) - 2) / 2.0)) * '='
rpad = int(math.floor((width - _printed_len(msg) - 2) / 2.0)) * '='
file.write('{sep}{color}{lpad} {msg} {rpad}{reset}{sep}{sep}'.format(
sep=_linesep_for_file(file), color=color, lpad=lpad, msg=msg, rpad=rpad,
reset=colorama.Style.RESET_ALL))
file.flush() | Print the message as a banner with a fixed width.
Also logs the message (un-bannered) to the given logger at the debug level.
Args:
msg: The message to print.
color: Optional colorama color string to be applied to the message. You can
concatenate colorama color strings together in order to get any set of
effects you want.
width: Total width for the resulting banner.
file: A file object to which the banner text will be written. Intended for
use with CLI output file objects like sys.stdout.
logger: A logger to use, or None to disable logging.
Example:
>>> banner_print('Foo Bar Baz')
======================== Foo Bar Baz ======================= | Below is the instruction that describes the task:
### Input:
Print the message as a banner with a fixed width.
Also logs the message (un-bannered) to the given logger at the debug level.
Args:
msg: The message to print.
color: Optional colorama color string to be applied to the message. You can
concatenate colorama color strings together in order to get any set of
effects you want.
width: Total width for the resulting banner.
file: A file object to which the banner text will be written. Intended for
use with CLI output file objects like sys.stdout.
logger: A logger to use, or None to disable logging.
Example:
>>> banner_print('Foo Bar Baz')
======================== Foo Bar Baz =======================
### Response:
def banner_print(msg, color='', width=60, file=sys.stdout, logger=_LOG):
"""Print the message as a banner with a fixed width.
Also logs the message (un-bannered) to the given logger at the debug level.
Args:
msg: The message to print.
color: Optional colorama color string to be applied to the message. You can
concatenate colorama color strings together in order to get any set of
effects you want.
width: Total width for the resulting banner.
file: A file object to which the banner text will be written. Intended for
use with CLI output file objects like sys.stdout.
logger: A logger to use, or None to disable logging.
Example:
>>> banner_print('Foo Bar Baz')
======================== Foo Bar Baz =======================
"""
if logger:
logger.debug(ANSI_ESC_RE.sub('', msg))
if CLI_QUIET:
return
lpad = int(math.ceil((width - _printed_len(msg) - 2) / 2.0)) * '='
rpad = int(math.floor((width - _printed_len(msg) - 2) / 2.0)) * '='
file.write('{sep}{color}{lpad} {msg} {rpad}{reset}{sep}{sep}'.format(
sep=_linesep_for_file(file), color=color, lpad=lpad, msg=msg, rpad=rpad,
reset=colorama.Style.RESET_ALL))
file.flush() |
def _on_axes_updated(self):
"""Method to run when axes are changed in any way.
Propagates updated axes properly.
"""
# update attrs
self.attrs["axes"] = [a.identity.encode() for a in self._axes]
# remove old attributes
while len(self._current_axis_identities_in_natural_namespace) > 0:
key = self._current_axis_identities_in_natural_namespace.pop(0)
try:
delattr(self, key)
except AttributeError:
pass # already gone
# populate new attributes
for a in self._axes:
key = a.natural_name
setattr(self, key, a)
self._current_axis_identities_in_natural_namespace.append(key) | Method to run when axes are changed in any way.
Propagates updated axes properly. | Below is the instruction that describes the task:
### Input:
Method to run when axes are changed in any way.
Propagates updated axes properly.
### Response:
def _on_axes_updated(self):
"""Method to run when axes are changed in any way.
Propagates updated axes properly.
"""
# update attrs
self.attrs["axes"] = [a.identity.encode() for a in self._axes]
# remove old attributes
while len(self._current_axis_identities_in_natural_namespace) > 0:
key = self._current_axis_identities_in_natural_namespace.pop(0)
try:
delattr(self, key)
except AttributeError:
pass # already gone
# populate new attributes
for a in self._axes:
key = a.natural_name
setattr(self, key, a)
self._current_axis_identities_in_natural_namespace.append(key) |
def domain(domain_name):
"""
Allow to apply a function f(df: DataFrame) -> DataFrame) on dfs by specifying the key
E.g instead of writing:
def process_domain1(dfs):
df = dfs['domain1']
# actual process
dfs['domain1'] = df
return dfs
You can write:
@domain('domain1')
def process_domain1(df):
#actual process
return df
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
dfs, *args = args
if not isinstance(dfs, dict):
raise TypeError(f'{dfs} is not a dict')
df = dfs.pop(domain_name)
df = func(df, *args, **kwargs)
return {domain_name: df, **dfs}
return wrapper
return decorator | Allow to apply a function f(df: DataFrame) -> DataFrame) on dfs by specifying the key
E.g instead of writing:
def process_domain1(dfs):
df = dfs['domain1']
# actual process
dfs['domain1'] = df
return dfs
You can write:
@domain('domain1')
def process_domain1(df):
#actual process
return df | Below is the instruction that describes the task:
### Input:
Allow to apply a function f(df: DataFrame) -> DataFrame) on dfs by specifying the key
E.g instead of writing:
def process_domain1(dfs):
df = dfs['domain1']
# actual process
dfs['domain1'] = df
return dfs
You can write:
@domain('domain1')
def process_domain1(df):
#actual process
return df
### Response:
def domain(domain_name):
"""
Allow to apply a function f(df: DataFrame) -> DataFrame) on dfs by specifying the key
E.g instead of writing:
def process_domain1(dfs):
df = dfs['domain1']
# actual process
dfs['domain1'] = df
return dfs
You can write:
@domain('domain1')
def process_domain1(df):
#actual process
return df
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
dfs, *args = args
if not isinstance(dfs, dict):
raise TypeError(f'{dfs} is not a dict')
df = dfs.pop(domain_name)
df = func(df, *args, **kwargs)
return {domain_name: df, **dfs}
return wrapper
return decorator |
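A tiny end-to-end demo of the decorator above. Plain dicts of lists stand in for the DataFrames, since only the dispatch-by-key behaviour is being illustrated; the decorator body is repeated so the snippet runs on its own.

```python
from functools import wraps

def domain(domain_name):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            dfs, *args = args
            if not isinstance(dfs, dict):
                raise TypeError(f'{dfs} is not a dict')
            df = dfs.pop(domain_name)
            df = func(df, *args, **kwargs)
            return {domain_name: df, **dfs}
        return wrapper
    return decorator

@domain('domain1')
def process_domain1(df):
    # Toy "process": double every value in the selected domain.
    return [x * 2 for x in df]

dfs = {'domain1': [1, 2, 3], 'domain2': [4, 5]}
print(process_domain1(dfs))   # {'domain1': [2, 4, 6], 'domain2': [4, 5]}
```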
def unwrap(self, value, session=None):
''' Unwrap value using the unwrap function from ``EnumField.item_type``.
Since unwrap validation could not happen in is_valid_wrap, it
happens in this function.'''
self.validate_unwrap(value)
value = self.item_type.unwrap(value, session=session)
for val in self.values:
if val == value:
return val
self._fail_validation(value, 'Value was not in the enum values') | Unwrap value using the unwrap function from ``EnumField.item_type``.
Since unwrap validation could not happen in is_valid_wrap, it
happens in this function. | Below is the instruction that describes the task:
### Input:
Unwrap value using the unwrap function from ``EnumField.item_type``.
Since unwrap validation could not happen in is_valid_wrap, it
happens in this function.
### Response:
def unwrap(self, value, session=None):
''' Unwrap value using the unwrap function from ``EnumField.item_type``.
Since unwrap validation could not happen in is_valid_wrap, it
happens in this function.'''
self.validate_unwrap(value)
value = self.item_type.unwrap(value, session=session)
for val in self.values:
if val == value:
return val
self._fail_validation(value, 'Value was not in the enum values') |
def tryAcquire(self, lockID, callback=None, sync=False, timeout=None):
"""Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock
"""
return self.__lockImpl.acquire(lockID, self.__selfID, time.time(), callback=callback, sync=sync, timeout=timeout) | Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock | Below is the the instruction that describes the task:
### Input:
Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock
### Response:
def tryAcquire(self, lockID, callback=None, sync=False, timeout=None):
"""Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock
"""
return self.__lockImpl.acquire(lockID, self.__selfID, time.time(), callback=callback, sync=sync, timeout=timeout) |
def parse_python_assigns(assign_str):
"""
Parses a string, containing assign statements
into a dictionary.
.. code-block:: python
h5 = katdal.open('123456789.h5')
kwargs = parse_python_assigns("spw=3; scans=[1,2];"
"targets='bpcal,radec';"
"channels=slice(0,2048)")
h5.select(**kwargs)
Parameters
----------
assign_str: str
Assignment string. Should only contain assignment statements
assigning python literals or builtin function calls, to variable names.
Multiple assignment statements should be separated by semi-colons.
Returns
-------
dict
Dictionary { name: value } containing
assignment results.
"""
if not assign_str:
return {}
def _eval_value(stmt_value):
# If the statement value is a call to a builtin, try evaluate it
if isinstance(stmt_value, ast.Call):
func_name = stmt_value.func.id
if func_name not in _BUILTIN_WHITELIST:
raise ValueError("Function '%s' in '%s' is not builtin. "
"Available builtins: '%s'"
% (func_name, assign_str, list(_BUILTIN_WHITELIST)))
# Recursively pass arguments through this same function
if stmt_value.args is not None:
args = tuple(_eval_value(a) for a in stmt_value.args)
else:
args = ()
# Recursively pass keyword arguments through this same function
if stmt_value.keywords is not None:
kwargs = {kw.arg : _eval_value(kw.value) for kw
in stmt_value.keywords}
else:
kwargs = {}
return getattr(__builtin__, func_name)(*args, **kwargs)
# Try a literal eval
else:
return ast.literal_eval(stmt_value)
# Variable dictionary
variables = {}
# Parse the assignment string
stmts = ast.parse(assign_str, mode='single').body
for i, stmt in enumerate(stmts):
if not isinstance(stmt, ast.Assign):
raise ValueError("Statement %d in '%s' is not a "
"variable assignment." % (i, assign_str))
# Evaluate assignment rhs
values = _eval_value(stmt.value)
# "a = b = c" => targets 'a' and 'b' with 'c' as result
for target in stmt.targets:
# a = 2
if isinstance(target, ast.Name):
variables[target.id] = values
# Tuple/List unpacking case
# (a, b) = 2
elif isinstance(target, (ast.Tuple, ast.List)):
# Require all tuple/list elements to be variable names,
# although anything else is probably a syntax error
if not all(isinstance(e, ast.Name) for e in target.elts):
raise ValueError("Tuple unpacking in assignment %d "
"in expression '%s' failed as not all "
"tuple contents are variable names.")
# Promote for zip and length checking
if not isinstance(values, (tuple, list)):
elements = (values,)
else:
elements = values
if not len(target.elts) == len(elements):
raise ValueError("Unpacking '%s' into a tuple/list in "
"assignment %d of expression '%s' failed. "
"The number of tuple elements did not match "
"the number of values."
% (values, i, assign_str))
# Unpack
for variable, value in zip(target.elts, elements):
variables[variable.id] = value
else:
raise TypeError("'%s' types are not supported"
"as assignment targets." % type(target))
return variables | Parses a string, containing assign statements
into a dictionary.
.. code-block:: python
h5 = katdal.open('123456789.h5')
kwargs = parse_python_assigns("spw=3; scans=[1,2];"
"targets='bpcal,radec';"
"channels=slice(0,2048)")
h5.select(**kwargs)
Parameters
----------
assign_str: str
Assignment string. Should only contain assignment statements
assigning python literals or builtin function calls, to variable names.
Multiple assignment statements should be separated by semi-colons.
Returns
-------
dict
Dictionary { name: value } containing
assignment results. | Below is the the instruction that describes the task:
### Input:
Parses a string, containing assign statements
into a dictionary.
.. code-block:: python
h5 = katdal.open('123456789.h5')
kwargs = parse_python_assigns("spw=3; scans=[1,2];"
"targets='bpcal,radec';"
"channels=slice(0,2048)")
h5.select(**kwargs)
Parameters
----------
assign_str: str
Assignment string. Should only contain assignment statements
assigning python literals or builtin function calls, to variable names.
Multiple assignment statements should be separated by semi-colons.
Returns
-------
dict
Dictionary { name: value } containing
assignment results.
### Response:
def parse_python_assigns(assign_str):
"""
Parses a string, containing assign statements
into a dictionary.
.. code-block:: python
h5 = katdal.open('123456789.h5')
kwargs = parse_python_assigns("spw=3; scans=[1,2];"
"targets='bpcal,radec';"
"channels=slice(0,2048)")
h5.select(**kwargs)
Parameters
----------
assign_str: str
Assignment string. Should only contain assignment statements
assigning python literals or builtin function calls, to variable names.
Multiple assignment statements should be separated by semi-colons.
Returns
-------
dict
Dictionary { name: value } containing
assignment results.
"""
if not assign_str:
return {}
def _eval_value(stmt_value):
# If the statement value is a call to a builtin, try evaluate it
if isinstance(stmt_value, ast.Call):
func_name = stmt_value.func.id
if func_name not in _BUILTIN_WHITELIST:
raise ValueError("Function '%s' in '%s' is not builtin. "
"Available builtins: '%s'"
% (func_name, assign_str, list(_BUILTIN_WHITELIST)))
# Recursively pass arguments through this same function
if stmt_value.args is not None:
args = tuple(_eval_value(a) for a in stmt_value.args)
else:
args = ()
# Recursively pass keyword arguments through this same function
if stmt_value.keywords is not None:
kwargs = {kw.arg : _eval_value(kw.value) for kw
in stmt_value.keywords}
else:
kwargs = {}
return getattr(__builtin__, func_name)(*args, **kwargs)
# Try a literal eval
else:
return ast.literal_eval(stmt_value)
# Variable dictionary
variables = {}
# Parse the assignment string
stmts = ast.parse(assign_str, mode='single').body
for i, stmt in enumerate(stmts):
if not isinstance(stmt, ast.Assign):
raise ValueError("Statement %d in '%s' is not a "
"variable assignment." % (i, assign_str))
# Evaluate assignment rhs
values = _eval_value(stmt.value)
# "a = b = c" => targets 'a' and 'b' with 'c' as result
for target in stmt.targets:
# a = 2
if isinstance(target, ast.Name):
variables[target.id] = values
# Tuple/List unpacking case
# (a, b) = 2
elif isinstance(target, (ast.Tuple, ast.List)):
# Require all tuple/list elements to be variable names,
# although anything else is probably a syntax error
if not all(isinstance(e, ast.Name) for e in target.elts):
raise ValueError("Tuple unpacking in assignment %d "
"in expression '%s' failed as not all "
"tuple contents are variable names.")
# Promote for zip and length checking
if not isinstance(values, (tuple, list)):
elements = (values,)
else:
elements = values
if not len(target.elts) == len(elements):
raise ValueError("Unpacking '%s' into a tuple/list in "
"assignment %d of expression '%s' failed. "
"The number of tuple elements did not match "
"the number of values."
% (values, i, assign_str))
# Unpack
for variable, value in zip(target.elts, elements):
variables[variable.id] = value
else:
raise TypeError("'%s' types are not supported"
"as assignment targets." % type(target))
return variables |
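A short usage sketch (this assumes parse_python_assigns is importable from the surrounding module and that slice is present in its _BUILTIN_WHITELIST, as the docstring example implies):

kwargs = parse_python_assigns("spw=3; scans=[1, 2]; channels=slice(0, 2048)")
# kwargs == {'spw': 3, 'scans': [1, 2], 'channels': slice(0, 2048)}

# Chained assignment and tuple unpacking are handled as well:
more = parse_python_assigns("a = b = 10; (x, y) = [1, 2]")
# more == {'a': 10, 'b': 10, 'x': 1, 'y': 2}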
def stem(self, word):
"""Perform stemming on an input word."""
if self.stemmer:
return unicode_to_ascii(self._stemmer.stem(word))
else:
return word | Perform stemming on an input word. | Below is the the instruction that describes the task:
### Input:
Perform stemming on an input word.
### Response:
def stem(self, word):
"""Perform stemming on an input word."""
if self.stemmer:
return unicode_to_ascii(self._stemmer.stem(word))
else:
return word |
def update(self, name=None, email=None, blog=None, company=None,
location=None, hireable=False, bio=None):
"""If authenticated as this user, update the information with
the information provided in the parameters.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., '[email protected]'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company:
:param str location:
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool
"""
user = {'name': name, 'email': email, 'blog': blog,
'company': company, 'location': location,
'hireable': hireable, 'bio': bio}
self._remove_none(user)
url = self._build_url('user')
json = self._json(self._patch(url, data=dumps(user)), 200)
if json:
self._update_(json)
return True
return False | If authenticated as this user, update the information with
the information provided in the parameters.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., '[email protected]'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company:
:param str location:
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool | Below is the the instruction that describes the task:
### Input:
If authenticated as this user, update the information with
the information provided in the parameters.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., '[email protected]'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company:
:param str location:
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool
### Response:
def update(self, name=None, email=None, blog=None, company=None,
location=None, hireable=False, bio=None):
"""If authenticated as this user, update the information with
the information provided in the parameters.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., '[email protected]'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company:
:param str location:
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool
"""
user = {'name': name, 'email': email, 'blog': blog,
'company': company, 'location': location,
'hireable': hireable, 'bio': bio}
self._remove_none(user)
url = self._build_url('user')
json = self._json(self._patch(url, data=dumps(user)), 200)
if json:
self._update_(json)
return True
return False |
def deviation(self, series, start, limit, mean):
'''
:type start: int
:type limit: int
:type mean: int
:rtype: list()
'''
d = []
for x in range(start, limit):
d.append(float(series[x] - mean))
return d | :type start: int
:type limit: int
:type mean: int
:rtype: list() | Below is the the instruction that describes the task:
### Input:
:type start: int
:type limit: int
:type mean: int
:rtype: list()
### Response:
def deviation(self, series, start, limit, mean):
'''
:type start: int
:type limit: int
:type mean: int
:rtype: list()
'''
d = []
for x in range(start, limit):
d.append(float(series[x] - mean))
return d |
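For illustration, a small worked call; the instance name analyzer is hypothetical, and only the series[start:limit] window enters the result:

series = [2, 4, 6, 8]
devs = analyzer.deviation(series, 1, 3, 5)  # window series[1:3] == [4, 6], mean supplied as 5
# devs == [-1.0, 1.0]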
def first_image(self):
"""Ready-only attribute that provides the value of the first non-none image that's
not the thumbnail override field.
"""
# loop through image fields and grab the first non-none one
for model_field in self._meta.fields:
if isinstance(model_field, ImageField):
if model_field.name != 'thumbnail_override':
field_value = getattr(self, model_field.name)
if field_value.id is not None:
return field_value
# no non-none images, return None
return None | Read-only attribute that provides the value of the first non-none image that's
not the thumbnail override field. | Below is the the instruction that describes the task:
### Input:
Read-only attribute that provides the value of the first non-none image that's
not the thumbnail override field.
### Response:
def first_image(self):
"""Ready-only attribute that provides the value of the first non-none image that's
not the thumbnail override field.
"""
# loop through image fields and grab the first non-none one
for model_field in self._meta.fields:
if isinstance(model_field, ImageField):
if model_field.name != 'thumbnail_override':
field_value = getattr(self, model_field.name)
if field_value.id is not None:
return field_value
# no non-none images, return None
return None |
def results(self):
"An iterable of all `(job-id, WorkResult)`s."
cur = self._conn.cursor()
rows = cur.execute("SELECT * FROM results")
for row in rows:
yield (row['job_id'], _row_to_work_result(row)) | An iterable of all `(job-id, WorkResult)`s. | Below is the the instruction that describes the task:
### Input:
An iterable of all `(job-id, WorkResult)`s.
### Response:
def results(self):
"An iterable of all `(job-id, WorkResult)`s."
cur = self._conn.cursor()
rows = cur.execute("SELECT * FROM results")
for row in rows:
yield (row['job_id'], _row_to_work_result(row)) |
def render_POST(self, request):
"""
Read remoting request from the client.
@type request: The HTTP Request.
@param request: C{twisted.web.http.Request}
"""
def handleDecodeError(failure):
"""
Return HTTP 400 Bad Request.
"""
errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())
if self.logger:
self.logger.error(errMesg)
self.logger.error(failure.getTraceback())
body = "400 Bad Request\n\nThe request body was unable to " \
"be successfully decoded."
if self.debug:
body += "\n\nTraceback:\n\n%s" % failure.getTraceback()
self._finaliseRequest(request, 400, body)
request.content.seek(0, 0)
timezone_offset = self._get_timezone_offset()
d = threads.deferToThread(remoting.decode, request.content.read(),
strict=self.strict, logger=self.logger,
timezone_offset=timezone_offset)
def cb(amf_request):
if self.logger:
self.logger.debug("AMF Request: %r" % amf_request)
x = self.getResponse(request, amf_request)
x.addCallback(self.sendResponse, request)
# Process the request
d.addCallback(cb).addErrback(handleDecodeError)
return server.NOT_DONE_YET | Read remoting request from the client.
@type request: The HTTP Request.
@param request: C{twisted.web.http.Request} | Below is the the instruction that describes the task:
### Input:
Read remoting request from the client.
@type request: The HTTP Request.
@param request: C{twisted.web.http.Request}
### Response:
def render_POST(self, request):
"""
Read remoting request from the client.
@type request: The HTTP Request.
@param request: C{twisted.web.http.Request}
"""
def handleDecodeError(failure):
"""
Return HTTP 400 Bad Request.
"""
errMesg = "%s: %s" % (failure.type, failure.getErrorMessage())
if self.logger:
self.logger.error(errMesg)
self.logger.error(failure.getTraceback())
body = "400 Bad Request\n\nThe request body was unable to " \
"be successfully decoded."
if self.debug:
body += "\n\nTraceback:\n\n%s" % failure.getTraceback()
self._finaliseRequest(request, 400, body)
request.content.seek(0, 0)
timezone_offset = self._get_timezone_offset()
d = threads.deferToThread(remoting.decode, request.content.read(),
strict=self.strict, logger=self.logger,
timezone_offset=timezone_offset)
def cb(amf_request):
if self.logger:
self.logger.debug("AMF Request: %r" % amf_request)
x = self.getResponse(request, amf_request)
x.addCallback(self.sendResponse, request)
# Process the request
d.addCallback(cb).addErrback(handleDecodeError)
return server.NOT_DONE_YET |
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass | Fallback cleanup routines, attempting to fix leaked processes, threads, etc. | Below is the the instruction that describes the task:
### Input:
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
### Response:
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass |
def unselect(self, rows, status=True, progress=True):
"Unselect given rows. Don't show progress if progress=False; don't show status if status=False."
before = len(self._selectedRows)
for r in (Progress(rows, 'unselecting') if progress else rows):
self.unselectRow(r)
if status:
vd().status('unselected %s/%s %s' % (before-len(self._selectedRows), before, self.rowtype)) | Unselect given rows. Don't show progress if progress=False; don't show status if status=False. | Below is the the instruction that describes the task:
### Input:
Unselect given rows. Don't show progress if progress=False; don't show status if status=False.
### Response:
def unselect(self, rows, status=True, progress=True):
"Unselect given rows. Don't show progress if progress=False; don't show status if status=False."
before = len(self._selectedRows)
for r in (Progress(rows, 'unselecting') if progress else rows):
self.unselectRow(r)
if status:
vd().status('unselected %s/%s %s' % (before-len(self._selectedRows), before, self.rowtype)) |
def handler(self, signum, frame): # pragma: no cover
'''Signal handler for this process'''
if signum in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
self.stop(signum)
os._exit(0) | Signal handler for this process | Below is the the instruction that describes the task:
### Input:
Signal handler for this process
### Response:
def handler(self, signum, frame): # pragma: no cover
'''Signal handler for this process'''
if signum in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
self.stop(signum)
os._exit(0) |
def relaxation_operators(p):
"""
Return the amplitude damping Kraus operators
"""
k0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1 - p)]])
k1 = np.array([[0.0, np.sqrt(p)], [0.0, 0.0]])
return k0, k1 | Return the amplitude damping Kraus operators | Below is the the instruction that describes the task:
### Input:
Return the amplitude damping Kraus operators
### Response:
def relaxation_operators(p):
"""
Return the amplitude damping Kraus operators
"""
k0 = np.array([[1.0, 0.0], [0.0, np.sqrt(1 - p)]])
k1 = np.array([[0.0, np.sqrt(p)], [0.0, 0.0]])
return k0, k1 |
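A quick sanity check that the pair above is a valid Kraus decomposition (completeness: K0†K0 + K1†K1 = I); the sketch assumes only numpy, which the function body already uses:

import numpy as np

p = 0.3
k0, k1 = relaxation_operators(p)
total = k0.conj().T @ k0 + k1.conj().T @ k1
assert np.allclose(total, np.eye(2))  # the amplitude damping channel is trace-preserving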
def display_popup(self, message,
size_x=None, size_y=None,
duration=3,
is_input=False,
input_size=30,
input_value=None):
"""
Display a centered popup.
If is_input is False:
Display a centered popup with the given message during duration seconds
If size_x and size_y: set the popup size
else set it automatically
Return True if the popup could be displayed
If is_input is True:
Display a centered popup with the given message and a input field
If size_x and size_y: set the popup size
else set it automatically
Return the input string or None if the field is empty
"""
# Center the popup
sentence_list = message.split('\n')
if size_x is None:
size_x = len(max(sentence_list, key=len)) + 4
# Add space for the input field
if is_input:
size_x += input_size
if size_y is None:
size_y = len(sentence_list) + 4
screen_x = self.screen.getmaxyx()[1]
screen_y = self.screen.getmaxyx()[0]
if size_x > screen_x or size_y > screen_y:
# No size to display the popup => abort
return False
pos_x = int((screen_x - size_x) / 2)
pos_y = int((screen_y - size_y) / 2)
# Create the popup
popup = curses.newwin(size_y, size_x, pos_y, pos_x)
# Fill the popup
popup.border()
# Add the message
for y, m in enumerate(message.split('\n')):
popup.addnstr(2 + y, 2, m, len(m))
if is_input and not WINDOWS:
# Create a subwindow for the text field
subpop = popup.derwin(1, input_size, 2, 2 + len(m))
subpop.attron(self.colors_list['FILTER'])
# Init the field with the current value
if input_value is not None:
subpop.addnstr(0, 0, input_value, len(input_value))
# Display the popup
popup.refresh()
subpop.refresh()
# Create the textbox inside the subwindows
self.set_cursor(2)
self.term_window.keypad(1)
textbox = GlancesTextbox(subpop, insert_mode=False)
textbox.edit()
self.set_cursor(0)
self.term_window.keypad(0)
if textbox.gather() != '':
logger.debug(
"User enters the following string: %s" % textbox.gather())
return textbox.gather()[:-1]
else:
logger.debug("User centers an empty string")
return None
else:
# Display the popup
popup.refresh()
self.wait(duration * 1000)
return True | Display a centered popup.
If is_input is False:
Display a centered popup with the given message during duration seconds
If size_x and size_y: set the popup size
else set it automatically
Return True if the popup could be displayed
If is_input is True:
Display a centered popup with the given message and a input field
If size_x and size_y: set the popup size
else set it automatically
Return the input string or None if the field is empty | Below is the the instruction that describes the task:
### Input:
Display a centered popup.
If is_input is False:
Display a centered popup with the given message during duration seconds
If size_x and size_y: set the popup size
else set it automatically
Return True if the popup could be displayed
If is_input is True:
Display a centered popup with the given message and a input field
If size_x and size_y: set the popup size
else set it automatically
Return the input string or None if the field is empty
### Response:
def display_popup(self, message,
size_x=None, size_y=None,
duration=3,
is_input=False,
input_size=30,
input_value=None):
"""
Display a centered popup.
If is_input is False:
Display a centered popup with the given message during duration seconds
If size_x and size_y: set the popup size
else set it automatically
Return True if the popup could be displayed
If is_input is True:
Display a centered popup with the given message and a input field
If size_x and size_y: set the popup size
else set it automatically
Return the input string or None if the field is empty
"""
# Center the popup
sentence_list = message.split('\n')
if size_x is None:
size_x = len(max(sentence_list, key=len)) + 4
# Add space for the input field
if is_input:
size_x += input_size
if size_y is None:
size_y = len(sentence_list) + 4
screen_x = self.screen.getmaxyx()[1]
screen_y = self.screen.getmaxyx()[0]
if size_x > screen_x or size_y > screen_y:
# No size to display the popup => abort
return False
pos_x = int((screen_x - size_x) / 2)
pos_y = int((screen_y - size_y) / 2)
# Create the popup
popup = curses.newwin(size_y, size_x, pos_y, pos_x)
# Fill the popup
popup.border()
# Add the message
for y, m in enumerate(message.split('\n')):
popup.addnstr(2 + y, 2, m, len(m))
if is_input and not WINDOWS:
# Create a subwindow for the text field
subpop = popup.derwin(1, input_size, 2, 2 + len(m))
subpop.attron(self.colors_list['FILTER'])
# Init the field with the current value
if input_value is not None:
subpop.addnstr(0, 0, input_value, len(input_value))
# Display the popup
popup.refresh()
subpop.refresh()
# Create the textbox inside the subwindows
self.set_cursor(2)
self.term_window.keypad(1)
textbox = GlancesTextbox(subpop, insert_mode=False)
textbox.edit()
self.set_cursor(0)
self.term_window.keypad(0)
if textbox.gather() != '':
logger.debug(
"User enters the following string: %s" % textbox.gather())
return textbox.gather()[:-1]
else:
logger.debug("User centers an empty string")
return None
else:
# Display the popup
popup.refresh()
self.wait(duration * 1000)
return True |
def http_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro http.log) '''
print 'Entering http_log_graph...'
for row in list(stream):
# Skip '-' hosts
if (row['id.orig_h'] == '-'):
continue
# Add the originating host
self.add_node(row['id.orig_h'], row['id.orig_h'], ['host', 'origin'])
# Add the response host and response ip
self.add_node(row['host'], row['host'], ['host'])
self.add_node(row['id.resp_h'], row['id.resp_h'], ['host'])
# Add the http request relationships
self.add_rel(row['id.orig_h'], row['host'], 'http_request')
self.add_rel(row['host'], row['id.resp_h'], 'A') | Build up a graph (nodes and edges from a Bro http.log) | Below is the the instruction that describes the task:
### Input:
Build up a graph (nodes and edges from a Bro http.log)
### Response:
def http_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro http.log) '''
print 'Entering http_log_graph...'
for row in list(stream):
# Skip '-' hosts
if (row['id.orig_h'] == '-'):
continue
# Add the originating host
self.add_node(row['id.orig_h'], row['id.orig_h'], ['host', 'origin'])
# Add the response host and reponse ip
self.add_node(row['host'], row['host'], ['host'])
self.add_node(row['id.resp_h'], row['id.resp_h'], ['host'])
# Add the http request relationships
self.add_rel(row['id.orig_h'], row['host'], 'http_request')
self.add_rel(row['host'], row['id.resp_h'], 'A') |
def update(self,o):
"""Update from another index or index dict"""
self.open()
try:
self._db.update(o._db)
except AttributeError:
self._db.update(o) | Update from another index or index dict | Below is the the instruction that describes the task:
### Input:
Update from another index or index dict
### Response:
def update(self,o):
"""Update from another index or index dict"""
self.open()
try:
self._db.update(o._db)
except AttributeError:
self._db.update(o) |
def fsliceafter(astr, sub):
"""Return the slice after at sub in string astr"""
findex = astr.find(sub)
return astr[findex + len(sub):] | Return the slice after sub in string astr | Below is the the instruction that describes the task:
### Input:
Return the slice after sub in string astr
### Response:
def fsliceafter(astr, sub):
"""Return the slice after at sub in string astr"""
findex = astr.find(sub)
return astr[findex + len(sub):] |
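Two illustrative calls; note that str.find matches the first occurrence of sub:

fsliceafter("key=value", "=")  # -> 'value'
fsliceafter("a.tar.gz", ".")   # -> 'tar.gz'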
def dependency_check(self, task_cls, skip_unresolved=False):
""" Check dependency of task for irresolvable conflicts (like task to task mutual dependency)
:param task_cls: task to check
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. \
When False, method will raise an exception if task tag was set in dependency and the related task \
wasn't found in registry. When True that unresolvable task will be omitted
:return: None
"""
def check(check_task_cls, global_dependencies):
if check_task_cls.__registry_tag__ in global_dependencies:
raise RuntimeError('Recursion dependencies for %s' % task_cls.__registry_tag__)
dependencies = global_dependencies.copy()
dependencies.append(check_task_cls.__registry_tag__)
for dependency in check_task_cls.__dependency__:
dependent_task = self.tasks_by_tag(dependency)
if dependent_task is None and skip_unresolved is False:
raise RuntimeError(
"Task '%s' dependency unresolved (%s)" %
(task_cls.__registry_tag__, dependency)
)
if dependent_task is not None:
check(dependent_task, dependencies)
check(task_cls, []) | Check dependency of task for irresolvable conflicts (like task to task mutual dependency)
:param task_cls: task to check
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. \
When False, method will raise an exception if task tag was set in dependency and the related task \
wasn't found in registry. When True that unresolvable task will be omitted
:return: None | Below is the the instruction that describes the task:
### Input:
Check dependency of task for irresolvable conflicts (like task to task mutual dependency)
:param task_cls: task to check
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. \
When False, method will raise an exception if task tag was set in dependency and the related task \
wasn't found in registry. When True that unresolvable task will be omitted
:return: None
### Response:
def dependency_check(self, task_cls, skip_unresolved=False):
""" Check dependency of task for irresolvable conflicts (like task to task mutual dependency)
:param task_cls: task to check
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. \
When False, method will raise an exception if task tag was set in dependency and the related task \
wasn't found in registry. When True that unresolvable task will be omitted
:return: None
"""
def check(check_task_cls, global_dependencies):
if check_task_cls.__registry_tag__ in global_dependencies:
raise RuntimeError('Recursion dependencies for %s' % task_cls.__registry_tag__)
dependencies = global_dependencies.copy()
dependencies.append(check_task_cls.__registry_tag__)
for dependency in check_task_cls.__dependency__:
dependent_task = self.tasks_by_tag(dependency)
if dependent_task is None and skip_unresolved is False:
raise RuntimeError(
"Task '%s' dependency unresolved (%s)" %
(task_cls.__registry_tag__, dependency)
)
if dependent_task is not None:
check(dependent_task, dependencies)
check(task_cls, []) |
def map_or_apply(function, param):
"""
Map the function on ``param``, or apply it, depending whether ``param`` \
is a list or an item.
:param function: The function to apply.
:param param: The parameter to feed the function with (list or item).
:returns: The computed value or ``None``.
"""
try:
if isinstance(param, list):
return [next(iter(function(i))) for i in param]
else:
return next(iter(function(param)))
except StopIteration:
return None | Map the function on ``param``, or apply it, depending whether ``param`` \
is a list or an item.
:param function: The function to apply.
:param param: The parameter to feed the function with (list or item).
:returns: The computed value or ``None``. | Below is the the instruction that describes the task:
### Input:
Map the function on ``param``, or apply it, depending whether ``param`` \
is a list or an item.
:param function: The function to apply.
:param param: The parameter to feed the function with (list or item).
:returns: The computed value or ``None``.
### Response:
def map_or_apply(function, param):
"""
Map the function on ``param``, or apply it, depending whether ``param`` \
is a list or an item.
:param function: The function to apply.
:param param: The parameter to feed the function with (list or item).
:returns: The computed value or ``None``.
"""
try:
if isinstance(param, list):
return [next(iter(function(i))) for i in param]
else:
return next(iter(function(param)))
except StopIteration:
return None |
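A brief usage sketch; the callable passed in is expected to return an iterable, since the helper takes the first element via next(iter(...)):

double = lambda x: [x * 2]
map_or_apply(double, 3)          # -> 6
map_or_apply(double, [1, 2, 3])  # -> [2, 4, 6]
map_or_apply(lambda x: [], 3)    # -> None (empty result surfaces as StopIteration)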
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.storage.v2015_06_15.models>`
* 2016-01-01: :mod:`v2016_01_01.models<azure.mgmt.storage.v2016_01_01.models>`
* 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.storage.v2016_12_01.models>`
* 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.storage.v2017_06_01.models>`
* 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.storage.v2017_10_01.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.storage.v2018_02_01.models>`
* 2018-03-01-preview: :mod:`v2018_03_01_preview.models<azure.mgmt.storage.v2018_03_01_preview.models>`
* 2018-07-01: :mod:`v2018_07_01.models<azure.mgmt.storage.v2018_07_01.models>`
"""
if api_version == '2015-06-15':
from .v2015_06_15 import models
return models
elif api_version == '2016-01-01':
from .v2016_01_01 import models
return models
elif api_version == '2016-12-01':
from .v2016_12_01 import models
return models
elif api_version == '2017-06-01':
from .v2017_06_01 import models
return models
elif api_version == '2017-10-01':
from .v2017_10_01 import models
return models
elif api_version == '2018-02-01':
from .v2018_02_01 import models
return models
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview import models
return models
elif api_version == '2018-07-01':
from .v2018_07_01 import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version)) | Module depends on the API version:
* 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.storage.v2015_06_15.models>`
* 2016-01-01: :mod:`v2016_01_01.models<azure.mgmt.storage.v2016_01_01.models>`
* 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.storage.v2016_12_01.models>`
* 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.storage.v2017_06_01.models>`
* 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.storage.v2017_10_01.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.storage.v2018_02_01.models>`
* 2018-03-01-preview: :mod:`v2018_03_01_preview.models<azure.mgmt.storage.v2018_03_01_preview.models>`
* 2018-07-01: :mod:`v2018_07_01.models<azure.mgmt.storage.v2018_07_01.models>` | Below is the the instruction that describes the task:
### Input:
Module depends on the API version:
* 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.storage.v2015_06_15.models>`
* 2016-01-01: :mod:`v2016_01_01.models<azure.mgmt.storage.v2016_01_01.models>`
* 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.storage.v2016_12_01.models>`
* 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.storage.v2017_06_01.models>`
* 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.storage.v2017_10_01.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.storage.v2018_02_01.models>`
* 2018-03-01-preview: :mod:`v2018_03_01_preview.models<azure.mgmt.storage.v2018_03_01_preview.models>`
* 2018-07-01: :mod:`v2018_07_01.models<azure.mgmt.storage.v2018_07_01.models>`
### Response:
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.storage.v2015_06_15.models>`
* 2016-01-01: :mod:`v2016_01_01.models<azure.mgmt.storage.v2016_01_01.models>`
* 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.storage.v2016_12_01.models>`
* 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.storage.v2017_06_01.models>`
* 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.storage.v2017_10_01.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.storage.v2018_02_01.models>`
* 2018-03-01-preview: :mod:`v2018_03_01_preview.models<azure.mgmt.storage.v2018_03_01_preview.models>`
* 2018-07-01: :mod:`v2018_07_01.models<azure.mgmt.storage.v2018_07_01.models>`
"""
if api_version == '2015-06-15':
from .v2015_06_15 import models
return models
elif api_version == '2016-01-01':
from .v2016_01_01 import models
return models
elif api_version == '2016-12-01':
from .v2016_12_01 import models
return models
elif api_version == '2017-06-01':
from .v2017_06_01 import models
return models
elif api_version == '2017-10-01':
from .v2017_10_01 import models
return models
elif api_version == '2018-02-01':
from .v2018_02_01 import models
return models
elif api_version == '2018-03-01-preview':
from .v2018_03_01_preview import models
return models
elif api_version == '2018-07-01':
from .v2018_07_01 import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version)) |
def insert_many(cls, documents):
"""Insert a list of documents"""
from mongoframes.queries import to_refs
# Ensure all documents have been converted to frames
frames = cls._ensure_frames(documents)
# Send insert signal
signal('insert').send(cls, frames=frames)
# Prepare the documents to be inserted
documents = [to_refs(f._document) for f in frames]
# Bulk insert
ids = cls.get_collection().insert_many(documents).inserted_ids
# Apply the Ids to the frames
for i, id in enumerate(ids):
frames[i]._id = id
# Send inserted signal
signal('inserted').send(cls, frames=frames)
return frames | Insert a list of documents | Below is the the instruction that describes the task:
### Input:
Insert a list of documents
### Response:
def insert_many(cls, documents):
"""Insert a list of documents"""
from mongoframes.queries import to_refs
# Ensure all documents have been converted to frames
frames = cls._ensure_frames(documents)
# Send insert signal
signal('insert').send(cls, frames=frames)
# Prepare the documents to be inserted
documents = [to_refs(f._document) for f in frames]
# Bulk insert
ids = cls.get_collection().insert_many(documents).inserted_ids
# Apply the Ids to the frames
for i, id in enumerate(ids):
frames[i]._id = id
# Send inserted signal
signal('inserted').send(cls, frames=frames)
return frames |
def variants(context, case_id, institute, force, cancer, cancer_research, sv,
sv_research, snv, snv_research, str_clinical, chrom, start, end, hgnc_id,
hgnc_symbol, rank_treshold):
"""Upload variants to a case
Note that the files have to be linked with the case;
if they are not, use 'scout update case'.
"""
LOG.info("Running scout load variants")
adapter = context.obj['adapter']
if institute:
case_id = "{0}-{1}".format(institute, case_id)
else:
institute = case_id.split('-')[0]
case_obj = adapter.case(case_id=case_id)
if case_obj is None:
LOG.info("No matching case found")
context.abort()
files = [
{'category': 'cancer', 'variant_type': 'clinical', 'upload': cancer},
{'category': 'cancer', 'variant_type': 'research', 'upload': cancer_research},
{'category': 'sv', 'variant_type': 'clinical', 'upload': sv},
{'category': 'sv', 'variant_type': 'research', 'upload': sv_research},
{'category': 'snv', 'variant_type': 'clinical', 'upload': snv},
{'category': 'snv', 'variant_type': 'research', 'upload': snv_research},
{'category': 'str', 'variant_type': 'clinical', 'upload': str_clinical},
]
gene_obj = None
if (hgnc_id or hgnc_symbol):
if hgnc_id:
gene_obj = adapter.hgnc_gene(hgnc_id)
if hgnc_symbol:
for res in adapter.gene_by_alias(hgnc_symbol):
gene_obj = res
if not gene_obj:
LOG.warning("The gene could not be found")
context.abort()
i = 0
for file_type in files:
variant_type = file_type['variant_type']
category = file_type['category']
if file_type['upload']:
i += 1
if variant_type == 'research':
if not (force or case_obj['research_requested']):
LOG.warn("research not requested, use '--force'")
context.abort()
LOG.info("Delete {0} {1} variants for case {2}".format(
variant_type, category, case_id))
adapter.delete_variants(case_id=case_obj['_id'],
variant_type=variant_type,
category=category)
LOG.info("Load {0} {1} variants for case {2}".format(
variant_type, category, case_id))
try:
adapter.load_variants(
case_obj=case_obj,
variant_type=variant_type,
category=category,
rank_threshold=rank_treshold,
chrom=chrom,
start=start,
end=end,
gene_obj=gene_obj
)
except Exception as e:
LOG.warning(e)
context.abort()
if i == 0:
LOG.info("No files where specified to upload variants from") | Upload variants to a case
Note that the files have to be linked with the case;
if they are not, use 'scout update case'. | Below is the the instruction that describes the task:
### Input:
Upload variants to a case
Note that the files have to be linked with the case;
if they are not, use 'scout update case'.
### Response:
def variants(context, case_id, institute, force, cancer, cancer_research, sv,
sv_research, snv, snv_research, str_clinical, chrom, start, end, hgnc_id,
hgnc_symbol, rank_treshold):
"""Upload variants to a case
Note that the files have to be linked with the case;
if they are not, use 'scout update case'.
"""
LOG.info("Running scout load variants")
adapter = context.obj['adapter']
if institute:
case_id = "{0}-{1}".format(institute, case_id)
else:
institute = case_id.split('-')[0]
case_obj = adapter.case(case_id=case_id)
if case_obj is None:
LOG.info("No matching case found")
context.abort()
files = [
{'category': 'cancer', 'variant_type': 'clinical', 'upload': cancer},
{'category': 'cancer', 'variant_type': 'research', 'upload': cancer_research},
{'category': 'sv', 'variant_type': 'clinical', 'upload': sv},
{'category': 'sv', 'variant_type': 'research', 'upload': sv_research},
{'category': 'snv', 'variant_type': 'clinical', 'upload': snv},
{'category': 'snv', 'variant_type': 'research', 'upload': snv_research},
{'category': 'str', 'variant_type': 'clinical', 'upload': str_clinical},
]
gene_obj = None
if (hgnc_id or hgnc_symbol):
if hgnc_id:
gene_obj = adapter.hgnc_gene(hgnc_id)
if hgnc_symbol:
for res in adapter.gene_by_alias(hgnc_symbol):
gene_obj = res
if not gene_obj:
LOG.warning("The gene could not be found")
context.abort()
i = 0
for file_type in files:
variant_type = file_type['variant_type']
category = file_type['category']
if file_type['upload']:
i += 1
if variant_type == 'research':
if not (force or case_obj['research_requested']):
LOG.warn("research not requested, use '--force'")
context.abort()
LOG.info("Delete {0} {1} variants for case {2}".format(
variant_type, category, case_id))
adapter.delete_variants(case_id=case_obj['_id'],
variant_type=variant_type,
category=category)
LOG.info("Load {0} {1} variants for case {2}".format(
variant_type, category, case_id))
try:
adapter.load_variants(
case_obj=case_obj,
variant_type=variant_type,
category=category,
rank_threshold=rank_treshold,
chrom=chrom,
start=start,
end=end,
gene_obj=gene_obj
)
except Exception as e:
LOG.warning(e)
context.abort()
if i == 0:
LOG.info("No files where specified to upload variants from") |
def register(self, mimetype, processor):
"""Register passed `processor` for passed `mimetype`."""
if mimetype not in self or processor not in self[mimetype]:
self.setdefault(mimetype, []).append(processor) | Register passed `processor` for passed `mimetype`. | Below is the the instruction that describes the task:
### Input:
Register passed `processor` for passed `mimetype`.
### Response:
def register(self, mimetype, processor):
"""Register passed `processor` for passed `mimetype`."""
if mimetype not in self or processor not in self[mimetype]:
self.setdefault(mimetype, []).append(processor) |
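The method only relies on the instance behaving like a mutable mapping, so a minimal dict subclass is enough to exercise it (the class name below is assumed, not taken from the source):

class ProcessorRegistry(dict):
    def register(self, mimetype, processor):
        if mimetype not in self or processor not in self[mimetype]:
            self.setdefault(mimetype, []).append(processor)

reg = ProcessorRegistry()
reg.register('text/html', 'strip_tags')
reg.register('text/html', 'strip_tags')  # duplicate registration is ignored
reg.register('text/html', 'minify')
# reg == {'text/html': ['strip_tags', 'minify']}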
def _parse_hextet(self, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from [0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not self._HEX_DIGITS.issuperset(hextet_str):
raise ValueError
if len(hextet_str) > 4:
raise ValueError
hextet_int = int(hextet_str, 16)
if hextet_int > 0xFFFF:
raise ValueError
return hextet_int | Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from [0..FFFF]. | Below is the the instruction that describes the task:
### Input:
Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from [0..FFFF].
### Response:
def _parse_hextet(self, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from [0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not self._HEX_DIGITS.issuperset(hextet_str):
raise ValueError
if len(hextet_str) > 4:
raise ValueError
hextet_int = int(hextet_str, 16)
if hextet_int > 0xFFFF:
raise ValueError
return hextet_int |
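A standalone sketch of the same validation, assuming self._HEX_DIGITS is a frozenset of the hexadecimal digit characters (as in the stdlib ipaddress module):

import string

_HEX_DIGITS = frozenset(string.hexdigits)

def parse_hextet(hextet_str):
    # Whitelist the characters, since int() allows a lot of bizarre stuff.
    if not _HEX_DIGITS.issuperset(hextet_str) or len(hextet_str) > 4:
        raise ValueError(hextet_str)
    return int(hextet_str, 16)  # at most 4 hex digits, so the value cannot exceed 0xFFFF

parse_hextet("ffff")  # -> 65535
parse_hextet("0db8")  # -> 3512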
def cmd(send, msg, args):
"""Gives help.
Syntax: {command} [command]
"""
cmdchar = args['config']['core']['cmdchar']
if msg:
if msg.startswith(cmdchar):
msg = msg[len(cmdchar):]
if len(msg.split()) > 1:
send("One argument only")
elif not command_registry.is_registered(msg):
send("Not a module.")
else:
doc = command_registry.get_command(msg).get_doc()
if doc is None:
send("No documentation found.")
else:
for line in doc.splitlines():
send(line.format(command=cmdchar + msg), target=args['nick'])
else:
modules = sorted(command_registry.get_enabled_commands())
cmdlist = (' %s' % cmdchar).join(modules)
send('Commands: %s%s' % (cmdchar, cmdlist), target=args['nick'], ignore_length=True)
send('%shelp <command> for more info on a command.' % cmdchar, target=args['nick']) | Gives help.
Syntax: {command} [command] | Below is the the instruction that describes the task:
### Input:
Gives help.
Syntax: {command} [command]
### Response:
def cmd(send, msg, args):
"""Gives help.
Syntax: {command} [command]
"""
cmdchar = args['config']['core']['cmdchar']
if msg:
if msg.startswith(cmdchar):
msg = msg[len(cmdchar):]
if len(msg.split()) > 1:
send("One argument only")
elif not command_registry.is_registered(msg):
send("Not a module.")
else:
doc = command_registry.get_command(msg).get_doc()
if doc is None:
send("No documentation found.")
else:
for line in doc.splitlines():
send(line.format(command=cmdchar + msg), target=args['nick'])
else:
modules = sorted(command_registry.get_enabled_commands())
cmdlist = (' %s' % cmdchar).join(modules)
send('Commands: %s%s' % (cmdchar, cmdlist), target=args['nick'], ignore_length=True)
send('%shelp <command> for more info on a command.' % cmdchar, target=args['nick']) |
def add_or_update(user_id, app_id, value):
'''
Editing evaluation.
'''
rec = MEvaluation.get_by_signature(user_id, app_id)
if rec:
entry = TabEvaluation.update(
value=value,
).where(TabEvaluation.uid == rec.uid)
entry.execute()
else:
TabEvaluation.create(
uid=tools.get_uuid(),
user_id=user_id,
post_id=app_id,
value=value,
) | Editing evaluation. | Below is the the instruction that describes the task:
### Input:
Editing evaluation.
### Response:
def add_or_update(user_id, app_id, value):
'''
Editing evaluation.
'''
rec = MEvaluation.get_by_signature(user_id, app_id)
if rec:
entry = TabEvaluation.update(
value=value,
).where(TabEvaluation.uid == rec.uid)
entry.execute()
else:
TabEvaluation.create(
uid=tools.get_uuid(),
user_id=user_id,
post_id=app_id,
value=value,
) |
def to_str(self, delimiter='|', null='NULL'):
"""
Sets the current encoder output to Python `str` and returns
a row iterator.
:param str null: The string representation of null values
:param str delimiter: The string delimiting values in the output
string
:rtype: iterator (yields ``str``)
"""
self.export.set_null(null)
self.export.set_delimiter(delimiter)
self.options("delimiter", escape_string(delimiter), 2)
self.options("null", null, 3)
return self._fetchall(ENCODER_SETTINGS_STRING, coerce_floats=False) | Sets the current encoder output to Python `str` and returns
a row iterator.
:param str null: The string representation of null values
:param str delimiter: The string delimiting values in the output
string
:rtype: iterator (yields ``str``) | Below is the the instruction that describes the task:
### Input:
Sets the current encoder output to Python `str` and returns
a row iterator.
:param str null: The string representation of null values
:param str delimiter: The string delimiting values in the output
string
:rtype: iterator (yields ``str``)
### Response:
def to_str(self, delimiter='|', null='NULL'):
"""
Sets the current encoder output to Python `str` and returns
a row iterator.
:param str null: The string representation of null values
:param str delimiter: The string delimiting values in the output
string
:rtype: iterator (yields ``str``)
"""
self.export.set_null(null)
self.export.set_delimiter(delimiter)
self.options("delimiter", escape_string(delimiter), 2)
self.options("null", null, 3)
return self._fetchall(ENCODER_SETTINGS_STRING, coerce_floats=False) |
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info | Adds an ElastiCache node to the inventory and index, as long as
it is addressable | Below is the the instruction that describes the task:
### Input:
Adds an ElastiCache node to the inventory and index, as long as
it is addressable
### Response:
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info |
def get_area_dates(bbox, date_interval, maxcc=None):
""" Get list of times of existing images from specified area and time range
:param bbox: bounding box of requested area
:type bbox: geometry.BBox
:param date_interval: a pair of time strings in ISO8601 format
:type date_interval: tuple(str)
:param maxcc: filter images by maximum percentage of cloud coverage
:type maxcc: float in range [0, 1] or None
:return: list of time strings in ISO8601 format
:rtype: list[datetime.datetime]
"""
area_info = get_area_info(bbox, date_interval, maxcc=maxcc)
return sorted({datetime.datetime.strptime(tile_info['properties']['startDate'].strip('Z'),
'%Y-%m-%dT%H:%M:%S')
for tile_info in area_info}) | Get list of times of existing images from specified area and time range
:param bbox: bounding box of requested area
:type bbox: geometry.BBox
:param date_interval: a pair of time strings in ISO8601 format
:type date_interval: tuple(str)
:param maxcc: filter images by maximum percentage of cloud coverage
:type maxcc: float in range [0, 1] or None
:return: list of time strings in ISO8601 format
:rtype: list[datetime.datetime] | Below is the the instruction that describes the task:
### Input:
Get list of times of existing images from specified area and time range
:param bbox: bounding box of requested area
:type bbox: geometry.BBox
:param date_interval: a pair of time strings in ISO8601 format
:type date_interval: tuple(str)
:param maxcc: filter images by maximum percentage of cloud coverage
:type maxcc: float in range [0, 1] or None
:return: list of time strings in ISO8601 format
:rtype: list[datetime.datetime]
### Response:
def get_area_dates(bbox, date_interval, maxcc=None):
""" Get list of times of existing images from specified area and time range
:param bbox: bounding box of requested area
:type bbox: geometry.BBox
:param date_interval: a pair of time strings in ISO8601 format
:type date_interval: tuple(str)
:param maxcc: filter images by maximum percentage of cloud coverage
:type maxcc: float in range [0, 1] or None
:return: list of time strings in ISO8601 format
:rtype: list[datetime.datetime]
"""
area_info = get_area_info(bbox, date_interval, maxcc=maxcc)
return sorted({datetime.datetime.strptime(tile_info['properties']['startDate'].strip('Z'),
'%Y-%m-%dT%H:%M:%S')
for tile_info in area_info}) |
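A minimal usage sketch for get_area_dates above. It assumes the surrounding module already provides get_area_info, and that the bounding-box type comes from the sentinelhub package; the coordinates, time interval and cloud-cover threshold are placeholders.

from sentinelhub import BBox, CRS  # assumed dependency supplying the bbox geometry type

search_bbox = BBox(bbox=[46.16, -16.15, 46.51, -15.58], crs=CRS.WGS84)
search_interval = ('2019-04-01T00:00:00', '2019-05-01T00:00:00')

available_dates = get_area_dates(search_bbox, search_interval, maxcc=0.3)
for timestamp in available_dates:
    print(timestamp.isoformat())  # one entry per acquisition found in the archive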
def assets(self, asset_code=None, asset_issuer=None, cursor=None, order='asc', limit=10):
"""This endpoint represents all assets. It will give you all the assets
in the system along with various statistics about each.
See the documentation below for details on query parameters that are
available.
`GET /assets{?asset_code,asset_issuer,cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_
:param str asset_code: Code of the Asset to filter by.
:param str asset_issuer: Issuer of the Asset to filter by.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc",
ordered by asset_code then by asset_issuer.
:param int limit: Maximum number of records to return.
        :return: A list of all assets in the system
:rtype: dict
"""
endpoint = '/assets'
params = self.__query_params(asset_code=asset_code, asset_issuer=asset_issuer, cursor=cursor, order=order,
limit=limit)
return self.query(endpoint, params) | This endpoint represents all assets. It will give you all the assets
in the system along with various statistics about each.
See the documentation below for details on query parameters that are
available.
`GET /assets{?asset_code,asset_issuer,cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_
:param str asset_code: Code of the Asset to filter by.
:param str asset_issuer: Issuer of the Asset to filter by.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc",
ordered by asset_code then by asset_issuer.
:param int limit: Maximum number of records to return.
        :return: A list of all assets in the system
        :rtype: dict | Below is the instruction that describes the task:
### Input:
This endpoint represents all assets. It will give you all the assets
in the system along with various statistics about each.
See the documentation below for details on query parameters that are
available.
`GET /assets{?asset_code,asset_issuer,cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_
:param str asset_code: Code of the Asset to filter by.
:param str asset_issuer: Issuer of the Asset to filter by.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc",
ordered by asset_code then by asset_issuer.
:param int limit: Maximum number of records to return.
        :return: A list of all assets in the system
:rtype: dict
### Response:
def assets(self, asset_code=None, asset_issuer=None, cursor=None, order='asc', limit=10):
"""This endpoint represents all assets. It will give you all the assets
in the system along with various statistics about each.
See the documentation below for details on query parameters that are
available.
`GET /assets{?asset_code,asset_issuer,cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_
:param str asset_code: Code of the Asset to filter by.
:param str asset_issuer: Issuer of the Asset to filter by.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc",
ordered by asset_code then by asset_issuer.
:param int limit: Maximum number of records to return.
        :return: A list of all assets in the system
:rtype: dict
"""
endpoint = '/assets'
params = self.__query_params(asset_code=asset_code, asset_issuer=asset_issuer, cursor=cursor, order=order,
limit=limit)
return self.query(endpoint, params) |
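A hedged usage sketch for the assets endpoint wrapper above. It assumes horizon is an already-constructed client instance exposing this method; the _embedded.records layout follows the public Horizon REST responses and may differ between SDK versions.

# horizon is assumed to be an instance of the client class that defines assets()
response = horizon.assets(asset_code='USD', order='desc', limit=5)
for record in response.get('_embedded', {}).get('records', []):
    # asset_code / asset_issuer / amount are standard Horizon response fields
    print(record.get('asset_code'), record.get('asset_issuer'), record.get('amount'))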
def message_handler(stream, type_, from_, cb):
"""
Context manager to temporarily register a callback to handle messages on a
:class:`StanzaStream`.
:param stream: Stanza stream to register the coroutine at
:type stream: :class:`StanzaStream`
:param type_: Message type to listen for, or :data:`None` for a wildcard
match.
:type type_: :class:`~.MessageType` or :data:`None`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`
:param cb: Callback to register
The callback is registered when the context is entered and unregistered
when the context is exited.
.. versionadded:: 0.8
"""
stream.register_message_callback(
type_,
from_,
cb,
)
try:
yield
finally:
stream.unregister_message_callback(
type_,
from_,
) | Context manager to temporarily register a callback to handle messages on a
:class:`StanzaStream`.
:param stream: Stanza stream to register the coroutine at
:type stream: :class:`StanzaStream`
:param type_: Message type to listen for, or :data:`None` for a wildcard
match.
:type type_: :class:`~.MessageType` or :data:`None`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`
:param cb: Callback to register
The callback is registered when the context is entered and unregistered
when the context is exited.
    .. versionadded:: 0.8 | Below is the instruction that describes the task:
### Input:
Context manager to temporarily register a callback to handle messages on a
:class:`StanzaStream`.
:param stream: Stanza stream to register the coroutine at
:type stream: :class:`StanzaStream`
:param type_: Message type to listen for, or :data:`None` for a wildcard
match.
:type type_: :class:`~.MessageType` or :data:`None`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`
:param cb: Callback to register
The callback is registered when the context is entered and unregistered
when the context is exited.
.. versionadded:: 0.8
### Response:
def message_handler(stream, type_, from_, cb):
"""
Context manager to temporarily register a callback to handle messages on a
:class:`StanzaStream`.
:param stream: Stanza stream to register the coroutine at
:type stream: :class:`StanzaStream`
:param type_: Message type to listen for, or :data:`None` for a wildcard
match.
:type type_: :class:`~.MessageType` or :data:`None`
:param from_: Sender JID to listen for, or :data:`None` for a wildcard
match.
:type from_: :class:`~aioxmpp.JID` or :data:`None`
:param cb: Callback to register
The callback is registered when the context is entered and unregistered
when the context is exited.
.. versionadded:: 0.8
"""
stream.register_message_callback(
type_,
from_,
cb,
)
try:
yield
finally:
stream.unregister_message_callback(
type_,
from_,
) |
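A sketch of how the context manager above is meant to be used. It assumes the function is wrapped with contextlib.contextmanager as in the upstream library (the decorator is not shown in the excerpt) and that stream is an established aioxmpp StanzaStream.

import aioxmpp

def on_chat_message(message):
    # invoked for every chat message that arrives while the context is active
    print("message from", message.from_)

# stream is assumed to be an established aioxmpp StanzaStream
with message_handler(stream, aioxmpp.MessageType.CHAT, None, on_chat_message):
    ...  # placeholder for the work that needs the temporary handler installed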
def initial_key_to_master_key(initial_key):
"""
initial_key:
a hex string of length 32
"""
b = initial_key.encode("utf8")
orig_input = b
for i in range(100000):
b = hashlib.sha256(b + orig_input).digest()
return from_bytes_32(b) | initial_key:
        a hex string of length 32 | Below is the instruction that describes the task:
### Input:
initial_key:
a hex string of length 32
### Response:
def initial_key_to_master_key(initial_key):
"""
initial_key:
a hex string of length 32
"""
b = initial_key.encode("utf8")
orig_input = b
for i in range(100000):
b = hashlib.sha256(b + orig_input).digest()
return from_bytes_32(b) |
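A small self-contained sketch of the key-stretching behaviour above; the passphrase is a placeholder and from_bytes_32 is assumed to be available from the same module.

demo_key = "2b" * 16                          # placeholder 32-character hex string
master_1 = initial_key_to_master_key(demo_key)
master_2 = initial_key_to_master_key(demo_key)
assert master_1 == master_2                   # 100000 chained SHA-256 rounds are deterministic
print(hex(master_1))                          # large integer derived from the final digest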
def t_string_NGRAPH(t):
r"\\[ '.:][ '.:]"
global __STRING
P = {' ': 0, "'": 2, '.': 8, ':': 10}
N = {' ': 0, "'": 1, '.': 4, ':': 5}
    __STRING += chr(128 + P[t.value[1]] + N[t.value[2]]) | r"\\[ '.:][ '.:] | Below is the instruction that describes the task:
### Input:
r"\\[ '.:][ '.:]
### Response:
def t_string_NGRAPH(t):
r"\\[ '.:][ '.:]"
global __STRING
P = {' ': 0, "'": 2, '.': 8, ':': 10}
N = {' ': 0, "'": 1, '.': 4, ':': 5}
__STRING += chr(128 + P[t.value[1]] + N[t.value[2]]) |
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
            The cardinality to create an index for.
"""
DatabaseConnector.create_index(self, cardinality)
query = "CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
if self.lowercase:
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, i)
self.execute_sql(query)
if self.normalize:
query = "CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
else:
query = "CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
elif self.normalize:
query = "CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query) | Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
            The cardinality to create an index for. | Below is the instruction that describes the task:
### Input:
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
            The cardinality to create an index for.
### Response:
def create_index(self, cardinality):
"""
Create an index for the table with the given cardinality.
Parameters
----------
cardinality : int
            The cardinality to create an index for.
"""
DatabaseConnector.create_index(self, cardinality)
query = "CREATE INDEX idx_{0}_gram_varchar ON _{0}_gram(word varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
if self.lowercase:
for i in reversed(range(cardinality)):
if i != 0:
query = "CREATE INDEX idx_{0}_gram_{1}_lower ON _{0}_gram(LOWER(word_{1}));".format(cardinality, i)
self.execute_sql(query)
if self.normalize:
query = "CREATE INDEX idx_{0}_gram_lower_normalized_varchar ON _{0}_gram(NORMALIZE(LOWER(word)) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
else:
query = "CREATE INDEX idx_{0}_gram_lower_varchar ON _{0}_gram(LOWER(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query)
elif self.normalize:
query = "CREATE INDEX idx_{0}_gram_normalized_varchar ON _{0}_gram(NORMALIZE(word) varchar_pattern_ops);".format(cardinality)
self.execute_sql(query) |
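An illustration of the statements the method above would send for a 3-gram table with lowercase=True and normalize=False; connector is a hypothetical instance of the class, and whatever DatabaseConnector.create_index itself emits is not listed.

# connector is assumed to be an instance of the PostgreSQL connector shown above,
# constructed with lowercase=True and normalize=False
connector.create_index(3)
# Statements passed to execute_sql(), in order (after the base-class call):
#   CREATE INDEX idx_3_gram_varchar ON _3_gram(word varchar_pattern_ops);
#   CREATE INDEX idx_3_gram_2_lower ON _3_gram(LOWER(word_2));
#   CREATE INDEX idx_3_gram_1_lower ON _3_gram(LOWER(word_1));
#   CREATE INDEX idx_3_gram_lower_varchar ON _3_gram(LOWER(word) varchar_pattern_ops);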
def aggregate_daily(image_coll, start_date=None, end_date=None,
agg_type='mean'):
"""Aggregate images by day without using joins
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string, optional
Start date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
end_date : date, number, string, optional
Exclusive end date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
agg_type : {'mean'}, optional
Aggregation type (the default is 'mean').
Currently only a 'mean' aggregation type is supported.
Returns
-------
ee.ImageCollection()
Notes
-----
This function should be used to mosaic Landsat images from same path
but different rows.
system:time_start of returned images will be 0 UTC (not the image time).
"""
if start_date and end_date:
test_coll = image_coll.filterDate(ee.Date(start_date), ee.Date(end_date))
elif start_date:
test_coll = image_coll.filter(ee.Filter.greaterThanOrEquals(
'system:time_start', ee.Date(start_date).millis()))
elif end_date:
test_coll = image_coll.filter(ee.Filter.lessThan(
'system:time_start', ee.Date(end_date).millis()))
else:
test_coll = image_coll
# Build a list of dates in the image_coll
def get_date(time):
return ee.Date(ee.Number(time)).format('yyyy-MM-dd')
date_list = ee.List(test_coll.aggregate_array('system:time_start'))\
.map(get_date).distinct().sort()
def aggregate_func(date_str):
start_date = ee.Date(ee.String(date_str))
end_date = start_date.advance(1, 'day')
agg_coll = image_coll.filterDate(start_date, end_date)
# if agg_type.lower() == 'mean':
agg_img = agg_coll.mean()
# elif agg_type.lower() == 'median':
# agg_img = agg_coll.median()
return agg_img.set({
'system:index': start_date.format('yyyyMMdd'),
'system:time_start': start_date.millis(),
'date': start_date.format('yyyy-MM-dd'),
})
return ee.ImageCollection(date_list.map(aggregate_func)) | Aggregate images by day without using joins
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string, optional
Start date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
end_date : date, number, string, optional
Exclusive end date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
agg_type : {'mean'}, optional
Aggregation type (the default is 'mean').
Currently only a 'mean' aggregation type is supported.
Returns
-------
ee.ImageCollection()
Notes
-----
This function should be used to mosaic Landsat images from same path
but different rows.
    system:time_start of returned images will be 0 UTC (not the image time). | Below is the instruction that describes the task:
### Input:
Aggregate images by day without using joins
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string, optional
Start date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
end_date : date, number, string, optional
Exclusive end date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
agg_type : {'mean'}, optional
Aggregation type (the default is 'mean').
Currently only a 'mean' aggregation type is supported.
Returns
-------
ee.ImageCollection()
Notes
-----
This function should be used to mosaic Landsat images from same path
but different rows.
system:time_start of returned images will be 0 UTC (not the image time).
### Response:
def aggregate_daily(image_coll, start_date=None, end_date=None,
agg_type='mean'):
"""Aggregate images by day without using joins
The primary purpose of this function is to join separate Landsat images
from the same path into a single daily image.
Parameters
----------
image_coll : ee.ImageCollection
Input image collection.
start_date : date, number, string, optional
Start date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
end_date : date, number, string, optional
Exclusive end date.
Needs to be an EE readable date (i.e. ISO Date string or milliseconds).
agg_type : {'mean'}, optional
Aggregation type (the default is 'mean').
Currently only a 'mean' aggregation type is supported.
Returns
-------
ee.ImageCollection()
Notes
-----
This function should be used to mosaic Landsat images from same path
but different rows.
system:time_start of returned images will be 0 UTC (not the image time).
"""
if start_date and end_date:
test_coll = image_coll.filterDate(ee.Date(start_date), ee.Date(end_date))
elif start_date:
test_coll = image_coll.filter(ee.Filter.greaterThanOrEquals(
'system:time_start', ee.Date(start_date).millis()))
elif end_date:
test_coll = image_coll.filter(ee.Filter.lessThan(
'system:time_start', ee.Date(end_date).millis()))
else:
test_coll = image_coll
# Build a list of dates in the image_coll
def get_date(time):
return ee.Date(ee.Number(time)).format('yyyy-MM-dd')
date_list = ee.List(test_coll.aggregate_array('system:time_start'))\
.map(get_date).distinct().sort()
def aggregate_func(date_str):
start_date = ee.Date(ee.String(date_str))
end_date = start_date.advance(1, 'day')
agg_coll = image_coll.filterDate(start_date, end_date)
# if agg_type.lower() == 'mean':
agg_img = agg_coll.mean()
# elif agg_type.lower() == 'median':
# agg_img = agg_coll.median()
return agg_img.set({
'system:index': start_date.format('yyyyMMdd'),
'system:time_start': start_date.millis(),
'date': start_date.format('yyyy-MM-dd'),
})
return ee.ImageCollection(date_list.map(aggregate_func)) |
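A hedged usage sketch for aggregate_daily above using the Earth Engine Python API; the collection ID, point and dates are placeholders and an authenticated Earth Engine session is required.

import ee

ee.Initialize()
landsat = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR') \
    .filterBounds(ee.Geometry.Point(-106.3, 36.6)) \
    .filterDate('2017-06-01', '2017-09-01')

daily = aggregate_daily(landsat, '2017-06-01', '2017-09-01')
print(daily.aggregate_array('date').getInfo())  # one mosaicked image per acquisition day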
def _get_best_subset(self, ritz):
'''Return candidate set with smallest goal functional.'''
# (c,\omega(c)) for all considered subsets c
overall_evaluations = {}
def evaluate(_subset, _evaluations):
try:
_evaluations[_subset] = \
self.subset_evaluator.evaluate(ritz, _subset)
except utils.AssumptionError:
# no evaluation possible -> move on
pass
# I in algo
current_subset = frozenset()
# evaluate empty set
evaluate(current_subset, overall_evaluations)
while True:
# get a list of subset candidates for inclusion in current_subset
# (S in algo)
remaining_subset = set(range(len(ritz.values))) \
.difference(current_subset)
subsets = self.subsets_generator.generate(ritz, remaining_subset)
# no more candidates to check?
if len(subsets) == 0:
break
# evaluate candidates
evaluations = {}
for subset in subsets:
eval_subset = current_subset.union(subset)
evaluate(eval_subset, evaluations)
if len(evaluations) > 0:
current_subset = min(evaluations, key=evaluations.get)
else:
# fallback: pick the subset with smallest residual
# note: only a bound is used if the subset consists of more
# than one index.
resnorms = [numpy.sum(ritz.resnorms[list(subset)])
for subset in subsets]
subset = subsets[numpy.argmin(resnorms)]
current_subset = current_subset.union(subset)
overall_evaluations.update(evaluations)
if len(overall_evaluations) > 0:
            # if there was a successful evaluation: pick the best one
selection = list(min(overall_evaluations,
key=overall_evaluations.get))
else:
# otherwise: return empty list
selection = []
# debug output requested?
if self.print_results == 'number':
print('# of selected deflation vectors: {0}'
.format(len(selection)))
elif self.print_results == 'values':
print('{0} Ritz values corresponding to selected deflation '
.format(len(selection)) + 'vectors: '
+ (', '.join([str(el) for el in ritz.values[selection]])))
elif self.print_results == 'timings':
import operator
print('Timings for all successfully evaluated choices of '
'deflation vectors with corresponding Ritz values:')
for subset, time in sorted(overall_evaluations.items(),
key=operator.itemgetter(1)):
print(' {0}s: '.format(time)
+ ', '.join([str(el)
for el in ritz.values[list(subset)]]))
elif self.print_results is None:
pass
else:
raise utils.ArgumentError(
'Invalid value `{0}` for argument `print_result`. '
.format(self.print_results)
+ 'Valid are `None`, `number`, `values` and `timings`.')
        return selection | Return candidate set with smallest goal functional. | Below is the instruction that describes the task:
### Input:
Return candidate set with smallest goal functional.
### Response:
def _get_best_subset(self, ritz):
'''Return candidate set with smallest goal functional.'''
# (c,\omega(c)) for all considered subsets c
overall_evaluations = {}
def evaluate(_subset, _evaluations):
try:
_evaluations[_subset] = \
self.subset_evaluator.evaluate(ritz, _subset)
except utils.AssumptionError:
# no evaluation possible -> move on
pass
# I in algo
current_subset = frozenset()
# evaluate empty set
evaluate(current_subset, overall_evaluations)
while True:
# get a list of subset candidates for inclusion in current_subset
# (S in algo)
remaining_subset = set(range(len(ritz.values))) \
.difference(current_subset)
subsets = self.subsets_generator.generate(ritz, remaining_subset)
# no more candidates to check?
if len(subsets) == 0:
break
# evaluate candidates
evaluations = {}
for subset in subsets:
eval_subset = current_subset.union(subset)
evaluate(eval_subset, evaluations)
if len(evaluations) > 0:
current_subset = min(evaluations, key=evaluations.get)
else:
# fallback: pick the subset with smallest residual
# note: only a bound is used if the subset consists of more
# than one index.
resnorms = [numpy.sum(ritz.resnorms[list(subset)])
for subset in subsets]
subset = subsets[numpy.argmin(resnorms)]
current_subset = current_subset.union(subset)
overall_evaluations.update(evaluations)
if len(overall_evaluations) > 0:
            # if there was a successful evaluation: pick the best one
selection = list(min(overall_evaluations,
key=overall_evaluations.get))
else:
# otherwise: return empty list
selection = []
# debug output requested?
if self.print_results == 'number':
print('# of selected deflation vectors: {0}'
.format(len(selection)))
elif self.print_results == 'values':
print('{0} Ritz values corresponding to selected deflation '
.format(len(selection)) + 'vectors: '
+ (', '.join([str(el) for el in ritz.values[selection]])))
elif self.print_results == 'timings':
import operator
print('Timings for all successfully evaluated choices of '
'deflation vectors with corresponding Ritz values:')
for subset, time in sorted(overall_evaluations.items(),
key=operator.itemgetter(1)):
print(' {0}s: '.format(time)
+ ', '.join([str(el)
for el in ritz.values[list(subset)]]))
elif self.print_results is None:
pass
else:
raise utils.ArgumentError(
'Invalid value `{0}` for argument `print_result`. '
.format(self.print_results)
+ 'Valid are `None`, `number`, `values` and `timings`.')
return selection |
def dump(self, zone, output_dir, lenient, split, source, *sources):
'''
Dump zone data from the specified source
'''
self.log.info('dump: zone=%s, sources=%s', zone, sources)
# We broke out source to force at least one to be passed, add it to any
# others we got.
sources = [source] + list(sources)
try:
sources = [self.providers[s] for s in sources]
except KeyError as e:
raise Exception('Unknown source: {}'.format(e.args[0]))
clz = YamlProvider
if split:
clz = SplitYamlProvider
target = clz('dump', output_dir)
zone = Zone(zone, self.configured_sub_zones(zone))
for source in sources:
source.populate(zone, lenient=lenient)
plan = target.plan(zone)
if plan is None:
plan = Plan(zone, zone, [], False)
        target.apply(plan) | Dump zone data from the specified source | Below is the instruction that describes the task:
### Input:
Dump zone data from the specified source
### Response:
def dump(self, zone, output_dir, lenient, split, source, *sources):
'''
Dump zone data from the specified source
'''
self.log.info('dump: zone=%s, sources=%s', zone, sources)
# We broke out source to force at least one to be passed, add it to any
# others we got.
sources = [source] + list(sources)
try:
sources = [self.providers[s] for s in sources]
except KeyError as e:
raise Exception('Unknown source: {}'.format(e.args[0]))
clz = YamlProvider
if split:
clz = SplitYamlProvider
target = clz('dump', output_dir)
zone = Zone(zone, self.configured_sub_zones(zone))
for source in sources:
source.populate(zone, lenient=lenient)
plan = target.plan(zone)
if plan is None:
plan = Plan(zone, zone, [], False)
target.apply(plan) |
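A hedged usage sketch for the dump method above, assuming manager is the configured manager object that owns it and 'config' names an existing provider in its configuration.

# manager is assumed to be the configured Manager instance that owns this method
manager.dump('example.com.', output_dir='./zone-dump', lenient=False,
             split=False, source='config')
# ./zone-dump now holds a YAML dump with every record the source could populate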
def JR(self,**kwargs):
"""
NAME:
JR
PURPOSE:
Calculate the radial action
INPUT:
fixed_quad= (False) if True, use n=10 fixed_quad
+scipy.integrate.quad keywords
OUTPUT:
           J_R(R,vR,vT)/ro/vc + estimate of the error (nan for fixed_quad)
HISTORY:
2012-11-27 - Written - Bovy (IAS)
"""
if hasattr(self,'_JR'): #pragma: no cover
return self._JR
umin, umax= self.calcUminUmax()
#print self._ux, self._pux, (umax-umin)/umax
if (umax-umin)/umax < 10.**-6: return nu.array([0.])
order= kwargs.pop('order',10)
if kwargs.pop('fixed_quad',False):
# factor in next line bc integrand=/2delta^2
self._JR= 1./nu.pi*nu.sqrt(2.)*self._delta\
*integrate.fixed_quad(_JRStaeckelIntegrand,
umin,umax,
args=(self._E,self._Lz,self._I3U,
self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot),
n=order,
**kwargs)[0]
else:
self._JR= 1./nu.pi*nu.sqrt(2.)*self._delta\
*integrate.quad(_JRStaeckelIntegrand,
umin,umax,
args=(self._E,self._Lz,self._I3U,
self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot),
**kwargs)[0]
return self._JR | NAME:
JR
PURPOSE:
Calculate the radial action
INPUT:
fixed_quad= (False) if True, use n=10 fixed_quad
+scipy.integrate.quad keywords
OUTPUT:
           J_R(R,vR,vT)/ro/vc + estimate of the error (nan for fixed_quad)
HISTORY:
           2012-11-27 - Written - Bovy (IAS) | Below is the instruction that describes the task:
### Input:
NAME:
JR
PURPOSE:
Calculate the radial action
INPUT:
fixed_quad= (False) if True, use n=10 fixed_quad
+scipy.integrate.quad keywords
OUTPUT:
   J_R(R,vR,vT)/ro/vc + estimate of the error (nan for fixed_quad)
HISTORY:
2012-11-27 - Written - Bovy (IAS)
### Response:
def JR(self,**kwargs):
"""
NAME:
JR
PURPOSE:
Calculate the radial action
INPUT:
fixed_quad= (False) if True, use n=10 fixed_quad
+scipy.integrate.quad keywords
OUTPUT:
           J_R(R,vR,vT)/ro/vc + estimate of the error (nan for fixed_quad)
HISTORY:
2012-11-27 - Written - Bovy (IAS)
"""
if hasattr(self,'_JR'): #pragma: no cover
return self._JR
umin, umax= self.calcUminUmax()
#print self._ux, self._pux, (umax-umin)/umax
if (umax-umin)/umax < 10.**-6: return nu.array([0.])
order= kwargs.pop('order',10)
if kwargs.pop('fixed_quad',False):
# factor in next line bc integrand=/2delta^2
self._JR= 1./nu.pi*nu.sqrt(2.)*self._delta\
*integrate.fixed_quad(_JRStaeckelIntegrand,
umin,umax,
args=(self._E,self._Lz,self._I3U,
self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot),
n=order,
**kwargs)[0]
else:
self._JR= 1./nu.pi*nu.sqrt(2.)*self._delta\
*integrate.quad(_JRStaeckelIntegrand,
umin,umax,
args=(self._E,self._Lz,self._I3U,
self._delta,
self._u0,self._sinhu0**2.,
self._vx,self._sinvx**2.,
self._potu0v0,self._pot),
**kwargs)[0]
return self._JR |
def process(self):
"""Construct and start a new File hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection.
"""
print('Hunt to collect {0:d} items'.format(len(self.file_path_list)))
print('Files to be collected: {0!s}'.format(self.file_path_list))
hunt_action = flows_pb2.FileFinderAction(
action_type=flows_pb2.FileFinderAction.DOWNLOAD)
hunt_args = flows_pb2.FileFinderArgs(
paths=self.file_path_list, action=hunt_action)
return self._create_hunt('FileFinder', hunt_args) | Construct and start a new File hunt.
Returns:
The newly created GRR hunt object.
Raises:
      RuntimeError: if no items specified for collection. | Below is the instruction that describes the task:
### Input:
Construct and start a new File hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection.
### Response:
def process(self):
"""Construct and start a new File hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection.
"""
print('Hunt to collect {0:d} items'.format(len(self.file_path_list)))
print('Files to be collected: {0!s}'.format(self.file_path_list))
hunt_action = flows_pb2.FileFinderAction(
action_type=flows_pb2.FileFinderAction.DOWNLOAD)
hunt_args = flows_pb2.FileFinderArgs(
paths=self.file_path_list, action=hunt_action)
return self._create_hunt('FileFinder', hunt_args) |
def _get_individual_image(self, run, tag, index, sample):
"""
Returns the actual image bytes for a given image.
Args:
run: The name of the run the image belongs to.
tag: The name of the tag the images belongs to.
index: The index of the image in the current reservoir.
sample: The zero-indexed sample of the image to retrieve (for example,
setting `sample` to `2` will fetch the third image sample at `step`).
Returns:
A bytestring of the raw image bytes.
"""
if self._db_connection_provider:
db = self._db_connection_provider()
cursor = db.execute(
'''
SELECT data
FROM TensorStrings
WHERE
/* Skip first 2 elements which are width and height. */
idx = 2 + :sample
AND tensor_rowid = (
SELECT rowid
FROM Tensors
WHERE
series = (
SELECT tag_id
FROM Runs
CROSS JOIN Tags USING (run_id)
WHERE
Runs.run_name = :run
AND Tags.tag_name = :tag)
AND step IS NOT NULL
AND dtype = :dtype
/* Should be n-vector, n >= 3: [width, height, samples...] */
AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3)
ORDER BY step
LIMIT 1
OFFSET :index)
''',
{'run': run,
'tag': tag,
'sample': sample,
'index': index,
'dtype': tf.string.as_datatype_enum})
(data,) = cursor.fetchone()
return six.binary_type(data)
events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample)
images = events[index].tensor_proto.string_val[2:] # skip width, height
return images[sample] | Returns the actual image bytes for a given image.
Args:
run: The name of the run the image belongs to.
tag: The name of the tag the images belongs to.
index: The index of the image in the current reservoir.
sample: The zero-indexed sample of the image to retrieve (for example,
setting `sample` to `2` will fetch the third image sample at `step`).
Returns:
      A bytestring of the raw image bytes. | Below is the instruction that describes the task:
### Input:
Returns the actual image bytes for a given image.
Args:
run: The name of the run the image belongs to.
tag: The name of the tag the images belongs to.
index: The index of the image in the current reservoir.
sample: The zero-indexed sample of the image to retrieve (for example,
setting `sample` to `2` will fetch the third image sample at `step`).
Returns:
A bytestring of the raw image bytes.
### Response:
def _get_individual_image(self, run, tag, index, sample):
"""
Returns the actual image bytes for a given image.
Args:
run: The name of the run the image belongs to.
tag: The name of the tag the images belongs to.
index: The index of the image in the current reservoir.
sample: The zero-indexed sample of the image to retrieve (for example,
setting `sample` to `2` will fetch the third image sample at `step`).
Returns:
A bytestring of the raw image bytes.
"""
if self._db_connection_provider:
db = self._db_connection_provider()
cursor = db.execute(
'''
SELECT data
FROM TensorStrings
WHERE
/* Skip first 2 elements which are width and height. */
idx = 2 + :sample
AND tensor_rowid = (
SELECT rowid
FROM Tensors
WHERE
series = (
SELECT tag_id
FROM Runs
CROSS JOIN Tags USING (run_id)
WHERE
Runs.run_name = :run
AND Tags.tag_name = :tag)
AND step IS NOT NULL
AND dtype = :dtype
/* Should be n-vector, n >= 3: [width, height, samples...] */
AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3)
ORDER BY step
LIMIT 1
OFFSET :index)
''',
{'run': run,
'tag': tag,
'sample': sample,
'index': index,
'dtype': tf.string.as_datatype_enum})
(data,) = cursor.fetchone()
return six.binary_type(data)
events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample)
images = events[index].tensor_proto.string_val[2:] # skip width, height
return images[sample] |
def trimmed(self, pred=trimmed_pred_default):
"""Trim a ParseTree.
A node is trimmed if pred(node) returns True.
"""
new_children = []
for child in self.children:
if isinstance(child, ParseNode):
new_child = child.trimmed(pred)
else:
new_child = child
if not pred(new_child, self):
new_children.append(new_child)
return ParseNode(self.node_type,
children=new_children,
consumed=self.consumed,
position=self.position,
ignored=self.ignored) | Trim a ParseTree.
        A node is trimmed if pred(node) returns True. | Below is the instruction that describes the task:
### Input:
Trim a ParseTree.
A node is trimmed if pred(node) returns True.
### Response:
def trimmed(self, pred=trimmed_pred_default):
"""Trim a ParseTree.
A node is trimmed if pred(node) returns True.
"""
new_children = []
for child in self.children:
if isinstance(child, ParseNode):
new_child = child.trimmed(pred)
else:
new_child = child
if not pred(new_child, self):
new_children.append(new_child)
return ParseNode(self.node_type,
children=new_children,
consumed=self.consumed,
position=self.position,
ignored=self.ignored) |
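A sketch of a custom predicate for trimmed above; tree is assumed to be a ParseNode built by the surrounding parser, and the predicate name is made up for illustration.

def drop_ignored(child, parent):
    # prune nodes flagged as ignored by the grammar, plus empty string leaves
    return getattr(child, 'ignored', False) or child == ''

# tree is assumed to be a ParseNode produced by the surrounding parser
slim_tree = tree.trimmed(pred=drop_ignored)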
def StartingKey(self, evt):
"""
If the editor is enabled by pressing keys on the grid, this will be
called to let the editor do something about that first key if desired.
"""
key = evt.GetKeyCode()
ch = None
if key in [
wx.WXK_NUMPAD0, wx.WXK_NUMPAD1, wx.WXK_NUMPAD2, wx.WXK_NUMPAD3,
wx.WXK_NUMPAD4, wx.WXK_NUMPAD5, wx.WXK_NUMPAD6, wx.WXK_NUMPAD7,
wx.WXK_NUMPAD8, wx.WXK_NUMPAD9]:
            ch = chr(ord('0') + key - wx.WXK_NUMPAD0)
elif key < 256 and key >= 0 and chr(key) in string.printable:
ch = chr(key)
if ch is not None and self._tc.IsEnabled():
# For this example, replace the text. Normally we would append it.
#self._tc.AppendText(ch)
self._tc.SetValue(ch)
self._tc.SetInsertionPointEnd()
else:
evt.Skip() | If the editor is enabled by pressing keys on the grid, this will be
        called to let the editor do something about that first key if desired. | Below is the instruction that describes the task:
### Input:
If the editor is enabled by pressing keys on the grid, this will be
called to let the editor do something about that first key if desired.
### Response:
def StartingKey(self, evt):
"""
If the editor is enabled by pressing keys on the grid, this will be
called to let the editor do something about that first key if desired.
"""
key = evt.GetKeyCode()
ch = None
if key in [
wx.WXK_NUMPAD0, wx.WXK_NUMPAD1, wx.WXK_NUMPAD2, wx.WXK_NUMPAD3,
wx.WXK_NUMPAD4, wx.WXK_NUMPAD5, wx.WXK_NUMPAD6, wx.WXK_NUMPAD7,
wx.WXK_NUMPAD8, wx.WXK_NUMPAD9]:
            ch = chr(ord('0') + key - wx.WXK_NUMPAD0)
elif key < 256 and key >= 0 and chr(key) in string.printable:
ch = chr(key)
if ch is not None and self._tc.IsEnabled():
# For this example, replace the text. Normally we would append it.
#self._tc.AppendText(ch)
self._tc.SetValue(ch)
self._tc.SetInsertionPointEnd()
else:
evt.Skip() |
def p_iteration_statement_6(self, p):
"""
iteration_statement \
: FOR LPAREN VAR identifier initializer_noin IN expr RPAREN statement
"""
vardecl = self.asttypes.VarDeclNoIn(
identifier=p[4], initializer=p[5])
vardecl.setpos(p, 3)
p[0] = self.asttypes.ForIn(item=vardecl, iterable=p[7], statement=p[9])
p[0].setpos(p) | iteration_statement \
          : FOR LPAREN VAR identifier initializer_noin IN expr RPAREN statement | Below is the instruction that describes the task:
### Input:
iteration_statement \
: FOR LPAREN VAR identifier initializer_noin IN expr RPAREN statement
### Response:
def p_iteration_statement_6(self, p):
"""
iteration_statement \
: FOR LPAREN VAR identifier initializer_noin IN expr RPAREN statement
"""
vardecl = self.asttypes.VarDeclNoIn(
identifier=p[4], initializer=p[5])
vardecl.setpos(p, 3)
p[0] = self.asttypes.ForIn(item=vardecl, iterable=p[7], statement=p[9])
p[0].setpos(p) |
def asd(result, reference, voxelspacing=None, connectivity=1):
"""
Average surface distance metric.
Computes the average surface distance (ASD) between the binary objects in two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
asd : float
The average surface distance between the object(s) in ``result`` and the
object(s) in ``reference``. The distance unit is the same as for the spacing
of elements along each dimension, which is usually given in mm.
See also
--------
:func:`assd`
:func:`hd`
Notes
-----
This is not a real metric, as it is directed. See `assd` for a real metric of this.
The method is implemented making use of distance images and simple binary morphology
to achieve high computational speed.
Examples
--------
The `connectivity` determines what pixels/voxels are considered the surface of a
binary object. Take the following binary image showing a cross
>>> from scipy.ndimage.morphology import generate_binary_structure
>>> cross = generate_binary_structure(2, 1)
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
With `connectivity` set to `1` a 4-neighbourhood is considered when determining the
object surface, resulting in the surface
.. code-block:: python
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
Changing `connectivity` to `2`, a 8-neighbourhood is considered and we get:
.. code-block:: python
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
    , as a diagonal connection no longer qualifies as valid object surface.
This influences the results `asd` returns. Imagine we want to compute the surface
distance of our cross to a cube-like object:
>>> cube = generate_binary_structure(2, 1)
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
, which surface is, independent of the `connectivity` value set, always
.. code-block:: python
array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
Using a `connectivity` of `1` we get
>>> asd(cross, cube, connectivity=1)
0.0
while a value of `2` returns us
>>> asd(cross, cube, connectivity=2)
0.20000000000000001
due to the center of the cross being considered surface as well.
"""
sds = __surface_distances(result, reference, voxelspacing, connectivity)
asd = sds.mean()
return asd | Average surface distance metric.
Computes the average surface distance (ASD) between the binary objects in two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
asd : float
The average surface distance between the object(s) in ``result`` and the
object(s) in ``reference``. The distance unit is the same as for the spacing
of elements along each dimension, which is usually given in mm.
See also
--------
:func:`assd`
:func:`hd`
Notes
-----
This is not a real metric, as it is directed. See `assd` for a real metric of this.
The method is implemented making use of distance images and simple binary morphology
to achieve high computational speed.
Examples
--------
The `connectivity` determines what pixels/voxels are considered the surface of a
binary object. Take the following binary image showing a cross
>>> from scipy.ndimage.morphology import generate_binary_structure
>>> cross = generate_binary_structure(2, 1)
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
With `connectivity` set to `1` a 4-neighbourhood is considered when determining the
object surface, resulting in the surface
.. code-block:: python
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
Changing `connectivity` to `2`, a 8-neighbourhood is considered and we get:
.. code-block:: python
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
    , as a diagonal connection no longer qualifies as valid object surface.
This influences the results `asd` returns. Imagine we want to compute the surface
distance of our cross to a cube-like object:
>>> cube = generate_binary_structure(2, 1)
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
, which surface is, independent of the `connectivity` value set, always
.. code-block:: python
array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
Using a `connectivity` of `1` we get
>>> asd(cross, cube, connectivity=1)
0.0
while a value of `2` returns us
>>> asd(cross, cube, connectivity=2)
0.20000000000000001
    due to the center of the cross being considered surface as well. | Below is the instruction that describes the task:
### Input:
Average surface distance metric.
Computes the average surface distance (ASD) between the binary objects in two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
asd : float
The average surface distance between the object(s) in ``result`` and the
object(s) in ``reference``. The distance unit is the same as for the spacing
of elements along each dimension, which is usually given in mm.
See also
--------
:func:`assd`
:func:`hd`
Notes
-----
This is not a real metric, as it is directed. See `assd` for a real metric of this.
The method is implemented making use of distance images and simple binary morphology
to achieve high computational speed.
Examples
--------
The `connectivity` determines what pixels/voxels are considered the surface of a
binary object. Take the following binary image showing a cross
>>> from scipy.ndimage.morphology import generate_binary_structure
>>> cross = generate_binary_structure(2, 1)
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
With `connectivity` set to `1` a 4-neighbourhood is considered when determining the
object surface, resulting in the surface
.. code-block:: python
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
Changing `connectivity` to `2`, a 8-neighbourhood is considered and we get:
.. code-block:: python
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
    , as a diagonal connection no longer qualifies as valid object surface.
This influences the results `asd` returns. Imagine we want to compute the surface
distance of our cross to a cube-like object:
>>> cube = generate_binary_structure(2, 1)
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
, which surface is, independent of the `connectivity` value set, always
.. code-block:: python
array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
Using a `connectivity` of `1` we get
>>> asd(cross, cube, connectivity=1)
0.0
while a value of `2` returns us
>>> asd(cross, cube, connectivity=2)
0.20000000000000001
due to the center of the cross being considered surface as well.
### Response:
def asd(result, reference, voxelspacing=None, connectivity=1):
"""
Average surface distance metric.
Computes the average surface distance (ASD) between the binary objects in two images.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
voxelspacing : float or sequence of floats, optional
The voxelspacing in a distance unit i.e. spacing of elements
along each dimension. If a sequence, must be of length equal to
the input rank; if a single number, this is used for all axes. If
not specified, a grid spacing of unity is implied.
connectivity : int
The neighbourhood/connectivity considered when determining the surface
of the binary objects. This value is passed to
`scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
The decision on the connectivity is important, as it can influence the results
strongly. If in doubt, leave it as it is.
Returns
-------
asd : float
The average surface distance between the object(s) in ``result`` and the
object(s) in ``reference``. The distance unit is the same as for the spacing
of elements along each dimension, which is usually given in mm.
See also
--------
:func:`assd`
:func:`hd`
Notes
-----
This is not a real metric, as it is directed. See `assd` for a real metric of this.
The method is implemented making use of distance images and simple binary morphology
to achieve high computational speed.
Examples
--------
The `connectivity` determines what pixels/voxels are considered the surface of a
binary object. Take the following binary image showing a cross
>>> from scipy.ndimage.morphology import generate_binary_structure
>>> cross = generate_binary_structure(2, 1)
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
With `connectivity` set to `1` a 4-neighbourhood is considered when determining the
object surface, resulting in the surface
.. code-block:: python
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
Changing `connectivity` to `2`, a 8-neighbourhood is considered and we get:
.. code-block:: python
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
    , as a diagonal connection no longer qualifies as valid object surface.
This influences the results `asd` returns. Imagine we want to compute the surface
distance of our cross to a cube-like object:
>>> cube = generate_binary_structure(2, 1)
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
, which surface is, independent of the `connectivity` value set, always
.. code-block:: python
array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]])
Using a `connectivity` of `1` we get
>>> asd(cross, cube, connectivity=1)
0.0
while a value of `2` returns us
>>> asd(cross, cube, connectivity=2)
0.20000000000000001
due to the center of the cross being considered surface as well.
"""
sds = __surface_distances(result, reference, voxelspacing, connectivity)
asd = sds.mean()
return asd |
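A small self-contained check of the directed behaviour of asd above, using synthetic 2-D masks; the two call orders generally give different values.

import numpy as np

result = np.zeros((32, 32), dtype=bool)
reference = np.zeros((32, 32), dtype=bool)
result[8:24, 8:24] = True         # 16x16 square
reference[10:26, 10:26] = True    # the same square shifted by two pixels

print(asd(result, reference, voxelspacing=(1.0, 1.0)))  # directed: result -> reference
print(asd(reference, result, voxelspacing=(1.0, 1.0)))  # usually not identical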
def write(self, pack_uri, blob):
"""
Write *blob* to this zip package with the membername corresponding to
*pack_uri*.
"""
self._zipf.writestr(pack_uri.membername, blob) | Write *blob* to this zip package with the membername corresponding to
        *pack_uri*. | Below is the instruction that describes the task:
### Input:
Write *blob* to this zip package with the membername corresponding to
*pack_uri*.
### Response:
def write(self, pack_uri, blob):
"""
Write *blob* to this zip package with the membername corresponding to
*pack_uri*.
"""
self._zipf.writestr(pack_uri.membername, blob) |
def place_project_bid(session, project_id, bidder_id, description, amount,
period, milestone_percentage):
"""
Place a bid on a project
"""
bid_data = {
'project_id': project_id,
'bidder_id': bidder_id,
'description': description,
'amount': amount,
'period': period,
'milestone_percentage': milestone_percentage,
}
# POST /api/projects/0.1/bids/
response = make_post_request(session, 'bids', json_data=bid_data)
json_data = response.json()
if response.status_code == 200:
bid_data = json_data['result']
return Bid(bid_data)
else:
raise BidNotPlacedException(message=json_data['message'],
error_code=json_data['error_code'],
                                    request_id=json_data['request_id']) | Place a bid on a project | Below is the instruction that describes the task:
### Input:
Place a bid on a project
### Response:
def place_project_bid(session, project_id, bidder_id, description, amount,
period, milestone_percentage):
"""
Place a bid on a project
"""
bid_data = {
'project_id': project_id,
'bidder_id': bidder_id,
'description': description,
'amount': amount,
'period': period,
'milestone_percentage': milestone_percentage,
}
# POST /api/projects/0.1/bids/
response = make_post_request(session, 'bids', json_data=bid_data)
json_data = response.json()
if response.status_code == 200:
bid_data = json_data['result']
return Bid(bid_data)
else:
raise BidNotPlacedException(message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id']) |
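A hedged usage sketch for place_project_bid above; session is assumed to be an authenticated session object from the same SDK, and all identifiers and amounts are placeholders.

# session is assumed to be an authenticated Session from the same SDK
try:
    bid = place_project_bid(session, project_id=201, bidder_id=101,
                            description='I can deliver this within two days.',
                            amount=10, period=2, milestone_percentage=100)
    print('bid placed successfully')
except BidNotPlacedException as exc:
    print('bid was rejected: {}'.format(exc))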
def transfer(self, new_region_slug):
"""
Transfer the image
"""
return self.get_data(
"images/%s/actions/" % self.id,
type=POST,
params={"type": "transfer", "region": new_region_slug}
        ) | Transfer the image | Below is the instruction that describes the task:
### Input:
Transfer the image
### Response:
def transfer(self, new_region_slug):
"""
Transfer the image
"""
return self.get_data(
"images/%s/actions/" % self.id,
type=POST,
params={"type": "transfer", "region": new_region_slug}
) |
def project_variant_forward(self, c_variant):
"""
project c_variant on the source transcript onto the destination transcript
:param c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the source transcript
:returns: c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the destination transcript
"""
if c_variant.ac != self.src_tm.tx_ac:
raise RuntimeError("variant accession does not match that used to initialize " +
__name__)
new_c_variant = copy.deepcopy(c_variant)
new_c_variant.ac = self.dst_tm.tx_ac
new_c_variant.posedit.pos = self.project_interval_forward(c_variant.posedit.pos)
return new_c_variant | project c_variant on the source transcript onto the destination transcript
:param c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the source transcript
        :returns: c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the destination transcript | Below is the instruction that describes the task:
### Input:
project c_variant on the source transcript onto the destination transcript
:param c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the source transcript
:returns: c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the destination transcript
### Response:
def project_variant_forward(self, c_variant):
"""
project c_variant on the source transcript onto the destination transcript
:param c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the source transcript
:returns: c_variant: an :class:`hgvs.sequencevariant.SequenceVariant` object on the destination transcript
"""
if c_variant.ac != self.src_tm.tx_ac:
raise RuntimeError("variant accession does not match that used to initialize " +
__name__)
new_c_variant = copy.deepcopy(c_variant)
new_c_variant.ac = self.dst_tm.tx_ac
new_c_variant.posedit.pos = self.project_interval_forward(c_variant.posedit.pos)
return new_c_variant |
def save_data(self, trigger_id, **data):
"""
used to save data to the service
but first of all
make some work about the data to find
and the data to convert
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
title = self.set_title(data)
title = HtmlEntities(title).html_entity_decode
content = self.set_content(data)
content = HtmlEntities(content).html_entity_decode
if data.get('output_format'):
# pandoc to convert tools
import pypandoc
content = pypandoc.convert(content, str(data.get('output_format')), format='html')
return title, content | used to save data to the service
but first of all
make some work about the data to find
and the data to convert
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
        :rtype: boolean | Below is the instruction that describes the task:
### Input:
used to save data to the service
but first of all
make some work about the data to find
and the data to convert
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
### Response:
def save_data(self, trigger_id, **data):
"""
used to save data to the service
but first of all
make some work about the data to find
and the data to convert
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
title = self.set_title(data)
title = HtmlEntities(title).html_entity_decode
content = self.set_content(data)
content = HtmlEntities(content).html_entity_decode
if data.get('output_format'):
# pandoc to convert tools
import pypandoc
content = pypandoc.convert(content, str(data.get('output_format')), format='html')
return title, content |
def create_analysis(self, config):
"""
Create Analysis and save in Naarad from config
:param config:
:return:
"""
self._default_test_id += 1
self._analyses[self._default_test_id] = _Analysis(ts_start=None, config=config, test_id=self._default_test_id) | Create Analysis and save in Naarad from config
:param config:
        :return: | Below is the instruction that describes the task:
### Input:
Create Analysis and save in Naarad from config
:param config:
:return:
### Response:
def create_analysis(self, config):
"""
Create Analysis and save in Naarad from config
:param config:
:return:
"""
self._default_test_id += 1
self._analyses[self._default_test_id] = _Analysis(ts_start=None, config=config, test_id=self._default_test_id) |
def acquire(self, **kwargs):
"""
Download the file and return its path
Returns
-------
str or None
The path of the file in BatchUp's temporary directory or None if
the download failed.
"""
return config.download_data(self.temp_filename, self.url,
self.sha256) | Download the file and return its path
Returns
-------
str or None
The path of the file in BatchUp's temporary directory or None if
            the download failed. | Below is the instruction that describes the task:
### Input:
Download the file and return its path
Returns
-------
str or None
The path of the file in BatchUp's temporary directory or None if
the download failed.
### Response:
def acquire(self, **kwargs):
"""
Download the file and return its path
Returns
-------
str or None
The path of the file in BatchUp's temporary directory or None if
the download failed.
"""
return config.download_data(self.temp_filename, self.url,
self.sha256) |
def get_cohesive_energy(self, material_id, per_atom=False):
"""
        Gets the cohesive energy for a material (eV per formula unit). Cohesive energy
is defined as the difference between the bulk energy and the sum of
total DFT energy of isolated atoms for atom elements in the bulk.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
per_atom (bool): Whether or not to return cohesive energy per atom
Returns:
Cohesive energy (eV).
"""
entry = self.get_entry_by_material_id(material_id)
ebulk = entry.energy / \
entry.composition.get_integer_formula_and_factor()[1]
comp_dict = entry.composition.reduced_composition.as_dict()
isolated_atom_e_sum, n = 0, 0
for el in comp_dict.keys():
e = self._make_request("/element/%s/tasks/isolated_atom" % (el),
mp_decode=False)[0]
isolated_atom_e_sum += e['output']["final_energy"] * comp_dict[el]
n += comp_dict[el]
ecoh_per_formula = isolated_atom_e_sum - ebulk
        return ecoh_per_formula/n if per_atom else ecoh_per_formula | Gets the cohesive energy for a material (eV per formula unit). Cohesive energy
is defined as the difference between the bulk energy and the sum of
total DFT energy of isolated atoms for atom elements in the bulk.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
per_atom (bool): Whether or not to return cohesive energy per atom
Returns:
            Cohesive energy (eV). | Below is the instruction that describes the task:
### Input:
Gets the cohesive energy for a material (eV per formula unit). Cohesive energy
is defined as the difference between the bulk energy and the sum of
total DFT energy of isolated atoms for atom elements in the bulk.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
per_atom (bool): Whether or not to return cohesive energy per atom
Returns:
Cohesive energy (eV).
### Response:
def get_cohesive_energy(self, material_id, per_atom=False):
"""
        Gets the cohesive energy for a material (eV per formula unit). Cohesive energy
is defined as the difference between the bulk energy and the sum of
total DFT energy of isolated atoms for atom elements in the bulk.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
per_atom (bool): Whether or not to return cohesive energy per atom
Returns:
Cohesive energy (eV).
"""
entry = self.get_entry_by_material_id(material_id)
ebulk = entry.energy / \
entry.composition.get_integer_formula_and_factor()[1]
comp_dict = entry.composition.reduced_composition.as_dict()
isolated_atom_e_sum, n = 0, 0
for el in comp_dict.keys():
e = self._make_request("/element/%s/tasks/isolated_atom" % (el),
mp_decode=False)[0]
isolated_atom_e_sum += e['output']["final_energy"] * comp_dict[el]
n += comp_dict[el]
ecoh_per_formula = isolated_atom_e_sum - ebulk
return ecoh_per_formula/n if per_atom else ecoh_per_formula |
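A minimal usage sketch for the cohesive-energy method above, assuming it lives on pymatgen's MPRester client; the API key is a placeholder and 'mp-149' (silicon) is just an illustrative material id:

from pymatgen.ext.matproj import MPRester

# Placeholder API key; assumes the method above is MPRester.get_cohesive_energy.
with MPRester("MY_API_KEY") as mpr:
    e_per_formula = mpr.get_cohesive_energy("mp-149")
    e_per_atom = mpr.get_cohesive_energy("mp-149", per_atom=True)
    print(e_per_formula, e_per_atom)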
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data | Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data. | Below is the the instruction that describes the task:
### Input:
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
### Response:
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data |
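A self-contained sketch of how define_attribute might be called; the ArffLikeWriter wrapper and the TYPES tuple are stand-ins for the surrounding class, and only the method body mirrors the record above:

# Stand-in container so the method can run on its own.
TYPES = ('integer', 'real', 'numeric', 'string', 'date', 'nominal')

class ArffLikeWriter:
    def __init__(self):
        self.attributes = []
        self.attribute_types = {}
        self.attribute_data = {}

    def define_attribute(self, name, atype, data=None):
        self.attributes.append(name)
        assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES))
        self.attribute_types[name] = atype
        self.attribute_data[name] = data

w = ArffLikeWriter()
w.define_attribute('temperature', 'real')                          # plain numeric column
w.define_attribute('outlook', 'nominal', data=['sunny', 'rainy'])  # nominal: list of possible values
w.define_attribute('recorded', 'date', data='yyyy-MM-dd')          # date: format string
print(w.attributes, w.attribute_types)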
def get_default_net_device():
""" Find the device where the default route is. """
with open('/proc/net/route') as fh:
for line in fh:
iface, dest, _ = line.split(None, 2)
if dest == '00000000':
return iface
return None | Find the device where the default route is. | Below is the the instruction that describes the task:
### Input:
Find the device where the default route is.
### Response:
def get_default_net_device():
""" Find the device where the default route is. """
with open('/proc/net/route') as fh:
for line in fh:
iface, dest, _ = line.split(None, 2)
if dest == '00000000':
return iface
return None |
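The function above relies on the default route appearing in /proc/net/route with an all-zero destination. A small self-contained sketch of the same parsing against an in-memory sample table (the sample rows are illustrative):

import io

SAMPLE_ROUTE_TABLE = (
    "Iface\tDestination\tGateway\tFlags\tRefCnt\tUse\tMetric\tMask\tMTU\tWindow\tIRTT\n"
    "eth0\t00000000\t0101A8C0\t0003\t0\t0\t100\t00000000\t0\t0\t0\n"    # default route
    "eth0\t0001A8C0\t00000000\t0001\t0\t0\t100\t00FFFFFF\t0\t0\t0\n"    # local subnet
)

def default_device(fh):
    # Same logic as get_default_net_device(), but reads from any file-like object.
    for line in fh:
        iface, dest, _ = line.split(None, 2)
        if dest == '00000000':
            return iface
    return None

print(default_device(io.StringIO(SAMPLE_ROUTE_TABLE)))  # -> 'eth0'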
def threads_bt(self):
"""Display thread backtraces."""
import threading
import traceback
threads = {}
for thread in threading.enumerate():
frames = sys._current_frames().get(thread.ident)
if frames:
stack = traceback.format_stack(frames)
else:
stack = []
threads[thread] = "".join(stack)
return flask.render_template("gourde/threads.html", threads=threads) | Display thread backtraces. | Below is the the instruction that describes the task:
### Input:
Display thread backtraces.
### Response:
def threads_bt(self):
"""Display thread backtraces."""
import threading
import traceback
threads = {}
for thread in threading.enumerate():
frames = sys._current_frames().get(thread.ident)
if frames:
stack = traceback.format_stack(frames)
else:
stack = []
threads[thread] = "".join(stack)
return flask.render_template("gourde/threads.html", threads=threads) |
def create_user(app, appbuilder, role, username, firstname, lastname, email, password):
"""
Create a user
"""
_appbuilder = import_application(app, appbuilder)
role_object = _appbuilder.sm.find_role(role)
user = _appbuilder.sm.add_user(
username, firstname, lastname, email, role_object, password
)
if user:
click.echo(click.style("User {0} created.".format(username), fg="green"))
else:
click.echo(click.style("Error! No user created", fg="red")) | Create a user | Below is the the instruction that describes the task:
### Input:
Create a user
### Response:
def create_user(app, appbuilder, role, username, firstname, lastname, email, password):
"""
Create a user
"""
_appbuilder = import_application(app, appbuilder)
role_object = _appbuilder.sm.find_role(role)
user = _appbuilder.sm.add_user(
username, firstname, lastname, email, role_object, password
)
if user:
click.echo(click.style("User {0} created.".format(username), fg="green"))
else:
click.echo(click.style("Error! No user created", fg="red")) |
def insert_entity(self, table_name, entity, timeout=None):
'''
Inserts a new entity into the table. Throws if an entity with the same
PartitionKey and RowKey already exists.
When inserting an entity into a table, you must specify values for the
PartitionKey and RowKey system properties. Together, these properties
form the primary key and must be unique within the table. Both the
PartitionKey and RowKey values must be string values; each key value may
be up to 64 KB in size. If you are using an integer value for the key
value, you should convert the integer to a fixed-width string, because
they are canonically sorted. For example, you should convert the value
1 to 0000001 to ensure proper sorting.
:param str table_name:
The name of the table to insert the entity into.
:param entity:
The entity to insert. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the inserted entity.
:rtype: str
'''
_validate_not_none('table_name', table_name)
request = _insert_entity(entity)
request.host = self._get_host()
request.path = '/' + _to_str(table_name)
request.query += [('timeout', _int_to_str(timeout))]
response = self._perform_request(request)
return _extract_etag(response) | Inserts a new entity into the table. Throws if an entity with the same
PartitionKey and RowKey already exists.
When inserting an entity into a table, you must specify values for the
PartitionKey and RowKey system properties. Together, these properties
form the primary key and must be unique within the table. Both the
PartitionKey and RowKey values must be string values; each key value may
be up to 64 KB in size. If you are using an integer value for the key
value, you should convert the integer to a fixed-width string, because
they are canonically sorted. For example, you should convert the value
1 to 0000001 to ensure proper sorting.
:param str table_name:
The name of the table to insert the entity into.
:param entity:
The entity to insert. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the inserted entity.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Inserts a new entity into the table. Throws if an entity with the same
PartitionKey and RowKey already exists.
When inserting an entity into a table, you must specify values for the
PartitionKey and RowKey system properties. Together, these properties
form the primary key and must be unique within the table. Both the
PartitionKey and RowKey values must be string values; each key value may
be up to 64 KB in size. If you are using an integer value for the key
value, you should convert the integer to a fixed-width string, because
they are canonically sorted. For example, you should convert the value
1 to 0000001 to ensure proper sorting.
:param str table_name:
The name of the table to insert the entity into.
:param entity:
The entity to insert. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the inserted entity.
:rtype: str
### Response:
def insert_entity(self, table_name, entity, timeout=None):
'''
Inserts a new entity into the table. Throws if an entity with the same
PartitionKey and RowKey already exists.
When inserting an entity into a table, you must specify values for the
PartitionKey and RowKey system properties. Together, these properties
form the primary key and must be unique within the table. Both the
PartitionKey and RowKey values must be string values; each key value may
be up to 64 KB in size. If you are using an integer value for the key
value, you should convert the integer to a fixed-width string, because
they are canonically sorted. For example, you should convert the value
1 to 0000001 to ensure proper sorting.
:param str table_name:
The name of the table to insert the entity into.
:param entity:
The entity to insert. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: a dict or :class:`~azure.storage.table.models.Entity`
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the inserted entity.
:rtype: str
'''
_validate_not_none('table_name', table_name)
request = _insert_entity(entity)
request.host = self._get_host()
request.path = '/' + _to_str(table_name)
request.query += [('timeout', _int_to_str(timeout))]
response = self._perform_request(request)
return _extract_etag(response) |
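A hedged usage sketch for insert_entity, assuming the method above belongs to the legacy azure-storage TableService client; the account credentials, table name, and entity values are placeholders:

from azure.storage.table import TableService

ts = TableService(account_name='myaccount', account_key='MY_KEY')  # placeholder credentials
task = {
    'PartitionKey': 'tasksSeattle',   # required system property
    'RowKey': '0000001',              # fixed-width string key so rows sort as intended
    'description': 'Take out the trash',
    'priority': 200,
}
etag = ts.insert_entity('tasktable', task)  # raises if this PartitionKey/RowKey pair already exists
print(etag)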
def _insert_data(self, connection, name, value, timestamp, interval, config):
'''Helper to insert data into cql.'''
cursor = connection.cursor()
try:
stmt = self._insert_stmt(name, value, timestamp, interval, config)
if stmt:
cursor.execute(stmt)
finally:
cursor.close() | Helper to insert data into cql. | Below is the the instruction that describes the task:
### Input:
Helper to insert data into cql.
### Response:
def _insert_data(self, connection, name, value, timestamp, interval, config):
'''Helper to insert data into cql.'''
cursor = connection.cursor()
try:
stmt = self._insert_stmt(name, value, timestamp, interval, config)
if stmt:
cursor.execute(stmt)
finally:
cursor.close() |
def translate(self, tx, ty):
"""Applies a translation by :obj:`tx`, :obj:`ty`
to the transformation in this matrix.
The effect of the new transformation is to
first translate the coordinates by :obj:`tx` and :obj:`ty`,
then apply the original transformation to the coordinates.
.. note::
This changes the matrix in-place.
:param tx: Amount to translate in the X direction.
:param ty: Amount to translate in the Y direction.
:type tx: float
:type ty: float
"""
cairo.cairo_matrix_translate(self._pointer, tx, ty) | Applies a translation by :obj:`tx`, :obj:`ty`
to the transformation in this matrix.
The effect of the new transformation is to
first translate the coordinates by :obj:`tx` and :obj:`ty`,
then apply the original transformation to the coordinates.
.. note::
This changes the matrix in-place.
:param tx: Amount to translate in the X direction.
:param ty: Amount to translate in the Y direction.
:type tx: float
:type ty: float | Below is the the instruction that describes the task:
### Input:
Applies a translation by :obj:`tx`, :obj:`ty`
to the transformation in this matrix.
The effect of the new transformation is to
first translate the coordinates by :obj:`tx` and :obj:`ty`,
then apply the original transformation to the coordinates.
.. note::
This changes the matrix in-place.
:param tx: Amount to translate in the X direction.
:param ty: Amount to translate in the Y direction.
:type tx: float
:type ty: float
### Response:
def translate(self, tx, ty):
"""Applies a translation by :obj:`tx`, :obj:`ty`
to the transformation in this matrix.
The effect of the new transformation is to
first translate the coordinates by :obj:`tx` and :obj:`ty`,
then apply the original transformation to the coordinates.
.. note::
This changes the matrix in-place.
:param tx: Amount to translate in the X direction.
:param ty: Amount to translate in the Y direction.
:type tx: float
:type ty: float
"""
cairo.cairo_matrix_translate(self._pointer, tx, ty) |
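A small sketch of the "translate first, then apply the original transform" semantics described above, assuming the class is cairocffi's Matrix wrapper around cairo_matrix_translate:

import cairocffi

m = cairocffi.Matrix(xx=2, yy=2)   # original transformation: scale x and y by 2
m.translate(5, 0)                  # prepend a translation of +5 in x
# (0, 0) is translated to (5, 0) first, then scaled by the original matrix to (10, 0).
print(m.transform_point(0, 0))     # -> (10.0, 0.0)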
def first(self, default=None, as_dict=False, as_ordereddict=False):
"""Returns a single record for the RecordCollection, or `default`. If
`default` is an instance or subclass of Exception, then raise it
instead of returning it."""
# Try to get a record, or return/raise default.
try:
record = self[0]
except IndexError:
if isexception(default):
raise default
return default
# Cast and return.
if as_dict:
return record.as_dict()
elif as_ordereddict:
return record.as_dict(ordered=True)
else:
return record | Returns a single record for the RecordCollection, or `default`. If
`default` is an instance or subclass of Exception, then raise it
instead of returning it. | Below is the the instruction that describes the task:
### Input:
Returns a single record for the RecordCollection, or `default`. If
`default` is an instance or subclass of Exception, then raise it
instead of returning it.
### Response:
def first(self, default=None, as_dict=False, as_ordereddict=False):
"""Returns a single record for the RecordCollection, or `default`. If
`default` is an instance or subclass of Exception, then raise it
instead of returning it."""
# Try to get a record, or return/raise default.
try:
record = self[0]
except IndexError:
if isexception(default):
raise default
return default
# Cast and return.
if as_dict:
return record.as_dict()
elif as_ordereddict:
return record.as_dict(ordered=True)
else:
return record |
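A sketch of the main call styles, assuming this is the RecordCollection returned by the records library; the in-memory SQLite URL and the queries are placeholders:

import records

db = records.Database('sqlite:///:memory:')          # placeholder connection
rows = db.query("SELECT 'ada' AS name")
row = rows.first()                                   # first Record, or None when empty
print(row.name, row.as_dict())                       # attribute access and dict form

empty = db.query("SELECT 'x' AS name WHERE 1 = 0")
print(empty.first(default='no rows'))                # a plain default is returned as-is
# empty.first(default=ValueError('no rows'))         # an Exception default would be raised instead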
def elasprep(self):
"""
dx4, dy4, dx2dy2, D = elasprep(dx,dy,Te,E=1E11,nu=0.25)
Defines the variables that are required to create the 2D finite
difference solution coefficient matrix
"""
if self.Method != 'SAS_NG':
self.dx4 = self.dx**4
self.dy4 = self.dy**4
self.dx2dy2 = self.dx**2 * self.dy**2
self.D = self.E*self.Te**3/(12*(1-self.nu**2)) | dx4, dy4, dx2dy2, D = elasprep(dx,dy,Te,E=1E11,nu=0.25)
Defines the variables that are required to create the 2D finite
difference solution coefficient matrix | Below is the the instruction that describes the task:
### Input:
dx4, dy4, dx2dy2, D = elasprep(dx,dy,Te,E=1E11,nu=0.25)
Defines the variables that are required to create the 2D finite
difference solution coefficient matrix
### Response:
def elasprep(self):
"""
dx4, dy4, dx2dy2, D = elasprep(dx,dy,Te,E=1E11,nu=0.25)
Defines the variables that are required to create the 2D finite
difference solution coefficient matrix
"""
if self.Method != 'SAS_NG':
self.dx4 = self.dx**4
self.dy4 = self.dy**4
self.dx2dy2 = self.dx**2 * self.dy**2
self.D = self.E*self.Te**3/(12*(1-self.nu**2)) |
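The quantity D above is the flexural rigidity, D = E * Te**3 / (12 * (1 - nu**2)). A standalone arithmetic sketch with representative, purely illustrative lithospheric values:

# Flexural rigidity of a 20 km thick elastic plate (illustrative numbers).
E = 1e11         # Young's modulus [Pa]
nu = 0.25        # Poisson's ratio
Te = 20e3        # elastic thickness [m]
dx = dy = 5e3    # grid spacing [m]

D = E * Te**3 / (12 * (1 - nu**2))
print("D = %.3e N m" % D)                 # ~7.1e22 N m

# Finite-difference prefactors used alongside D by the solver above:
dx4, dy4, dx2dy2 = dx**4, dy**4, dx**2 * dy**2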
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
return '<DerivedVariable name="{0}"'.format(self.name) +\
(' dimension="{0}"'.format(self.dimension) if self.dimension else '') +\
(' exposure="{0}"'.format(self.exposure) if self.exposure else '') +\
(' select="{0}"'.format(self.select) if self.select else '') +\
(' value="{0}"'.format(self.value) if self.value else '') +\
(' reduce="{0}"'.format(self.reduce) if self.reduce else '') +\
(' required="{0}"'.format(self.required) if self.required else '') +\
'/>' | Exports this object into a LEMS XML object | Below is the the instruction that describes the task:
### Input:
Exports this object into a LEMS XML object
### Response:
def toxml(self):
"""
Exports this object into a LEMS XML object
"""
return '<DerivedVariable name="{0}"'.format(self.name) +\
(' dimension="{0}"'.format(self.dimension) if self.dimension else '') +\
(' exposure="{0}"'.format(self.exposure) if self.exposure else '') +\
(' select="{0}"'.format(self.select) if self.select else '') +\
(' value="{0}"'.format(self.value) if self.value else '') +\
(' reduce="{0}"'.format(self.reduce) if self.reduce else '') +\
(' required="{0}"'.format(self.required) if self.required else '') +\
'/>' |
def transfer(self, from_acct: Account, b58_to_address: str, value: int, payer_acct: Account, gas_limit: int,
gas_price: int) -> str:
"""
This interface is used to call the Transfer method in oep4
that transfer an amount of tokens from one account to another account.
:param from_acct: an Account class that send the oep4 token.
:param b58_to_address: a base58 encode address that receive the oep4 token.
:param value: an int value that indicate the amount oep4 token that will be transferred in this transaction.
:param payer_acct: an Account class that used to pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
:return: the hexadecimal transaction hash value.
"""
func = InvokeFunction('transfer')
if not isinstance(value, int):
raise SDKException(ErrorCode.param_err('the data type of value should be int.'))
if value < 0:
raise SDKException(ErrorCode.param_err('the value should be equal or great than 0.'))
if not isinstance(from_acct, Account):
raise SDKException(ErrorCode.param_err('the data type of from_acct should be Account.'))
Oep4.__b58_address_check(b58_to_address)
from_address = from_acct.get_address().to_bytes()
to_address = Address.b58decode(b58_to_address).to_bytes()
func.set_params_value(from_address, to_address, value)
tx_hash = self.__sdk.get_network().send_neo_vm_transaction(self.__hex_contract_address, from_acct, payer_acct,
gas_limit, gas_price, func, False)
return tx_hash | This interface is used to call the Transfer method in oep4
that transfer an amount of tokens from one account to another account.
:param from_acct: an Account class that send the oep4 token.
:param b58_to_address: a base58 encode address that receive the oep4 token.
:param value: an int value that indicate the amount oep4 token that will be transferred in this transaction.
:param payer_acct: an Account class that used to pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
:return: the hexadecimal transaction hash value. | Below is the the instruction that describes the task:
### Input:
This interface is used to call the Transfer method in oep4
that transfer an amount of tokens from one account to another account.
:param from_acct: an Account class that send the oep4 token.
:param b58_to_address: a base58 encode address that receive the oep4 token.
:param value: an int value that indicate the amount oep4 token that will be transferred in this transaction.
:param payer_acct: an Account class that used to pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
:return: the hexadecimal transaction hash value.
### Response:
def transfer(self, from_acct: Account, b58_to_address: str, value: int, payer_acct: Account, gas_limit: int,
gas_price: int) -> str:
"""
This interface is used to call the Transfer method in oep4
that transfer an amount of tokens from one account to another account.
:param from_acct: an Account class that send the oep4 token.
:param b58_to_address: a base58 encode address that receive the oep4 token.
:param value: an int value that indicate the amount oep4 token that will be transferred in this transaction.
:param payer_acct: an Account class that used to pay for the transaction.
:param gas_limit: an int value that indicate the gas limit.
:param gas_price: an int value that indicate the gas price.
:return: the hexadecimal transaction hash value.
"""
func = InvokeFunction('transfer')
if not isinstance(value, int):
raise SDKException(ErrorCode.param_err('the data type of value should be int.'))
if value < 0:
raise SDKException(ErrorCode.param_err('the value should be equal or great than 0.'))
if not isinstance(from_acct, Account):
raise SDKException(ErrorCode.param_err('the data type of from_acct should be Account.'))
Oep4.__b58_address_check(b58_to_address)
from_address = from_acct.get_address().to_bytes()
to_address = Address.b58decode(b58_to_address).to_bytes()
func.set_params_value(from_address, to_address, value)
tx_hash = self.__sdk.get_network().send_neo_vm_transaction(self.__hex_contract_address, from_acct, payer_acct,
gas_limit, gas_price, func, False)
return tx_hash |
def _expand_spatial_bounds_to_fit_axes(bounds, ax_width, ax_height):
"""
Parameters
----------
bounds: dict
ax_width: float
ax_height: float
Returns
-------
spatial_bounds
"""
b = bounds
height_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_max'], b['lon_min'])
width_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_min'], b['lon_max'])
x_per_y_meters = width_meters / height_meters
x_per_y_axes = ax_width / ax_height
if x_per_y_axes > x_per_y_meters: # x-axis
# axis x_axis has slack -> the spatial longitude bounds need to be extended
width_meters_new = (height_meters * x_per_y_axes)
d_lon_new = ((b['lon_max'] - b['lon_min']) / width_meters) * width_meters_new
mean_lon = (b['lon_min'] + b['lon_max'])/2.
lon_min = mean_lon - d_lon_new / 2.
lon_max = mean_lon + d_lon_new / 2.
spatial_bounds = {
"lon_min": lon_min,
"lon_max": lon_max,
"lat_min": b['lat_min'],
"lat_max": b['lat_max']
}
else:
# axis y_axis has slack -> the spatial latitude bounds need to be extended
height_meters_new = (width_meters / x_per_y_axes)
d_lat_new = ((b['lat_max'] - b['lat_min']) / height_meters) * height_meters_new
mean_lat = (b['lat_min'] + b['lat_max']) / 2.
lat_min = mean_lat - d_lat_new / 2.
lat_max = mean_lat + d_lat_new / 2.
spatial_bounds = {
"lon_min": b['lon_min'],
"lon_max": b['lon_max'],
"lat_min": lat_min,
"lat_max": lat_max
}
return spatial_bounds | Parameters
----------
bounds: dict
ax_width: float
ax_height: float
Returns
-------
spatial_bounds | Below is the the instruction that describes the task:
### Input:
Parameters
----------
bounds: dict
ax_width: float
ax_height: float
Returns
-------
spatial_bounds
### Response:
def _expand_spatial_bounds_to_fit_axes(bounds, ax_width, ax_height):
"""
Parameters
----------
bounds: dict
ax_width: float
ax_height: float
Returns
-------
spatial_bounds
"""
b = bounds
height_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_max'], b['lon_min'])
width_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_min'], b['lon_max'])
x_per_y_meters = width_meters / height_meters
x_per_y_axes = ax_width / ax_height
if x_per_y_axes > x_per_y_meters: # x-axis
# axis x_axis has slack -> the spatial longitude bounds need to be extended
width_meters_new = (height_meters * x_per_y_axes)
d_lon_new = ((b['lon_max'] - b['lon_min']) / width_meters) * width_meters_new
mean_lon = (b['lon_min'] + b['lon_max'])/2.
lon_min = mean_lon - d_lon_new / 2.
lon_max = mean_lon + d_lon_new / 2.
spatial_bounds = {
"lon_min": lon_min,
"lon_max": lon_max,
"lat_min": b['lat_min'],
"lat_max": b['lat_max']
}
else:
# axis y_axis has slack -> the spatial latitude bounds need to be extended
height_meters_new = (width_meters / x_per_y_axes)
d_lat_new = ((b['lat_max'] - b['lat_min']) / height_meters) * height_meters_new
mean_lat = (b['lat_min'] + b['lat_max']) / 2.
lat_min = mean_lat - d_lat_new / 2.
lat_max = mean_lat + d_lat_new / 2.
spatial_bounds = {
"lon_min": b['lon_min'],
"lon_max": b['lon_max'],
"lat_min": lat_min,
"lat_max": lat_max
}
return spatial_bounds |
def radius_cmp(a, b, offsets):
'''return +1 or -1 for sorting'''
diff = radius(a, offsets) - radius(b, offsets)
if diff > 0:
return 1
if diff < 0:
return -1
return 0 | return +1 or -1 for sorting | Below is the the instruction that describes the task:
### Input:
return +1 or -1 for sorting
### Response:
def radius_cmp(a, b, offsets):
'''return +1 or -1 for sorting'''
diff = radius(a, offsets) - radius(b, offsets)
if diff > 0:
return 1
if diff < 0:
return -1
return 0 |
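A self-contained sketch of how a three-argument comparator like this is typically used for sorting in Python 3 via functools.cmp_to_key; the radius() helper below is a stand-in, since the real one is not shown in the record:

import functools
import math

def radius(p, offsets):
    # Stand-in: distance of point p from the given offsets.
    return math.hypot(p[0] - offsets[0], p[1] - offsets[1])

def radius_cmp(a, b, offsets):
    '''return +1 or -1 for sorting'''
    diff = radius(a, offsets) - radius(b, offsets)
    if diff > 0:
        return 1
    if diff < 0:
        return -1
    return 0

points = [(3, 4), (1, 1), (0, 2)]
offsets = (0, 0)
points.sort(key=functools.cmp_to_key(lambda a, b: radius_cmp(a, b, offsets)))
print(points)   # -> [(1, 1), (0, 2), (3, 4)], nearest to farthest from the offsets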
def regenerate_thumbs(self):
"""
Handle re-generating the thumbnails. All this involves is reading the
original file, then saving the same exact thing. Kind of annoying, but
it's simple.
"""
Model = self.model
instances = Model.objects.all()
num_instances = instances.count()
# Filenames are keys in here, to help avoid re-genning something that
# we have already done.
regen_tracker = {}
counter = 1
for instance in instances:
file = getattr(instance, self.field)
if not file:
print "(%d/%d) ID: %d -- Skipped -- No file" % (counter,
num_instances,
instance.id)
counter += 1
continue
file_name = os.path.basename(file.name)
if regen_tracker.has_key(file_name):
print "(%d/%d) ID: %d -- Skipped -- Already re-genned %s" % (
counter,
num_instances,
instance.id,
file_name)
counter += 1
continue
# Keep them informed on the progress.
print "(%d/%d) ID: %d -- %s" % (counter, num_instances,
instance.id, file_name)
try:
fdat = file.read()
file.close()
del file.file
except IOError:
# Key didn't exist.
print "(%d/%d) ID %d -- Error -- File missing on S3" % (
counter,
num_instances,
instance.id)
counter += 1
continue
try:
file_contents = ContentFile(fdat)
except ValueError:
# This field has no file associated with it, skip it.
print "(%d/%d) ID %d -- Skipped -- No file on field)" % (
counter,
num_instances,
instance.id)
counter += 1
continue
# Saving pumps it back through the thumbnailer, if this is a
# ThumbnailField. If not, it's still pretty harmless.
try:
file.generate_thumbs(file_name, file_contents)
except IOError, e:
print "(%d/%d) ID %d -- Error -- Image may be corrupt)" % (
counter,
num_instances,
instance.id)
counter += 1
continue
regen_tracker[file_name] = True
counter += 1 | Handle re-generating the thumbnails. All this involves is reading the
original file, then saving the same exact thing. Kind of annoying, but
it's simple. | Below is the the instruction that describes the task:
### Input:
Handle re-generating the thumbnails. All this involves is reading the
original file, then saving the same exact thing. Kind of annoying, but
it's simple.
### Response:
def regenerate_thumbs(self):
"""
Handle re-generating the thumbnails. All this involves is reading the
original file, then saving the same exact thing. Kind of annoying, but
it's simple.
"""
Model = self.model
instances = Model.objects.all()
num_instances = instances.count()
# Filenames are keys in here, to help avoid re-genning something that
# we have already done.
regen_tracker = {}
counter = 1
for instance in instances:
file = getattr(instance, self.field)
if not file:
print "(%d/%d) ID: %d -- Skipped -- No file" % (counter,
num_instances,
instance.id)
counter += 1
continue
file_name = os.path.basename(file.name)
if regen_tracker.has_key(file_name):
print "(%d/%d) ID: %d -- Skipped -- Already re-genned %s" % (
counter,
num_instances,
instance.id,
file_name)
counter += 1
continue
# Keep them informed on the progress.
print "(%d/%d) ID: %d -- %s" % (counter, num_instances,
instance.id, file_name)
try:
fdat = file.read()
file.close()
del file.file
except IOError:
# Key didn't exist.
print "(%d/%d) ID %d -- Error -- File missing on S3" % (
counter,
num_instances,
instance.id)
counter += 1
continue
try:
file_contents = ContentFile(fdat)
except ValueError:
# This field has no file associated with it, skip it.
print "(%d/%d) ID %d -- Skipped -- No file on field)" % (
counter,
num_instances,
instance.id)
counter += 1
continue
# Saving pumps it back through the thumbnailer, if this is a
# ThumbnailField. If not, it's still pretty harmless.
try:
file.generate_thumbs(file_name, file_contents)
except IOError, e:
print "(%d/%d) ID %d -- Error -- Image may be corrupt)" % (
counter,
num_instances,
instance.id)
counter += 1
continue
regen_tracker[file_name] = True
counter += 1 |
def csv_to_num_matrix(csv_file_path):
"""Load a CSV file consisting only of numbers into a Python matrix of floats.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
"""
mtx = []
with open(csv_file_path) as csv_data_file:
for row in csv_data_file:
mtx.append([float(val) for val in row.split(',')])
return mtx | Load a CSV file consisting only of numbers into a Python matrix of floats.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv) | Below is the the instruction that describes the task:
### Input:
Load a CSV file consisting only of numbers into a Python matrix of floats.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
### Response:
def csv_to_num_matrix(csv_file_path):
"""Load a CSV file consisting only of numbers into a Python matrix of floats.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
"""
mtx = []
with open(csv_file_path) as csv_data_file:
for row in csv_data_file:
mtx.append([float(val) for val in row.split(',')])
return mtx |
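A quick self-contained check of the CSV loader above, writing a small numeric file to a temporary path and reading it back:

import os
import tempfile

def csv_to_num_matrix(csv_file_path):
    # Same loader as above: one list of floats per CSV row.
    mtx = []
    with open(csv_file_path) as csv_data_file:
        for row in csv_data_file:
            mtx.append([float(val) for val in row.split(',')])
    return mtx

with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
    f.write("1,2.5,3\n4,5,6.75\n")
    path = f.name

print(csv_to_num_matrix(path))   # -> [[1.0, 2.5, 3.0], [4.0, 5.0, 6.75]]
os.remove(path)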
def nack(self, message, subscription_id=None, **kwargs):
"""Reject receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message: ID of the message to be rejected, OR a dictionary
containing a field 'message-id'.
:param subscription_id: ID of the associated subscription. Optional when
a dictionary is passed as first parameter and
that dictionary contains field 'subscription'.
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if rejection should be part of a
transaction
"""
if isinstance(message, dict):
message_id = message.get("message-id")
if not subscription_id:
subscription_id = message.get("subscription")
else:
message_id = message
if not message_id:
raise workflows.Error("Cannot reject message without " + "message ID")
if not subscription_id:
raise workflows.Error("Cannot reject message without " + "subscription ID")
self.log.debug(
"Rejecting message %s on subscription %s", message_id, subscription_id
)
self._nack(message_id, subscription_id, **kwargs) | Reject receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message: ID of the message to be rejected, OR a dictionary
containing a field 'message-id'.
:param subscription_id: ID of the associated subscription. Optional when
a dictionary is passed as first parameter and
that dictionary contains field 'subscription'.
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if rejection should be part of a
transaction | Below is the the instruction that describes the task:
### Input:
Reject receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message: ID of the message to be rejected, OR a dictionary
containing a field 'message-id'.
:param subscription_id: ID of the associated subscription. Optional when
a dictionary is passed as first parameter and
that dictionary contains field 'subscription'.
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if rejection should be part of a
transaction
### Response:
def nack(self, message, subscription_id=None, **kwargs):
"""Reject receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message: ID of the message to be rejected, OR a dictionary
containing a field 'message-id'.
:param subscription_id: ID of the associated subscription. Optional when
a dictionary is passed as first parameter and
that dictionary contains field 'subscription'.
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if rejection should be part of a
transaction
"""
if isinstance(message, dict):
message_id = message.get("message-id")
if not subscription_id:
subscription_id = message.get("subscription")
else:
message_id = message
if not message_id:
raise workflows.Error("Cannot reject message without " + "message ID")
if not subscription_id:
raise workflows.Error("Cannot reject message without " + "subscription ID")
self.log.debug(
"Rejecting message %s on subscription %s", message_id, subscription_id
)
self._nack(message_id, subscription_id, **kwargs) |
def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
anneal_deg=60., anneal_step=0.9, extended=False, n_subgauss=1,
kurt_size=6000, ext_blocks=1, max_iter=200,
random_state=None, verbose=None):
"""Run the (extended) Infomax ICA decomposition on raw data
based on the publications of Bell & Sejnowski 1995 (Infomax)
and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The data to unmix.
w_init : np.ndarray, shape (n_features, n_features)
The initialized unmixing matrix. Defaults to None. If None, the
identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
Note. Smaller learning rates will slow down the procedure.
Defaults to 0.010d / alog(n_features ^ 2.0)
block : int
The block size of randomly chosen data segment.
Defaults to floor(sqrt(n_times / 3d))
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
The angle at which (in degree) the learning rate will be reduced.
Defaults to 60.0
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9
extended : bool
Whether to use the extended infomax algorithm or not. Defaults to
True.
n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
ext_blocks : int
The number of blocks after which to recompute Kurtosis.
Only considered for extended Infomax.
max_iter : int
The maximum number of iterations. Defaults to 200.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
The linear unmixing operator.
"""
rng = check_random_state(random_state)
# define some default parameter
max_weight = 1e8
restart_fac = 0.9
min_l_rate = 1e-10
blowup = 1e4
blowup_fac = 0.5
n_small_angle = 20
degconst = 180.0 / np.pi
# for extended Infomax
extmomentum = 0.5
signsbias = 0.02
signcount_threshold = 25
signcount_step = 2
if ext_blocks > 0: # allow not to recompute kurtosis
n_subgauss = 1 # but initialize n_subgauss to 1 if you recompute
# check data shape
n_samples, n_features = data.shape
n_features_square = n_features ** 2
# check input parameter
# heuristic default - may need adjustment for
# large or tiny data sets
if l_rate is None:
l_rate = 0.01 / math.log(n_features ** 2.0)
if block is None:
block = int(math.floor(math.sqrt(n_samples / 3.0)))
logger.info('computing%sInfomax ICA' % ' Extended ' if extended is True
else ' ')
# collect parameter
nblock = n_samples // block
lastt = (nblock - 1) * block + 1
# initialize training
if weights is None:
# initialize weights as identity matrix
weights = np.identity(n_features, dtype=np.float64)
BI = block * np.identity(n_features, dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
onesrow = np.ones((1, block), dtype=np.float64)
startweights = weights.copy()
oldweights = startweights.copy()
step = 0
count_small_angle = 0
wts_blowup = False
blockno = 0
signcount = 0
# for extended Infomax
if extended is True:
signs = np.identity(n_features)
signs.flat[slice(0, n_features * n_subgauss, n_features)]
kurt_size = min(kurt_size, n_samples)
old_kurt = np.zeros(n_features, dtype=np.float64)
oldsigns = np.zeros((n_features, n_features))
# training loop
olddelta, oldchange = 1., 0.
while step < max_iter:
# shuffle data at each step
permute = list(range(n_samples))
rng.shuffle(permute)
# ICA training block
# loop across block samples
for t in range(0, lastt, block):
u = np.dot(data[permute[t:t + block], :], weights)
u += np.dot(bias, onesrow).T
if extended is True:
# extended ICA update
y = np.tanh(u)
weights += l_rate * np.dot(weights,
BI - np.dot(np.dot(u.T, y), signs) -
np.dot(u.T, u))
bias += l_rate * np.reshape(np.sum(y, axis=0,
dtype=np.float64) * -2.0,
(n_features, 1))
else:
# logistic ICA weights update
y = 1.0 / (1.0 + np.exp(-u))
weights += l_rate * np.dot(weights,
BI + np.dot(u.T, (1.0 - 2.0 * y)))
bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
dtype=np.float64), (n_features, 1))
# check change limit
max_weight_val = np.max(np.abs(weights))
if max_weight_val > max_weight:
wts_blowup = True
blockno += 1
if wts_blowup:
break
# ICA kurtosis estimation
if extended is True:
n = np.fix(blockno / ext_blocks)
if np.abs(n) * ext_blocks == blockno:
if kurt_size < n_samples:
rp = np.floor(rng.uniform(0, 1, kurt_size) *
(n_samples - 1))
tpartact = np.dot(data[rp.astype(int), :], weights).T
else:
tpartact = np.dot(data, weights).T
# estimate kurtosis
kurt = kurtosis(tpartact, axis=1, fisher=True)
if extmomentum != 0:
kurt = (extmomentum * old_kurt +
(1.0 - extmomentum) * kurt)
old_kurt = kurt
# estimate weighted signs
signs.flat[::n_features + 1] = ((kurt + signsbias) /
np.abs(kurt + signsbias))
ndiff = ((signs.flat[::n_features + 1] -
oldsigns.flat[::n_features + 1]) != 0).sum()
if ndiff == 0:
signcount += 1
else:
signcount = 0
oldsigns = signs
if signcount >= signcount_threshold:
ext_blocks = np.fix(ext_blocks * signcount_step)
signcount = 0
# here we continue after the for
# loop over the ICA training blocks
# if weights in bounds:
if not wts_blowup:
oldwtchange = weights - oldweights
step += 1
angledelta = 0.0
delta = oldwtchange.reshape(1, n_features_square)
change = np.sum(delta * delta, dtype=np.float64)
if step > 1:
angledelta = math.acos(np.sum(delta * olddelta) /
math.sqrt(change * oldchange))
angledelta *= degconst
# anneal learning rate
oldweights = weights.copy()
if angledelta > anneal_deg:
l_rate *= anneal_step # anneal learning rate
# accumulate angledelta until anneal_deg reached l_rates
olddelta = delta
oldchange = change
count_small_angle = 0 # reset count when angle delta is large
else:
if step == 1: # on first step only
olddelta = delta # initialize
oldchange = change
count_small_angle += 1
if count_small_angle > n_small_angle:
max_iter = step
# apply stopping rule
if step > 2 and change < w_change:
step = max_iter
elif change > blowup:
l_rate *= blowup_fac
# restart if weights blow up
# (for lowering l_rate)
else:
step = 0 # start again
wts_blowup = 0 # re-initialize variables
blockno = 1
l_rate *= restart_fac # with lower learning rate
weights = startweights.copy()
oldweights = startweights.copy()
olddelta = np.zeros((1, n_features_square), dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
# for extended Infomax
if extended:
signs = np.identity(n_features)
signs.flat[slice(0, n_features * n_subgauss, n_features)]
oldsigns = np.zeros((n_features, n_features))
if l_rate > min_l_rate:
if verbose:
logger.info('... lowering learning rate to %g'
'\n... re-starting...' % l_rate)
else:
raise ValueError('Error in Infomax ICA: unmixing_matrix matrix'
'might not be invertible!')
# prepare return values
return weights.T | Run the (extended) Infomax ICA decomposition on raw data
based on the publications of Bell & Sejnowski 1995 (Infomax)
and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The data to unmix.
w_init : np.ndarray, shape (n_features, n_features)
The initialized unmixing matrix. Defaults to None. If None, the
identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
Note. Smaller learning rates will slow down the procedure.
Defaults to 0.010d / alog(n_features ^ 2.0)
block : int
The block size of randomly chosen data segment.
Defaults to floor(sqrt(n_times / 3d))
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
The angle at which (in degree) the learning rate will be reduced.
Defaults to 60.0
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9
extended : bool
Whether to use the extended infomax algorithm or not. Defaults to
True.
n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
ext_blocks : int
The number of blocks after which to recompute Kurtosis.
Only considered for extended Infomax.
max_iter : int
The maximum number of iterations. Defaults to 200.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
The linear unmixing operator. | Below is the the instruction that describes the task:
### Input:
Run the (extended) Infomax ICA decomposition on raw data
based on the publications of Bell & Sejnowski 1995 (Infomax)
and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The data to unmix.
w_init : np.ndarray, shape (n_features, n_features)
The initialized unmixing matrix. Defaults to None. If None, the
identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
Note. Smaller learning rates will slow down the procedure.
Defaults to 0.010d / alog(n_features ^ 2.0)
block : int
The block size of randomly chosen data segment.
Defaults to floor(sqrt(n_times / 3d))
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
The angle at which (in degree) the learning rate will be reduced.
Defaults to 60.0
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9
extended : bool
Whether to use the extended infomax algorithm or not. Defaults to
True.
n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
ext_blocks : int
The number of blocks after which to recompute Kurtosis.
Only considered for extended Infomax.
max_iter : int
The maximum number of iterations. Defaults to 200.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
The linear unmixing operator.
### Response:
def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
anneal_deg=60., anneal_step=0.9, extended=False, n_subgauss=1,
kurt_size=6000, ext_blocks=1, max_iter=200,
random_state=None, verbose=None):
"""Run the (extended) Infomax ICA decomposition on raw data
based on the publications of Bell & Sejnowski 1995 (Infomax)
and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The data to unmix.
w_init : np.ndarray, shape (n_features, n_features)
The initialized unmixing matrix. Defaults to None. If None, the
identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
Note. Smaller learning rates will slow down the procedure.
Defaults to 0.010d / alog(n_features ^ 2.0)
block : int
The block size of randomly chosen data segment.
Defaults to floor(sqrt(n_times / 3d))
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
The angle at which (in degree) the learning rate will be reduced.
Defaults to 60.0
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9
extended : bool
Whether to use the extended infomax algorithm or not. Defaults to
True.
n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
ext_blocks : int
The number of blocks after which to recompute Kurtosis.
Only considered for extended Infomax.
max_iter : int
The maximum number of iterations. Defaults to 200.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
The linear unmixing operator.
"""
rng = check_random_state(random_state)
# define some default parameter
max_weight = 1e8
restart_fac = 0.9
min_l_rate = 1e-10
blowup = 1e4
blowup_fac = 0.5
n_small_angle = 20
degconst = 180.0 / np.pi
# for extended Infomax
extmomentum = 0.5
signsbias = 0.02
signcount_threshold = 25
signcount_step = 2
if ext_blocks > 0: # allow not to recompute kurtosis
n_subgauss = 1 # but initialize n_subgauss to 1 if you recompute
# check data shape
n_samples, n_features = data.shape
n_features_square = n_features ** 2
# check input parameter
# heuristic default - may need adjustment for
# large or tiny data sets
if l_rate is None:
l_rate = 0.01 / math.log(n_features ** 2.0)
if block is None:
block = int(math.floor(math.sqrt(n_samples / 3.0)))
logger.info('computing%sInfomax ICA' % ' Extended ' if extended is True
else ' ')
# collect parameter
nblock = n_samples // block
lastt = (nblock - 1) * block + 1
# initialize training
if weights is None:
# initialize weights as identity matrix
weights = np.identity(n_features, dtype=np.float64)
BI = block * np.identity(n_features, dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
onesrow = np.ones((1, block), dtype=np.float64)
startweights = weights.copy()
oldweights = startweights.copy()
step = 0
count_small_angle = 0
wts_blowup = False
blockno = 0
signcount = 0
# for extended Infomax
if extended is True:
signs = np.identity(n_features)
signs.flat[slice(0, n_features * n_subgauss, n_features)]
kurt_size = min(kurt_size, n_samples)
old_kurt = np.zeros(n_features, dtype=np.float64)
oldsigns = np.zeros((n_features, n_features))
# training loop
olddelta, oldchange = 1., 0.
while step < max_iter:
# shuffle data at each step
permute = list(range(n_samples))
rng.shuffle(permute)
# ICA training block
# loop across block samples
for t in range(0, lastt, block):
u = np.dot(data[permute[t:t + block], :], weights)
u += np.dot(bias, onesrow).T
if extended is True:
# extended ICA update
y = np.tanh(u)
weights += l_rate * np.dot(weights,
BI - np.dot(np.dot(u.T, y), signs) -
np.dot(u.T, u))
bias += l_rate * np.reshape(np.sum(y, axis=0,
dtype=np.float64) * -2.0,
(n_features, 1))
else:
# logistic ICA weights update
y = 1.0 / (1.0 + np.exp(-u))
weights += l_rate * np.dot(weights,
BI + np.dot(u.T, (1.0 - 2.0 * y)))
bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
dtype=np.float64), (n_features, 1))
# check change limit
max_weight_val = np.max(np.abs(weights))
if max_weight_val > max_weight:
wts_blowup = True
blockno += 1
if wts_blowup:
break
# ICA kurtosis estimation
if extended is True:
n = np.fix(blockno / ext_blocks)
if np.abs(n) * ext_blocks == blockno:
if kurt_size < n_samples:
rp = np.floor(rng.uniform(0, 1, kurt_size) *
(n_samples - 1))
tpartact = np.dot(data[rp.astype(int), :], weights).T
else:
tpartact = np.dot(data, weights).T
# estimate kurtosis
kurt = kurtosis(tpartact, axis=1, fisher=True)
if extmomentum != 0:
kurt = (extmomentum * old_kurt +
(1.0 - extmomentum) * kurt)
old_kurt = kurt
# estimate weighted signs
signs.flat[::n_features + 1] = ((kurt + signsbias) /
np.abs(kurt + signsbias))
ndiff = ((signs.flat[::n_features + 1] -
oldsigns.flat[::n_features + 1]) != 0).sum()
if ndiff == 0:
signcount += 1
else:
signcount = 0
oldsigns = signs
if signcount >= signcount_threshold:
ext_blocks = np.fix(ext_blocks * signcount_step)
signcount = 0
# here we continue after the for
# loop over the ICA training blocks
# if weights in bounds:
if not wts_blowup:
oldwtchange = weights - oldweights
step += 1
angledelta = 0.0
delta = oldwtchange.reshape(1, n_features_square)
change = np.sum(delta * delta, dtype=np.float64)
if step > 1:
angledelta = math.acos(np.sum(delta * olddelta) /
math.sqrt(change * oldchange))
angledelta *= degconst
# anneal learning rate
oldweights = weights.copy()
if angledelta > anneal_deg:
l_rate *= anneal_step # anneal learning rate
# accumulate angledelta until anneal_deg reached l_rates
olddelta = delta
oldchange = change
count_small_angle = 0 # reset count when angle delta is large
else:
if step == 1: # on first step only
olddelta = delta # initialize
oldchange = change
count_small_angle += 1
if count_small_angle > n_small_angle:
max_iter = step
# apply stopping rule
if step > 2 and change < w_change:
step = max_iter
elif change > blowup:
l_rate *= blowup_fac
# restart if weights blow up
# (for lowering l_rate)
else:
step = 0 # start again
wts_blowup = 0 # re-initialize variables
blockno = 1
l_rate *= restart_fac # with lower learning rate
weights = startweights.copy()
oldweights = startweights.copy()
olddelta = np.zeros((1, n_features_square), dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
# for extended Infomax
if extended:
signs = np.identity(n_features)
signs.flat[slice(0, n_features * n_subgauss, n_features)]
oldsigns = np.zeros((n_features, n_features))
if l_rate > min_l_rate:
if verbose:
logger.info('... lowering learning rate to %g'
'\n... re-starting...' % l_rate)
else:
raise ValueError('Error in Infomax ICA: unmixing_matrix matrix'
'might not be invertible!')
# prepare return values
return weights.T |
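A hedged end-to-end sketch of calling infomax() on two artificially mixed sources. It assumes the function above (together with its module-level helpers such as check_random_state and kurtosis) is importable from wherever it is defined; the mixing matrix and signal shapes are purely illustrative:

import numpy as np
# from <module defining the function above> import infomax   # assumed import location

t = np.linspace(0, 8, 2000)
sources = np.c_[np.sin(2 * np.pi * 1.0 * t),             # sinusoid
                np.sign(np.sin(2 * np.pi * 0.3 * t))]     # square wave
mixing = np.array([[1.0, 0.5],
                   [0.4, 1.0]])
data = sources.dot(mixing.T)
data -= data.mean(axis=0)                  # roughly centered data works best

unmixing = infomax(data, extended=True, random_state=0)   # shape (n_features, n_features)
recovered = data.dot(unmixing.T)           # estimated sources, up to scale and order
print(unmixing.shape, recovered.shape)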