def group_pop(name, app, **kwargs):
    """
    Remove application from the specified routing group.
    """
    ctx = Context(**kwargs)
    ctx.execute_action('group:app:remove', **{
        'storage': ctx.repo.create_secure_service('storage'),
        'name': name,
        'app': app,
    })

def incr(l, cap):  # to increment a list up to a max-list of 'cap'
    """
    Simulate a counting system from an n-dimensional list.
    Usage: incr(l, cap)  l=list to increment, cap=max values for each list pos'n
    Returns: next set of values for list l, OR -1 (if overflow)
    """
    l[0] = l[0] + 1  # e.g., [0,0,0] --> [2,4,3] (=cap)
    for i in range(len(l)):
        if l[i] > cap[i] and i < len(l) - 1:  # if carryover AND not done
            l[i] = 0
            l[i + 1] = l[i + 1] + 1
        elif l[i] > cap[i] and i == len(l) - 1:  # overflow past last column, must be finished
            l = -1
    return l

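# Illustrative check of incr() above (not part of the original source): with
# cap = [2, 4, 3] each position counts 0..cap[i], so there are 3 * 5 * 4 = 60
# distinct states before the function signals overflow by returning -1.
def _demo_incr():
    cap = [2, 4, 3]
    state = [0, 0, 0]
    count = 1  # count the starting state
    while True:
        state = incr(state, cap)
        if state == -1:
            break
        count += 1
    return count  # expected: 60
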
def log_proto(self, message, client=None, **kw):
    """API call: log a protobuf message via a POST request

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list

    :type message: :class:`~google.protobuf.message.Message`
    :param message: The protobuf message to be logged.

    :type client: :class:`~google.cloud.logging.client.Client` or
                  ``NoneType``
    :param client: the client to use. If not passed, falls back to the
                   ``client`` stored on the current logger.

    :type kw: dict
    :param kw: (optional) additional keyword arguments for the entry.
               See :class:`~google.cloud.logging.entries.LogEntry`.
    """
    self._do_log(client, ProtobufEntry, message, **kw)

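# Hedged usage sketch (not from the original source): assumes a
# google-cloud-logging release where Logger.log_proto is available and
# application-default credentials are configured; the log name is made up.
from google.cloud import logging as gcl
from google.protobuf.struct_pb2 import Struct

client = gcl.Client()
logger = client.logger('example-log')   # hypothetical log name
payload = Struct()
payload.update({'event': 'signup', 'user_id': 42})
logger.log_proto(payload)
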
def _new_open_bin(self, remaining_rect):
    """
    Extract the next bin where at least one of the rectangles in
    remaining_rect fits inside.

    Arguments:
        remaining_rect (dict): rectangles not placed yet

    Returns:
        PackingAlgorithm: Initialized empty packing bin.
        None: No bin big enough for the rectangle was found
    """
    factories_to_delete = set()
    new_bin = None

    for key, binfac in self._empty_bins.items():
        # Only return the new bin if at least one of the remaining
        # rectangles fit inside.
        a_rectangle_fits = False
        for _, rect in remaining_rect.items():
            if binfac.fits_inside(rect[0], rect[1]):
                a_rectangle_fits = True
                break

        if not a_rectangle_fits:
            factories_to_delete.add(key)
            continue

        # Create bin and add to open_bins
        new_bin = binfac.new_bin()
        if new_bin is None:
            continue
        self._open_bins.append(new_bin)

        # If the factory was depleted mark for deletion
        if binfac.is_empty():
            factories_to_delete.add(key)

        break

    # Delete marked factories
    for f in factories_to_delete:
        del self._empty_bins[f]

    return new_bin

def set_initial_values(self):
    """Set initial values from existing self.data value

    :return: None
    """
    self.normals = self.data['normals']
    self.vectors = numpy.ones((
        self.data['vectors'].shape[0],
        self.data['vectors'].shape[1],
        self.data['vectors'].shape[2] + 1
    ))
    self.vectors[:, :, :-1] = self.data['vectors']
    self.attr = self.data['attr']
    return

def get_id_token_hint(self, request_args=None, **kwargs):
    """
    Add id_token_hint to request

    :param request_args:
    :param kwargs:
    :return:
    """
    request_args = self.multiple_extend_request_args(
        request_args, kwargs['state'], ['id_token'],
        ['auth_response', 'token_response', 'refresh_token_response'],
        orig=True
    )

    try:
        request_args['id_token_hint'] = request_args['id_token']
    except KeyError:
        pass
    else:
        del request_args['id_token']

    return request_args, {}

def shift(
    self,
    periods: int = 1,
    fill_value: object = None,
) -> ABCExtensionArray:
    """
    Shift values by desired number.

    Newly introduced missing values are filled with
    ``self.dtype.na_value``.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    periods : int, default 1
        The number of periods to shift. Negative values are allowed
        for shifting backwards.
    fill_value : object, optional
        The scalar value to use for newly introduced missing values.
        The default is ``self.dtype.na_value``

        .. versionadded:: 0.24.0

    Returns
    -------
    shifted : ExtensionArray

    Notes
    -----
    If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
    returned.

    If ``periods > len(self)``, then an array of size
    len(self) is returned, with all values filled with
    ``self.dtype.na_value``.
    """
    # Note: this implementation assumes that `self.dtype.na_value` can be
    # stored in an instance of your ExtensionArray with `self.dtype`.
    if not len(self) or periods == 0:
        return self.copy()

    if isna(fill_value):
        fill_value = self.dtype.na_value

    empty = self._from_sequence(
        [fill_value] * min(abs(periods), len(self)),
        dtype=self.dtype
    )
    if periods > 0:
        a = empty
        b = self[:-periods]
    else:
        a = self[abs(periods):]
        b = empty
    return self._concat_same_type([a, b])

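# Hedged illustration of the shift() semantics above, using a concrete
# ExtensionArray from a recent pandas release (assumption: pandas >= 0.24).
import pandas as pd

arr = pd.array([1, 2, 3], dtype="Int64")
arr.shift(1)    # [<NA>, 1, 2]  -- head filled with dtype.na_value
arr.shift(-1)   # [2, 3, <NA>]  -- negative periods shift backwards
arr.shift(5)    # [<NA>, <NA>, <NA>]  -- periods > len(arr): all missing
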
def _convert_key(self, key, is_setter=False):
    """ require integer args (and convert to label arguments) """
    for a, i in zip(self.obj.axes, key):
        if not is_integer(i):
            raise ValueError("iAt based indexing can only have integer "
                             "indexers")
    return key

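# Hedged illustration (not from the original source): this validation is what
# makes pandas' .iat reject label-based keys.
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
df.iat[0, 0]          # OK: purely positional access
try:
    df.iat[0, "a"]    # label instead of integer position
except ValueError as err:
    print(err)        # "iAt based indexing can only have integer indexers"
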
def _parse_codeargs(argstr):
    '''
    Parse and clean up argument to user code; separate *args from
    **kwargs.
    '''
    args = []
    kwargs = {}
    if isinstance(argstr, str):
        for a in argstr.split():
            if '=' in a:
                k, attr = a.split('=')
                kwargs[k] = attr
            else:
                args.append(a)
    rd = {'args': args, 'kwargs': kwargs}
    return rd

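# Example call (assumes _parse_codeargs as defined above); note that keyword
# values are kept as strings.
_parse_codeargs("input.txt --fast mode=strict retries=3")
# -> {'args': ['input.txt', '--fast'], 'kwargs': {'mode': 'strict', 'retries': '3'}}
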
def status_count(self, project):
    '''
    return a dict
    '''
    pipe = self.redis.pipeline(transaction=False)
    for status in range(1, 5):
        pipe.scard(self._gen_status_key(project, status))
    ret = pipe.execute()
    result = {}
    for status, count in enumerate(ret):
        if count > 0:
            result[status + 1] = count
    return result

def intersect(self, other_seg, tol=1e-12):
    """Finds the intersections of two segments.
    returns a list of tuples (t1, t2) such that
    self.point(t1) == other_seg.point(t2).
    Note: This will fail if the two segments coincide for more than a
    finite collection of points."""
    if isinstance(other_seg, Line):
        return bezier_by_line_intersections(self, other_seg)
    elif (isinstance(other_seg, QuadraticBezier) or
          isinstance(other_seg, CubicBezier)):
        assert self != other_seg
        longer_length = max(self.length(), other_seg.length())
        return bezier_intersections(self, other_seg,
                                    longer_length=longer_length,
                                    tol=tol, tol_deC=tol)
    elif isinstance(other_seg, Arc):
        t2t1s = other_seg.intersect(self)
        return [(t1, t2) for t2, t1 in t2t1s]
    elif isinstance(other_seg, Path):
        raise TypeError(
            "other_seg must be a path segment, not a Path object, use "
            "Path.intersect().")
    else:
        raise TypeError("other_seg must be a path segment.")

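# Hedged example with svgpathtools, where this segment API lives; the control
# points are arbitrary but chosen so the two segments do cross.
from svgpathtools import CubicBezier, Line

seg1 = CubicBezier(0 + 0j, 1 + 2j, 2 - 2j, 3 + 0j)
seg2 = Line(0 + 1j, 3 - 1j)
for t1, t2 in seg1.intersect(seg2):
    print(seg1.point(t1), seg2.point(t2))   # the same point, from each segment
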
def buy(self, account_id, **params):
    """https://developers.coinbase.com/api/v2#buy-bitcoin"""
    if 'amount' not in params and 'total' not in params:
        raise ValueError("Missing required parameter: 'amount' or 'total'")
    for required in ['currency', 'payment_method']:
        if required not in params:
            raise ValueError("Missing required parameter: %s" % required)
    response = self._post('v2', 'accounts', account_id, 'buys', data=params)
    return self._make_api_object(response, Buy)

def env_maker(environment_id):
    """ Create a relatively raw atari environment """
    env = gym.make(environment_id)
    assert 'NoFrameskip' in env.spec.id
    # Wait for between 1 and 30 rounds doing nothing on start
    env = NoopResetEnv(env, noop_max=30)
    # Do the same action for k steps. Return max of last 2 frames. Return sum of rewards
    env = MaxAndSkipEnv(env, skip=4)
    return env

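# Hedged usage sketch: requires gym with the Atari extras plus the
# NoopResetEnv / MaxAndSkipEnv wrappers referenced above; the old gym step API
# (obs, reward, done, info) is assumed.
env = env_maker('BreakoutNoFrameskip-v4')
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
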
def ensure_file(path):
    """ Checks if file exists, if fails, tries to create file """
    try:
        exists = isfile(path)
        if not exists:
            with open(path, 'w+') as fname:
                fname.write('initialized')
            return (True, path)
        return (True, 'exists')
    except OSError as e:  # pragma: no cover
        return (False, e)

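# Example behaviour (hypothetical path): the first call creates and seeds the
# file, later calls just confirm it exists.
ensure_file('/tmp/example_marker.txt')   # -> (True, '/tmp/example_marker.txt')
ensure_file('/tmp/example_marker.txt')   # -> (True, 'exists')
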
def make_figure_hmaps(extractors, what):
    """
    $ oq plot 'hmaps?kind=mean&imt=PGA'
    """
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ncalcs = len(extractors)
    for i, ex in enumerate(extractors):
        oq = ex.oqparam
        n_poes = len(oq.poes)
        sitecol = ex.get('sitecol')
        hmaps = ex.get(what)
        [imt] = hmaps.imt
        [kind] = hmaps.kind
        for j, poe in enumerate(oq.poes):
            ax = fig.add_subplot(n_poes, ncalcs, j * ncalcs + i + 1)
            ax.grid(True)
            ax.set_xlabel('hmap for IMT=%s, kind=%s, poe=%s\ncalculation %d, '
                          'inv_time=%dy' %
                          (imt, kind, poe, ex.calc_id, oq.investigation_time))
            bmap = basemap('cyl', sitecol)
            bmap.scatter(sitecol['lon'], sitecol['lat'],
                         c=hmaps[kind][:, 0, j], cmap='jet')
    return plt

def delete(cls, id, api_key=None, **kwargs):
    """Delete an entity from the server by ID."""
    inst = cls(api_key=api_key)
    endpoint = '/'.join((cls.get_endpoint(), id))
    inst.request('DELETE', endpoint=endpoint, query_params=kwargs)
    inst._is_deleted = True
    return True

def loop(self):
    """Main loop daemon."""
    while True:
        sleep(1)
        new_file_list = self.walk(self.file_path, {})
        if new_file_list != self.file_list:
            if self.debug:
                self.diff_list(new_file_list, self.file_list)
            self.run_tests()
            self.file_list = new_file_list

def drop_indexes(self):
    """Drops all indexes on this collection.

    Can be used on non-existent collections or collections with no indexes.
    Raises OperationFailure on an error.

    .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
       this collection is automatically applied to this operation when using
       MongoDB >= 3.4.

    .. versionchanged:: 3.4
       Apply this collection's write concern automatically to this operation
       when connected to MongoDB >= 3.4.
    """
    self.__database.client._purge_index(self.__database.name, self.__name)
    self.drop_index("*")

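# Hedged pymongo sketch (requires a reachable MongoDB server; names are made up).
from pymongo import MongoClient

coll = MongoClient()["mydb"]["mycoll"]
coll.create_index("user_id")
coll.drop_indexes()   # removes every index except the mandatory _id index
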
def _blocks_to_samples(sig_data, n_samp, fmt):
    """
    Convert uint8 blocks into signal samples for unaligned dat formats.

    Parameters
    ----------
    sig_data : numpy array
        The uint8 data blocks.
    n_samp : int
        The number of samples contained in the bytes

    Returns
    -------
    signal : numpy array
        The numpy array of digital samples
    """
    if fmt == '212':
        # Easier to process when dealing with whole blocks
        if n_samp % 2:
            n_samp += 1
            added_samps = 1
            sig_data = np.append(sig_data, np.zeros(1, dtype='uint8'))
        else:
            added_samps = 0

        sig_data = sig_data.astype('int16')
        sig = np.zeros(n_samp, dtype='int16')

        # One sample pair is stored in one byte triplet.
        # Even numbered samples
        sig[0::2] = sig_data[0::3] + 256 * np.bitwise_and(sig_data[1::3], 0x0f)
        # Odd numbered samples (len(sig) always > 1 due to processing of
        # whole blocks)
        sig[1::2] = sig_data[2::3] + 256 * np.bitwise_and(sig_data[1::3] >> 4, 0x0f)

        # Remove trailing sample read within the byte block if
        # originally odd sampled
        if added_samps:
            sig = sig[:-added_samps]

        # Loaded values as unsigned. Convert to 2's complement form:
        # values > 2^11-1 are negative.
        sig[sig > 2047] -= 4096

    elif fmt == '310':
        # Easier to process when dealing with whole blocks
        if n_samp % 3:
            n_samp = upround(n_samp, 3)
            added_samps = n_samp % 3
            sig_data = np.append(sig_data, np.zeros(added_samps, dtype='uint8'))
        else:
            added_samps = 0

        sig_data = sig_data.astype('int16')
        sig = np.zeros(n_samp, dtype='int16')

        # One sample triplet is stored in one byte quartet
        # First sample is 7 msb of first byte and 3 lsb of second byte.
        sig[0::3] = (sig_data[0::4] >> 1)[0:len(sig[0::3])] + 128 * np.bitwise_and(sig_data[1::4], 0x07)[0:len(sig[0::3])]
        # Second signal is 7 msb of third byte and 3 lsb of fourth byte
        sig[1::3] = (sig_data[2::4] >> 1)[0:len(sig[1::3])] + 128 * np.bitwise_and(sig_data[3::4], 0x07)[0:len(sig[1::3])]
        # Third signal is 5 msb of second byte and 5 msb of fourth byte
        sig[2::3] = np.bitwise_and((sig_data[1::4] >> 3), 0x1f)[0:len(sig[2::3])] + 32 * np.bitwise_and(sig_data[3::4] >> 3, 0x1f)[0:len(sig[2::3])]

        # Remove trailing samples read within the byte block if
        # originally not 3n sampled
        if added_samps:
            sig = sig[:-added_samps]

        # Loaded values as unsigned. Convert to 2's complement form:
        # values > 2^9-1 are negative.
        sig[sig > 511] -= 1024

    elif fmt == '311':
        # Easier to process when dealing with whole blocks
        if n_samp % 3:
            n_samp = upround(n_samp, 3)
            added_samps = n_samp % 3
            sig_data = np.append(sig_data, np.zeros(added_samps, dtype='uint8'))
        else:
            added_samps = 0

        sig_data = sig_data.astype('int16')
        sig = np.zeros(n_samp, dtype='int16')

        # One sample triplet is stored in one byte quartet
        # First sample is first byte and 2 lsb of second byte.
        sig[0::3] = sig_data[0::4][0:len(sig[0::3])] + 256 * np.bitwise_and(sig_data[1::4], 0x03)[0:len(sig[0::3])]
        # Second sample is 6 msb of second byte and 4 lsb of third byte
        sig[1::3] = (sig_data[1::4] >> 2)[0:len(sig[1::3])] + 64 * np.bitwise_and(sig_data[2::4], 0x0f)[0:len(sig[1::3])]
        # Third sample is 4 msb of third byte and 6 msb of fourth byte
        sig[2::3] = (sig_data[2::4] >> 4)[0:len(sig[2::3])] + 16 * np.bitwise_and(sig_data[3::4], 0x7f)[0:len(sig[2::3])]

        # Remove trailing samples read within the byte block if
        # originally not 3n sampled
        if added_samps:
            sig = sig[:-added_samps]

        # Loaded values as unsigned. Convert to 2's complement form.
        # Values > 2^9-1 are negative.
        sig[sig > 511] -= 1024

    return sig

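# Worked example for format '212' (assumes _blocks_to_samples and numpy as np
# from above): two 12-bit samples, 100 and -200, packed into one byte triplet.
# byte0 = low 8 bits of s0, byte1 = (high nibble of s1 << 4) | high nibble of s0,
# byte2 = low 8 bits of s1.
blocks = np.array([0x64, 0xF0, 0x38], dtype='uint8')
_blocks_to_samples(blocks, 2, '212')   # -> array([ 100, -200], dtype=int16)
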
def get_submission_filenames(self, tournament=None, round_num=None):
    """Get filenames of the submission of the user.

    Args:
        tournament (int): optionally filter by ID of the tournament
        round_num (int): optionally filter round number

    Returns:
        list: list of user filenames (`dict`)

        Each filename in the list has the following structure:

            * filename (`str`)
            * round_num (`int`)
            * tournament (`int`)

    Example:
        >>> NumerAPI().get_submission_filenames(3, 111)
        [{'filename': 'model57-dMpHpYMPIUAF.csv',
          'round_num': 111,
          'tournament': 3}]
    """
    query = '''
      query {
        user {
          submissions {
            filename
            selected
            round {
              tournament
              number
            }
          }
        }
      }
    '''
    data = self.raw_query(query, authorization=True)['data']['user']
    filenames = [{"round_num": item['round']['number'],
                  "tournament": item['round']['tournament'],
                  "filename": item['filename']}
                 for item in data['submissions'] if item['selected']]

    if round_num is not None:
        filenames = [f for f in filenames if f['round_num'] == round_num]
    if tournament is not None:
        filenames = [f for f in filenames if f['tournament'] == tournament]
    filenames.sort(key=lambda f: (f['round_num'], f['tournament']))
    return filenames

def byteswap(self, fmt=None, start=None, end=None, repeat=True):
    """Change the endianness in-place. Return number of repeats of fmt done.

    fmt -- A compact structure string, an integer number of bytes or
           an iterable of integers. Defaults to 0, which byte reverses the
           whole bitstring.
    start -- Start bit position, defaults to 0.
    end -- End bit position, defaults to self.len.
    repeat -- If True (the default) the byte swapping pattern is repeated
              as much as possible.
    """
    start, end = self._validate_slice(start, end)
    if fmt is None or fmt == 0:
        # reverse all of the whole bytes.
        bytesizes = [(end - start) // 8]
    elif isinstance(fmt, numbers.Integral):
        if fmt < 0:
            raise ValueError("Improper byte length {0}.".format(fmt))
        bytesizes = [fmt]
    elif isinstance(fmt, basestring):
        m = STRUCT_PACK_RE.match(fmt)
        if not m:
            raise ValueError("Cannot parse format string {0}.".format(fmt))
        # Split the format string into a list of 'q', '4h' etc.
        formatlist = re.findall(STRUCT_SPLIT_RE, m.group('fmt'))
        # Now deal with multiplicative factors, 4h -> hhhh etc.
        bytesizes = []
        for f in formatlist:
            if len(f) == 1:
                bytesizes.append(PACK_CODE_SIZE[f])
            else:
                bytesizes.extend([PACK_CODE_SIZE[f[-1]]] * int(f[:-1]))
    elif isinstance(fmt, collections.Iterable):
        bytesizes = fmt
        for bytesize in bytesizes:
            if not isinstance(bytesize, numbers.Integral) or bytesize < 0:
                raise ValueError("Improper byte length {0}.".format(bytesize))
    else:
        raise TypeError("Format must be an integer, string or iterable.")

    repeats = 0
    totalbitsize = 8 * sum(bytesizes)
    if not totalbitsize:
        return 0
    if repeat:
        # Try to repeat up to the end of the bitstring.
        finalbit = end
    else:
        # Just try one (set of) byteswap(s).
        finalbit = start + totalbitsize
    for patternend in xrange(start + totalbitsize, finalbit + 1, totalbitsize):
        bytestart = patternend - totalbitsize
        for bytesize in bytesizes:
            byteend = bytestart + bytesize * 8
            self._reversebytes(bytestart, byteend)
            bytestart += bytesize * 8
        repeats += 1
    return repeats

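# Hedged example with the bitstring package, where this method lives.
from bitstring import BitArray

s = BitArray('0x0102030405060708')
n = s.byteswap(2)   # reverse the bytes inside each 2-byte word, repeated
# s is now 0x0201040306050807 and n == 4 (the pattern was applied 4 times)
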
def is_valid(self, tol: float = DISTANCE_TOLERANCE) -> bool:
    """
    True if SiteCollection does not contain atoms that are too close
    together. Note that the distance definition is based on type of
    SiteCollection. Cartesian distances are used for non-periodic
    Molecules, while PBC is taken into account for periodic structures.

    Args:
        tol (float): Distance tolerance. Default is 0.5A.

    Returns:
        (bool) True if SiteCollection does not contain atoms that are too
        close together.
    """
    if len(self.sites) == 1:
        return True
    all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]
    return bool(np.min(all_dists) > tol)

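# Hedged example with pymatgen's Molecule (a SiteCollection): an H2 molecule
# with a 0.74 A bond passes the default 0.5 A tolerance but fails a 1.0 A one.
from pymatgen.core import Molecule

h2 = Molecule(["H", "H"], [[0, 0, 0], [0, 0, 0.74]])
h2.is_valid()          # True
h2.is_valid(tol=1.0)   # False
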
def post_json(session, url, json):
    """
    Post JSON to the Forest endpoint.
    """
    res = session.post(url, json=json)
    if res.status_code >= 400:
        raise parse_error(res)
    return res

def app_context_processor(self, f):
    """Like :meth:`Flask.context_processor` but for a blueprint. Such a
    function is executed each request, even if outside of the blueprint.
    """
    self.record_once(lambda s: s.app.template_context_processors
                     .setdefault(None, []).append(f))
    return f

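# Hedged Flask sketch: a blueprint-registered processor that injects a value
# into every template rendered by the whole application, not just the blueprint.
from flask import Blueprint

bp = Blueprint("site", __name__)

@bp.app_context_processor
def inject_site_name():
    return {"site_name": "Example"}   # available in all templates once bp is registered
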
def fft(wave, npoints=None, indep_min=None, indep_max=None):
    r"""
    Return the Fast Fourier Transform of a waveform.

    :param wave: Waveform
    :type wave: :py:class:`peng.eng.Waveform`

    :param npoints: Number of points to use in the transform. If **npoints**
                    is less than the size of the independent variable vector
                    the waveform is truncated; if **npoints** is greater than
                    the size of the independent variable vector, the waveform
                    is zero-padded
    :type npoints: positive integer

    :param indep_min: Independent vector start point of computation
    :type indep_min: integer or float

    :param indep_max: Independent vector stop point of computation
    :type indep_max: integer or float

    :rtype: :py:class:`peng.eng.Waveform`

    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
    .. Auto-generated exceptions documentation for peng.wave_functions.fft

    :raises:
     * RuntimeError (Argument \`indep_max\` is not valid)
     * RuntimeError (Argument \`indep_min\` is not valid)
     * RuntimeError (Argument \`npoints\` is not valid)
     * RuntimeError (Argument \`wave\` is not valid)
     * RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
       arguments)
     * RuntimeError (Non-uniform sampling)

    .. [[[end]]]
    """
    ret = copy.copy(wave)
    _bound_waveform(ret, indep_min, indep_max)
    npoints = npoints or ret._indep_vector.size
    fs = (npoints - 1) / float(ret._indep_vector[-1])
    spoints = min(ret._indep_vector.size, npoints)
    sdiff = np.diff(ret._indep_vector[:spoints])
    cond = not np.all(
        np.isclose(sdiff, sdiff[0] * np.ones(spoints - 1), FP_RTOL, FP_ATOL)
    )
    pexdoc.addex(RuntimeError, "Non-uniform sampling", cond)
    finc = fs / float(npoints - 1)
    indep_vector = _barange(-fs / 2.0, +fs / 2.0, finc)
    dep_vector = np.fft.fft(ret._dep_vector, npoints)
    return Waveform(
        indep_vector=indep_vector,
        dep_vector=dep_vector,
        dep_name="fft({0})".format(ret.dep_name),
        indep_scale="LINEAR",
        dep_scale="LINEAR",
        indep_units="Hz",
        dep_units="",
    )

def remove_missing(self, data, return_bool="any"):
    """ ???

    Parameters
    ----------
    data : pd.DataFrame()
        Input dataframe.
    return_bool : bool
        ???

    Returns
    -------
    pd.DataFrame()
        ???
    """
    if return_bool == "any":
        bool_sel = self._find_missing(data, return_bool="any")
    elif return_bool == "all":
        bool_sel = self._find_missing(data, return_bool="all")
    return data[~bool_sel]

def _read_http_headers(self, size, kind, flag):
    """Read HTTP/2 HEADERS frames.

    Structure of HTTP/2 HEADERS frame [RFC 7540]:

        +-----------------------------------------------+
        |                  Length (24)                  |
        +---------------+---------------+---------------+
        |   Type (8)    |   Flags (8)   |
        +-+-------------+---------------+-------------------------------+
        |R|                  Stream Identifier (31)                     |
        +---------------+-----------------------------------------------+
        |Pad Length? (8)|
        +-+-------------+-----------------------------------------------+
        |E|                  Stream Dependency? (31)                    |
        +-+-------------+-----------------------------------------------+
        |  Weight? (8)  |
        +-+-------------+-----------------------------------------------+
        |                   Header Block Fragment (*)                 ...
        +---------------------------------------------------------------+
        |                           Padding (*)                       ...
        +---------------------------------------------------------------+

        Octets  Bits  Name            Description
        0       0     http.length     Length
        3       24    http.type       Type (1)
        4       32    http.flags      Flags
        5       40    -               Reserved
        5       41    http.sid        Stream Identifier
        9       72    http.pad_len    Pad Length (Optional)
        10      80    http.exclusive  Exclusive Flag
        10      81    http.deps       Stream Dependency (Optional)
        14      112   http.weight     Weight (Optional)
        15      120   http.frag       Header Block Fragment
        ?       ?     -               Padding (Optional)
    """
    _plen = 0
    _elen = 0
    _flag = dict(
        END_STREAM=False,   # bit 0
        END_HEADERS=False,  # bit 2
        PADDED=False,       # bit 3
        PRIORITY=False,     # bit 5
    )
    for index, bit in enumerate(flag):
        if index == 0 and bit:
            _flag['END_STREAM'] = True
        elif index == 2 and bit:
            _flag['END_HEADERS'] = True
        elif index == 3 and bit:
            _flag['PADDED'] = True
            _plen = self._read_unpack(1)
        elif index == 5 and bit:
            _flag['PRIORITY'] = True
            _edep = self._read_binary(4)
            _wght = self._read_unpack(1)
            _elen = 5
        elif bit:
            raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
        else:
            continue

    if _flag['PADDED']:
        _dlen = size - _plen - _elen - 1
    else:
        _dlen = size - _plen - _elen
    if _dlen < 0:
        raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)

    _frag = self._read_fileng(_dlen) or None

    padding = self._read_binary(_plen)
    if any((int(bit, base=2) for bit in padding)):
        raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)

    data = dict(
        flags=_flag,
        frag=_frag,
    )
    if _flag['PADDED']:
        data['ped_len'] = _plen
    if _flag['PRIORITY']:
        data['exclusive'] = True if int(_edep[0], base=2) else False
        data['deps'] = int(_edep[1:], base=2)
        data['weight'] = _wght + 1

    return data

Structure of HTTP/2 HEADERS frame [RFC 7540]:
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+---------------+-----------------------------------------------+
|Pad Length? (8)|
+-+-------------+-----------------------------------------------+
|E| Stream Dependency? (31) |
+-+-------------+-----------------------------------------------+
| Weight? (8) |
+-+-------------+-----------------------------------------------+
| Header Block Fragment (*) ...
+---------------------------------------------------------------+
| Padding (*) ...
+---------------------------------------------------------------+
Octets Bits Name Description
0 0 http.length Length
3 24 http.type Type (1)
4 32 http.flags Flags
5 40 - Reserved
5 41 http.sid Stream Identifier
9 72 http.pad_len Pad Length (Optional)
10 80 http.exclusive Exclusive Flag
10 81 http.deps Stream Dependency (Optional)
14 112 http.weight Weight (Optional)
15 120 http.frag Header Block Fragment
? ? - Padding (Optional) | Below is the the instruction that describes the task:
### Input:
Read HTTP/2 HEADERS frames.
Structure of HTTP/2 HEADERS frame [RFC 7540]:
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+---------------+-----------------------------------------------+
|Pad Length? (8)|
+-+-------------+-----------------------------------------------+
|E| Stream Dependency? (31) |
+-+-------------+-----------------------------------------------+
| Weight? (8) |
+-+-------------+-----------------------------------------------+
| Header Block Fragment (*) ...
+---------------------------------------------------------------+
| Padding (*) ...
+---------------------------------------------------------------+
Octets Bits Name Description
0 0 http.length Length
3 24 http.type Type (1)
4 32 http.flags Flags
5 40 - Reserved
5 41 http.sid Stream Identifier
9 72 http.pad_len Pad Length (Optional)
10 80 http.exclusive Exclusive Flag
10 81 http.deps Stream Dependency (Optional)
14 112 http.weight Weight (Optional)
15 120 http.frag Header Block Fragment
? ? - Padding (Optional)
### Response:
def _read_http_headers(self, size, kind, flag):
"""Read HTTP/2 HEADERS frames.
Structure of HTTP/2 HEADERS frame [RFC 7540]:
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+---------------+-----------------------------------------------+
|Pad Length? (8)|
+-+-------------+-----------------------------------------------+
|E| Stream Dependency? (31) |
+-+-------------+-----------------------------------------------+
| Weight? (8) |
+-+-------------+-----------------------------------------------+
| Header Block Fragment (*) ...
+---------------------------------------------------------------+
| Padding (*) ...
+---------------------------------------------------------------+
Octets Bits Name Description
0 0 http.length Length
3 24 http.type Type (1)
4 32 http.flags Flags
5 40 - Reserved
5 41 http.sid Stream Identifier
9 72 http.pad_len Pad Length (Optional)
10 80 http.exclusive Exclusive Flag
10 81 http.deps Stream Dependency (Optional)
14 112 http.weight Weight (Optional)
15 120 http.frag Header Block Fragment
? ? - Padding (Optional)
"""
_plen = 0
_elen = 0
_flag = dict(
END_STREAM=False, # bit 0
END_HEADERS=False, # bit 2
PADDED=False, # bit 3
PRIORITY=False, # bit 5
)
for index, bit in enumerate(flag):
if index == 0 and bit:
_flag['END_STREAM'] = True
elif index == 2 and bit:
_flag['END_HEADERS'] = True
elif index == 3 and bit:
_flag['PADDED'] = True
_plen = self._read_unpack(1)
elif index == 5 and bit:
_flag['PRIORITY'] = True
_edep = self._read_binary(4)
_wght = self._read_unpack(1)
_elen = 5
elif bit:
raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
else:
continue
if _flag['PADDED']:
_dlen = size - _plen - _elen - 1
else:
_dlen = size - _plen - _elen
if _dlen < 0:
raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
_frag = self._read_fileng(_dlen) or None
padding = self._read_binary(_plen)
if any((int(bit, base=2) for bit in padding)):
raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
data = dict(
flags=_flag,
frag=_frag,
)
if _flag['PADDED']:
data['pad_len'] = _plen
if _flag['PRIORITY']:
data['exclusive'] = True if int(_edep[0], base=2) else False
data['deps'] = int(_edep[1:], base=2)
data['weight'] = _wght + 1
return data |
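As a rough, self-contained sketch of the flag handling above (independent of the reader class and its self._read_* helpers, which are assumed rather than reproduced here), the HEADERS flag octet maps to named flags as follows; only bits 0, 2, 3 and 5 are defined for this frame type, and PADDED/PRIORITY shrink the header-block fragment by 1 + pad-length and 5 octets respectively.
def parse_headers_flags(bits):
    # bits: sequence of 8 ints for the flag octet, indexed as in the loop above
    flags = dict(END_STREAM=False, END_HEADERS=False, PADDED=False, PRIORITY=False)
    for index, bit in enumerate(bits):
        if index == 0 and bit:
            flags['END_STREAM'] = True
        elif index == 2 and bit:
            flags['END_HEADERS'] = True
        elif index == 3 and bit:
            flags['PADDED'] = True
        elif index == 5 and bit:
            flags['PRIORITY'] = True
        elif bit:
            raise ValueError('undefined flag bit set for a HEADERS frame')
    return flags

print(parse_headers_flags([0, 0, 1, 0, 0, 1, 0, 0]))
# {'END_STREAM': False, 'END_HEADERS': True, 'PADDED': False, 'PRIORITY': True}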
def update(self, tickDict):
''' consume ticks '''
if not self.__trakers:
self.__setUpTrakers()
for security, tick in tickDict.items():
if security in self.__trakers:
self.__trakers[security].tickUpdate(tick) | consume ticks | Below is the instruction that describes the task:
### Input:
consume ticks
### Response:
def update(self, tickDict):
''' consume ticks '''
if not self.__trakers:
self.__setUpTrakers()
for security, tick in tickDict.items():
if security in self.__trakers:
self.__trakers[security].tickUpdate(tick) |
def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
api_version):
"""Configure preferred-api-version of keystone in deployment and
monitor provided list of relation objects for propagation
before returning to caller.
:param sentry_relation_pairs: list of sentry, relation tuples used for
monitoring propagation of relation data
:param deployment: deployment to configure
:param api_version: value preferred-api-version will be set to
:returns: None if successful. Raise on error.
"""
self.log.debug("Setting keystone preferred-api-version: '{}'"
"".format(api_version))
config = {'preferred-api-version': api_version}
deployment.d.configure('keystone', config)
deployment._auto_wait_for_status()
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) | Configure preferred-api-version of keystone in deployment and
monitor provided list of relation objects for propagation
before returning to caller.
:param sentry_relation_pairs: list of sentry, relation tuples used for
monitoring propagation of relation data
:param deployment: deployment to configure
:param api_version: value preferred-api-version will be set to
:returns: None if successful. Raise on error. | Below is the instruction that describes the task:
### Input:
Configure preferred-api-version of keystone in deployment and
monitor provided list of relation objects for propagation
before returning to caller.
:param sentry_relation_pairs: list of sentry, relation tuples used for
monitoring propagation of relation data
:param deployment: deployment to configure
:param api_version: value preferred-api-version will be set to
:returns: None if successful. Raise on error.
### Response:
def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
api_version):
"""Configure preferred-api-version of keystone in deployment and
monitor provided list of relation objects for propagation
before returning to caller.
:param sentry_relation_pairs: list of sentry, relation tuples used for
monitoring propagation of relation data
:param deployment: deployment to configure
:param api_version: value preferred-api-version will be set to
:returns: None if successful. Raise on error.
"""
self.log.debug("Setting keystone preferred-api-version: '{}'"
"".format(api_version))
config = {'preferred-api-version': api_version}
deployment.d.configure('keystone', config)
deployment._auto_wait_for_status()
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) |
def centerLatLon(self):
"""
Get the center lat/lon of model
"""
# GET CENTROID FROM GSSHA GRID
gssha_grid = self.getGrid()
min_x, max_x, min_y, max_y = gssha_grid.bounds()
x_ext, y_ext = transform(gssha_grid.proj,
Proj(init='epsg:4326'),
[min_x, max_x, min_x, max_x],
[min_y, max_y, max_y, min_y],
)
return np.mean(y_ext), np.mean(x_ext) | Get the center lat/lon of model | Below is the instruction that describes the task:
### Input:
Get the center lat/lon of model
### Response:
def centerLatLon(self):
"""
Get the center lat/lon of model
"""
# GET CENTROID FROM GSSHA GRID
gssha_grid = self.getGrid()
min_x, max_x, min_y, max_y = gssha_grid.bounds()
x_ext, y_ext = transform(gssha_grid.proj,
Proj(init='epsg:4326'),
[min_x, max_x, min_x, max_x],
[min_y, max_y, max_y, min_y],
)
return np.mean(y_ext), np.mean(x_ext) |
def read_embedded(self, data, parent_var_type):
"""Read method for "mixed" variable type.
.. Note:: The ``read()`` method will automatically determine if the input is a variable or
needs to be searched for embedded variables. There usually is no reason to call
this method directly.
This method will automatically convert variables embedded in a string with data retrieved
from DB. If there are no keys/variables the raw string will be returned.
Examples::
DB Values
#App:7979:variable_name!String:
"embedded \\"variable\\""
#App:7979:two!String:
"two"
#App:7979:variable_name!StringArray:
["one", "two", "three"]
Examples 1:
Input: "This input has a embedded #App:7979:variable_name!String"
Examples 2:
Input: ["one", #App:7979:two!String, "three"]
Examples 3:
Input: [{
"key": "embedded string",
"value": "This input has a embedded #App:7979:variable_name!String"
}, {
"key": "string array",
"value": #App:7979:variable_name!StringArray
}, {
"key": "string",
"value": #App:7979:variable_name!String
}]
Args:
data (string): The data to be parsed and updated from the DB.
parent_var_type (string): The parent type of the embedded variable.
Returns:
(string): Results retrieved from DB
"""
if data is None:
return data
# iterate all matching variables
for var in (v.group(0) for v in re.finditer(self._variable_parse, str(data))):
self.tcex.log.debug(
'embedded variable: {}, parent_var_type: {}'.format(var, parent_var_type)
)
key_type = self.variable_type(var)
val = self.read(var)
if val is None:
val = ''
elif key_type != 'String':
var = r'"?{}"?'.format(var) # replace quotes if they exist
val = json.dumps(val)
data = re.sub(var, val, data)
return data | Read method for "mixed" variable type.
.. Note:: The ``read()`` method will automatically determine if the input is a variable or
needs to be searched for embedded variables. There usually is no reason to call
this method directly.
This method will automatically convert variables embedded in a string with data retrieved
from DB. If there are no keys/variables the raw string will be returned.
Examples::
DB Values
#App:7979:variable_name!String:
"embedded \\"variable\\""
#App:7979:two!String:
"two"
#App:7979:variable_name!StringArray:
["one", "two", "three"]
Examples 1:
Input: "This input has a embedded #App:7979:variable_name!String"
Examples 2:
Input: ["one", #App:7979:two!String, "three"]
Examples 3:
Input: [{
"key": "embedded string",
"value": "This input has a embedded #App:7979:variable_name!String"
}, {
"key": "string array",
"value": #App:7979:variable_name!StringArray
}, {
"key": "string",
"value": #App:7979:variable_name!String
}]
Args:
data (string): The data to be parsed and updated from the DB.
parent_var_type (string): The parent type of the embedded variable.
Returns:
(string): Results retrieved from DB | Below is the instruction that describes the task:
### Input:
Read method for "mixed" variable type.
.. Note:: The ``read()`` method will automatically determine if the input is a variable or
needs to be searched for embedded variables. There usually is no reason to call
this method directly.
This method will automatically convert variables embedded in a string with data retrieved
from DB. If there are no keys/variables the raw string will be returned.
Examples::
DB Values
#App:7979:variable_name!String:
"embedded \\"variable\\""
#App:7979:two!String:
"two"
#App:7979:variable_name!StringArray:
["one", "two", "three"]
Examples 1:
Input: "This input has a embedded #App:7979:variable_name!String"
Examples 2:
Input: ["one", #App:7979:two!String, "three"]
Examples 3:
Input: [{
"key": "embedded string",
"value": "This input has a embedded #App:7979:variable_name!String"
}, {
"key": "string array",
"value": #App:7979:variable_name!StringArray
}, {
"key": "string",
"value": #App:7979:variable_name!String
}]
Args:
data (string): The data to be parsed and updated from the DB.
parent_var_type (string): The parent type of the embedded variable.
Returns:
(string): Results retrieved from DB
### Response:
def read_embedded(self, data, parent_var_type):
"""Read method for "mixed" variable type.
.. Note:: The ``read()`` method will automatically determine if the input is a variable or
needs to be searched for embedded variables. There usually is no reason to call
this method directly.
This method will automatically convert variables embedded in a string with data retrieved
from DB. If there are no keys/variables the raw string will be returned.
Examples::
DB Values
#App:7979:variable_name!String:
"embedded \\"variable\\""
#App:7979:two!String:
"two"
#App:7979:variable_name!StringArray:
["one", "two", "three"]
Examples 1:
Input: "This input has a embedded #App:7979:variable_name!String"
Examples 2:
Input: ["one", #App:7979:two!String, "three"]
Examples 3:
Input: [{
"key": "embedded string",
"value": "This input has a embedded #App:7979:variable_name!String"
}, {
"key": "string array",
"value": #App:7979:variable_name!StringArray
}, {
"key": "string",
"value": #App:7979:variable_name!String
}]
Args:
data (string): The data to be parsed and updated from the DB.
parent_var_type (string): The parent type of the embedded variable.
Returns:
(string): Results retrieved from DB
"""
if data is None:
return data
# iterate all matching variables
for var in (v.group(0) for v in re.finditer(self._variable_parse, str(data))):
self.tcex.log.debug(
'embedded variable: {}, parent_var_type: {}'.format(var, parent_var_type)
)
key_type = self.variable_type(var)
val = self.read(var)
if val is None:
val = ''
elif key_type != 'String':
var = r'"?{}"?'.format(var) # replace quotes if they exist
val = json.dumps(val)
data = re.sub(var, val, data)
return data |
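A minimal standalone sketch of the substitution logic, with a made-up variable pattern and an in-memory dict standing in for the Playbook DB (the real class resolves values through self.read() and a more elaborate self._variable_parse):
import json
import re

VARIABLE_RE = r'#App:\d+:\w+!(?:StringArray|String)'   # hypothetical, simplified pattern
STORE = {
    '#App:7979:two!String': 'two',
    '#App:7979:variable_name!StringArray': ['one', 'two', 'three'],
}

def read_embedded_sketch(data):
    for var in (m.group(0) for m in re.finditer(VARIABLE_RE, data)):
        val = STORE.get(var, '')
        if var.endswith('!String'):
            data = re.sub(var, val, data)
        else:
            # non-String values are JSON-encoded and any surrounding quotes dropped
            data = re.sub(r'"?{}"?'.format(var), json.dumps(val), data)
    return data

print(read_embedded_sketch('count is #App:7979:two!String'))
# count is two
print(read_embedded_sketch('{"list": "#App:7979:variable_name!StringArray"}'))
# {"list": ["one", "two", "three"]}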
def construct_makeblastdb_cmd(
filename, outdir, blastdb_exe=pyani_config.MAKEBLASTDB_DEFAULT
):
"""Returns a single makeblastdb command.
- filename - input filename
- blastdb_exe - path to the makeblastdb executable
"""
title = os.path.splitext(os.path.split(filename)[-1])[0]
outfilename = os.path.join(outdir, os.path.split(filename)[-1])
return (
"{0} -dbtype nucl -in {1} -title {2} -out {3}".format(
blastdb_exe, filename, title, outfilename
),
outfilename,
) | Returns a single makeblastdb command.
- filename - input filename
- blastdb_exe - path to the makeblastdb executable | Below is the instruction that describes the task:
### Input:
Returns a single makeblastdb command.
- filename - input filename
- blastdb_exe - path to the makeblastdb executable
### Response:
def construct_makeblastdb_cmd(
filename, outdir, blastdb_exe=pyani_config.MAKEBLASTDB_DEFAULT
):
"""Returns a single makeblastdb command.
- filename - input filename
- blastdb_exe - path to the makeblastdb executable
"""
title = os.path.splitext(os.path.split(filename)[-1])[0]
outfilename = os.path.join(outdir, os.path.split(filename)[-1])
return (
"{0} -dbtype nucl -in {1} -title {2} -out {3}".format(
blastdb_exe, filename, title, outfilename
),
outfilename,
) |
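A worked example with hypothetical POSIX paths (and the executable name written out so the pyani_config default is not needed) shows what the returned command/outfile pair looks like:
import os

filename = "genomes/NC_010.fna"
outdir = "blastdb"
title = os.path.splitext(os.path.split(filename)[-1])[0]         # 'NC_010'
outfilename = os.path.join(outdir, os.path.split(filename)[-1])  # 'blastdb/NC_010.fna'
print("makeblastdb -dbtype nucl -in {0} -title {1} -out {2}".format(filename, title, outfilename))
# makeblastdb -dbtype nucl -in genomes/NC_010.fna -title NC_010 -out blastdb/NC_010.fna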
def _remove_duplicates(objects):
"""Removes duplicate objects.
http://www.peterbe.com/plog/uniqifiers-benchmark.
"""
seen, uniq = set(), []
for obj in objects:
obj_id = id(obj)
if obj_id in seen:
continue
seen.add(obj_id)
uniq.append(obj)
return uniq | Removes duplicate objects.
http://www.peterbe.com/plog/uniqifiers-benchmark. | Below is the instruction that describes the task:
### Input:
Removes duplicate objects.
http://www.peterbe.com/plog/uniqifiers-benchmark.
### Response:
def _remove_duplicates(objects):
"""Removes duplicate objects.
http://www.peterbe.com/plog/uniqifiers-benchmark.
"""
seen, uniq = set(), []
for obj in objects:
obj_id = id(obj)
if obj_id in seen:
continue
seen.add(obj_id)
uniq.append(obj)
return uniq |
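Because the comparison is by id() rather than by equality, equal-but-distinct objects all survive and first-seen order is preserved; a quick illustration (assuming _remove_duplicates from above is in scope):
a = [1, 2]
b = [1, 2]                                    # equal to a, but a distinct object
print(_remove_duplicates([a, b, a, b, a]))    # [[1, 2], [1, 2]] -- both kept: identity, not equality
print(_remove_duplicates([a, a, a]))          # [[1, 2]]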
def shape_based_slice_interpolation(img, dim, nslices):
"""
Adds `nslices` slices between all slices of the binary image `img` along dimension
`dim` respecting the original slice values to be situated in the middle of each
slice. Extrapolation situations are handled by simple repeating.
Interpolation of new slices is performed using shape based interpolation.
Parameters
----------
img : array_like
A n-dimensional image.
dim : int
The dimension along which to add slices.
nslices : int
The number of slices to add. Must be an even number.
Returns
-------
out : ndarray
The re-sampled image.
"""
# check arguments
if not 0 == nslices % 2:
raise ValueError('nslices must be an even number')
out = None
slicer = [slice(None)] * img.ndim
chunk_full_shape = list(img.shape)
chunk_full_shape[dim] = nslices + 2
for sl1, sl2 in zip(numpy.rollaxis(img, dim)[:-1], numpy.rollaxis(img, dim)[1:]):
if 0 == numpy.count_nonzero(sl1) and 0 == numpy.count_nonzero(sl2):
chunk = numpy.zeros(chunk_full_shape, dtype=numpy.bool)
else:
chunk = shape_based_slice_insertation_object_wise(sl1, sl2, dim, nslices)
if out is None:
out = numpy.delete(chunk, -1, dim)
else:
out = numpy.concatenate((out, numpy.delete(chunk, -1, dim)), dim)
slicer[dim] = numpy.newaxis
out = numpy.concatenate((out, sl2[slicer]), dim)
slicer[dim] = slice(0, 1)
for _ in range(nslices // 2):
out = numpy.concatenate((img[slicer], out), dim)
slicer[dim] = slice(-1, None)
for _ in range(nslices // 2):
out = numpy.concatenate((out, img[slicer]), dim)
return out | Adds `nslices` slices between all slices of the binary image `img` along dimension
`dim` respecting the original slice values to be situated in the middle of each
slice. Extrapolation situations are handled by simple repeating.
Interpolation of new slices is performed using shape based interpolation.
Parameters
----------
img : array_like
A n-dimensional image.
dim : int
The dimension along which to add slices.
nslices : int
The number of slices to add. Must be an even number.
Returns
-------
out : ndarray
The re-sampled image. | Below is the instruction that describes the task:
### Input:
Adds `nslices` slices between all slices of the binary image `img` along dimension
`dim` respecting the original slice values to be situated in the middle of each
slice. Extrapolation situations are handled by simple repeating.
Interpolation of new slices is performed using shape based interpolation.
Parameters
----------
img : array_like
A n-dimensional image.
dim : int
The dimension along which to add slices.
nslices : int
The number of slices to add. Must be an even number.
Returns
-------
out : ndarray
The re-sampled image.
### Response:
def shape_based_slice_interpolation(img, dim, nslices):
"""
Adds `nslices` slices between all slices of the binary image `img` along dimension
`dim` respecting the original slice values to be situated in the middle of each
slice. Extrapolation situations are handled by simple repeating.
Interpolation of new slices is performed using shape based interpolation.
Parameters
----------
img : array_like
A n-dimensional image.
dim : int
The dimension along which to add slices.
nslices : int
The number of slices to add. Must be an even number.
Returns
-------
out : ndarray
The re-sampled image.
"""
# check arguments
if not 0 == nslices % 2:
raise ValueError('nslices must be an even number')
out = None
slicer = [slice(None)] * img.ndim
chunk_full_shape = list(img.shape)
chunk_full_shape[dim] = nslices + 2
for sl1, sl2 in zip(numpy.rollaxis(img, dim)[:-1], numpy.rollaxis(img, dim)[1:]):
if 0 == numpy.count_nonzero(sl1) and 0 == numpy.count_nonzero(sl2):
chunk = numpy.zeros(chunk_full_shape, dtype=numpy.bool)
else:
chunk = shape_based_slice_insertation_object_wise(sl1, sl2, dim, nslices)
if out is None:
out = numpy.delete(chunk, -1, dim)
else:
out = numpy.concatenate((out, numpy.delete(chunk, -1, dim)), dim)
slicer[dim] = numpy.newaxis
out = numpy.concatenate((out, sl2[slicer]), dim)
slicer[dim] = slice(0, 1)
for _ in range(nslices // 2):
out = numpy.concatenate((img[slicer], out), dim)
slicer[dim] = slice(-1, None)
for _ in range(nslices // 2):
out = numpy.concatenate((out, img[slicer]), dim)
return out |
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
"""
if not self.isNonAxi:
phi= 0.
x,y,z= self._compute_xyz(R,phi,z,t)
phixxa= self._2ndderiv_xyz(x,y,z,0,0)
phixya= self._2ndderiv_xyz(x,y,z,0,1)
phiyya= self._2ndderiv_xyz(x,y,z,1,1)
ang = self._omegab*t + self._pa
c, s = np.cos(ang), np.sin(ang)
phixx = c**2*phixxa + 2.*c*s*phixya + s**2*phiyya
phixy = (c**2-s**2)*phixya + c*s*(phiyya - phixxa)
phiyy = s**2*phixxa - 2.*c*s*phixya + c**2*phiyya
return np.cos(phi)**2.*phixx + np.sin(phi)**2.*phiyy + \
2.*np.cos(phi)*np.sin(phi)*phixy | NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative | Below is the instruction that describes the task:
### Input:
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
### Response:
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
"""
if not self.isNonAxi:
phi= 0.
x,y,z= self._compute_xyz(R,phi,z,t)
phixxa= self._2ndderiv_xyz(x,y,z,0,0)
phixya= self._2ndderiv_xyz(x,y,z,0,1)
phiyya= self._2ndderiv_xyz(x,y,z,1,1)
ang = self._omegab*t + self._pa
c, s = np.cos(ang), np.sin(ang)
phixx = c**2*phixxa + 2.*c*s*phixya + s**2*phiyya
phixy = (c**2-s**2)*phixya + c*s*(phiyya - phixxa)
phiyy = s**2*phixxa - 2.*c*s*phixya + c**2*phiyya
return np.cos(phi)**2.*phixx + np.sin(phi)**2.*phiyy + \
2.*np.cos(phi)*np.sin(phi)*phixy |
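The three intermediate formulas can be read as the 2x2 similarity transform H = M^T Ha M of the Hessian computed in the rotated (bar) frame, with M a rotation by ang, and the returned expression projects H onto the radial direction. A quick numerical check of that reading (a sketch, not taken from the galpy source):
import numpy as np

ang = 0.7
c, s = np.cos(ang), np.sin(ang)
M = np.array([[c, -s], [s, c]])
Ha = np.array([[1.3, -0.4], [-0.4, 2.1]])    # [[phixxa, phixya], [phixya, phiyya]]

phixx = c**2*Ha[0, 0] + 2*c*s*Ha[0, 1] + s**2*Ha[1, 1]
phixy = (c**2 - s**2)*Ha[0, 1] + c*s*(Ha[1, 1] - Ha[0, 0])
phiyy = s**2*Ha[0, 0] - 2*c*s*Ha[0, 1] + c**2*Ha[1, 1]

H = M.T @ Ha @ M
print(np.allclose(H, [[phixx, phixy], [phixy, phiyy]]))            # True

phi = 0.3
u = np.array([np.cos(phi), np.sin(phi)])                           # radial unit vector
print(np.isclose(u @ H @ u,
                 np.cos(phi)**2*phixx + np.sin(phi)**2*phiyy
                 + 2*np.cos(phi)*np.sin(phi)*phixy))                # True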
def create_client(self,only_db=False):
"""返回连接的客户端
"""
#database = parse_uri(self.uri).get("database")
if self.ioloop:
if only_db == False:
client = AsyncIOMotorClient("/".join(self.uri.split("/")[:-1]), io_loop=self.ioloop)
else:
client = AsyncIOMotorClient(self.uri, io_loop=self.ioloop)
else:
if only_db == False:
client = AsyncIOMotorClient("/".join(self.uri.split("/")[:-1]))
else:
client = AsyncIOMotorClient(self.uri)
return client | Return the connected client | Below is the instruction that describes the task:
### Input:
Return the connected client
### Response:
def create_client(self,only_db=False):
"""返回连接的客户端
"""
#database = parse_uri(self.uri).get("database")
if self.ioloop:
if only_db == False:
client = AsyncIOMotorClient("/".join(self.uri.split("/")[:-1]), io_loop=self.ioloop)
else:
client = AsyncIOMotorClient(self.uri, io_loop=self.ioloop)
else:
if only_db == False:
client = AsyncIOMotorClient("/".join(self.uri.split("/")[:-1]))
else:
client = AsyncIOMotorClient(self.uri)
return client |
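The only_db=False branch connects to the server rather than to a specific database by stripping the last path segment from the URI; with a hypothetical MongoDB URI:
uri = "mongodb://localhost:27017/mydatabase"
print("/".join(uri.split("/")[:-1]))   # mongodb://localhost:27017
# note: a URI without a trailing database segment would lose its host part here,
# so callers are expected to always include one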
def _process_failures(self, key):
'''
Handles the retrying of the failed key
'''
if self.settings['RETRY_FAILURES']:
self.logger.debug("going to retry failure")
# get the current failure count
failkey = self._get_fail_key(key)
current = self.redis_conn.get(failkey)
if current is None:
current = 0
else:
current = int(current)
if current < self.settings['RETRY_FAILURES_MAX']:
self.logger.debug("Incr fail key")
current += 1
self.redis_conn.set(failkey, current)
else:
self.logger.error("Could not process action within"
" failure limit")
self.redis_conn.delete(failkey)
self.redis_conn.delete(key) | Handles the retrying of the failed key | Below is the the instruction that describes the task:
### Input:
Handles the retrying of the failed key
### Response:
def _process_failures(self, key):
'''
Handles the retrying of the failed key
'''
if self.settings['RETRY_FAILURES']:
self.logger.debug("going to retry failure")
# get the current failure count
failkey = self._get_fail_key(key)
current = self.redis_conn.get(failkey)
if current is None:
current = 0
else:
current = int(current)
if current < self.settings['RETRY_FAILURES_MAX']:
self.logger.debug("Incr fail key")
current += 1
self.redis_conn.set(failkey, current)
else:
self.logger.error("Could not process action within"
" failure limit")
self.redis_conn.delete(failkey)
self.redis_conn.delete(key) |
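A dict-backed sketch of the same retry bookkeeping (no redis or scheduler here; RETRY_FAILURES_MAX and the key below are made up) shows the counter semantics: the action is retried up to the limit, then both the fail key and the original key are dropped.
RETRY_FAILURES_MAX = 3
fail_store = {}

def process_failure(key):
    current = fail_store.get(key, 0)
    if current < RETRY_FAILURES_MAX:
        fail_store[key] = current + 1
        return 'retry'
    fail_store.pop(key, None)
    return 'gave up'

for attempt in range(5):
    print(attempt, process_failure('poll:queue:abc123'))
# attempts 0-2 print 'retry', 3 prints 'gave up' (counter reset), 4 starts retrying again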
def get_units_per_page(self, per_page=1000, page=1, params=None):
"""
Get units per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=UNITS, per_page=per_page, page=page, params=params) | Get units per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list | Below is the instruction that describes the task:
### Input:
Get units per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
### Response:
def get_units_per_page(self, per_page=1000, page=1, params=None):
"""
Get units per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=UNITS, per_page=per_page, page=page, params=params) |
def cli_put_account(context):
"""
Performs a PUT on the account.
See :py:mod:`swiftly.cli.put` for context usage information.
See :py:class:`CLIPut` for more information.
"""
body = None
if context.input_:
if context.input_ == '-':
body = context.io_manager.get_stdin()
else:
body = open(context.input_, 'rb')
with context.client_manager.with_client() as client:
status, reason, headers, contents = client.put_account(
headers=context.headers, query=context.query, cdn=context.cdn,
body=body)
if hasattr(contents, 'read'):
contents.read()
if status // 100 != 2:
raise ReturnCode('putting account: %s %s' % (status, reason)) | Performs a PUT on the account.
See :py:mod:`swiftly.cli.put` for context usage information.
See :py:class:`CLIPut` for more information. | Below is the instruction that describes the task:
### Input:
Performs a PUT on the account.
See :py:mod:`swiftly.cli.put` for context usage information.
See :py:class:`CLIPut` for more information.
### Response:
def cli_put_account(context):
"""
Performs a PUT on the account.
See :py:mod:`swiftly.cli.put` for context usage information.
See :py:class:`CLIPut` for more information.
"""
body = None
if context.input_:
if context.input_ == '-':
body = context.io_manager.get_stdin()
else:
body = open(context.input_, 'rb')
with context.client_manager.with_client() as client:
status, reason, headers, contents = client.put_account(
headers=context.headers, query=context.query, cdn=context.cdn,
body=body)
if hasattr(contents, 'read'):
contents.read()
if status // 100 != 2:
raise ReturnCode('putting account: %s %s' % (status, reason)) |
def get(self, *, asset_id, operation=None, headers=None):
"""Given an asset id, get its list of transactions (and
optionally filter for only ``'CREATE'`` or ``'TRANSFER'``
transactions).
Args:
asset_id (str): Id of the asset.
operation (str): The type of operation the transaction
should be. Either ``'CREATE'`` or ``'TRANSFER'``.
Defaults to ``None``.
headers (dict): Optional headers to pass to the request.
Note:
Please note that the id of an asset in BigchainDB is
actually the id of the transaction which created the asset.
In other words, when querying for an asset id with the
operation set to ``'CREATE'``, only one transaction should
be expected. This transaction will be the transaction in
which the asset was created, and the transaction id will be
equal to the given asset id. Hence, the following calls to
:meth:`.retrieve` and :meth:`.get` should return the same
transaction.
>>> bdb = BigchainDB()
>>> bdb.transactions.retrieve('foo')
>>> bdb.transactions.get(asset_id='foo', operation='CREATE')
Since :meth:`.get` returns a list of transactions, it may
be more efficient to use :meth:`.retrieve` instead, if one
is only interested in the ``'CREATE'`` operation.
Returns:
list: List of transactions.
"""
return self.transport.forward_request(
method='GET',
path=self.path,
params={'asset_id': asset_id, 'operation': operation},
headers=headers,
) | Given an asset id, get its list of transactions (and
optionally filter for only ``'CREATE'`` or ``'TRANSFER'``
transactions).
Args:
asset_id (str): Id of the asset.
operation (str): The type of operation the transaction
should be. Either ``'CREATE'`` or ``'TRANSFER'``.
Defaults to ``None``.
headers (dict): Optional headers to pass to the request.
Note:
Please note that the id of an asset in BigchainDB is
actually the id of the transaction which created the asset.
In other words, when querying for an asset id with the
operation set to ``'CREATE'``, only one transaction should
be expected. This transaction will be the transaction in
which the asset was created, and the transaction id will be
equal to the given asset id. Hence, the following calls to
:meth:`.retrieve` and :meth:`.get` should return the same
transaction.
>>> bdb = BigchainDB()
>>> bdb.transactions.retrieve('foo')
>>> bdb.transactions.get(asset_id='foo', operation='CREATE')
Since :meth:`.get` returns a list of transactions, it may
be more efficient to use :meth:`.retrieve` instead, if one
is only interested in the ``'CREATE'`` operation.
Returns:
list: List of transactions. | Below is the instruction that describes the task:
### Input:
Given an asset id, get its list of transactions (and
optionally filter for only ``'CREATE'`` or ``'TRANSFER'``
transactions).
Args:
asset_id (str): Id of the asset.
operation (str): The type of operation the transaction
should be. Either ``'CREATE'`` or ``'TRANSFER'``.
Defaults to ``None``.
headers (dict): Optional headers to pass to the request.
Note:
Please note that the id of an asset in BigchainDB is
actually the id of the transaction which created the asset.
In other words, when querying for an asset id with the
operation set to ``'CREATE'``, only one transaction should
be expected. This transaction will be the transaction in
which the asset was created, and the transaction id will be
equal to the given asset id. Hence, the following calls to
:meth:`.retrieve` and :meth:`.get` should return the same
transaction.
>>> bdb = BigchainDB()
>>> bdb.transactions.retrieve('foo')
>>> bdb.transactions.get(asset_id='foo', operation='CREATE')
Since :meth:`.get` returns a list of transactions, it may
be more efficient to use :meth:`.retrieve` instead, if one
is only interested in the ``'CREATE'`` operation.
Returns:
list: List of transactions.
### Response:
def get(self, *, asset_id, operation=None, headers=None):
"""Given an asset id, get its list of transactions (and
optionally filter for only ``'CREATE'`` or ``'TRANSFER'``
transactions).
Args:
asset_id (str): Id of the asset.
operation (str): The type of operation the transaction
should be. Either ``'CREATE'`` or ``'TRANSFER'``.
Defaults to ``None``.
headers (dict): Optional headers to pass to the request.
Note:
Please note that the id of an asset in BigchainDB is
actually the id of the transaction which created the asset.
In other words, when querying for an asset id with the
operation set to ``'CREATE'``, only one transaction should
be expected. This transaction will be the transaction in
which the asset was created, and the transaction id will be
equal to the given asset id. Hence, the following calls to
:meth:`.retrieve` and :meth:`.get` should return the same
transaction.
>>> bdb = BigchainDB()
>>> bdb.transactions.retrieve('foo')
>>> bdb.transactions.get(asset_id='foo', operation='CREATE')
Since :meth:`.get` returns a list of transactions, it may
be more efficient to use :meth:`.retrieve` instead, if one
is only interested in the ``'CREATE'`` operation.
Returns:
list: List of transactions.
"""
return self.transport.forward_request(
method='GET',
path=self.path,
params={'asset_id': asset_id, 'operation': operation},
headers=headers,
) |
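On the wire this is a plain GET with asset_id and operation as query parameters; a rough sketch of the query string with a made-up asset id, assuming the transport simply URL-encodes the params dict (how None values are handled is up to the transport layer):
from urllib.parse import urlencode

params = {'asset_id': 'abc123', 'operation': 'CREATE'}
print(urlencode(params))   # asset_id=abc123&operation=CREATE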
def clean_structure(self, out_suffix='_clean', outdir=None, force_rerun=False,
remove_atom_alt=True, keep_atom_alt_id='A',remove_atom_hydrogen=True, add_atom_occ=True,
remove_res_hetero=True, keep_chemicals=None, keep_res_only=None,
add_chain_id_if_empty='X', keep_chains=None):
"""Clean the structure file associated with this structure, and save it as a new file. Returns the file path.
Args:
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file
"""
if not self.structure_file:
log.error('{}: no structure file, unable to clean'.format(self.id))
return None
clean_pdb_file = ssbio.protein.structure.utils.cleanpdb.clean_pdb(self.structure_path, out_suffix=out_suffix,
outdir=outdir, force_rerun=force_rerun,
remove_atom_alt=remove_atom_alt,
remove_atom_hydrogen=remove_atom_hydrogen,
keep_atom_alt_id=keep_atom_alt_id,
add_atom_occ=add_atom_occ,
remove_res_hetero=remove_res_hetero,
keep_chemicals=keep_chemicals,
keep_res_only=keep_res_only,
add_chain_id_if_empty=add_chain_id_if_empty,
keep_chains=keep_chains)
return clean_pdb_file | Clean the structure file associated with this structure, and save it as a new file. Returns the file path.
Args:
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file | Below is the instruction that describes the task:
### Input:
Clean the structure file associated with this structure, and save it as a new file. Returns the file path.
Args:
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file
### Response:
def clean_structure(self, out_suffix='_clean', outdir=None, force_rerun=False,
remove_atom_alt=True, keep_atom_alt_id='A',remove_atom_hydrogen=True, add_atom_occ=True,
remove_res_hetero=True, keep_chemicals=None, keep_res_only=None,
add_chain_id_if_empty='X', keep_chains=None):
"""Clean the structure file associated with this structure, and save it as a new file. Returns the file path.
Args:
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file
"""
if not self.structure_file:
log.error('{}: no structure file, unable to clean'.format(self.id))
return None
clean_pdb_file = ssbio.protein.structure.utils.cleanpdb.clean_pdb(self.structure_path, out_suffix=out_suffix,
outdir=outdir, force_rerun=force_rerun,
remove_atom_alt=remove_atom_alt,
remove_atom_hydrogen=remove_atom_hydrogen,
keep_atom_alt_id=keep_atom_alt_id,
add_atom_occ=add_atom_occ,
remove_res_hetero=remove_res_hetero,
keep_chemicals=keep_chemicals,
keep_res_only=keep_res_only,
add_chain_id_if_empty=add_chain_id_if_empty,
keep_chains=keep_chains)
return clean_pdb_file |
def _interpret_angle(name, angle_object, angle_float, unit='degrees'):
"""Return an angle in radians from one of two arguments.
It is common for Skyfield routines to accept both an argument like
`alt` that takes an Angle object as well as an `alt_degrees` that
can be given a bare float or a sexagesimal tuple. A pair of such
arguments can be passed to this routine for interpretation.
"""
if angle_object is not None:
if isinstance(angle_object, Angle):
return angle_object.radians
elif angle_float is not None:
return _unsexagesimalize(angle_float) * _from_degrees
raise ValueError('you must either provide the {0}= parameter with'
' an Angle argument or supply the {0}_{1}= parameter'
' with a numeric argument'.format(name, unit)) | Return an angle in radians from one of two arguments.
It is common for Skyfield routines to accept both an argument like
`alt` that takes an Angle object as well as an `alt_degrees` that
can be given a bare float or a sexagesimal tuple. A pair of such
arguments can be passed to this routine for interpretation. | Below is the instruction that describes the task:
### Input:
Return an angle in radians from one of two arguments.
It is common for Skyfield routines to accept both an argument like
`alt` that takes an Angle object as well as an `alt_degrees` that
can be given a bare float or a sexagesimal tuple. A pair of such
arguments can be passed to this routine for interpretation.
### Response:
def _interpret_angle(name, angle_object, angle_float, unit='degrees'):
"""Return an angle in radians from one of two arguments.
It is common for Skyfield routines to accept both an argument like
`alt` that takes an Angle object as well as an `alt_degrees` that
can be given a bare float or a sexagesimal tuple. A pair of such
arguments can be passed to this routine for interpretation.
"""
if angle_object is not None:
if isinstance(angle_object, Angle):
return angle_object.radians
elif angle_float is not None:
return _unsexagesimalize(angle_float) * _from_degrees
raise ValueError('you must either provide the {0}= parameter with'
' an Angle argument or supply the {0}_{1}= parameter'
' with a numeric argument'.format(name, unit)) |
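A self-contained sketch of the same two-argument convention, with a made-up MiniAngle type and a simplified sexagesimal fallback (Skyfield's real Angle and _unsexagesimalize also handle signs and other edge cases):
import math

class MiniAngle:
    def __init__(self, degrees):
        self.radians = math.radians(degrees)

def to_degrees(value):
    if isinstance(value, tuple):                 # (deg, min, sec); sign handling omitted
        d, m, s = (value + (0, 0, 0))[:3]
        return d + m / 60.0 + s / 3600.0
    return value

def interpret_angle(name, angle_object, angle_degrees):
    if angle_object is not None:
        return angle_object.radians
    if angle_degrees is not None:
        return math.radians(to_degrees(angle_degrees))
    raise ValueError('provide either {0}= or {0}_degrees='.format(name))

print(interpret_angle('alt', MiniAngle(30.0), None))   # 0.5235987755982988
print(interpret_angle('alt', None, (30, 0, 0)))        # same value from a sexagesimal tuple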
def create(self, **kwargs):
"""Create a metric."""
url_str = self.base_url
if 'tenant_id' in kwargs:
url_str = url_str + '?tenant_id=%s' % kwargs['tenant_id']
del kwargs['tenant_id']
data = kwargs['jsonbody'] if 'jsonbody' in kwargs else kwargs
body = self.client.create(url=url_str, json=data)
return body | Create a metric. | Below is the instruction that describes the task:
### Input:
Create a metric.
### Response:
def create(self, **kwargs):
"""Create a metric."""
url_str = self.base_url
if 'tenant_id' in kwargs:
url_str = url_str + '?tenant_id=%s' % kwargs['tenant_id']
del kwargs['tenant_id']
data = kwargs['jsonbody'] if 'jsonbody' in kwargs else kwargs
body = self.client.create(url=url_str, json=data)
return body |
def generate_random_sframe(num_rows, column_codes, random_seed = 0):
"""
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
"""
from ..extensions import _generate_random_sframe
assert isinstance(column_codes, str)
assert isinstance(num_rows, int)
assert isinstance(random_seed, int)
X = _generate_random_sframe(num_rows, column_codes, random_seed, False, 0)
X.__materialize__()
return X | Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors. | Below is the instruction that describes the task:
### Input:
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
### Response:
def generate_random_sframe(num_rows, column_codes, random_seed = 0):
"""
Creates a random SFrame with `num_rows` rows and randomly
generated column types determined by `column_codes`. The output
SFrame is deterministic based on `random_seed`.
`column_types` is a string with each character denoting one type
of column, with the output SFrame having one column for each
character in the string. The legend is as follows:
n: numeric column, uniform 0-1 distribution.
N: numeric column, uniform 0-1 distribution, 1% NaNs.
r: numeric column, uniform -100 to 100 distribution.
R: numeric column, uniform -10000 to 10000 distribution, 1% NaNs.
b: binary integer column, uniform distribution
z: integer column with random integers between 1 and 10.
Z: integer column with random integers between 1 and 100.
s: categorical string column with 10 different unique short strings.
S: categorical string column with 100 different unique short strings.
c: categorical column with short string keys and 1000 unique values, triangle distribution.
C: categorical column with short string keys and 100000 unique values, triangle distribution.
x: categorical column with 128bit hex hashes and 1000 unique values.
X: categorical column with 256bit hex hashes and 100000 unique values.
h: column with unique 128bit hex hashes.
H: column with unique 256bit hex hashes.
l: categorical list with between 0 and 10 unique integer elements from a pool of 100 unique values.
L: categorical list with between 0 and 100 unique integer elements from a pool of 1000 unique values.
M: categorical list with between 0 and 10 unique string elements from a pool of 100 unique values.
m: categorical list with between 0 and 100 unique string elements from a pool of 1000 unique values.
v: numeric vector with 10 elements and uniform 0-1 elements.
V: numeric vector with 1000 elements and uniform 0-1 elements.
w: numeric vector with 10 elements and uniform 0-1 elements, 1% NANs.
W: numeric vector with 1000 elements and uniform 0-1 elements, 1% NANs.
d: dictionary with between 0 and 10 string keys from a
pool of 100 unique keys, and random 0-1 values.
D: dictionary with between 0 and 100 string keys from a
pool of 1000 unique keys, and random 0-1 values.
For example::
X = generate_random_sframe(10, 'nnv')
will generate a 10 row SFrame with 2 floating point columns and
one column of length 10 vectors.
"""
from ..extensions import _generate_random_sframe
assert isinstance(column_codes, str)
assert isinstance(num_rows, int)
assert isinstance(random_seed, int)
X = _generate_random_sframe(num_rows, column_codes, random_seed, False, 0)
X.__materialize__()
return X |
def card_auth(self, auth_mode, block_address, key, uid):
"""
Authenticates to use specified block address. Tag must be selected using select_tag(uid) before auth.
auth_mode -- RFID.auth_a or RFID.auth_b
key -- list or tuple with six bytes key
uid -- list or tuple with four bytes tag ID
Returns error state.
"""
buf = []
buf.append(auth_mode)
buf.append(block_address)
for i in range(len(key)):
buf.append(key[i])
for i in range(4):
buf.append(uid[i])
(error, back_data, back_length) = self.card_write(self.mode_auth, buf)
if not (self.dev_read(0x08) & 0x08) != 0:
error = True
if not error:
self.authed = True
return error | Authenticates to use specified block address. Tag must be selected using select_tag(uid) before auth.
auth_mode -- RFID.auth_a or RFID.auth_b
key -- list or tuple with six bytes key
uid -- list or tuple with four bytes tag ID
Returns error state. | Below is the instruction that describes the task:
### Input:
Authenticates to use specified block address. Tag must be selected using select_tag(uid) before auth.
auth_mode -- RFID.auth_a or RFID.auth_b
key -- list or tuple with six bytes key
uid -- list or tuple with four bytes tag ID
Returns error state.
### Response:
def card_auth(self, auth_mode, block_address, key, uid):
"""
Authenticates to use specified block address. Tag must be selected using select_tag(uid) before auth.
auth_mode -- RFID.auth_a or RFID.auth_b
key -- list or tuple with six bytes key
uid -- list or tuple with four bytes tag ID
Returns error state.
"""
buf = []
buf.append(auth_mode)
buf.append(block_address)
for i in range(len(key)):
buf.append(key[i])
for i in range(4):
buf.append(uid[i])
(error, back_data, back_length) = self.card_write(self.mode_auth, buf)
if not (self.dev_read(0x08) & 0x08) != 0:
error = True
if not error:
self.authed = True
return error |
def get_aligned_adjacent_coords(x, y):
'''
returns the eight clockwise adjacent coordinates on a keypad, where each row is vertically aligned.
'''
return [(x-1, y), (x-1, y-1), (x, y-1), (x+1, y-1), (x+1, y), (x+1, y+1), (x, y+1), (x-1, y+1)] | returns the eight clockwise adjacent coordinates on a keypad, where each row is vertically aligned. | Below is the instruction that describes the task:
### Input:
returns the eight clockwise adjacent coordinates on a keypad, where each row is vertically aligned.
### Response:
def get_aligned_adjacent_coords(x, y):
'''
returns the eight clockwise adjacent coordinates on a keypad, where each row is vertically aligned.
'''
return [(x-1, y), (x-1, y-1), (x, y-1), (x+1, y-1), (x+1, y), (x+1, y+1), (x, y+1), (x-1, y+1)] |
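A worked example: for the key at (2, 3) the eight neighbours come back starting from the left-hand key and walking once around it:
x, y = 2, 3
print([(x-1, y), (x-1, y-1), (x, y-1), (x+1, y-1),
       (x+1, y), (x+1, y+1), (x, y+1), (x-1, y+1)])
# [(1, 3), (1, 2), (2, 2), (3, 2), (3, 3), (3, 4), (2, 4), (1, 4)]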
def _string_to_int(self, string):
"""
Convert a string to a number, using the given alphabet.
"""
number = 0
for char in string[::-1]:
number = number * self._alpha_len + self._alphabet.index(char)
return number | Convert a string to a number, using the given alphabet. | Below is the instruction that describes the task:
### Input:
Convert a string to a number, using the given alphabet.
### Response:
def _string_to_int(self, string):
"""
Convert a string to a number, using the given alphabet.
"""
number = 0
for char in string[::-1]:
number = number * self._alpha_len + self._alphabet.index(char)
return number |
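Note that the loop walks the string in reverse, so the first character is the least significant "digit"; a standalone copy of the loop makes that visible with a plain decimal alphabet:
def string_to_int(string, alphabet):
    number = 0
    for char in string[::-1]:
        number = number * len(alphabet) + alphabet.index(char)
    return number

print(string_to_int("12", "0123456789"))   # 21
print(string_to_int("21", "0123456789"))   # 12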
def stream_user(self, listener, run_async=False, timeout=__DEFAULT_STREAM_TIMEOUT, reconnect_async=False, reconnect_async_wait_sec=__DEFAULT_STREAM_RECONNECT_WAIT_SEC):
"""
Streams events that are relevant to the authorized user, i.e. home
timeline and notifications.
"""
return self.__stream('/api/v1/streaming/user', listener, run_async=run_async, timeout=timeout, reconnect_async=reconnect_async, reconnect_async_wait_sec=reconnect_async_wait_sec) | Streams events that are relevant to the authorized user, i.e. home
timeline and notifications. | Below is the instruction that describes the task:
### Input:
Streams events that are relevant to the authorized user, i.e. home
timeline and notifications.
### Response:
def stream_user(self, listener, run_async=False, timeout=__DEFAULT_STREAM_TIMEOUT, reconnect_async=False, reconnect_async_wait_sec=__DEFAULT_STREAM_RECONNECT_WAIT_SEC):
"""
Streams events that are relevant to the authorized user, i.e. home
timeline and notifications.
"""
return self.__stream('/api/v1/streaming/user', listener, run_async=run_async, timeout=timeout, reconnect_async=reconnect_async, reconnect_async_wait_sec=reconnect_async_wait_sec) |
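A hedged usage sketch, assuming Mastodon.py's StreamListener callbacks and placeholder credentials (the base URL and token below are not real):
from mastodon import Mastodon, StreamListener

class HomeListener(StreamListener):
    def on_update(self, status):                 # new status on the home timeline
        print(status['account']['acct'], status['url'])

    def on_notification(self, notification):
        print('notification:', notification['type'])

api = Mastodon(access_token='YOUR_TOKEN', api_base_url='https://mastodon.example')
handle = api.stream_user(HomeListener(), run_async=True)   # non-blocking; returns a handle
# ... later, stop streaming:
handle.close()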
def do_usufy(self, query, **kwargs):
"""
Verifying a usufy query in this platform.
This might be redefined in any class inheriting from Platform.
Args:
-----
query: The element to be searched.
Return:
-------
A list of elements to be appended.
"""
# Trying to interact with the API Wrapper
try:
self.wrapperAPI = TwitterAPIWrapper()
results = self.wrapperAPI.get_user(query)
for r in results:
# Manually appending the URL
aux = {}
aux["type"]="i3visio.uri"
alias=r["value"].split(' - ')[1]
aux["value"]= self.createURL(word=alias, mode="usufy")
aux["attributes"]= []
r["attributes"].append(aux)
# Standard execution
except Exception, e:
return super(Twitter, self).do_usufy(query, **kwargs) | Verifying a usufy query in this platform.
This might be redefined in any class inheriting from Platform.
Args:
-----
query: The element to be searched.
Return:
-------
A list of elements to be appended. | Below is the instruction that describes the task:
### Input:
Verifying a usufy query in this platform.
This might be redefined in any class inheriting from Platform.
Args:
-----
query: The element to be searched.
Return:
-------
A list of elements to be appended.
### Response:
def do_usufy(self, query, **kwargs):
"""
Verifying a usufy query in this platform.
This might be redefined in any class inheriting from Platform.
Args:
-----
query: The element to be searched.
Return:
-------
A list of elements to be appended.
"""
# Trying to interact with the API Wrapper
try:
self.wrapperAPI = TwitterAPIWrapper()
results = self.wrapperAPI.get_user(query)
for r in results:
# Manually appending the URL
aux = {}
aux["type"]="i3visio.uri"
alias=r["value"].split(' - ')[1]
aux["value"]= self.createURL(word=alias, mode="usufy")
aux["attributes"]= []
r["attributes"].append(aux)
# Standard execution
except Exception, e:
return super(Twitter, self).do_usufy(query, **kwargs) |
def tradeBreaksSSE(symbols=None, on_data=None, token='', version=''):
'''Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.
https://iexcloud.io/docs/api/#deep-trades
Args:
symbols (string); Tickers to request
on_data (function): Callback on data
token (string); Access token
version (string); API version
'''
return _runSSE('trade-breaks', symbols, on_data, token, version) | Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.
https://iexcloud.io/docs/api/#deep-trades
Args:
symbols (string); Tickers to request
on_data (function): Callback on data
token (string); Access token
version (string); API version | Below is the instruction that describes the task:
### Input:
Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.
https://iexcloud.io/docs/api/#deep-trades
Args:
symbols (string); Tickers to request
on_data (function): Callback on data
token (string); Access token
version (string); API version
### Response:
def tradeBreaksSSE(symbols=None, on_data=None, token='', version=''):
'''Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.
https://iexcloud.io/docs/api/#deep-trades
Args:
symbols (string); Tickers to request
on_data (function): Callback on data
token (string); Access token
version (string); API version
'''
return _runSSE('trade-breaks', symbols, on_data, token, version) |
def do_file_update_metadata(client, args):
"""Update file metadata"""
client.update_file_metadata(args.uri, filename=args.filename,
description=args.description, mtime=args.mtime,
privacy=args.privacy)
return True | Update file metadata | Below is the instruction that describes the task:
### Input:
Update file metadata
### Response:
def do_file_update_metadata(client, args):
"""Update file metadata"""
client.update_file_metadata(args.uri, filename=args.filename,
description=args.description, mtime=args.mtime,
privacy=args.privacy)
return True |
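A sketch of how the handler might be driven; the client object and the argparse namespace fields are assumptions inferred from the parameters the function reads.
from argparse import Namespace

args = Namespace(uri='/file/abc123', filename='report.pdf',
                 description='Quarterly report', mtime=None, privacy=None)
do_file_update_metadata(client, args)     # client: an already-authenticated API client (assumed)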
def srfcss(code, bodstr, srflen=_default_len_out):
"""
Translate a surface ID code, together with a body string, to the
corresponding surface name. If no such surface name exists,
return a string representation of the surface ID code.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfcss_c.html
:param code: Integer surface ID code to translate to a string.
:type code: int
:param bodstr: Name or ID of body associated with surface.
:type bodstr: str
:param srflen: Available space in output string.
:param srflen: int
:return: String corresponding to surface ID code.
:rtype: str
"""
code = ctypes.c_int(code)
bodstr = stypes.stringToCharP(bodstr)
srfstr = stypes.stringToCharP(srflen)
srflen = ctypes.c_int(srflen)
isname = ctypes.c_int()
libspice.srfcss_c(code, bodstr, srflen, srfstr, ctypes.byref(isname))
return stypes.toPythonString(srfstr), bool(isname.value) | Translate a surface ID code, together with a body string, to the
corresponding surface name. If no such surface name exists,
return a string representation of the surface ID code.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfcss_c.html
:param code: Integer surface ID code to translate to a string.
:type code: int
:param bodstr: Name or ID of body associated with surface.
:type bodstr: str
:param srflen: Available space in output string.
:param srflen: int
:return: String corresponding to surface ID code.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Translate a surface ID code, together with a body string, to the
corresponding surface name. If no such surface name exists,
return a string representation of the surface ID code.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfcss_c.html
:param code: Integer surface ID code to translate to a string.
:type code: int
:param bodstr: Name or ID of body associated with surface.
:type bodstr: str
:param srflen: Available space in output string.
:param srflen: int
:return: String corresponding to surface ID code.
:rtype: str
### Response:
def srfcss(code, bodstr, srflen=_default_len_out):
"""
Translate a surface ID code, together with a body string, to the
corresponding surface name. If no such surface name exists,
return a string representation of the surface ID code.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfcss_c.html
:param code: Integer surface ID code to translate to a string.
:type code: int
:param bodstr: Name or ID of body associated with surface.
:type bodstr: str
:param srflen: Available space in output string.
:param srflen: int
:return: String corresponding to surface ID code.
:rtype: str
"""
code = ctypes.c_int(code)
bodstr = stypes.stringToCharP(bodstr)
srfstr = stypes.stringToCharP(srflen)
srflen = ctypes.c_int(srflen)
isname = ctypes.c_int()
libspice.srfcss_c(code, bodstr, srflen, srfstr, ctypes.byref(isname))
return stypes.toPythonString(srfstr), bool(isname.value) |
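A hedged example of calling the wrapper; the meta-kernel name is an assumption, and the call returns both the surface string and a flag saying whether a real surface name was found.
import spiceypy

spiceypy.furnsh('surface_names.tm')        # hypothetical meta-kernel mapping surface IDs to names
name, isname = spiceypy.srfcss(1, 'MARS')
print(name, isname)                        # a surface name and True, or the ID rendered as text and False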
def grade(adjective, suffix=COMPARATIVE):
""" Returns the comparative or superlative form of the given (inflected) adjective.
"""
b = predicative(adjective)
# groß => großt, schön => schönst
if suffix == SUPERLATIVE and b.endswith(("s", u"ß")):
suffix = suffix[1:]
# große => großere, schönes => schöneres
return adjective[:len(b)] + suffix + adjective[len(b):] | Returns the comparative or superlative form of the given (inflected) adjective. | Below is the the instruction that describes the task:
### Input:
Returns the comparative or superlative form of the given (inflected) adjective.
### Response:
def grade(adjective, suffix=COMPARATIVE):
""" Returns the comparative or superlative form of the given (inflected) adjective.
"""
b = predicative(adjective)
# groß => großt, schön => schönst
if suffix == SUPERLATIVE and b.endswith(("s", u"ß")):
suffix = suffix[1:]
# große => großere, schönes => schöneres
return adjective[:len(b)] + suffix + adjective[len(b):] |
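A small sketch of the call, assuming the pattern.de module layout in which COMPARATIVE and SUPERLATIVE are the suffix constants used above.
from pattern.de import grade, COMPARATIVE, SUPERLATIVE   # assumed import path

print(grade(u'schöne', suffix=COMPARATIVE))   # comparative form of the inflected adjective
print(grade(u'große', suffix=SUPERLATIVE))    # superlative form; the ß ending drops the first letter of the suffix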
def use_token(self, token=None):
"""
Function to use static AUTH_TOKEN as auth for the constructor instead of full login process.
**Parameters:**:
- **token**: Static AUTH_TOKEN
**Returns:** Bool on success or failure. In addition the function will mutate the `cloudgenix.API`
constructor items as needed.
"""
api_logger.info('use_token function:')
# check token is a string.
if not isinstance(token, (text_type, binary_type)):
api_logger.debug('"token" was not a text-style string: {}'.format(text_type(token)))
return False
# Start setup of constructor.
session = self._parent_class.expose_session()
# clear cookies
session.cookies.clear()
# Static Token uses X-Auth-Token header instead of cookies.
self._parent_class.add_headers({
'X-Auth-Token': token
})
# Step 2: Get operator profile for tenant ID and other info.
if self.interactive_update_profile_vars():
# pull tenant detail
if self._parent_class.tenant_id:
# add tenant values to API() object
if self.interactive_tenant_update_vars():
# Step 3: Check for ESP/MSP. If so, ask which tenant this session should be for.
if self._parent_class.is_esp:
# ESP/MSP!
choose_status, chosen_client_id = self.interactive_client_choice()
if choose_status:
# attempt to login as client
clogin_resp = self._parent_class.post.login_clients(chosen_client_id, {})
if clogin_resp.cgx_status:
# login successful, update profile and tenant info
c_profile = self.interactive_update_profile_vars()
t_profile = self.interactive_tenant_update_vars()
if c_profile and t_profile:
# successful full client login.
self._parent_class._password = None
return True
else:
if t_profile:
print("ESP Client Tenant detail retrieval failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
else:
print("ESP Client Login failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
else:
print("ESP Client Choice failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
# successful!
# clear password out of memory
self._parent_class._password = None
return True
else:
print("Tenant detail retrieval failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
else:
# Profile detail retrieval failed
self._parent_class.email = None
self._parent_class._password = None
return False
api_logger.info("EMAIL = %s", self._parent_class.email)
api_logger.info("USER_ID = %s", self._parent_class._user_id)
api_logger.info("USER ROLES = %s", json.dumps(self._parent_class.roles))
api_logger.info("TENANT_ID = %s", self._parent_class.tenant_id)
api_logger.info("TENANT_NAME = %s", self._parent_class.tenant_name)
api_logger.info("TOKEN_SESSION = %s", self._parent_class.token_session)
return True | Function to use static AUTH_TOKEN as auth for the constructor instead of full login process.
**Parameters:**:
- **token**: Static AUTH_TOKEN
**Returns:** Bool on success or failure. In addition the function will mutate the `cloudgenix.API`
constructor items as needed. | Below is the the instruction that describes the task:
### Input:
Function to use static AUTH_TOKEN as auth for the constructor instead of full login process.
**Parameters:**:
- **token**: Static AUTH_TOKEN
**Returns:** Bool on success or failure. In addition the function will mutate the `cloudgenix.API`
constructor items as needed.
### Response:
def use_token(self, token=None):
"""
Function to use static AUTH_TOKEN as auth for the constructor instead of full login process.
**Parameters:**:
- **token**: Static AUTH_TOKEN
**Returns:** Bool on success or failure. In addition the function will mutate the `cloudgenix.API`
constructor items as needed.
"""
api_logger.info('use_token function:')
# check token is a string.
if not isinstance(token, (text_type, binary_type)):
api_logger.debug('"token" was not a text-style string: {}'.format(text_type(token)))
return False
# Start setup of constructor.
session = self._parent_class.expose_session()
# clear cookies
session.cookies.clear()
# Static Token uses X-Auth-Token header instead of cookies.
self._parent_class.add_headers({
'X-Auth-Token': token
})
# Step 2: Get operator profile for tenant ID and other info.
if self.interactive_update_profile_vars():
# pull tenant detail
if self._parent_class.tenant_id:
# add tenant values to API() object
if self.interactive_tenant_update_vars():
# Step 3: Check for ESP/MSP. If so, ask which tenant this session should be for.
if self._parent_class.is_esp:
# ESP/MSP!
choose_status, chosen_client_id = self.interactive_client_choice()
if choose_status:
# attempt to login as client
clogin_resp = self._parent_class.post.login_clients(chosen_client_id, {})
if clogin_resp.cgx_status:
# login successful, update profile and tenant info
c_profile = self.interactive_update_profile_vars()
t_profile = self.interactive_tenant_update_vars()
if c_profile and t_profile:
# successful full client login.
self._parent_class._password = None
return True
else:
if t_profile:
print("ESP Client Tenant detail retrieval failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
else:
print("ESP Client Login failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
else:
print("ESP Client Choice failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
# successful!
# clear password out of memory
self._parent_class._password = None
return True
else:
print("Tenant detail retrieval failed.")
# clear password out of memory
self._parent_class.email = None
self._parent_class._password = None
return False
else:
# Profile detail retrieval failed
self._parent_class.email = None
self._parent_class._password = None
return False
api_logger.info("EMAIL = %s", self._parent_class.email)
api_logger.info("USER_ID = %s", self._parent_class._user_id)
api_logger.info("USER ROLES = %s", json.dumps(self._parent_class.roles))
api_logger.info("TENANT_ID = %s", self._parent_class.tenant_id)
api_logger.info("TENANT_NAME = %s", self._parent_class.tenant_name)
api_logger.info("TOKEN_SESSION = %s", self._parent_class.token_session)
return True |
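A usage sketch under the assumption that this method is exposed on the interactive helper of a cloudgenix SDK instance; the attribute path and token string are assumptions.
import cloudgenix

sdk = cloudgenix.API()
if sdk.interactive.use_token('STATIC_AUTH_TOKEN'):   # placeholder token
    print('Logged in to tenant:', sdk.tenant_name)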
def system_content(self):
r"""A property that returns the content that is rendered
regardless of the :attr:`Message.type`.
In the case of :attr:`MessageType.default`\, this just returns the
regular :attr:`Message.content`. Otherwise this returns an English
message denoting the contents of the system message.
"""
if self.type is MessageType.default:
return self.content
if self.type is MessageType.pins_add:
return '{0.name} pinned a message to this channel.'.format(self.author)
if self.type is MessageType.recipient_add:
return '{0.name} added {1.name} to the group.'.format(self.author, self.mentions[0])
if self.type is MessageType.recipient_remove:
return '{0.name} removed {1.name} from the group.'.format(self.author, self.mentions[0])
if self.type is MessageType.channel_name_change:
return '{0.author.name} changed the channel name: {0.content}'.format(self)
if self.type is MessageType.channel_icon_change:
return '{0.author.name} changed the channel icon.'.format(self)
if self.type is MessageType.new_member:
formats = [
"{0} just joined the server - glhf!",
"{0} just joined. Everyone, look busy!",
"{0} just joined. Can I get a heal?",
"{0} joined your party.",
"{0} joined. You must construct additional pylons.",
"Ermagherd. {0} is here.",
"Welcome, {0}. Stay awhile and listen.",
"Welcome, {0}. We were expecting you ( ͡° ͜ʖ ͡°)",
"Welcome, {0}. We hope you brought pizza.",
"Welcome {0}. Leave your weapons by the door.",
"A wild {0} appeared.",
"Swoooosh. {0} just landed.",
"Brace yourselves. {0} just joined the server.",
"{0} just joined... or did they?",
"{0} just arrived. Seems OP - please nerf.",
"{0} just slid into the server.",
"A {0} has spawned in the server.",
"Big {0} showed up!",
"Where’s {0}? In the server!",
"{0} hopped into the server. Kangaroo!!",
"{0} just showed up. Hold my beer.",
"Challenger approaching - {0} has appeared!",
"It's a bird! It's a plane! Nevermind, it's just {0}.",
"It's {0}! Praise the sun! \\[T]/",
"Never gonna give {0} up. Never gonna let {0} down.",
"{0} has joined the battle bus.",
"Cheers, love! {0}'s here!",
"Hey! Listen! {0} has joined!",
"We've been expecting you {0}",
"It's dangerous to go alone, take {0}!",
"{0} has joined the server! It's super effective!",
"Cheers, love! {0} is here!",
"{0} is here, as the prophecy foretold.",
"{0} has arrived. Party's over.",
"Ready player {0}",
"{0} is here to kick butt and chew bubblegum. And {0} is all out of gum.",
"Hello. Is it {0} you're looking for?",
"{0} has joined. Stay a while and listen!",
"Roses are red, violets are blue, {0} joined this server with you",
]
# manually reconstruct the epoch with millisecond precision, because
# datetime.datetime.timestamp() doesn't return the exact posix
# timestamp with the precision that we need
created_at_ms = int((self.created_at - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
return formats[created_at_ms % len(formats)].format(self.author.name)
if self.type is MessageType.call:
# we're at the call message type now, which is a bit more complicated.
# we can make the assumption that Message.channel is a PrivateChannel
# with the type ChannelType.group or ChannelType.private
call_ended = self.call.ended_timestamp is not None
if self.channel.me in self.call.participants:
return '{0.author.name} started a call.'.format(self)
elif call_ended:
return 'You missed a call from {0.author.name}'.format(self)
else:
return '{0.author.name} started a call \N{EM DASH} Join the call.'.format(self) | r"""A property that returns the content that is rendered
regardless of the :attr:`Message.type`.
In the case of :attr:`MessageType.default`\, this just returns the
regular :attr:`Message.content`. Otherwise this returns an English
message denoting the contents of the system message. | Below is the the instruction that describes the task:
### Input:
r"""A property that returns the content that is rendered
regardless of the :attr:`Message.type`.
In the case of :attr:`MessageType.default`\, this just returns the
regular :attr:`Message.content`. Otherwise this returns an English
message denoting the contents of the system message.
### Response:
def system_content(self):
r"""A property that returns the content that is rendered
regardless of the :attr:`Message.type`.
In the case of :attr:`MessageType.default`\, this just returns the
regular :attr:`Message.content`. Otherwise this returns an English
message denoting the contents of the system message.
"""
if self.type is MessageType.default:
return self.content
if self.type is MessageType.pins_add:
return '{0.name} pinned a message to this channel.'.format(self.author)
if self.type is MessageType.recipient_add:
return '{0.name} added {1.name} to the group.'.format(self.author, self.mentions[0])
if self.type is MessageType.recipient_remove:
return '{0.name} removed {1.name} from the group.'.format(self.author, self.mentions[0])
if self.type is MessageType.channel_name_change:
return '{0.author.name} changed the channel name: {0.content}'.format(self)
if self.type is MessageType.channel_icon_change:
return '{0.author.name} changed the channel icon.'.format(self)
if self.type is MessageType.new_member:
formats = [
"{0} just joined the server - glhf!",
"{0} just joined. Everyone, look busy!",
"{0} just joined. Can I get a heal?",
"{0} joined your party.",
"{0} joined. You must construct additional pylons.",
"Ermagherd. {0} is here.",
"Welcome, {0}. Stay awhile and listen.",
"Welcome, {0}. We were expecting you ( ͡° ͜ʖ ͡°)",
"Welcome, {0}. We hope you brought pizza.",
"Welcome {0}. Leave your weapons by the door.",
"A wild {0} appeared.",
"Swoooosh. {0} just landed.",
"Brace yourselves. {0} just joined the server.",
"{0} just joined... or did they?",
"{0} just arrived. Seems OP - please nerf.",
"{0} just slid into the server.",
"A {0} has spawned in the server.",
"Big {0} showed up!",
"Where’s {0}? In the server!",
"{0} hopped into the server. Kangaroo!!",
"{0} just showed up. Hold my beer.",
"Challenger approaching - {0} has appeared!",
"It's a bird! It's a plane! Nevermind, it's just {0}.",
"It's {0}! Praise the sun! \\[T]/",
"Never gonna give {0} up. Never gonna let {0} down.",
"{0} has joined the battle bus.",
"Cheers, love! {0}'s here!",
"Hey! Listen! {0} has joined!",
"We've been expecting you {0}",
"It's dangerous to go alone, take {0}!",
"{0} has joined the server! It's super effective!",
"Cheers, love! {0} is here!",
"{0} is here, as the prophecy foretold.",
"{0} has arrived. Party's over.",
"Ready player {0}",
"{0} is here to kick butt and chew bubblegum. And {0} is all out of gum.",
"Hello. Is it {0} you're looking for?",
"{0} has joined. Stay a while and listen!",
"Roses are red, violets are blue, {0} joined this server with you",
]
# manually reconstruct the epoch with millisecond precision, because
# datetime.datetime.timestamp() doesn't return the exact posix
# timestamp with the precision that we need
created_at_ms = int((self.created_at - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
return formats[created_at_ms % len(formats)].format(self.author.name)
if self.type is MessageType.call:
# we're at the call message type now, which is a bit more complicated.
# we can make the assumption that Message.channel is a PrivateChannel
# with the type ChannelType.group or ChannelType.private
call_ended = self.call.ended_timestamp is not None
if self.channel.me in self.call.participants:
return '{0.author.name} started a call.'.format(self)
elif call_ended:
return 'You missed a call from {0.author.name}'.format(self)
else:
return '{0.author.name} started a call \N{EM DASH} Join the call.'.format(self) |
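A hedged sketch of reading the property inside a discord.py event handler; the client object is assumed to be set up elsewhere.
@client.event
async def on_message(message):
    # system_content yields readable text even for pins, member joins, and call notices
    print(message.author, message.system_content)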
def build(self, plot):
"""
Build the guides
Parameters
----------
plot : ggplot
ggplot object being drawn
Returns
-------
box : matplotlib.offsetbox.Offsetbox | None
A box that contains all the guides for the plot.
If there are no guides, **None** is returned.
"""
get_property = plot.theme.themeables.property
# by default, guide boxes are vertically aligned
with suppress(KeyError):
self.box_direction = get_property('legend_box')
if self.box_direction is None:
self.box_direction = 'vertical'
with suppress(KeyError):
self.position = get_property('legend_position')
if self.position == 'none':
return
# justification of legend boxes
with suppress(KeyError):
self.box_align = get_property('legend_box_just')
if self.box_align is None:
if self.position in {'left', 'right'}:
tmp = 'left'
else:
tmp = 'center'
self.box_align = tmp
with suppress(KeyError):
self.box_margin = get_property('legend_box_margin')
if self.box_margin is None:
self.box_margin = 10
with suppress(KeyError):
self.spacing = get_property('legend_spacing')
if self.spacing is None:
self.spacing = 10
gdefs = self.train(plot)
if not gdefs:
return
gdefs = self.merge(gdefs)
gdefs = self.create_geoms(gdefs, plot)
if not gdefs:
return
gboxes = self.draw(gdefs, plot.theme)
bigbox = self.assemble(gboxes, gdefs, plot.theme)
return bigbox | Build the guides
Parameters
----------
plot : ggplot
ggplot object being drawn
Returns
-------
box : matplotlib.offsetbox.Offsetbox | None
A box that contains all the guides for the plot.
If there are no guides, **None** is returned. | Below is the the instruction that describes the task:
### Input:
Build the guides
Parameters
----------
plot : ggplot
ggplot object being drawn
Returns
-------
box : matplotlib.offsetbox.Offsetbox | None
A box that contains all the guides for the plot.
If there are no guides, **None** is returned.
### Response:
def build(self, plot):
"""
Build the guides
Parameters
----------
plot : ggplot
ggplot object being drawn
Returns
-------
box : matplotlib.offsetbox.Offsetbox | None
A box that contains all the guides for the plot.
If there are no guides, **None** is returned.
"""
get_property = plot.theme.themeables.property
# by default, guide boxes are vertically aligned
with suppress(KeyError):
self.box_direction = get_property('legend_box')
if self.box_direction is None:
self.box_direction = 'vertical'
with suppress(KeyError):
self.position = get_property('legend_position')
if self.position == 'none':
return
# justification of legend boxes
with suppress(KeyError):
self.box_align = get_property('legend_box_just')
if self.box_align is None:
if self.position in {'left', 'right'}:
tmp = 'left'
else:
tmp = 'center'
self.box_align = tmp
with suppress(KeyError):
self.box_margin = get_property('legend_box_margin')
if self.box_margin is None:
self.box_margin = 10
with suppress(KeyError):
self.spacing = get_property('legend_spacing')
if self.spacing is None:
self.spacing = 10
gdefs = self.train(plot)
if not gdefs:
return
gdefs = self.merge(gdefs)
gdefs = self.create_geoms(gdefs, plot)
if not gdefs:
return
gboxes = self.draw(gdefs, plot.theme)
bigbox = self.assemble(gboxes, gdefs, plot.theme)
return bigbox |
def attach_video(self, video: typing.Union[InputMediaVideo, base.InputFile],
thumb: typing.Union[base.InputFile, base.String] = None,
caption: base.String = None,
width: base.Integer = None, height: base.Integer = None, duration: base.Integer = None):
"""
Attach video
:param video:
:param caption:
:param width:
:param height:
:param duration:
"""
if not isinstance(video, InputMedia):
video = InputMediaVideo(media=video, thumb=thumb, caption=caption,
width=width, height=height, duration=duration)
self.attach(video) | Attach video
:param video:
:param caption:
:param width:
:param height:
:param duration: | Below is the the instruction that describes the task:
### Input:
Attach video
:param video:
:param caption:
:param width:
:param height:
:param duration:
### Response:
def attach_video(self, video: typing.Union[InputMediaVideo, base.InputFile],
thumb: typing.Union[base.InputFile, base.String] = None,
caption: base.String = None,
width: base.Integer = None, height: base.Integer = None, duration: base.Integer = None):
"""
Attach video
:param video:
:param caption:
:param width:
:param height:
:param duration:
"""
if not isinstance(video, InputMedia):
video = InputMediaVideo(media=video, thumb=thumb, caption=caption,
width=width, height=height, duration=duration)
self.attach(video) |
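A sketch assuming an aiogram-style MediaGroup container that exposes this method; the class name and the file reference are assumptions.
from aiogram.types import MediaGroup      # assumed container class

album = MediaGroup()
album.attach_video('clip.mp4', caption='Demo clip', width=1280, height=720, duration=12)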
def _ref_bus_angle_constraint(self, buses, Va, xmin, xmax):
""" Adds a constraint on the reference bus angles.
"""
refs = [bus._i for bus in buses if bus.type == REFERENCE]
Varefs = array([b.v_angle for b in buses if b.type == REFERENCE])
xmin[Va.i1 - 1 + refs] = Varefs
xmax[Va.iN - 1 + refs] = Varefs
return xmin, xmax | Adds a constraint on the reference bus angles. | Below is the the instruction that describes the task:
### Input:
Adds a constraint on the reference bus angles.
### Response:
def _ref_bus_angle_constraint(self, buses, Va, xmin, xmax):
""" Adds a constraint on the reference bus angles.
"""
refs = [bus._i for bus in buses if bus.type == REFERENCE]
Varefs = array([b.v_angle for b in buses if b.type == REFERENCE])
xmin[Va.i1 - 1 + refs] = Varefs
xmax[Va.iN - 1 + refs] = Varefs
return xmin, xmax |
def poke(url, accesskey=None, secretkey=None, __method__='GET', **req_args):
"""
Poke the Rancher API. Returns a Rod object instance. Central starting
point for the cattleprod package.
:param url: The full Rancher URL to the API endpoint.
:param accesskey: The rancher access key, optional.
:param secretkey: The rancher secret key, optional.
:param __method__: Internal method, don't use!
:param req_args: Arguments which are passed directly to the requests API.
        The accesskey / secretkey values take precedence over simple auth
        objects defined here.
:return: A Rod instance, or anything that the URL returns on a GET request
"""
if accesskey and secretkey:
req_args['auth'] = (accesskey, secretkey)
tmp = requests.request(__method__.lower(), url, **req_args)
tmp.raise_for_status()
if tmp.headers.get('Content-Type').find("json") != -1:
rv = _convert_to_rod(tmp.json(), **req_args)
else:
rv = tmp.content
return rv | Poke the Rancher API. Returns a Rod object instance. Central starting
point for the cattleprod package.
:param url: The full Rancher URL to the API endpoint.
:param accesskey: The rancher access key, optional.
:param secretkey: The rancher secret key, optional.
:param __method__: Internal method, don't use!
:param req_args: Arguments which are passed directly to the requests API.
        The accesskey / secretkey values take precedence over simple auth
        objects defined here.
:return: A Rod instance, or anything that the URL returns on a GET request | Below is the the instruction that describes the task:
### Input:
Poke the Rancher API. Returns a Rod object instance. Central starting
point for the cattleprod package.
:param url: The full Rancher URL to the API endpoint.
:param accesskey: The rancher access key, optional.
:param secretkey: The rancher secret key, optional.
:param __method__: Internal method, don't use!
:param req_args: Arguments which are passed directly to the requests API.
    The accesskey / secretkey values take precedence over simple auth
    objects defined here.
:return: A Rod instance, or anything that the URL returns on a GET request
### Response:
def poke(url, accesskey=None, secretkey=None, __method__='GET', **req_args):
"""
Poke the Rancher API. Returns a Rod object instance. Central starting
point for the cattleprod package.
:param url: The full Rancher URL to the API endpoint.
:param accesskey: The rancher access key, optional.
:param secretkey: The rancher secret key, optional.
:param __method__: Internal method, don't use!
:param req_args: Arguments which are passed directly to the requests API.
The accesskey / secretkey values have precedence before simple auth
objects defined in here.
:return: A Rod instance, or anything that the URL returns on a GET request
"""
if accesskey and secretkey:
req_args['auth'] = (accesskey, secretkey)
tmp = requests.request(__method__.lower(), url, **req_args)
tmp.raise_for_status()
if tmp.headers.get('Content-Type').find("json") != -1:
rv = _convert_to_rod(tmp.json(), **req_args)
else:
rv = tmp.content
return rv |
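A brief usage sketch; the Rancher endpoint and keys are placeholders, and attribute access on the returned Rod depends on the API payload.
rod = poke('http://rancher.local:8080/v1',
           accesskey='ACCESS_KEY', secretkey='SECRET_KEY')   # placeholder credentials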
def _agent_import_failed(trace):
"""Returns dummy agent class for if PyTorch etc. is not installed."""
class _AgentImportFailed(Trainer):
_name = "AgentImportFailed"
_default_config = with_common_config({})
def _setup(self, config):
raise ImportError(trace)
return _AgentImportFailed | Returns dummy agent class for if PyTorch etc. is not installed. | Below is the the instruction that describes the task:
### Input:
Returns dummy agent class for if PyTorch etc. is not installed.
### Response:
def _agent_import_failed(trace):
"""Returns dummy agent class for if PyTorch etc. is not installed."""
class _AgentImportFailed(Trainer):
_name = "AgentImportFailed"
_default_config = with_common_config({})
def _setup(self, config):
raise ImportError(trace)
return _AgentImportFailed |
def norm(self, valu):
'''
Normalize the value for a given type.
Args:
valu (obj): The value to normalize.
Returns:
((obj,dict)): The normalized valu, info tuple.
Notes:
The info dictionary uses the following key conventions:
subs (dict): The normalized sub-fields as name: valu entries.
'''
func = self._type_norms.get(type(valu))
if func is None:
raise s_exc.NoSuchFunc(name=self.name, mesg='no norm for type: %r' % (type(valu),))
return func(valu) | Normalize the value for a given type.
Args:
valu (obj): The value to normalize.
Returns:
((obj,dict)): The normalized valu, info tuple.
Notes:
The info dictionary uses the following key conventions:
subs (dict): The normalized sub-fields as name: valu entries. | Below is the the instruction that describes the task:
### Input:
Normalize the value for a given type.
Args:
valu (obj): The value to normalize.
Returns:
((obj,dict)): The normalized valu, info tuple.
Notes:
The info dictionary uses the following key conventions:
subs (dict): The normalized sub-fields as name: valu entries.
### Response:
def norm(self, valu):
'''
Normalize the value for a given type.
Args:
valu (obj): The value to normalize.
Returns:
((obj,dict)): The normalized valu, info tuple.
Notes:
The info dictionary uses the following key conventions:
subs (dict): The normalized sub-fields as name: valu entries.
'''
func = self._type_norms.get(type(valu))
if func is None:
raise s_exc.NoSuchFunc(name=self.name, mesg='no norm for type: %r' % (type(valu),))
return func(valu) |
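A sketch of normalizing a value through a type object, assuming a Synapse-style type instance obtained from a data model; the variable name is hypothetical.
valu, info = ipv4_type.norm('1.2.3.4')    # ipv4_type: an assumed type instance
subs = info.get('subs', {})               # normalized sub-fields, per the docstring convention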
def get_objective_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the objective query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_objective_query()`` is ``true``.*
"""
if not self.supports_objective_query():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ObjectiveQuerySession(proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | Gets the ``OsidSession`` associated with the objective query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_objective_query()`` is ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the objective query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_objective_query()`` is ``true``.*
### Response:
def get_objective_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the objective query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_objective_query()`` is ``true``.*
"""
if not self.supports_objective_query():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ObjectiveQuerySession(proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session |
def pop(self, symbol):
"""
Delete current metadata of `symbol`
Parameters
----------
symbol : `str`
symbol name to delete
Returns
-------
Deleted metadata
"""
last_metadata = self.find_one({'symbol': symbol}, sort=[('start_time', pymongo.DESCENDING)])
if last_metadata is None:
raise NoDataFoundException('No metadata found for symbol {}'.format(symbol))
self.find_one_and_delete({'symbol': symbol}, sort=[('start_time', pymongo.DESCENDING)])
mongo_retry(self.find_one_and_update)({'symbol': symbol}, {'$unset': {'end_time': ''}},
sort=[('start_time', pymongo.DESCENDING)])
return last_metadata | Delete current metadata of `symbol`
Parameters
----------
symbol : `str`
symbol name to delete
Returns
-------
Deleted metadata | Below is the the instruction that describes the task:
### Input:
Delete current metadata of `symbol`
Parameters
----------
symbol : `str`
symbol name to delete
Returns
-------
Deleted metadata
### Response:
def pop(self, symbol):
"""
Delete current metadata of `symbol`
Parameters
----------
symbol : `str`
symbol name to delete
Returns
-------
Deleted metadata
"""
last_metadata = self.find_one({'symbol': symbol}, sort=[('start_time', pymongo.DESCENDING)])
if last_metadata is None:
raise NoDataFoundException('No metadata found for symbol {}'.format(symbol))
self.find_one_and_delete({'symbol': symbol}, sort=[('start_time', pymongo.DESCENDING)])
mongo_retry(self.find_one_and_update)({'symbol': symbol}, {'$unset': {'end_time': ''}},
sort=[('start_time', pymongo.DESCENDING)])
return last_metadata |
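A sketch against an Arctic-style metadata library; the library handle is assumed to exist already.
removed = metadata_library.pop('EURUSD')  # metadata_library: an assumed metadata store instance
print(removed)                            # the metadata document that was current before deletion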
def create(self, term, options):
"""Create a monitor using passed configuration."""
if not self._state:
raise InvalidState("State was not properly obtained from the app")
options['action'] = 'CREATE'
payload = self._build_payload(term, options)
url = self.ALERTS_CREATE_URL.format(requestX=self._state[3])
self._log.debug("Creating alert using: %s" % url)
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError("Failed to create monitor: %s"
% response.content)
if options.get('exact', False):
term = "\"%s\"" % term
return self.list(term) | Create a monitor using passed configuration. | Below is the the instruction that describes the task:
### Input:
Create a monitor using passed configuration.
### Response:
def create(self, term, options):
"""Create a monitor using passed configuration."""
if not self._state:
raise InvalidState("State was not properly obtained from the app")
options['action'] = 'CREATE'
payload = self._build_payload(term, options)
url = self.ALERTS_CREATE_URL.format(requestX=self._state[3])
self._log.debug("Creating alert using: %s" % url)
params = json.dumps(payload, separators=(',', ':'))
data = {'params': params}
response = self._session.post(url, data=data, headers=self.HEADERS)
if response.status_code != 200:
raise ActionError("Failed to create monitor: %s"
% response.content)
if options.get('exact', False):
term = "\"%s\"" % term
return self.list(term) |
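A hedged example of driving monitor creation; the owning client object and any option keys beyond 'exact' are assumptions.
options = {'exact': True}                                       # quote the term, per the method body
monitors = alerts_client.create('python packaging', options)   # alerts_client: an assumed instance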
def parse(data):
"""
Parse the given ChangeLog data into a list of Hashes.
@param [String] data File data from the ChangeLog.md
@return [Array<Hash>] Parsed data, e.g. [{ 'version' => ..., 'url' => ..., 'date' => ..., 'content' => ...}, ...]
"""
sections = re.compile("^## .+$", re.MULTILINE).split(data)
headings = re.findall("^## .+?$", data, re.MULTILINE)
sections.pop(0)
parsed = []
def func(h, s):
p = parse_heading(h)
p["content"] = s
parsed.append(p)
list(map(func, headings, sections))
return parsed | Parse the given ChangeLog data into a list of Hashes.
@param [String] data File data from the ChangeLog.md
@return [Array<Hash>] Parsed data, e.g. [{ 'version' => ..., 'url' => ..., 'date' => ..., 'content' => ...}, ...] | Below is the the instruction that describes the task:
### Input:
Parse the given ChangeLog data into a list of Hashes.
@param [String] data File data from the ChangeLog.md
@return [Array<Hash>] Parsed data, e.g. [{ 'version' => ..., 'url' => ..., 'date' => ..., 'content' => ...}, ...]
### Response:
def parse(data):
"""
Parse the given ChangeLog data into a list of Hashes.
@param [String] data File data from the ChangeLog.md
@return [Array<Hash>] Parsed data, e.g. [{ 'version' => ..., 'url' => ..., 'date' => ..., 'content' => ...}, ...]
"""
sections = re.compile("^## .+$", re.MULTILINE).split(data)
headings = re.findall("^## .+?$", data, re.MULTILINE)
sections.pop(0)
parsed = []
def func(h, s):
p = parse_heading(h)
p["content"] = s
parsed.append(p)
list(map(func, headings, sections))
return parsed |
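A small worked example, assuming the module's parse_heading() helper extracts the version/url/date keys named in the docstring.
changelog = "## [1.1.0] - 2024-01-01\n- Added things\n## [1.0.0] - 2023-12-01\n- Initial release\n"
for section in parse(changelog):
    print(section.get('version'), repr(section['content']))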
def update_pos(pos_dict, start_key, nbr=2):
"Update the `pos_dict` by moving all positions after `start_key` by `nbr`."
for key,idx in pos_dict.items():
if str.lower(key) >= str.lower(start_key): pos_dict[key] += nbr
return pos_dict | Update the `pos_dict` by moving all positions after `start_key` by `nbr`. | Below is the the instruction that describes the task:
### Input:
Update the `pos_dict` by moving all positions after `start_key` by `nbr`.
### Response:
def update_pos(pos_dict, start_key, nbr=2):
"Update the `pos_dict` by moving all positions after `start_key` by `nbr`."
for key,idx in pos_dict.items():
if str.lower(key) >= str.lower(start_key): pos_dict[key] += nbr
return pos_dict |
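A short worked example: keys that compare at or after 'beta' (case-insensitively) are shifted by the default of 2.
positions = {'alpha': 1, 'beta': 4, 'gamma': 7}
print(update_pos(positions, 'beta'))      # {'alpha': 1, 'beta': 6, 'gamma': 9}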
def _threshold_nans(data, tolerate_nans):
"""Thresholds data based on proportion of subjects with NaNs
Takes in data and a threshold value (float between 0.0 and 1.0) determining
the permissible proportion of subjects with non-NaN values. For example, if
threshold=.8, any voxel where >= 80% of subjects have non-NaN values will
be left unchanged, while any voxel with < 80% non-NaN values will be
assigned all NaN values and included in the nan_mask output. Note that the
output data has not been masked and will be same shape as the input data,
but may have a different number of NaNs based on the threshold.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data
tolerate_nans : bool or float (0.0 <= threshold <= 1.0)
Proportion of subjects with non-NaN values required to keep voxel
Returns
-------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data with adjusted NaNs
nan_mask : ndarray (n_voxels,)
Boolean mask array of voxels with too many NaNs based on threshold
"""
nans = np.all(np.any(np.isnan(data), axis=0), axis=1)
# Check tolerate_nans input and use either mean/nanmean and exclude voxels
if tolerate_nans is True:
logger.info("ISC computation will tolerate all NaNs when averaging")
elif type(tolerate_nans) is float:
if not 0.0 <= tolerate_nans <= 1.0:
raise ValueError("If threshold to tolerate NaNs is a float, "
"it must be between 0.0 and 1.0; got {0}".format(
tolerate_nans))
nans += ~(np.sum(~np.any(np.isnan(data), axis=0), axis=1) >=
data.shape[-1] * tolerate_nans)
logger.info("ISC computation will tolerate voxels with at least "
"{0} non-NaN values: {1} voxels do not meet "
"threshold".format(tolerate_nans,
np.sum(nans)))
else:
logger.info("ISC computation will not tolerate NaNs when averaging")
mask = ~nans
data = data[:, mask, :]
return data, mask | Thresholds data based on proportion of subjects with NaNs
Takes in data and a threshold value (float between 0.0 and 1.0) determining
the permissible proportion of subjects with non-NaN values. For example, if
threshold=.8, any voxel where >= 80% of subjects have non-NaN values will
be left unchanged, while any voxel with < 80% non-NaN values will be
assigned all NaN values and included in the nan_mask output. Note that the
output data has not been masked and will be same shape as the input data,
but may have a different number of NaNs based on the threshold.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data
tolerate_nans : bool or float (0.0 <= threshold <= 1.0)
Proportion of subjects with non-NaN values required to keep voxel
Returns
-------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data with adjusted NaNs
nan_mask : ndarray (n_voxels,)
Boolean mask array of voxels with too many NaNs based on threshold | Below is the the instruction that describes the task:
### Input:
Thresholds data based on proportion of subjects with NaNs
Takes in data and a threshold value (float between 0.0 and 1.0) determining
the permissible proportion of subjects with non-NaN values. For example, if
threshold=.8, any voxel where >= 80% of subjects have non-NaN values will
be left unchanged, while any voxel with < 80% non-NaN values will be
assigned all NaN values and included in the nan_mask output. Note that the
output data has not been masked and will be same shape as the input data,
but may have a different number of NaNs based on the threshold.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data
tolerate_nans : bool or float (0.0 <= threshold <= 1.0)
Proportion of subjects with non-NaN values required to keep voxel
Returns
-------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data with adjusted NaNs
nan_mask : ndarray (n_voxels,)
Boolean mask array of voxels with too many NaNs based on threshold
### Response:
def _threshold_nans(data, tolerate_nans):
"""Thresholds data based on proportion of subjects with NaNs
Takes in data and a threshold value (float between 0.0 and 1.0) determining
the permissible proportion of subjects with non-NaN values. For example, if
threshold=.8, any voxel where >= 80% of subjects have non-NaN values will
be left unchanged, while any voxel with < 80% non-NaN values will be
assigned all NaN values and included in the nan_mask output. Note that the
output data has not been masked and will be same shape as the input data,
but may have a different number of NaNs based on the threshold.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data
tolerate_nans : bool or float (0.0 <= threshold <= 1.0)
Proportion of subjects with non-NaN values required to keep voxel
Returns
-------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data with adjusted NaNs
nan_mask : ndarray (n_voxels,)
Boolean mask array of voxels with too many NaNs based on threshold
"""
nans = np.all(np.any(np.isnan(data), axis=0), axis=1)
# Check tolerate_nans input and use either mean/nanmean and exclude voxels
if tolerate_nans is True:
logger.info("ISC computation will tolerate all NaNs when averaging")
elif type(tolerate_nans) is float:
if not 0.0 <= tolerate_nans <= 1.0:
raise ValueError("If threshold to tolerate NaNs is a float, "
"it must be between 0.0 and 1.0; got {0}".format(
tolerate_nans))
nans += ~(np.sum(~np.any(np.isnan(data), axis=0), axis=1) >=
data.shape[-1] * tolerate_nans)
logger.info("ISC computation will tolerate voxels with at least "
"{0} non-NaN values: {1} voxels do not meet "
"threshold".format(tolerate_nans,
np.sum(nans)))
else:
logger.info("ISC computation will not tolerate NaNs when averaging")
mask = ~nans
data = data[:, mask, :]
return data, mask |
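A small synthetic check, assuming the helper is importable from the module above (it relies on that module's logger); with a 0.5 threshold, a voxel survives only if at least half the subjects have no NaNs in it.
import numpy as np

data = np.random.randn(10, 3, 4)          # 10 TRs, 3 voxels, 4 subjects
data[:, 1, :3] = np.nan                   # voxel 1 has NaNs for 3 of the 4 subjects
kept, mask = _threshold_nans(data, tolerate_nans=0.5)
print(mask)                               # [ True False  True]: voxel 1 is dropped
print(kept.shape)                         # (10, 2, 4)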
def scrape(language, method, word, *args, **kwargs):
''' Uses custom scrapers and calls provided method. '''
scraper = Scrape(language, word)
if hasattr(scraper, method):
function = getattr(scraper, method)
if callable(function):
return function(*args, **kwargs)
else:
raise NotImplementedError('The method ' + method + '() is not implemented so far.') | Uses custom scrapers and calls provided method. | Below is the the instruction that describes the task:
### Input:
Uses custom scrapers and calls provided method.
### Response:
def scrape(language, method, word, *args, **kwargs):
''' Uses custom scrapers and calls provided method. '''
scraper = Scrape(language, word)
if hasattr(scraper, method):
function = getattr(scraper, method)
if callable(function):
return function(*args, **kwargs)
else:
raise NotImplementedError('The method ' + method + '() is not implemented so far.') |
def readFile(cls, filepath):
"""Try different encoding to open a file in readonly mode"""
for mode in ("utf-8", 'gbk', 'cp1252', 'windows-1252', 'latin-1'):
try:
with open(filepath, mode='r', encoding=mode) as f:
content = f.read()
cit.info('以 {} 格式打开文件'.format(mode))
return content
except UnicodeDecodeError:
cit.warn('打开文件:尝试 {} 格式失败'.format(mode))
return None | Try different encoding to open a file in readonly mode | Below is the the instruction that describes the task:
### Input:
Try different encoding to open a file in readonly mode
### Response:
def readFile(cls, filepath):
"""Try different encoding to open a file in readonly mode"""
for mode in ("utf-8", 'gbk', 'cp1252', 'windows-1252', 'latin-1'):
try:
with open(filepath, mode='r', encoding=mode) as f:
content = f.read()
cit.info('以 {} 格式打开文件'.format(mode))
return content
except UnicodeDecodeError:
cit.warn('打开文件:尝试 {} 格式失败'.format(mode))
return None |
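A usage sketch; the method reads like a classmethod on a file-helper class, so the owning class name here is an assumption.
text = FileHelper.readFile('notes.txt')   # FileHelper: a hypothetical owner class
if text is None:
    print('No known encoding could decode the file.')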
def _identify_os(self, msg):
'''
Using the prefix of the syslog message,
we are able to identify the operating system and then continue parsing.
'''
ret = []
for dev_os, data in self.compiled_prefixes.items():
# TODO Should we prevent attepmting to determine the OS for the blacklisted?
# [mircea] I think its good from a logging perspective to know at least that
# that the server found the matching and it tells that it won't be processed
# further. Later, we could potentially add an option to control this.
log.debug('Matching under %s', dev_os)
msg_dict = self._identify_prefix(msg, data)
if msg_dict:
log.debug('Adding %s to list of matched OS', dev_os)
ret.append((dev_os, msg_dict))
else:
log.debug('No match found for %s', dev_os)
if not ret:
log.debug('Not matched any OS, returning original log')
msg_dict = {'message': msg}
ret.append((None, msg_dict))
return ret | Using the prefix of the syslog message,
we are able to identify the operating system and then continue parsing. | Below is the the instruction that describes the task:
### Input:
Using the prefix of the syslog message,
we are able to identify the operating system and then continue parsing.
### Response:
def _identify_os(self, msg):
'''
Using the prefix of the syslog message,
we are able to identify the operating system and then continue parsing.
'''
ret = []
for dev_os, data in self.compiled_prefixes.items():
# TODO Should we prevent attepmting to determine the OS for the blacklisted?
# [mircea] I think its good from a logging perspective to know at least that
# that the server found the matching and it tells that it won't be processed
# further. Later, we could potentially add an option to control this.
log.debug('Matching under %s', dev_os)
msg_dict = self._identify_prefix(msg, data)
if msg_dict:
log.debug('Adding %s to list of matched OS', dev_os)
ret.append((dev_os, msg_dict))
else:
log.debug('No match found for %s', dev_os)
if not ret:
log.debug('Not matched any OS, returning original log')
msg_dict = {'message': msg}
ret.append((None, msg_dict))
return ret |
def _render_profile(path, caller, runner):
'''
Render profile as Jinja2.
:param path:
:return:
'''
env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(path)), trim_blocks=False)
return env.get_template(os.path.basename(path)).render(salt=caller, runners=runner).strip() | Render profile as Jinja2.
:param path:
:return: | Below is the the instruction that describes the task:
### Input:
Render profile as Jinja2.
:param path:
:return:
### Response:
def _render_profile(path, caller, runner):
'''
Render profile as Jinja2.
:param path:
:return:
'''
env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(path)), trim_blocks=False)
return env.get_template(os.path.basename(path)).render(salt=caller, runners=runner).strip() |
def _log_joint(current_target_log_prob, current_momentum):
"""Log-joint probability given a state's log-probability and momentum."""
momentum_log_prob = -sum(
[tf.reduce_sum(input_tensor=0.5 * (m**2.)) for m in current_momentum])
return current_target_log_prob + momentum_log_prob | Log-joint probability given a state's log-probability and momentum. | Below is the the instruction that describes the task:
### Input:
Log-joint probability given a state's log-probability and momentum.
### Response:
def _log_joint(current_target_log_prob, current_momentum):
"""Log-joint probability given a state's log-probability and momentum."""
momentum_log_prob = -sum(
[tf.reduce_sum(input_tensor=0.5 * (m**2.)) for m in current_momentum])
return current_target_log_prob + momentum_log_prob |
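A small sketch with TensorFlow tensors; the helper simply adds a standard-normal momentum log-density to the target log-probability.
import tensorflow as tf

target_lp = tf.constant(-3.2)
momentum = [tf.constant([0.5, -1.0]), tf.constant([2.0])]
print(_log_joint(target_lp, momentum))    # -3.2 - 0.5*(0.25 + 1.0 + 4.0) = -5.825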
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree | Return lxml.etree.ElementTree for entire document, or page numbers
given if any. | Below is the the instruction that describes the task:
### Input:
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
### Response:
def get_tree(self, *page_numbers):
"""
Return lxml.etree.ElementTree for entire document, or page numbers
given if any.
"""
cache_key = "_".join(map(str, _flatten(page_numbers)))
tree = self._parse_tree_cacher.get(cache_key)
if tree is None:
# set up root
root = parser.makeelement("pdfxml")
if self.doc.info:
for k, v in list(self.doc.info[0].items()):
k = obj_to_string(k)
v = obj_to_string(resolve1(v))
try:
root.set(k, v)
except ValueError as e:
# Sometimes keys have a character in them, like ':',
# that isn't allowed in XML attribute names.
# If that happens we just replace non-word characters
# with '_'.
if "Invalid attribute name" in e.args[0]:
k = re.sub('\W', '_', k)
root.set(k, v)
# Parse pages and append to root.
# If nothing was passed in for page_numbers, we do this for all
# pages, but if None was explicitly passed in, we skip it.
if not(len(page_numbers) == 1 and page_numbers[0] is None):
if page_numbers:
pages = [[n, self.get_layout(self.get_page(n))] for n in
_flatten(page_numbers)]
else:
pages = enumerate(self.get_layouts())
for n, page in pages:
page = self._xmlize(page)
page.set('page_index', obj_to_string(n))
page.set('page_label', self.doc.get_page_number(n))
root.append(page)
self._clean_text(root)
# wrap root in ElementTree
tree = etree.ElementTree(root)
self._parse_tree_cacher.set(cache_key, tree)
return tree |
def neverCalledWithMatch(cls, spy, *args, **kwargs): #pylint: disable=invalid-name
"""
Checking the inspector is never called with partial SinonMatcher(args/kwargs)
Args: SinonSpy, args/kwargs
"""
cls.__is_spy(spy)
if not (spy.neverCalledWithMatch(*args, **kwargs)):
raise cls.failException(cls.message) | Checking the inspector is never called with partial SinonMatcher(args/kwargs)
Args: SinonSpy, args/kwargs | Below is the the instruction that describes the task:
### Input:
Checking the inspector is never called with partial SinonMatcher(args/kwargs)
Args: SinonSpy, args/kwargs
### Response:
def neverCalledWithMatch(cls, spy, *args, **kwargs): #pylint: disable=invalid-name
"""
Checking the inspector is never called with partial SinonMatcher(args/kwargs)
Args: SinonSpy, args/kwargs
"""
cls.__is_spy(spy)
if not (spy.neverCalledWithMatch(*args, **kwargs)):
raise cls.failException(cls.message) |
def prsint(string):
"""
Parse a string as an integer, encapsulating error handling.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prsint_c.html
:param string: String representing an integer.
:type string: str
:return: Integer value obtained by parsing string.
:rtype: int
"""
string = stypes.stringToCharP(string)
intval = ctypes.c_int()
libspice.prsint_c(string, ctypes.byref(intval))
return intval.value | Parse a string as an integer, encapsulating error handling.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prsint_c.html
:param string: String representing an integer.
:type string: str
:return: Integer value obtained by parsing string.
:rtype: int | Below is the the instruction that describes the task:
### Input:
Parse a string as an integer, encapsulating error handling.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prsint_c.html
:param string: String representing an integer.
:type string: str
:return: Integer value obtained by parsing string.
:rtype: int
### Response:
def prsint(string):
"""
Parse a string as an integer, encapsulating error handling.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prsint_c.html
:param string: String representing an integer.
:type string: str
:return: Integer value obtained by parsing string.
:rtype: int
"""
string = stypes.stringToCharP(string)
intval = ctypes.c_int()
libspice.prsint_c(string, ctypes.byref(intval))
return intval.value |
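A quick sketch; the wrapper simply hands back the parsed integer.
import spiceypy

print(spiceypy.prsint('42'))              # 42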
def are_metadata_file_based(layer):
"""Check if metadata should be read/written to file or our metadata db.
Determine which metadata lookup system to use (file base or cache db)
based on the layer's provider type. True indicates we should use the
datasource as a file and look for a metadata file, False and we look
in the metadata db.
    :param layer: The layer for which we want to know how the metadata are stored.
    :type layer: QgsMapLayer
    :returns: True if metadata are stored in a file next to the dataset,
        else False if the dataset is remote, e.g. a database.
:rtype: bool
:raises: UnsupportedProviderError
"""
try:
provider_type = str(layer.providerType())
except AttributeError:
raise UnsupportedProviderError(
'Could not determine type for provider: %s' %
layer.__class__.__name__)
provider_dict = {
'ogr': True,
'gdal': True,
'gpx': False,
'wms': False,
'spatialite': False,
'delimitedtext': False,
'postgres': False}
file_based_metadata = False
if provider_type in provider_dict:
file_based_metadata = provider_dict[provider_type]
return file_based_metadata | Check if metadata should be read/written to file or our metadata db.
Determine which metadata lookup system to use (file base or cache db)
based on the layer's provider type. True indicates we should use the
datasource as a file and look for a metadata file, False and we look
in the metadata db.
    :param layer: The layer for which we want to know how the metadata are stored.
    :type layer: QgsMapLayer
    :returns: True if metadata are stored in a file next to the dataset,
        else False if the dataset is remote, e.g. a database.
:rtype: bool
:raises: UnsupportedProviderError | Below is the the instruction that describes the task:
### Input:
Check if metadata should be read/written to file or our metadata db.
Determine which metadata lookup system to use (file base or cache db)
based on the layer's provider type. True indicates we should use the
datasource as a file and look for a metadata file, False and we look
in the metadata db.
:param layer: The layer for which we want to know how the metadata are stored.
:type layer: QgsMapLayer
:returns: True if metadata are stored in a file next to the dataset,
    else False if the dataset is remote, e.g. a database.
:rtype: bool
:raises: UnsupportedProviderError
### Response:
def are_metadata_file_based(layer):
"""Check if metadata should be read/written to file or our metadata db.
Determine which metadata lookup system to use (file base or cache db)
based on the layer's provider type. True indicates we should use the
datasource as a file and look for a metadata file, False and we look
in the metadata db.
    :param layer: The layer for which we want to know how the metadata are stored.
    :type layer: QgsMapLayer
    :returns: True if metadata are stored in a file next to the dataset,
        else False if the dataset is remote, e.g. a database.
:rtype: bool
:raises: UnsupportedProviderError
"""
try:
provider_type = str(layer.providerType())
except AttributeError:
raise UnsupportedProviderError(
'Could not determine type for provider: %s' %
layer.__class__.__name__)
provider_dict = {
'ogr': True,
'gdal': True,
'gpx': False,
'wms': False,
'spatialite': False,
'delimitedtext': False,
'postgres': False}
file_based_metadata = False
if provider_type in provider_dict:
file_based_metadata = provider_dict[provider_type]
return file_based_metadata |
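A hedged QGIS usage sketch; it assumes a QGIS Python environment where a vector layer can be built from a shapefile path.
from qgis.core import QgsVectorLayer      # requires a QGIS Python environment

layer = QgsVectorLayer('/tmp/roads.shp', 'roads', 'ogr')
print(are_metadata_file_based(layer))     # True for ogr/gdal sources, False for e.g. postgres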
def Parse(self, value):
"""Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error.
"""
value_line = value.split(' ')
if len(value_line) < 3:
raise TextFSMTemplateError('Expect at least 3 tokens on line.')
if not value_line[2].startswith('('):
# Options are present
options = value_line[1]
for option in options.split(','):
self._AddOption(option)
# Call option OnCreateOptions callbacks
[option.OnCreateOptions() for option in self.options]
self.name = value_line[2]
self.regex = ' '.join(value_line[3:])
else:
# There were no valid options, so there are no options.
# Treat this argument as the name.
self.name = value_line[1]
self.regex = ' '.join(value_line[2:])
if len(self.name) > self.max_name_len:
raise TextFSMTemplateError(
"Invalid Value name '%s' or name too long." % self.name)
if (not re.match(r'^\(.*\)$', self.regex) or
self.regex.count('(') != self.regex.count(')')):
raise TextFSMTemplateError(
"Value '%s' must be contained within a '()' pair." % self.regex)
self.template = re.sub(r'^\(', '(?P<%s>' % self.name, self.regex) | Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error. | Below is the the instruction that describes the task:
### Input:
Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error.
### Response:
def Parse(self, value):
"""Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error.
"""
value_line = value.split(' ')
if len(value_line) < 3:
raise TextFSMTemplateError('Expect at least 3 tokens on line.')
if not value_line[2].startswith('('):
# Options are present
options = value_line[1]
for option in options.split(','):
self._AddOption(option)
# Call option OnCreateOptions callbacks
[option.OnCreateOptions() for option in self.options]
self.name = value_line[2]
self.regex = ' '.join(value_line[3:])
else:
# There were no valid options, so there are no options.
# Treat this argument as the name.
self.name = value_line[1]
self.regex = ' '.join(value_line[2:])
if len(self.name) > self.max_name_len:
raise TextFSMTemplateError(
"Invalid Value name '%s' or name too long." % self.name)
if (not re.match(r'^\(.*\)$', self.regex) or
self.regex.count('(') != self.regex.count(')')):
raise TextFSMTemplateError(
"Value '%s' must be contained within a '()' pair." % self.regex)
self.template = re.sub(r'^\(', '(?P<%s>' % self.name, self.regex) |
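A hedged usage sketch for the record above, assuming the textfsm package is installed and exposes TextFSMValue at the top level; the line follows the 'Value [options] name (regex)' grammar that Parse() expects.
import textfsm
v = textfsm.TextFSMValue()
v.Parse(r'Value interface (\S+)')    # no options: second token is the name, the rest is the regex
print(v.name, v.regex, v.template)   # interface (\S+) (?P<interface>\S+)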
def get_object(self, view_name, view_args, view_kwargs):
"""
Return the object corresponding to a matched URL.
Takes the matched URL conf arguments, and should return an
object instance, or raise an `ObjectDoesNotExist` exception.
"""
lookup_value = view_kwargs.get(self.lookup_url_kwarg)
parent_lookup_value = view_kwargs.get(self.parent_lookup_field)
lookup_kwargs = {
self.lookup_field: lookup_value,
}
# Try to lookup parent attr
if parent_lookup_value:
lookup_kwargs.update({self.parent_lookup_field: parent_lookup_value})
return self.get_queryset().get(**lookup_kwargs) | Return the object corresponding to a matched URL.
Takes the matched URL conf arguments, and should return an
object instance, or raise an `ObjectDoesNotExist` exception. | Below is the the instruction that describes the task:
### Input:
Return the object corresponding to a matched URL.
Takes the matched URL conf arguments, and should return an
object instance, or raise an `ObjectDoesNotExist` exception.
### Response:
def get_object(self, view_name, view_args, view_kwargs):
"""
Return the object corresponding to a matched URL.
Takes the matched URL conf arguments, and should return an
object instance, or raise an `ObjectDoesNotExist` exception.
"""
lookup_value = view_kwargs.get(self.lookup_url_kwarg)
parent_lookup_value = view_kwargs.get(self.parent_lookup_field)
lookup_kwargs = {
self.lookup_field: lookup_value,
}
# Try to lookup parent attr
if parent_lookup_value:
lookup_kwargs.update({self.parent_lookup_field: parent_lookup_value})
return self.get_queryset().get(**lookup_kwargs) |
def register_logger(self, logger):
"""
Register a new logger.
"""
handler = CommandHandler(self)
handler.setFormatter(CommandFormatter())
logger.handlers = [handler]
logger.propagate = False
output = self.output
level = logging.WARNING
if output.is_debug():
level = logging.DEBUG
elif output.is_very_verbose() or output.is_verbose():
level = logging.INFO
logger.setLevel(level) | Register a new logger. | Below is the the instruction that describes the task:
### Input:
Register a new logger.
### Response:
def register_logger(self, logger):
"""
Register a new logger.
"""
handler = CommandHandler(self)
handler.setFormatter(CommandFormatter())
logger.handlers = [handler]
logger.propagate = False
output = self.output
level = logging.WARNING
if output.is_debug():
level = logging.DEBUG
elif output.is_very_verbose() or output.is_verbose():
level = logging.INFO
logger.setLevel(level) |
def mnist_tutorial(train_start=0, train_end=60000, test_start=0,
test_end=10000, nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
learning_rate=LEARNING_RATE, testing=False,
label_smoothing=0.1):
"""
MNIST CleverHans tutorial
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param testing: if true, training error is calculated
:param label_smoothing: float, amount of label smoothing for cross entropy
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Force TensorFlow to use single thread to improve reproducibility
config = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
if keras.backend.image_data_format() != 'channels_last':
raise NotImplementedError("this tutorial requires keras to be configured to channels_last format")
# Create TF session and set as Keras backend session
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
x_train, y_train = mnist.get_set('train')
x_test, y_test = mnist.get_set('test')
# Obtain Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Label smoothing
y_train -= label_smoothing * (y_train - 1. / nb_classes)
# Define Keras model
model = cnn_model(img_rows=img_rows, img_cols=img_cols,
channels=nchannels, nb_filters=64,
nb_classes=nb_classes)
print("Defined Keras model.")
# To be able to call the model in the custom loss, we need to call it once
# before, see https://github.com/tensorflow/tensorflow/issues/23769
model(model.input)
# Initialize the Fast Gradient Sign Method (FGSM) attack object
wrap = KerasModelWrapper(model)
fgsm = FastGradientMethod(wrap, sess=sess)
fgsm_params = {'eps': 0.3,
'clip_min': 0.,
'clip_max': 1.}
adv_acc_metric = get_adversarial_acc_metric(model, fgsm, fgsm_params)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss='categorical_crossentropy',
metrics=['accuracy', adv_acc_metric]
)
# Train an MNIST model
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=nb_epochs,
validation_data=(x_test, y_test),
verbose=2)
# Evaluate the accuracy on legitimate and adversarial test examples
_, acc, adv_acc = model.evaluate(x_test, y_test,
batch_size=batch_size,
verbose=0)
report.clean_train_clean_eval = acc
report.clean_train_adv_eval = adv_acc
print('Test accuracy on legitimate examples: %0.4f' % acc)
print('Test accuracy on adversarial examples: %0.4f\n' % adv_acc)
# Calculate training error
if testing:
_, train_acc, train_adv_acc = model.evaluate(x_train, y_train,
batch_size=batch_size,
verbose=0)
report.train_clean_train_clean_eval = train_acc
report.train_clean_train_adv_eval = train_adv_acc
print("Repeating the process, using adversarial training")
# Redefine Keras model
model_2 = cnn_model(img_rows=img_rows, img_cols=img_cols,
channels=nchannels, nb_filters=64,
nb_classes=nb_classes)
model_2(model_2.input)
wrap_2 = KerasModelWrapper(model_2)
fgsm_2 = FastGradientMethod(wrap_2, sess=sess)
# Use a loss function based on legitimate and adversarial examples
adv_loss_2 = get_adversarial_loss(model_2, fgsm_2, fgsm_params)
adv_acc_metric_2 = get_adversarial_acc_metric(model_2, fgsm_2, fgsm_params)
model_2.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=adv_loss_2,
metrics=['accuracy', adv_acc_metric_2]
)
# Train an MNIST model
model_2.fit(x_train, y_train,
batch_size=batch_size,
epochs=nb_epochs,
validation_data=(x_test, y_test),
verbose=2)
# Evaluate the accuracy on legitimate and adversarial test examples
_, acc, adv_acc = model_2.evaluate(x_test, y_test,
batch_size=batch_size,
verbose=0)
report.adv_train_clean_eval = acc
report.adv_train_adv_eval = adv_acc
print('Test accuracy on legitimate examples: %0.4f' % acc)
print('Test accuracy on adversarial examples: %0.4f\n' % adv_acc)
# Calculate training error
if testing:
_, train_acc, train_adv_acc = model_2.evaluate(x_train, y_train,
batch_size=batch_size,
verbose=0)
report.train_adv_train_clean_eval = train_acc
report.train_adv_train_adv_eval = train_adv_acc
return report | MNIST CleverHans tutorial
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param testing: if true, training error is calculated
:param label_smoothing: float, amount of label smoothing for cross entropy
:return: an AccuracyReport object | Below is the the instruction that describes the task:
### Input:
MNIST CleverHans tutorial
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param testing: if true, training error is calculated
:param label_smoothing: float, amount of label smoothing for cross entropy
:return: an AccuracyReport object
### Response:
def mnist_tutorial(train_start=0, train_end=60000, test_start=0,
test_end=10000, nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
learning_rate=LEARNING_RATE, testing=False,
label_smoothing=0.1):
"""
MNIST CleverHans tutorial
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param nb_epochs: number of epochs to train model
:param batch_size: size of training batches
:param learning_rate: learning rate for training
:param testing: if true, training error is calculated
:param label_smoothing: float, amount of label smoothing for cross entropy
:return: an AccuracyReport object
"""
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Force TensorFlow to use single thread to improve reproducibility
config = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
if keras.backend.image_data_format() != 'channels_last':
raise NotImplementedError("this tutorial requires keras to be configured to channels_last format")
# Create TF session and set as Keras backend session
sess = tf.Session(config=config)
keras.backend.set_session(sess)
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
x_train, y_train = mnist.get_set('train')
x_test, y_test = mnist.get_set('test')
# Obtain Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Label smoothing
y_train -= label_smoothing * (y_train - 1. / nb_classes)
# Define Keras model
model = cnn_model(img_rows=img_rows, img_cols=img_cols,
channels=nchannels, nb_filters=64,
nb_classes=nb_classes)
print("Defined Keras model.")
# To be able to call the model in the custom loss, we need to call it once
# before, see https://github.com/tensorflow/tensorflow/issues/23769
model(model.input)
# Initialize the Fast Gradient Sign Method (FGSM) attack object
wrap = KerasModelWrapper(model)
fgsm = FastGradientMethod(wrap, sess=sess)
fgsm_params = {'eps': 0.3,
'clip_min': 0.,
'clip_max': 1.}
adv_acc_metric = get_adversarial_acc_metric(model, fgsm, fgsm_params)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss='categorical_crossentropy',
metrics=['accuracy', adv_acc_metric]
)
# Train an MNIST model
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=nb_epochs,
validation_data=(x_test, y_test),
verbose=2)
# Evaluate the accuracy on legitimate and adversarial test examples
_, acc, adv_acc = model.evaluate(x_test, y_test,
batch_size=batch_size,
verbose=0)
report.clean_train_clean_eval = acc
report.clean_train_adv_eval = adv_acc
print('Test accuracy on legitimate examples: %0.4f' % acc)
print('Test accuracy on adversarial examples: %0.4f\n' % adv_acc)
# Calculate training error
if testing:
_, train_acc, train_adv_acc = model.evaluate(x_train, y_train,
batch_size=batch_size,
verbose=0)
report.train_clean_train_clean_eval = train_acc
report.train_clean_train_adv_eval = train_adv_acc
print("Repeating the process, using adversarial training")
# Redefine Keras model
model_2 = cnn_model(img_rows=img_rows, img_cols=img_cols,
channels=nchannels, nb_filters=64,
nb_classes=nb_classes)
model_2(model_2.input)
wrap_2 = KerasModelWrapper(model_2)
fgsm_2 = FastGradientMethod(wrap_2, sess=sess)
# Use a loss function based on legitimate and adversarial examples
adv_loss_2 = get_adversarial_loss(model_2, fgsm_2, fgsm_params)
adv_acc_metric_2 = get_adversarial_acc_metric(model_2, fgsm_2, fgsm_params)
model_2.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=adv_loss_2,
metrics=['accuracy', adv_acc_metric_2]
)
# Train an MNIST model
model_2.fit(x_train, y_train,
batch_size=batch_size,
epochs=nb_epochs,
validation_data=(x_test, y_test),
verbose=2)
# Evaluate the accuracy on legitimate and adversarial test examples
_, acc, adv_acc = model_2.evaluate(x_test, y_test,
batch_size=batch_size,
verbose=0)
report.adv_train_clean_eval = acc
report.adv_train_adv_eval = adv_acc
print('Test accuracy on legitimate examples: %0.4f' % acc)
print('Test accuracy on adversarial examples: %0.4f\n' % adv_acc)
# Calculate training error
if testing:
_, train_acc, train_adv_acc = model_2.evaluate(x_train, y_train,
batch_size=batch_size,
verbose=0)
report.train_adv_train_clean_eval = train_acc
report.train_adv_train_adv_eval = train_adv_acc
return report |
def p_valueInitializer(p):
"""valueInitializer : identifier defaultValue ';'
| qualifierList identifier defaultValue ';'
"""
if len(p) == 4:
id_ = p[1]
val = p[2]
quals = []
else:
quals = p[1]
id_ = p[2]
val = p[3]
p[0] = (quals, id_, val) | valueInitializer : identifier defaultValue ';'
| qualifierList identifier defaultValue ';' | Below is the the instruction that describes the task:
### Input:
valueInitializer : identifier defaultValue ';'
| qualifierList identifier defaultValue ';'
### Response:
def p_valueInitializer(p):
"""valueInitializer : identifier defaultValue ';'
| qualifierList identifier defaultValue ';'
"""
if len(p) == 4:
id_ = p[1]
val = p[2]
quals = []
else:
quals = p[1]
id_ = p[2]
val = p[3]
p[0] = (quals, id_, val) |
def _parse_group(self, group_name, group):
"""
Parse a group definition from a dynamic inventory. These are top-level
elements which are not '_meta(data)'.
"""
if type(group) == dict:
# Example:
# {
# "mgmt": {
# "hosts": [ "mgmt01", "mgmt02" ],
# "vars": {
# "eth0": {
# "onboot": "yes",
# "nm_controlled": "no"
# }
# }
# }
# }
#
hostnames_in_group = set()
# Group member with hosts and variable definitions.
for hostname in group.get('hosts', []):
self._get_host(hostname)['groups'].add(group_name)
hostnames_in_group.add(hostname)
# Apply variables to all hosts in group
for var_key, var_val in group.get('vars', {}).items():
for hostname in hostnames_in_group:
self._get_host(hostname)['hostvars'][var_key] = var_val
elif type(group) == list:
# List of hostnames for this group
for hostname in group:
self._get_host(hostname)['groups'].add(group_name)
else:
self.log.warning("Invalid element found in dynamic inventory output: {0}".format(type(group))) | Parse a group definition from a dynamic inventory. These are top-level
elements which are not '_meta(data)'. | Below is the the instruction that describes the task:
### Input:
Parse a group definition from a dynamic inventory. These are top-level
elements which are not '_meta(data)'.
### Response:
def _parse_group(self, group_name, group):
"""
Parse a group definition from a dynamic inventory. These are top-level
elements which are not '_meta(data)'.
"""
if type(group) == dict:
# Example:
# {
# "mgmt": {
# "hosts": [ "mgmt01", "mgmt02" ],
# "vars": {
# "eth0": {
# "onboot": "yes",
# "nm_controlled": "no"
# }
# }
# }
# }
#
hostnames_in_group = set()
# Group member with hosts and variable definitions.
for hostname in group.get('hosts', []):
self._get_host(hostname)['groups'].add(group_name)
hostnames_in_group.add(hostname)
# Apply variables to all hosts in group
for var_key, var_val in group.get('vars', {}).items():
for hostname in hostnames_in_group:
self._get_host(hostname)['hostvars'][var_key] = var_val
elif type(group) == list:
# List of hostnames for this group
for hostname in group:
self._get_host(hostname)['groups'].add(group_name)
else:
self.log.warning("Invalid element found in dynamic inventory output: {0}".format(type(group))) |
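The two group shapes the parser above accepts, written out as plain data; a hypothetical inventory snippet with the dict form (hosts plus group-wide vars) and the list form (bare hostnames).
dynamic_inventory = {
    'mgmt': {                                  # dict form: hosts and vars
        'hosts': ['mgmt01', 'mgmt02'],
        'vars': {'ntp_server': '10.0.0.1'},
    },
    'web': ['web01', 'web02'],                 # list form: hostnames only
}
# each top-level entry would be handed to the method as _parse_group(name, group)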
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data | create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Job
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
### Response:
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data |
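A hedged sketch of calling this endpoint through the official kubernetes Python client, assuming a reachable cluster and a local kubeconfig; the Job payload is the usual pi-computation example from the client documentation.
from kubernetes import client, config
config.load_kube_config()                      # or config.load_incluster_config() inside a pod
container = client.V1Container(
    name='pi', image='perl',
    command=['perl', '-Mbignum=bpi', '-wle', 'print bpi(200)'])
template = client.V1PodTemplateSpec(
    metadata=client.V1ObjectMeta(labels={'app': 'pi'}),
    spec=client.V1PodSpec(restart_policy='Never', containers=[container]))
job = client.V1Job(
    metadata=client.V1ObjectMeta(name='pi'),
    spec=client.V1JobSpec(template=template, backoff_limit=2))
created = client.BatchV1Api().create_namespaced_job(namespace='default', body=job)
print(created.metadata.name)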
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
properties = mpl_to_bokeh(properties)
properties = dict(properties, **mapping)
if 'xs' in mapping:
renderer = plot.patches(**properties)
else:
renderer = plot.quad(**properties)
if self.colorbar and 'color_mapper' in self.handles:
self._draw_colorbar(plot, self.handles['color_mapper'])
return renderer, renderer.glyph | Returns a Bokeh glyph object. | Below is the the instruction that describes the task:
### Input:
Returns a Bokeh glyph object.
### Response:
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
properties = mpl_to_bokeh(properties)
properties = dict(properties, **mapping)
if 'xs' in mapping:
renderer = plot.patches(**properties)
else:
renderer = plot.quad(**properties)
if self.colorbar and 'color_mapper' in self.handles:
self._draw_colorbar(plot, self.handles['color_mapper'])
return renderer, renderer.glyph |
def unfold(self, mode):
"""
Unfolds a dense tensor in mode n.
Parameters
----------
mode : int
Mode in which tensor is unfolded
Returns
-------
unfolded_dtensor : unfolded_dtensor object
Tensor unfolded along mode
Examples
--------
Create dense tensor from numpy array
>>> T = np.zeros((3, 4, 2))
>>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]]
>>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]
>>> T = dtensor(T)
Unfolding of dense tensors
>>> T.unfold(0)
array([[ 1., 4., 7., 10., 13., 16., 19., 22.],
[ 2., 5., 8., 11., 14., 17., 20., 23.],
[ 3., 6., 9., 12., 15., 18., 21., 24.]])
>>> T.unfold(1)
array([[ 1., 2., 3., 13., 14., 15.],
[ 4., 5., 6., 16., 17., 18.],
[ 7., 8., 9., 19., 20., 21.],
[ 10., 11., 12., 22., 23., 24.]])
>>> T.unfold(2)
array([[ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.,
12.],
[ 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23.,
24.]])
"""
sz = array(self.shape)
N = len(sz)
order = ([mode], from_to_without(N - 1, -1, mode, step=-1, skip=-1))
newsz = (sz[order[0]][0], prod(sz[order[1]]))
arr = self.transpose(axes=(order[0] + order[1]))
arr = arr.reshape(newsz)
return unfolded_dtensor(arr, mode, self.shape) | Unfolds a dense tensor in mode n.
Parameters
----------
mode : int
Mode in which tensor is unfolded
Returns
-------
unfolded_dtensor : unfolded_dtensor object
Tensor unfolded along mode
Examples
--------
Create dense tensor from numpy array
>>> T = np.zeros((3, 4, 2))
>>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]]
>>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]
>>> T = dtensor(T)
Unfolding of dense tensors
>>> T.unfold(0)
array([[ 1., 4., 7., 10., 13., 16., 19., 22.],
[ 2., 5., 8., 11., 14., 17., 20., 23.],
[ 3., 6., 9., 12., 15., 18., 21., 24.]])
>>> T.unfold(1)
array([[ 1., 2., 3., 13., 14., 15.],
[ 4., 5., 6., 16., 17., 18.],
[ 7., 8., 9., 19., 20., 21.],
[ 10., 11., 12., 22., 23., 24.]])
>>> T.unfold(2)
array([[ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.,
12.],
[ 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23.,
24.]]) | Below is the the instruction that describes the task:
### Input:
Unfolds a dense tensor in mode n.
Parameters
----------
mode : int
Mode in which tensor is unfolded
Returns
-------
unfolded_dtensor : unfolded_dtensor object
Tensor unfolded along mode
Examples
--------
Create dense tensor from numpy array
>>> T = np.zeros((3, 4, 2))
>>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]]
>>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]
>>> T = dtensor(T)
Unfolding of dense tensors
>>> T.unfold(0)
array([[ 1., 4., 7., 10., 13., 16., 19., 22.],
[ 2., 5., 8., 11., 14., 17., 20., 23.],
[ 3., 6., 9., 12., 15., 18., 21., 24.]])
>>> T.unfold(1)
array([[ 1., 2., 3., 13., 14., 15.],
[ 4., 5., 6., 16., 17., 18.],
[ 7., 8., 9., 19., 20., 21.],
[ 10., 11., 12., 22., 23., 24.]])
>>> T.unfold(2)
array([[ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.,
12.],
[ 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23.,
24.]])
### Response:
def unfold(self, mode):
"""
Unfolds a dense tensor in mode n.
Parameters
----------
mode : int
Mode in which tensor is unfolded
Returns
-------
unfolded_dtensor : unfolded_dtensor object
Tensor unfolded along mode
Examples
--------
Create dense tensor from numpy array
>>> T = np.zeros((3, 4, 2))
>>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]]
>>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]
>>> T = dtensor(T)
Unfolding of dense tensors
>>> T.unfold(0)
array([[ 1., 4., 7., 10., 13., 16., 19., 22.],
[ 2., 5., 8., 11., 14., 17., 20., 23.],
[ 3., 6., 9., 12., 15., 18., 21., 24.]])
>>> T.unfold(1)
array([[ 1., 2., 3., 13., 14., 15.],
[ 4., 5., 6., 16., 17., 18.],
[ 7., 8., 9., 19., 20., 21.],
[ 10., 11., 12., 22., 23., 24.]])
>>> T.unfold(2)
array([[ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.,
12.],
[ 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23.,
24.]])
"""
sz = array(self.shape)
N = len(sz)
order = ([mode], from_to_without(N - 1, -1, mode, step=-1, skip=-1))
newsz = (sz[order[0]][0], prod(sz[order[1]]))
arr = self.transpose(axes=(order[0] + order[1]))
arr = arr.reshape(newsz)
return unfolded_dtensor(arr, mode, self.shape) |
def wait_for(self, pattern, timeout=None):
"""
Block until a pattern has been found in stdout and stderr
Args:
pattern(:class:`~re.Pattern`): The pattern to search
timeout(int): Maximum number of seconds to wait. If None, wait infinitely
Raises:
TimeoutError: When timeout is reached
"""
should_continue = True
if self.block:
raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
def stop(signum, frame): # pylint: disable=W0613
nonlocal should_continue
if should_continue:
raise TimeoutError()
if timeout:
signal.signal(signal.SIGALRM, stop)
signal.alarm(timeout)
while should_continue:
output = self.poll_output() + self.poll_error()
filtered = [line for line in output if re.match(pattern, line)]
if filtered:
should_continue = False | Block until a pattern has been found in stdout and stderr
Args:
pattern(:class:`~re.Pattern`): The pattern to search
timeout(int): Maximum number of seconds to wait. If None, wait infinitely
Raises:
TimeoutError: When timeout is reached | Below is the the instruction that describes the task:
### Input:
Block until a pattern has been found in stdout and stderr
Args:
pattern(:class:`~re.Pattern`): The pattern to search
timeout(int): Maximum number of seconds to wait. If None, wait infinitely
Raises:
TimeoutError: When timeout is reached
### Response:
def wait_for(self, pattern, timeout=None):
"""
Block until a pattern has been found in stdout and stderr
Args:
pattern(:class:`~re.Pattern`): The pattern to search
timeout(int): Maximum number of seconds to wait. If None, wait infinitely
Raises:
TimeoutError: When timeout is reached
"""
should_continue = True
if self.block:
raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
def stop(signum, frame): # pylint: disable=W0613
nonlocal should_continue
if should_continue:
raise TimeoutError()
if timeout:
signal.signal(signal.SIGALRM, stop)
signal.alarm(timeout)
while should_continue:
output = self.poll_output() + self.poll_error()
filtered = [line for line in output if re.match(pattern, line)]
if filtered:
should_continue = False |
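The timeout mechanism used above, reduced to a standalone sketch: signal.alarm() delivers SIGALRM after the given number of seconds and the handler raises, turning a polling loop into a bounded wait (Unix-only, main thread only).
import signal
import time
def _stop(signum, frame):
    raise TimeoutError()
signal.signal(signal.SIGALRM, _stop)
signal.alarm(2)                  # raise TimeoutError after 2 seconds
try:
    while True:
        time.sleep(0.1)          # stands in for polling the streams for the pattern
except TimeoutError:
    print('timed out as expected')
finally:
    signal.alarm(0)              # cancel any pending alarm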
def cdf(self, y, f):
r"""
Cumulative density function of the likelihood.
Parameters
----------
y: ndarray
query quantiles, i.e.\ :math:`P(Y \leq y)`.
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
Returns
-------
cdf: ndarray
Cumulative density function evaluated at y.
"""
mu = np.exp(f) if self.tranfcn == 'exp' else softplus(f)
return poisson.cdf(y, mu=mu) | r"""
Cumulative density function of the likelihood.
Parameters
----------
y: ndarray
query quantiles, i.e.\ :math:`P(Y \leq y)`.
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
Returns
-------
cdf: ndarray
Cumulative density function evaluated at y. | Below is the the instruction that describes the task:
### Input:
r"""
Cumulative density function of the likelihood.
Parameters
----------
y: ndarray
query quantiles, i.e.\ :math:`P(Y \leq y)`.
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
Returns
-------
cdf: ndarray
Cumulative density function evaluated at y.
### Response:
def cdf(self, y, f):
r"""
Cumulative density function of the likelihood.
Parameters
----------
y: ndarray
query quantiles, i.e.\ :math:`P(Y \leq y)`.
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
Returns
-------
cdf: ndarray
Cumulative density function evaluated at y.
"""
mu = np.exp(f) if self.tranfcn == 'exp' else softplus(f)
return poisson.cdf(y, mu=mu) |
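The computation above in isolation: latent values f are mapped through the link function to Poisson rates, then scipy's Poisson CDF is evaluated at the query point y.
import numpy as np
from scipy.stats import poisson
f = np.array([-1.0, 0.0, 2.0])   # latent function values
mu = np.exp(f)                   # 'exp' link; the softplus link would use log(1 + e^f)
print(poisson.cdf(3, mu=mu))     # P(Y <= 3) under each rate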
def get_mtype_and_number_from_code(si, sensor_code):
"""
Given a sensor sensor_code, get motion type and sensor number
:param si: dict, sensor index json dictionary
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:return:
"""
mtype_and_ory, x, y, z = sensor_code.split("-")
if mtype_and_ory[-1] in "XYZ" and "ACCX" not in si: # Need to support old sensor_file.json files.
mtype = mtype_and_ory[:-1]
else:
mtype = mtype_and_ory
for m_number in si[mtype]:
cc = get_sensor_code_by_number(si, mtype, m_number)
if cc == sensor_code:
return mtype, m_number
return None, None | Given a sensor sensor_code, get motion type and sensor number
:param si: dict, sensor index json dictionary
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:return: | Below is the the instruction that describes the task:
### Input:
Given a sensor sensor_code, get motion type and sensor number
:param si: dict, sensor index json dictionary
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:return:
### Response:
def get_mtype_and_number_from_code(si, sensor_code):
"""
Given a sensor sensor_code, get motion type and sensor number
:param si: dict, sensor index json dictionary
:param sensor_code: str, a sensor code (e.g. ACCX-UB1-L2C-M)
:return:
"""
mtype_and_ory, x, y, z = sensor_code.split("-")
if mtype_and_ory[-1] in "XYZ" and "ACCX" not in si: # Need to support old sensor_file.json files.
mtype = mtype_and_ory[:-1]
else:
mtype = mtype_and_ory
for m_number in si[mtype]:
cc = get_sensor_code_by_number(si, mtype, m_number)
if cc == sensor_code:
return mtype, m_number
return None, None |
def _prefix_from_opts(opts):
""" Return a prefix based on options passed from command line
Used by add_prefix() and add_prefix_from_pool() to avoid duplicate
parsing
"""
p = Prefix()
p.prefix = opts.get('prefix')
p.type = opts.get('type')
p.description = opts.get('description')
p.node = opts.get('node')
p.country = opts.get('country')
p.order_id = opts.get('order_id')
p.customer_id = opts.get('customer_id')
p.alarm_priority = opts.get('alarm_priority')
p.comment = opts.get('comment')
p.monitor = _str_to_bool(opts.get('monitor'))
p.vlan = opts.get('vlan')
p.status = opts.get('status') or 'assigned' # default to assigned
p.tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
p.expires = opts.get('expires')
return p | Return a prefix based on options passed from command line
Used by add_prefix() and add_prefix_from_pool() to avoid duplicate
parsing | Below is the the instruction that describes the task:
### Input:
Return a prefix based on options passed from command line
Used by add_prefix() and add_prefix_from_pool() to avoid duplicate
parsing
### Response:
def _prefix_from_opts(opts):
""" Return a prefix based on options passed from command line
Used by add_prefix() and add_prefix_from_pool() to avoid duplicate
parsing
"""
p = Prefix()
p.prefix = opts.get('prefix')
p.type = opts.get('type')
p.description = opts.get('description')
p.node = opts.get('node')
p.country = opts.get('country')
p.order_id = opts.get('order_id')
p.customer_id = opts.get('customer_id')
p.alarm_priority = opts.get('alarm_priority')
p.comment = opts.get('comment')
p.monitor = _str_to_bool(opts.get('monitor'))
p.vlan = opts.get('vlan')
p.status = opts.get('status') or 'assigned' # default to assigned
p.tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
p.expires = opts.get('expires')
return p |
def _read_select_kqueue(k_queue):
"""
Read PIPES using BSD Kqueue
"""
npipes = len(NonBlockingStreamReader._streams)
# Create list of kevent objects
# pylint: disable=no-member
kevents = [select.kevent(s.stream.fileno(),
filter=select.KQ_FILTER_READ,
flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE)
for s in NonBlockingStreamReader._streams]
while NonBlockingStreamReader._run_flag:
events = k_queue.control(kevents, npipes, 0.5) # Wake up twice in second
for event in events:
if event.filter == select.KQ_FILTER_READ: # pylint: disable=no-member
NonBlockingStreamReader._read_fd(event.ident)
# Check if new pipes added.
if npipes != len(NonBlockingStreamReader._streams):
return | Read PIPES using BSD Kqueue | Below is the the instruction that describes the task:
### Input:
Read PIPES using BSD Kqueue
### Response:
def _read_select_kqueue(k_queue):
"""
Read PIPES using BSD Kqueue
"""
npipes = len(NonBlockingStreamReader._streams)
# Create list of kevent objects
# pylint: disable=no-member
kevents = [select.kevent(s.stream.fileno(),
filter=select.KQ_FILTER_READ,
flags=select.KQ_EV_ADD | select.KQ_EV_ENABLE)
for s in NonBlockingStreamReader._streams]
while NonBlockingStreamReader._run_flag:
events = k_queue.control(kevents, npipes, 0.5) # Wake up twice in second
for event in events:
if event.filter == select.KQ_FILTER_READ: # pylint: disable=no-member
NonBlockingStreamReader._read_fd(event.ident)
# Check if new pipes added.
if npipes != len(NonBlockingStreamReader._streams):
return |
def _load(content_or_fp):
"""YAML Parse a file or str and check version.
"""
try:
data = yaml.load(content_or_fp, Loader=yaml.loader.BaseLoader)
except Exception as e:
raise type(e)('Malformed yaml file:\n%r' % format_exc())
try:
ver = data['spec']
except:
raise ValueError('The file does not specify a spec version')
try:
ver = tuple(map(int, (ver.split("."))))
except:
raise ValueError("Invalid spec version format. Expect 'X.Y'"
" (X and Y integers), found %s" % ver)
if ver > SPEC_VERSION_TUPLE:
raise ValueError('The spec version of the file is '
'%s but the parser is %s. '
'Please update pyvisa-sim.' % (ver, SPEC_VERSION))
return data | YAML Parse a file or str and check version. | Below is the the instruction that describes the task:
### Input:
YAML Parse a file or str and check version.
### Response:
def _load(content_or_fp):
"""YAML Parse a file or str and check version.
"""
try:
data = yaml.load(content_or_fp, Loader=yaml.loader.BaseLoader)
except Exception as e:
raise type(e)('Malformed yaml file:\n%r' % format_exc())
try:
ver = data['spec']
except:
raise ValueError('The file does not specify a spec version')
try:
ver = tuple(map(int, (ver.split("."))))
except:
raise ValueError("Invalid spec version format. Expect 'X.Y'"
" (X and Y integers), found %s" % ver)
if ver > SPEC_VERSION_TUPLE:
raise ValueError('The spec version of the file is '
'%s but the parser is %s. '
'Please update pyvisa-sim.' % (ver, SPEC_VERSION))
return data |
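A hedged usage sketch, assuming PyYAML is installed and the module's SPEC_VERSION is at least 1.0; the YAML string is a hypothetical minimal file carrying only the required spec key.
data = _load("spec: '1.0'\ndevices: {}\n")
print(data['spec'])   # '1.0' -- BaseLoader keeps every scalar as a string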
def count_single_dots(self):
"""Count all strokes of this recording that have only a single dot.
"""
pointlist = self.get_pointlist()
single_dots = 0
for stroke in pointlist:
if len(stroke) == 1:
single_dots += 1
return single_dots | Count all strokes of this recording that have only a single dot. | Below is the the instruction that describes the task:
### Input:
Count all strokes of this recording that have only a single dot.
### Response:
def count_single_dots(self):
"""Count all strokes of this recording that have only a single dot.
"""
pointlist = self.get_pointlist()
single_dots = 0
for stroke in pointlist:
if len(stroke) == 1:
single_dots += 1
return single_dots |
async def make_conditional(
self,
request_range: Range,
max_partial_size: Optional[int]=None,
) -> None:
"""Make the response conditional to the
Arguments:
request_range: The range as requested by the request.
max_partial_size: The maximum length the server is willing
to serve in a single response. Defaults to unlimited.
"""
self.accept_ranges = "bytes" # Advertise this ability
if len(request_range.ranges) == 0: # Not a conditional request
return
if request_range.units != "bytes" or len(request_range.ranges) > 1:
from ..exceptions import RequestRangeNotSatisfiable
raise RequestRangeNotSatisfiable()
begin, end = request_range.ranges[0]
try:
complete_length = await self.response.make_conditional( # type: ignore
begin, end, max_partial_size,
)
except AttributeError:
self.response = self.data_body_class(await self.response.convert_to_sequence())
return await self.make_conditional(request_range, max_partial_size)
else:
self.content_length = self.response.end - self.response.begin # type: ignore
if self.content_length != complete_length:
self.content_range = ContentRange(
request_range.units,
self.response.begin, self.response.end - 1, # type: ignore
complete_length,
)
self.status_code = 206 | Make the response conditional to the request range.
Arguments:
request_range: The range as requested by the request.
max_partial_size: The maximum length the server is willing
to serve in a single response. Defaults to unlimited. | Below is the the instruction that describes the task:
### Input:
Make the response conditional to the request range.
Arguments:
request_range: The range as requested by the request.
max_partial_size: The maximum length the server is willing
to serve in a single response. Defaults to unlimited.
### Response:
async def make_conditional(
self,
request_range: Range,
max_partial_size: Optional[int]=None,
) -> None:
"""Make the response conditional to the
Arguments:
request_range: The range as requested by the request.
max_partial_size: The maximum length the server is willing
to serve in a single response. Defaults to unlimited.
"""
self.accept_ranges = "bytes" # Advertise this ability
if len(request_range.ranges) == 0: # Not a conditional request
return
if request_range.units != "bytes" or len(request_range.ranges) > 1:
from ..exceptions import RequestRangeNotSatisfiable
raise RequestRangeNotSatisfiable()
begin, end = request_range.ranges[0]
try:
complete_length = await self.response.make_conditional( # type: ignore
begin, end, max_partial_size,
)
except AttributeError:
self.response = self.data_body_class(await self.response.convert_to_sequence())
return await self.make_conditional(request_range, max_partial_size)
else:
self.content_length = self.response.end - self.response.begin # type: ignore
if self.content_length != complete_length:
self.content_range = ContentRange(
request_range.units,
self.response.begin, self.response.end - 1, # type: ignore
complete_length,
)
self.status_code = 206 |
def backbone(self):
"""Returns a new `Polymer` containing only the backbone atoms.
Notes
-----
Metadata is not currently preserved from the parent object.
Sequence data is retained, but only the main chain atoms are retained.
Returns
-------
bb_poly : Polypeptide
Polymer containing only the backbone atoms of the original
Polymer.
"""
bb_poly = Polypeptide([x.backbone for x in self._monomers], self.id)
return bb_poly | Returns a new `Polymer` containing only the backbone atoms.
Notes
-----
Metadata is not currently preserved from the parent object.
Sequence data is retained, but only the main chain atoms are retained.
Returns
-------
bb_poly : Polypeptide
Polymer containing only the backbone atoms of the original
Polymer. | Below is the the instruction that describes the task:
### Input:
Returns a new `Polymer` containing only the backbone atoms.
Notes
-----
Metadata is not currently preserved from the parent object.
Sequence data is retained, but only the main chain atoms are retained.
Returns
-------
bb_poly : Polypeptide
Polymer containing only the backbone atoms of the original
Polymer.
### Response:
def backbone(self):
"""Returns a new `Polymer` containing only the backbone atoms.
Notes
-----
Metadata is not currently preserved from the parent object.
Sequence data is retained, but only the main chain atoms are retained.
Returns
-------
bb_poly : Polypeptide
Polymer containing only the backbone atoms of the original
Polymer.
"""
bb_poly = Polypeptide([x.backbone for x in self._monomers], self.id)
return bb_poly |
def _orderedCleanDict(attrsObj):
"""
-> dict with false-values removed
Also evaluates attr-instances for false-ness by looking at the values of their properties
"""
def _filt(k, v):
if attr.has(v):
return not not any(attr.astuple(v))
return not not v
return attr.asdict(attrsObj,
dict_factory=OrderedDict,
recurse=False,
filter=_filt) | -> dict with false-values removed
Also evaluates attr-instances for false-ness by looking at the values of their properties | Below is the the instruction that describes the task:
### Input:
-> dict with false-values removed
Also evaluates attr-instances for false-ness by looking at the values of their properties
### Response:
def _orderedCleanDict(attrsObj):
"""
-> dict with false-values removed
Also evaluates attr-instances for false-ness by looking at the values of their properties
"""
def _filt(k, v):
if attr.has(v):
return not not any(attr.astuple(v))
return not not v
return attr.asdict(attrsObj,
dict_factory=OrderedDict,
recurse=False,
filter=_filt) |
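A small sketch assuming the attrs package; the nested Inner value is all-falsy so it is dropped along with the empty string, and only the truthy field survives.
import attr
@attr.s
class Inner:
    x = attr.ib(default=0)
@attr.s
class Outer:
    name = attr.ib(default='')
    inner = attr.ib(default=None)
    note = attr.ib(default='')
o = Outer(name='', inner=Inner(x=0), note='keep me')
print(_orderedCleanDict(o))   # OrderedDict([('note', 'keep me')])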
def make_anchor_id(self):
"""Return string to use as URL anchor for this comment.
"""
result = re.sub(
'[^a-zA-Z0-9_]', '_', self.user + '_' + self.timestamp)
return result | Return string to use as URL anchor for this comment. | Below is the the instruction that describes the task:
### Input:
Return string to use as URL anchor for this comment.
### Response:
def make_anchor_id(self):
"""Return string to use as URL anchor for this comment.
"""
result = re.sub(
'[^a-zA-Z0-9_]', '_', self.user + '_' + self.timestamp)
return result |
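The substitution above in isolation: every character outside [a-zA-Z0-9_] in the user/timestamp pair collapses to an underscore, which keeps the anchor safe as a URL fragment.
import re
user, timestamp = 'alice.smith', '2021-06-01T12:30:00Z'
print(re.sub('[^a-zA-Z0-9_]', '_', user + '_' + timestamp))
# alice_smith_2021_06_01T12_30_00Z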
def unescape_html(text):
"""
Removes HTML or XML character references and entities from a text string.
@param text The HTML (or XML) source text.
@return The plain text, as a Unicode string, if necessary.
Source: http://effbot.org/zone/re-sub.htm#unescape-html
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unicode_char(int(text[3:-1], 16))
else:
return unicode_char(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unicode_char(htmlentities.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub(r"&#?\w+;", fixup, text) | Removes HTML or XML character references and entities from a text string.
@param text The HTML (or XML) source text.
@return The plain text, as a Unicode string, if necessary.
Source: http://effbot.org/zone/re-sub.htm#unescape-html | Below is the the instruction that describes the task:
### Input:
Removes HTML or XML character references and entities from a text string.
@param text The HTML (or XML) source text.
@return The plain text, as a Unicode string, if necessary.
Source: http://effbot.org/zone/re-sub.htm#unescape-html
### Response:
def unescape_html(text):
"""
Removes HTML or XML character references and entities from a text string.
@param text The HTML (or XML) source text.
@return The plain text, as a Unicode string, if necessary.
Source: http://effbot.org/zone/re-sub.htm#unescape-html
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unicode_char(int(text[3:-1], 16))
else:
return unicode_char(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unicode_char(htmlentities.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub(r"&#?\w+;", fixup, text) |
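For reference, on Python 3.4+ the standard library covers essentially the same job (behaviour around malformed entities differs slightly):
from html import unescape
print(unescape('&copy; &#23383; &lt;tag&gt;'))   # © 字 <tag>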
def _GetLink(self):
"""Retrieves the link.
Returns:
str: link.
"""
if self._link is None:
if self._tar_info:
self._link = self._tar_info.linkname
return self._link | Retrieves the link.
Returns:
str: link. | Below is the the instruction that describes the task:
### Input:
Retrieves the link.
Returns:
str: link.
### Response:
def _GetLink(self):
"""Retrieves the link.
Returns:
str: link.
"""
if self._link is None:
if self._tar_info:
self._link = self._tar_info.linkname
return self._link |
def sub_dfs_by_num(df, num):
"""Get a generator yielding num consecutive sub-dataframes of the given df.
Arguments
---------
df : pandas.DataFrame
The dataframe for which to get sub-dataframes.
num : int
The number of sub-dataframe to divide the given dataframe into.
Returns
-------
generator
A generator yielding n consecutive sub-dataframes of the given df.
Example
-------
>>> import pandas as pd; import pdutil;
>>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
>>> df = pd.DataFrame(data, columns=['age', 'name'])
>>> for subdf in pdutil.iter.sub_dfs_by_num(df, 2): print(subdf)
age name
0 23 Jen
1 42 Ray
age name
2 15 Fin
"""
size = len(df) / float(num)
for i in range(num):
yield df.iloc[int(round(size * i)): int(round(size * (i + 1)))] | Get a generator yielding num consecutive sub-dataframes of the given df.
Arguments
---------
df : pandas.DataFrame
The dataframe for which to get sub-dataframes.
num : int
The number of sub-dataframe to divide the given dataframe into.
Returns
-------
generator
A generator yielding n consecutive sub-dataframes of the given df.
Example
-------
>>> import pandas as pd; import pdutil;
>>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
>>> df = pd.DataFrame(data, columns=['age', 'name'])
>>> for subdf in pdutil.iter.sub_dfs_by_num(df, 2): print(subdf)
age name
0 23 Jen
1 42 Ray
age name
2 15 Fin | Below is the the instruction that describes the task:
### Input:
Get a generator yielding num consecutive sub-dataframes of the given df.
Arguments
---------
df : pandas.DataFrame
The dataframe for which to get sub-dataframes.
num : int
The number of sub-dataframe to divide the given dataframe into.
Returns
-------
generator
A generator yielding n consecutive sub-dataframes of the given df.
Example
-------
>>> import pandas as pd; import pdutil;
>>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
>>> df = pd.DataFrame(data, columns=['age', 'name'])
>>> for subdf in pdutil.iter.sub_dfs_by_num(df, 2): print(subdf)
age name
0 23 Jen
1 42 Ray
age name
2 15 Fin
### Response:
def sub_dfs_by_num(df, num):
"""Get a generator yielding num consecutive sub-dataframes of the given df.
Arguments
---------
df : pandas.DataFrame
The dataframe for which to get sub-dataframes.
num : int
The number of sub-dataframe to divide the given dataframe into.
Returns
-------
generator
A generator yielding n consecutive sub-dataframes of the given df.
Example
-------
>>> import pandas as pd; import pdutil;
>>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]]
>>> df = pd.DataFrame(data, columns=['age', 'name'])
>>> for subdf in pdutil.iter.sub_dfs_by_num(df, 2): print(subdf)
age name
0 23 Jen
1 42 Ray
age name
2 15 Fin
"""
size = len(df) / float(num)
for i in range(num):
yield df.iloc[int(round(size * i)): int(round(size * (i + 1)))] |
def email_domain_free(value):
"""
Confirms that the email address is not using a free service.
@param {str} value
@returns {None}
@raises AssertionError
"""
domain = helpers.get_domain_from_email_address(value)
if domain.lower() in free_domains:
raise ValidationError(MESSAGE_USE_COMPANY_EMAIL) | Confirms that the email address is not using a free service.
@param {str} value
@returns {None}
@raises AssertionError | Below is the the instruction that describes the task:
### Input:
Confirms that the email address is not using a free service.
@param {str} value
@returns {None}
@raises AssertionError
### Response:
def email_domain_free(value):
"""
Confirms that the email address is not using a free service.
@param {str} value
@returns {None}
@raises AssertionError
"""
domain = helpers.get_domain_from_email_address(value)
if domain.lower() in free_domains:
raise ValidationError(MESSAGE_USE_COMPANY_EMAIL) |
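A hedged usage sketch, assuming gmail.com appears in the module's free_domains list, example.com does not, and ValidationError is Django's; corporate addresses pass silently while free-provider addresses raise.
try:
    email_domain_free('[email protected]')
except ValidationError:
    print('free e-mail providers are rejected')
email_domain_free('[email protected]')   # no exception for a non-free domain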