def default_listener(col_attr, default):
"""Establish a default-setting listener."""
@event.listens_for(col_attr, "init_scalar", retval=True, propagate=True)
def init_scalar(target, value, dict_):
if default.is_callable:
# the callable of ColumnDefault always accepts a context argument
value = default.arg(None)
elif default.is_scalar:
value = default.arg
else:
raise NotImplementedError(
"Can't invoke pre-default for a SQL-level column default")
dict_[col_attr.key] = value
        return value

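A minimal usage sketch (an illustration, not from the source): the listener is attached to a mapped attribute so that reading an unset attribute on a new object fills in the column default eagerly instead of returning None. The Widget model and the SQLAlchemy 1.4-style declarative_base import are assumptions.

from sqlalchemy import Column, Integer, String, event
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Widget(Base):
    __tablename__ = 'widget'
    id = Column(Integer, primary_key=True)
    name = Column(String, default='unnamed')  # scalar ColumnDefault

# Attach the listener for the 'name' attribute and its ColumnDefault.
default_listener(Widget.name, Widget.__table__.c.name.default)

w = Widget()
print(w.name)  # 'unnamed' -- supplied by the init_scalar listener, not the database
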
def movies_opening(self, **kwargs):
"""Gets the current opening movies from the API.
Args:
limit (optional): limits the number of movies returned, default=10
country (optional): localized data for selected country, default="us"
Returns:
        A dict representation of the JSON returned from the API.
"""
path = self._get_path('movies_opening')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
    return response

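A hedged usage sketch: the method reads like part of a Rotten Tomatoes API client; the RT class name and its API-key constructor below are illustrative assumptions, not confirmed by the source.

rt = RT(api_key='YOUR_API_KEY')  # hypothetical client class
opening = rt.movies_opening(limit=5, country='us')
for movie in opening.get('movies', []):
    print(movie.get('title'))
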
def safe_dir(obj, predicate = None):
'''
"Safely" obtain a list of attributes for an
# object.
Python's dynamic properties are incredibly useful
but there's a serious lack of good introspection
tools. Python provides an inspect module which does
introspection, but at its heart it relies on dir()
and getattr(), both of which can be overridden by
classes.
Django of course overrides both of these, which
means using Python's "proper" introspection class
will change the object we're trying to display.
Worse, if you have models with circular references,
attempting to recursively introspect anything that
touches the circular reference will trigger an
infinite recursion loop, crashing the application.
In particular, the trigger for this seems to be
the use of getattr() inside the inspect.getmembers()
method. To work around this, we access the object's
native properties dict.
Unfortunately, using the raw __dict__ will fail
because it doesn't account for base class properties,
methods, or anything else fancy. So this function
attempts to enumerate all of those items. Like the
inspect.getmembers() call, we accept a predicate
which will be used to filter the results.
'''
# safely get all the classes we need to look at
obj_mro = [ obj ]
if hasattr(obj.__class__, '__mro__'):
obj_mro.extend(obj.__class__.__mro__)
else:
        obj_mro.append(obj.__class__)  # a class is not iterable: append it, don't extend
# a set of attributes we will test
found_attrs = {}
if hasattr(obj, '__dict__'):
for c in obj_mro:
# if hasattr(c, '__name__'):
# debug_name = c.__name__
# else:
# debug_name = c.__class__.__name__ + ' instance'
# print 'MRO item:', debug_name
if hasattr(c, '__dict__'):
keylist = c.__dict__.keys()
for k in keylist:
if k not in found_attrs:
try:
v = obj.__dict__[k]
except KeyError: #, AttributeError:
# so actually AttributeError should
# never happen, but it seems a few
# classes will actually report they
# have the __dict__ attribute and
# then throw an AttributeError when
# you try to access it
continue
if predicate is None or predicate(v):
found_attrs[k] = v
# print len(keylist), len(c.__dict__.keys())
# print 'before:', keylist
# print ' after:', c.__dict__.keys()
    return sorted(found_attrs.items(), key=lambda item: item[0])

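A small self-contained demonstration: attribute keys are collected from the whole MRO, but values are read only from the instance's own __dict__, so class-level attributes are skipped by the KeyError handler.

class Base(object):
    shared = 1  # key found via the MRO, but absent from the instance __dict__

class Child(Base):
    def __init__(self):
        self.local = 2

print(safe_dir(Child(), predicate=lambda v: isinstance(v, int)))
# [('local', 2)]
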
def remove(self, value):
"""
Remove element *value* from the set. Raises :exc:`KeyError` if it
is not contained in the set.
"""
# Raise TypeError if value is not hashable
hash(value)
result = self.redis.srem(self.key, self._pickle(value))
if not result:
        raise KeyError(value)

def cbday_roll(self):
"""
Define default roll function to be called in apply method.
"""
cbday = CustomBusinessDay(n=self.n, normalize=False, **self.kwds)
if self._prefix.endswith('S'):
# MonthBegin
roll_func = cbday.rollforward
else:
# MonthEnd
roll_func = cbday.rollback
    return roll_func

def coerce_to_pendulum_date(x: PotentialDatetimeType,
assume_local: bool = False) -> Optional[Date]:
"""
Converts something to a :class:`pendulum.Date`.
Args:
x: something that may be coercible to a date
assume_local: if ``True``, assume local timezone; if ``False``, assume
UTC
Returns:
a :class:`pendulum.Date`, or ``None``.
Raises:
pendulum.parsing.exceptions.ParserError: if a string fails to parse
ValueError: if no conversion possible
"""
p = coerce_to_pendulum(x, assume_local=assume_local)
    return None if p is None else p.date()

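A brief usage sketch, assuming coerce_to_pendulum (defined in the same module) accepts ISO strings, datetime objects, and None:

import datetime

print(coerce_to_pendulum_date('2019-06-15'))  # Date(2019, 6, 15)
print(coerce_to_pendulum_date(None))          # None
# A naive datetime is read as local time when assume_local=True:
print(coerce_to_pendulum_date(datetime.datetime(2019, 6, 15, 23, 30),
                              assume_local=True))
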
def is_subtype_of(self, type_name, context=None):
"""Whether this class is a subtype of the given type.
    :param type_name: The name of the type to check against.
:type type_name: str
:returns: True if this class is a subtype of the given type,
False otherwise.
:rtype: bool
"""
if self.qname() == type_name:
return True
for anc in self.ancestors(context=context):
if anc.qname() == type_name:
return True
    return False

def command_exists(command, noop_invocation, exc_msg):
"""
Verify that the provided command exists. Raise CommandDoesNotExistException in case of an
error or if the command does not exist.
:param command: str, command to check (python 3 only)
:param noop_invocation: list of str, command to check (python 2 only)
:param exc_msg: str, message of exception when command does not exist
:return: bool, True if everything's all right (otherwise exception is thrown)
"""
try:
found = bool(shutil.which(command)) # py3 only
except AttributeError: # py2 branch
try:
p = subprocess.Popen(noop_invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
found = False
else:
stdout, stderr = p.communicate()
found = p.returncode == 0
if not found:
logger.error("`%s` exited with a non-zero return code (%s)",
noop_invocation, p.returncode)
logger.error("command stdout = %s", stdout)
logger.error("command stderr = %s", stderr)
if not found:
raise CommandDoesNotExistException(exc_msg)
    return True

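A usage sketch grounded in the signature above: the first argument serves Python 3 (checked via shutil.which), the second a harmless Python 2 invocation whose exit code proves the command exists.

try:
    command_exists('git', ['git', '--version'],
                   exc_msg='git is required but was not found on PATH')
except CommandDoesNotExistException:
    pass  # degrade gracefully, or re-raise
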
def refresh_db(force=False, **kwargs):
'''
Use pkg update to get latest pkg_summary
force
Pass -f so that the cache is always refreshed.
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
'''
# Remove rtag file to keep multiple refreshes from happening in pkg states
salt.utils.pkg.clear_rtag(__opts__)
pkgin = _check_pkgin()
if pkgin:
cmd = [pkgin, 'up']
if force:
cmd.insert(1, '-f')
call = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
if call['retcode'] != 0:
comment = ''
if 'stderr' in call:
comment += call['stderr']
raise CommandExecutionError(comment)
    return True

def get_changes(self, dest_attr, new_name=None, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Return the changes needed for this refactoring
Parameters:
- `dest_attr`: the name of the destination attribute
- `new_name`: the name of the new method; if `None` uses
the old name
- `resources` can be a list of `rope.base.resources.File`\s to
apply this refactoring on. If `None`, the restructuring
will be applied to all python files.
"""
changes = ChangeSet('Moving method <%s>' % self.method_name)
if resources is None:
resources = self.project.get_python_files()
if new_name is None:
new_name = self.get_method_name()
resource1, start1, end1, new_content1 = \
self._get_changes_made_by_old_class(dest_attr, new_name)
collector1 = codeanalyze.ChangeCollector(resource1.read())
collector1.add_change(start1, end1, new_content1)
resource2, start2, end2, new_content2 = \
self._get_changes_made_by_new_class(dest_attr, new_name)
if resource1 == resource2:
collector1.add_change(start2, end2, new_content2)
else:
collector2 = codeanalyze.ChangeCollector(resource2.read())
collector2.add_change(start2, end2, new_content2)
result = collector2.get_changed()
import_tools = importutils.ImportTools(self.project)
new_imports = self._get_used_imports(import_tools)
if new_imports:
goal_pymodule = libutils.get_string_module(
self.project, result, resource2)
result = _add_imports_to_module(
import_tools, goal_pymodule, new_imports)
if resource2 in resources:
changes.add_change(ChangeContents(resource2, result))
if resource1 in resources:
changes.add_change(ChangeContents(resource1,
collector1.get_changed()))
    return changes

def set_broad_fig_style(self):
'''4 times width, 1.5 times height'''
plt.rcParams.update({
'figure.figsize' : [self.frontierswidth/self.inchpercm*4, self.frontierswidth/self.inchpercm*1.5],
    })

def html_table_color(row, item, color=(0, 0, 0)):
"""
Return background color of each cell of table.
:param row: row dictionary
:type row : dict
:param item: cell number
:type item : int
:param color : input color
:type color : tuple
:return: background color as list [R,G,B]
"""
result = [0, 0, 0]
color_list = color_check(color)
max_color = max(color_list)
back_color_index = 255 - int((item / (sum(list(row.values())) + 1)) * 255)
for i in range(3):
result[i] = back_color_index - (max_color - color_list[i])
if result[i] < 0:
result[i] = 0
    return result

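A worked example of the arithmetic, assuming the external color_check((0, 0, 0)) simply returns [0, 0, 0]: for row = {'a': 10, 'b': 30} and item = 10, back_color_index = 255 - int((10 / 41) * 255) = 193, and with max_color = 0 every channel stays at 193, giving [193, 193, 193]; larger cell values therefore produce darker backgrounds.
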
def _divide_bundles(bundles):
"""Take each subsegment inside a bundle and put it in its own bundle,
copying the bundle metadata."""
divided = []
for bund in bundles:
for t in bund['times']:
new_bund = bund.copy()
new_bund['times'] = [t]
divided.append(new_bund)
    return divided

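A self-contained example of the flattening behavior; note that bund.copy() is shallow, so metadata other than 'times' is shared between the resulting bundles.

bundles = [{'label': 'walk', 'times': [(0.0, 5.0), (7.0, 9.0)]}]
print(_divide_bundles(bundles))
# [{'label': 'walk', 'times': [(0.0, 5.0)]},
#  {'label': 'walk', 'times': [(7.0, 9.0)]}]
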
def register_upload_callback(self, *args, **kwargs):
"""Registers an Upload function (see :ref:`upload-plugin`)
to handle a certain form.
Refer to :func:`sijax.plugin.upload.register_upload_callback`
for more details.
This method passes some additional arguments to your handler
functions - the ``flask.request.files`` object.
Your upload handler function's signature should look like this::
def func(obj_response, files, form_values)
:return: string - javascript code that initializes the form
"""
if 'args_extra' not in kwargs:
kwargs['args_extra'] = [request.files]
    return sijax.plugin.upload.register_upload_callback(self._sijax, *args, **kwargs)

def _updateType(self):
"""Make sure that the class behaves like the data structure that it
is, so that we don't get a ListFile trying to represent a dict."""
data = self._data()
# Change type if needed
if isinstance(data, dict) and isinstance(self, ListFile):
self.__class__ = DictFile
elif isinstance(data, list) and isinstance(self, DictFile):
        self.__class__ = ListFile

is, so that we don't get a ListFile trying to represent a dict. | Below is the the instruction that describes the task:
### Input:
Make sure that the class behaves like the data structure that it
is, so that we don't get a ListFile trying to represent a dict.
### Response:
def _updateType(self):
"""Make sure that the class behaves like the data structure that it
is, so that we don't get a ListFile trying to represent a dict."""
data = self._data()
# Change type if needed
if isinstance(data, dict) and isinstance(self, ListFile):
self.__class__ = DictFile
elif isinstance(data, list) and isinstance(self, DictFile):
self.__class__ = ListFile |
def compute_hr(sig_len, qrs_inds, fs):
"""
Compute instantaneous heart rate from peak indices.
Parameters
----------
sig_len : int
The length of the corresponding signal
qrs_inds : numpy array
The qrs index locations
fs : int, or float
The corresponding signal's sampling frequency.
Returns
-------
heart_rate : numpy array
An array of the instantaneous heart rate, with the length of the
corresponding signal. Contains numpy.nan where heart rate could
not be computed.
"""
heart_rate = np.full(sig_len, np.nan, dtype='float32')
if len(qrs_inds) < 2:
return heart_rate
for i in range(0, len(qrs_inds)-2):
a = qrs_inds[i]
b = qrs_inds[i+1]
c = qrs_inds[i+2]
rr = (b-a) * (1.0 / fs) * 1000
hr = 60000.0 / rr
heart_rate[b+1:c+1] = hr
heart_rate[qrs_inds[-1]:] = heart_rate[qrs_inds[-1]]
    return heart_rate

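A quick sanity check with evenly spaced beats: at fs = 200 Hz, QRS peaks 200 samples apart give an RR interval of 1000 ms, i.e. 60 bpm.

import numpy as np

qrs = np.array([100, 300, 500, 700])
hr = compute_hr(sig_len=1000, qrs_inds=qrs, fs=200)
print(hr[100])  # nan -- no rate is defined before the second beat
print(hr[400])  # 60.0
print(hr[999])  # 60.0 -- the last computed rate is carried to the end
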
def get1(self, name, **kwargs):
"""
Look up gender for a single name.
See :py:meth:`get`.
Doesn't support retheader option.
"""
if 'retheader' in kwargs:
raise GenderizeException(
"get1() doesn't support the retheader option.")
    return self.get([name], **kwargs)[0]

def add_transitions_to_closest_sibling_state_from_selected_state():
""" Generates the outcome transitions from outcomes with positive outcome_id to the closest next state
:return:
"""
task_string = "create transition"
sub_task_string = "to closest sibling state"
selected_state_m, msg = get_selected_single_state_model_and_check_for_its_parent()
if selected_state_m is None:
logger.warning("Can not {0} {1}: {2}".format(task_string, sub_task_string, msg))
return
logger.debug("Check to {0} {1} ...".format(task_string, sub_task_string))
state = selected_state_m.state
parent_state = state.parent
# find closest other state to connect to -> to_state
closest_sibling_state_tuple = gui_helper_meta_data.get_closest_sibling_state(selected_state_m, 'outcome')
if closest_sibling_state_tuple is None:
logger.info("Can not {0} {1}: There is no other sibling state.".format(task_string, sub_task_string))
return
distance, sibling_state_m = closest_sibling_state_tuple
to_state = sibling_state_m.state
# find all possible from outcomes
from_outcomes = get_all_outcomes_except_of_abort_and_preempt(state)
from_oc_not_connected = [oc for oc in from_outcomes if not state.parent.get_transition_for_outcome(state, oc)]
# all ports not connected connect to next state income
if from_oc_not_connected:
logger.debug("Create transition {0} ...".format(sub_task_string))
for from_outcome in from_oc_not_connected:
parent_state.add_transition(state.state_id, from_outcome.outcome_id, to_state.state_id, None)
# no transitions are removed if not all connected to the same other state
else:
target = remove_transitions_if_target_is_the_same(from_outcomes)
if target:
target_state_id, _ = target
if not target_state_id == to_state.state_id:
logger.info("Removed transitions from outcomes {0} "
"because all point to the same target.".format(sub_task_string.replace('closest ', '')))
add_transitions_to_closest_sibling_state_from_selected_state()
else:
logger.info("Removed transitions from outcomes {0} "
"because all point to the same target.".format(sub_task_string))
return True
logger.info("Will not {0} {1}: Not clear situation of connected transitions."
"There will be no transitions to other states be touched.".format(task_string, sub_task_string))
    return True

def create(cls, name, negotiation_expiration=200000,
negotiation_retry_timer=500,
negotiation_retry_max_number=32,
negotiation_retry_timer_max=7000,
certificate_cache_crl_validity=90000,
mobike_after_sa_update=False,
mobike_before_sa_update=False,
mobike_no_rrc=True):
"""
Create a new gateway setting profile.
:param str name: name of profile
:param int negotiation_expiration: expire after (ms)
:param int negotiation_retry_timer: retry time length (ms)
        :param int negotiation_retry_max_number: max number of retries allowed
:param int negotiation_retry_timer_max: maximum length for retry (ms)
:param int certificate_cache_crl_validity: cert cache validity (seconds)
:param boolean mobike_after_sa_update: Whether the After SA flag is set
for Mobike Policy
:param boolean mobike_before_sa_update: Whether the Before SA flag is
set for Mobike Policy
:param boolean mobike_no_rrc: Whether the No RRC flag is set for
Mobike Policy
:raises CreateElementFailed: failed creating profile
:return: instance with meta
:rtype: GatewaySettings
"""
json = {'name': name,
'negotiation_expiration': negotiation_expiration,
'negotiation_retry_timer': negotiation_retry_timer,
'negotiation_retry_max_number': negotiation_retry_max_number,
'negotiation_retry_timer_max': negotiation_retry_timer_max,
'certificate_cache_crl_validity': certificate_cache_crl_validity,
'mobike_after_sa_update': mobike_after_sa_update,
'mobike_before_sa_update': mobike_before_sa_update,
'mobike_no_rrc': mobike_no_rrc}
    return ElementCreator(cls, json)

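A usage sketch based solely on the signature above; only name is required, and any keyword overrides its default:

profile = GatewaySettings.create(
    name='vpn-gw-settings',
    negotiation_retry_max_number=64)
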
def update_offset(self, new_offset):
"""Updates how many data points to skip in caculations.
Always use this function to update offset instead of directly setting
self.offset.
Args:
new_offset: The new offset.
"""
self.offset = new_offset
self.data_points = self._data_points[self.offset:]
    self.timestamps = self._timestamps[self.offset:]

def update(self, tag_id, name):
"""
    Edit a tag.
    :param tag_id: tag ID, assigned by WeChat
    :param name: tag name (at most 30 characters)
    :return: the JSON response returned by the API
"""
name = to_text(name)
return self._post(
'tags/update',
data={
'tag': {
'id': int(tag_id),
'name': name
}
}
    )

def srepr(expr, indented=False, cache=None):
"""Render the given expression into a string that can be evaluated in an
appropriate context to re-instantiate an identical expression. If
`indented` is False (default), the resulting string is a single line.
Otherwise, the result is a multiline string, and each positional and
keyword argument of each `Expression` is on a separate line, recursively
indented to produce a tree-like output. The `cache` may be used to generate
more readable expressions.
Example:
>>> hs = LocalSpace('1')
>>> A = OperatorSymbol('A', hs=hs); B = OperatorSymbol('B', hs=hs)
>>> expr = A + B
>>> srepr(expr)
"OperatorPlus(OperatorSymbol('A', hs=LocalSpace('1')), OperatorSymbol('B', hs=LocalSpace('1')))"
>>> eval(srepr(expr)) == expr
True
>>> srepr(expr, cache={hs:'hs'})
"OperatorPlus(OperatorSymbol('A', hs=hs), OperatorSymbol('B', hs=hs))"
>>> eval(srepr(expr, cache={hs:'hs'})) == expr
True
>>> print(srepr(expr, indented=True))
OperatorPlus(
OperatorSymbol(
'A',
hs=LocalSpace(
'1')),
OperatorSymbol(
'B',
hs=LocalSpace(
'1')))
>>> eval(srepr(expr, indented=True)) == expr
True
See also:
:func:`~qnet.printing.tree.print_tree`, respectively
:func:`qnet.printing.tree.tree`, produces an output similar to
the indented :func:`srepr`, for interactive use. Their result
cannot be evaluated and the exact output depends on
:func:`init_printing`.
:func:`~qnet.printing.dot.dotprint` provides a way to graphically
explore the tree structure of an expression.
"""
if indented:
printer = IndentedSReprPrinter(cache=cache)
else:
printer = QnetSReprPrinter(cache=cache)
    return printer.doprint(expr)

def setup(self, **kwargs):
"""
Setting up Grab instance configuration.
"""
for key in kwargs:
if key not in self.config.keys():
raise error.GrabMisuseError('Unknown option: %s' % key)
if 'url' in kwargs:
if self.config.get('url'):
kwargs['url'] = self.make_url_absolute(kwargs['url'])
    self.config.update(kwargs)

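A short sketch of the two behaviors visible in the code: a relative URL is resolved against the currently configured one (via make_url_absolute), and an unknown key is rejected.

g = Grab()
g.setup(url='https://example.com/')
g.setup(url='/page')        # stored as https://example.com/page
g.setup(no_such_option=1)   # raises error.GrabMisuseError: Unknown option: no_such_option
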
def performance_diagram(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8),
xlabel="Success Ratio (1-FAR)",
ylabel="Probability of Detection", ticks=np.arange(0, 1.1, 0.1),
dpi=300, csi_cmap="Blues",
csi_label="Critical Success Index", title="Performance Diagram",
legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5), label_fontsize=14,
title_fontsize=16, tick_fontsize=12):
"""
Draws a performance diagram from a set of DistributedROC objects.
A performance diagram is a variation on the ROC curve in which the Probability of False Detection on the
x-axis has been replaced with the Success Ratio (1-False Alarm Ratio or Precision). The diagram also shows
the Critical Success Index (CSI or Threat Score) as a series of curved contours, and the frequency bias as
angled diagonal lines. Points along the 1:1 diagonal are unbiased, and better performing models should appear
in the upper right corner. The performance diagram is particularly useful for displaying verification for
severe weather warnings as it displays all three commonly used statistics (POD, FAR, and CSI) simultaneously
on the same chart.
Args:
roc_objs (list): DistributedROC objects being plotted.
        obj_labels (list): Labels describing the forecast associated with each DistributedROC object.
colors (list): List of matplotlib-readable colors (names or hex-values) for each curve.
markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve.
filename (str): Name of figure file being saved.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): Label for the x-axis.
ylabel (str): Label for the y-axis.
title (str): The title of the figure.
ticks (numpy.ndarray): Values shown on the x and y axes.
dpi (int): Figure resolution in dots per inch.
csi_cmap (str): Matplotlib colormap used to fill CSI contours.
csi_label (str): Label for CSI colormap.
legend_params (None or dict): Keyword arguments for the formatting of the figure legend.
bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None,
confidence regions will be plotted.
ci (tuple): tuple of bootstrap confidence interval percentiles.
label_fontsize (int): Font size of the x and y axis labels.
title_fontsize (int): Font size of the title.
tick_fontsize (int): Font size of the x and y tick labels.
Examples:
>>> from hagelslag.evaluation import DistributedROC
>>> import numpy as np
>>> forecasts = np.random.random(1000)
>>> obs = np.random.random_integers(0, 1, 1000)
>>> roc = DistributedROC()
>>> roc.update(forecasts, obs)
>>> performance_diagram([roc], ["Random"], ["orange"], ["o"], "random_performance.png")
"""
if legend_params is None:
legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)
plt.figure(figsize=figsize)
grid_ticks = np.arange(0, 1.01, 0.01)
sr_g, pod_g = np.meshgrid(grid_ticks, grid_ticks)
bias = pod_g / sr_g
csi = 1.0 / (1.0 / sr_g + 1.0 / pod_g - 1.0)
csi_contour = plt.contourf(sr_g, pod_g, csi, np.arange(0.1, 1.1, 0.1), extend="max", cmap=csi_cmap)
b_contour = plt.contour(sr_g, pod_g, bias, [0.5, 1, 1.5, 2, 4], colors="k", linestyles="dashed")
plt.clabel(b_contour, fmt="%1.1f", manual=[(0.2, 0.9), (0.4, 0.9), (0.6, 0.9), (0.7, 0.7)])
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
perf_curves = np.dstack([b_roc.performance_curve().values for b_roc in b_set])
pod_range = np.nanpercentile(perf_curves[:, 0], ci, axis=1)
sr_range = np.nanpercentile(1 - perf_curves[:, 1], ci, axis=1)
pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1]))
sr_poly = np.concatenate((sr_range[1], sr_range[0, ::-1]))
pod_poly[np.isnan(pod_poly)] = 0
sr_poly[np.isnan(sr_poly)] = 1
plt.fill(sr_poly, pod_poly, alpha=0.5, color=colors[b])
for r, roc_obj in enumerate(roc_objs):
perf_data = roc_obj.performance_curve()
plt.plot(1 - perf_data["FAR"], perf_data["POD"], marker=markers[r], color=colors[r], label=obj_labels[r])
cbar = plt.colorbar(csi_contour)
cbar.set_label(csi_label)
plt.xlabel(xlabel, fontsize=label_fontsize)
plt.ylabel(ylabel, fontsize=label_fontsize)
plt.xticks(ticks, fontsize=tick_fontsize)
plt.yticks(ticks, fontsize=tick_fontsize)
plt.title(title, fontsize=title_fontsize)
plt.legend(**legend_params)
plt.savefig(filename, dpi=dpi, bbox_inches="tight")
    plt.close()
A performance diagram is a variation on the ROC curve in which the Probability of False Detection on the
x-axis has been replaced with the Success Ratio (1-False Alarm Ratio or Precision). The diagram also shows
the Critical Success Index (CSI or Threat Score) as a series of curved contours, and the frequency bias as
angled diagonal lines. Points along the 1:1 diagonal are unbiased, and better performing models should appear
in the upper right corner. The performance diagram is particularly useful for displaying verification for
severe weather warnings as it displays all three commonly used statistics (POD, FAR, and CSI) simultaneously
on the same chart.
Args:
roc_objs (list): DistributedROC objects being plotted.
obj_labels: list or array of labels describing each DistributedROC object.
obj_labels (list): Label describing the forecast associated with a DistributedROC object.
colors (list): List of matplotlib-readable colors (names or hex-values) for each curve.
markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve.
filename (str): Name of figure file being saved.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): Label for the x-axis.
ylabel (str): Label for the y-axis.
title (str): The title of the figure.
ticks (numpy.ndarray): Values shown on the x and y axes.
dpi (int): Figure resolution in dots per inch.
csi_cmap (str): Matplotlib colormap used to fill CSI contours.
csi_label (str): Label for CSI colormap.
legend_params (None or dict): Keyword arguments for the formatting of the figure legend.
bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None,
confidence regions will be plotted.
ci (tuple): tuple of bootstrap confidence interval percentiles.
label_fontsize (int): Font size of the x and y axis labels.
title_fontsize (int): Font size of the title.
tick_fontsize (int): Font size of the x and y tick labels.
Examples:
>>> from hagelslag.evaluation import DistributedROC
>>> import numpy as np
>>> forecasts = np.random.random(1000)
>>> obs = np.random.randint(0, 2, 1000)
>>> roc = DistributedROC()
>>> roc.update(forecasts, obs)
>>> performance_diagram([roc], ["Random"], ["orange"], ["o"], "random_performance.png") | Below is the the instruction that describes the task:
### Input:
Draws a performance diagram from a set of DistributedROC objects.
A performance diagram is a variation on the ROC curve in which the Probability of False Detection on the
x-axis has been replaced with the Success Ratio (1-False Alarm Ratio or Precision). The diagram also shows
the Critical Success Index (CSI or Threat Score) as a series of curved contours, and the frequency bias as
angled diagonal lines. Points along the 1:1 diagonal are unbiased, and better performing models should appear
in the upper right corner. The performance diagram is particularly useful for displaying verification for
severe weather warnings as it displays all three commonly used statistics (POD, FAR, and CSI) simultaneously
on the same chart.
Args:
roc_objs (list): DistributedROC objects being plotted.
obj_labels (list): Labels describing the forecast associated with each DistributedROC object.
colors (list): List of matplotlib-readable colors (names or hex-values) for each curve.
markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve.
filename (str): Name of figure file being saved.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): Label for the x-axis.
ylabel (str): Label for the y-axis.
title (str): The title of the figure.
ticks (numpy.ndarray): Values shown on the x and y axes.
dpi (int): Figure resolution in dots per inch.
csi_cmap (str): Matplotlib colormap used to fill CSI contours.
csi_label (str): Label for CSI colormap.
legend_params (None or dict): Keyword arguments for the formatting of the figure legend.
bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None,
confidence regions will be plotted.
ci (tuple): tuple of bootstrap confidence interval percentiles.
label_fontsize (int): Font size of the x and y axis labels.
title_fontsize (int): Font size of the title.
tick_fontsize (int): Font size of the x and y tick labels.
Examples:
>>> from hagelslag.evaluation import DistributedROC
>>> import numpy as np
>>> forecasts = np.random.random(1000)
>>> obs = np.random.randint(0, 2, 1000)
>>> roc = DistributedROC()
>>> roc.update(forecasts, obs)
>>> performance_diagram([roc], ["Random"], ["orange"], ["o"], "random_performance.png")
### Response:
def performance_diagram(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8),
xlabel="Success Ratio (1-FAR)",
ylabel="Probability of Detection", ticks=np.arange(0, 1.1, 0.1),
dpi=300, csi_cmap="Blues",
csi_label="Critical Success Index", title="Performance Diagram",
legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5), label_fontsize=14,
title_fontsize=16, tick_fontsize=12):
"""
Draws a performance diagram from a set of DistributedROC objects.
A performance diagram is a variation on the ROC curve in which the Probability of False Detection on the
x-axis has been replaced with the Success Ratio (1-False Alarm Ratio or Precision). The diagram also shows
the Critical Success Index (CSI or Threat Score) as a series of curved contours, and the frequency bias as
angled diagonal lines. Points along the 1:1 diagonal are unbiased, and better performing models should appear
in the upper right corner. The performance diagram is particularly useful for displaying verification for
severe weather warnings as it displays all three commonly used statistics (POD, FAR, and CSI) simultaneously
on the same chart.
Args:
roc_objs (list): DistributedROC objects being plotted.
obj_labels (list): Labels describing the forecast associated with each DistributedROC object.
colors (list): List of matplotlib-readable colors (names or hex-values) for each curve.
markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve.
filename (str): Name of figure file being saved.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): Label for the x-axis.
ylabel (str): Label for the y-axis.
title (str): The title of the figure.
ticks (numpy.ndarray): Values shown on the x and y axes.
dpi (int): Figure resolution in dots per inch.
csi_cmap (str): Matplotlib colormap used to fill CSI contours.
csi_label (str): Label for CSI colormap.
legend_params (None or dict): Keyword arguments for the formatting of the figure legend.
bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None,
confidence regions will be plotted.
ci (tuple): tuple of bootstrap confidence interval percentiles.
label_fontsize (int): Font size of the x and y axis labels.
title_fontsize (int): Font size of the title.
tick_fontsize (int): Font size of the x and y tick labels.
Examples:
>>> from hagelslag.evaluation import DistributedROC
>>> import numpy as np
>>> forecasts = np.random.random(1000)
>>> obs = np.random.randint(0, 2, 1000)
>>> roc = DistributedROC()
>>> roc.update(forecasts, obs)
>>> performance_diagram([roc], ["Random"], ["orange"], ["o"], "random_performance.png")
"""
if legend_params is None:
legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)
plt.figure(figsize=figsize)
grid_ticks = np.arange(0, 1.01, 0.01)
sr_g, pod_g = np.meshgrid(grid_ticks, grid_ticks)
bias = pod_g / sr_g
csi = 1.0 / (1.0 / sr_g + 1.0 / pod_g - 1.0)
csi_contour = plt.contourf(sr_g, pod_g, csi, np.arange(0.1, 1.1, 0.1), extend="max", cmap=csi_cmap)
b_contour = plt.contour(sr_g, pod_g, bias, [0.5, 1, 1.5, 2, 4], colors="k", linestyles="dashed")
plt.clabel(b_contour, fmt="%1.1f", manual=[(0.2, 0.9), (0.4, 0.9), (0.6, 0.9), (0.7, 0.7)])
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
perf_curves = np.dstack([b_roc.performance_curve().values for b_roc in b_set])
pod_range = np.nanpercentile(perf_curves[:, 0], ci, axis=1)
sr_range = np.nanpercentile(1 - perf_curves[:, 1], ci, axis=1)
pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1]))
sr_poly = np.concatenate((sr_range[1], sr_range[0, ::-1]))
pod_poly[np.isnan(pod_poly)] = 0
sr_poly[np.isnan(sr_poly)] = 1
plt.fill(sr_poly, pod_poly, alpha=0.5, color=colors[b])
for r, roc_obj in enumerate(roc_objs):
perf_data = roc_obj.performance_curve()
plt.plot(1 - perf_data["FAR"], perf_data["POD"], marker=markers[r], color=colors[r], label=obj_labels[r])
cbar = plt.colorbar(csi_contour)
cbar.set_label(csi_label)
plt.xlabel(xlabel, fontsize=label_fontsize)
plt.ylabel(ylabel, fontsize=label_fontsize)
plt.xticks(ticks, fontsize=tick_fontsize)
plt.yticks(ticks, fontsize=tick_fontsize)
plt.title(title, fontsize=title_fontsize)
plt.legend(**legend_params)
plt.savefig(filename, dpi=dpi, bbox_inches="tight")
plt.close() |
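For the bootstrap confidence regions described above, the sketch below shows one way bootstrap_sets could be built; the resampling loop and the n_boot value are illustrative assumptions, not part of the library.

import numpy as np
from hagelslag.evaluation import DistributedROC

forecasts = np.random.random(1000)
obs = np.random.randint(0, 2, 1000)
roc = DistributedROC()
roc.update(forecasts, obs)

n_boot = 100  # assumed number of bootstrap replicates
boot_rocs = []
for _ in range(n_boot):
    idx = np.random.randint(0, forecasts.size, forecasts.size)  # resample with replacement
    b_roc = DistributedROC()
    b_roc.update(forecasts[idx], obs[idx])
    boot_rocs.append(b_roc)

performance_diagram([roc], ["Random"], ["orange"], ["o"], "random_performance_ci.png",
                    bootstrap_sets=[boot_rocs], ci=(2.5, 97.5))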
def calculateProbableRootOfGeneTree(speciesTree, geneTree, processID=lambda x : x):
"""
Goes through each possible root branch, making it the root.
Returns the tree that requires the minimum number of duplications.
"""
#get all rooted trees
#run dup calc on each tree
#return tree with fewest number of dups
if geneTree.traversalID.midEnd <= 3:
return (geneTree, 0, 0)  # keep the (tree, dups, losses) order of the final return
checkGeneTreeMatchesSpeciesTree(speciesTree, geneTree, processID)
l = []
def fn(tree):
if tree.traversalID.mid != geneTree.left.traversalID.mid and tree.traversalID.mid != geneTree.right.traversalID.mid:
newGeneTree = moveRoot(geneTree, tree.traversalID.mid)
binaryTree_depthFirstNumbers(newGeneTree)
dupCount, lossCount = calculateDupsAndLossesByReconcilingTrees(speciesTree, newGeneTree, processID)
l.append((dupCount, lossCount, newGeneTree))
if tree.internal:
fn(tree.left)
fn(tree.right)
fn(geneTree)
l.sort()
return l[0][2], l[0][0], l[0][1] | Goes through each possible root branch, making it the root.
Returns the tree that requires the minimum number of duplications. | Below is the the instruction that describes the task:
### Input:
Goes through each possible root branch, making it the root.
Returns the tree that requires the minimum number of duplications.
### Response:
def calculateProbableRootOfGeneTree(speciesTree, geneTree, processID=lambda x : x):
"""
Goes through each possible root branch, making it the root.
Returns the tree that requires the minimum number of duplications.
"""
#get all rooted trees
#run dup calc on each tree
#return tree with fewest number of dups
if geneTree.traversalID.midEnd <= 3:
return (geneTree, 0, 0)  # keep the (tree, dups, losses) order of the final return
checkGeneTreeMatchesSpeciesTree(speciesTree, geneTree, processID)
l = []
def fn(tree):
if tree.traversalID.mid != geneTree.left.traversalID.mid and tree.traversalID.mid != geneTree.right.traversalID.mid:
newGeneTree = moveRoot(geneTree, tree.traversalID.mid)
binaryTree_depthFirstNumbers(newGeneTree)
dupCount, lossCount = calculateDupsAndLossesByReconcilingTrees(speciesTree, newGeneTree, processID)
l.append((dupCount, lossCount, newGeneTree))
if tree.internal:
fn(tree.left)
fn(tree.right)
fn(geneTree)
l.sort()
return l[0][2], l[0][0], l[0][1] |
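The tuple sort above is what selects the rooting: Python's lexicographic tuple ordering minimizes duplications first and losses second. A toy illustration, with strings standing in for tree objects:

candidates = [(3, 5, "root_at_A"), (2, 9, "root_at_B"), (2, 4, "root_at_C")]
candidates.sort()                 # orders by dupCount, then lossCount
best = candidates[0]
print(best[2], best[0], best[1])  # root_at_C 2 4

Note that a tie on both counts would make sort() fall through to comparing the tree objects themselves, which arbitrary tree instances may not support.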
def start(
self,
phone=lambda: input('Please enter your phone (or bot token): '),
password=lambda: getpass.getpass('Please enter your password: '),
*,
bot_token=None, force_sms=False, code_callback=None,
first_name='New User', last_name='', max_attempts=3):
"""
Convenience method to interactively connect and sign in if required,
also taking into consideration that 2FA may be enabled in the account.
If the phone doesn't belong to an existing account (and will hence
`sign_up` for a new one), **you are agreeing to Telegram's
Terms of Service. This is required and your account
will be banned otherwise.** See https://telegram.org/tos
and https://core.telegram.org/api/terms.
Example usage:
>>> client = ...
>>> client.start(phone)
Please enter the code you received: 12345
Please enter your password: *******
(You are now logged in)
If the event loop is already running, this method returns a
coroutine that you should await in your own code; otherwise
the loop is run until said coroutine completes.
Args:
phone (`str` | `int` | `callable`):
The phone (or callable without arguments to get it)
to which the code will be sent. If a bot-token-like
string is given, it will be used as such instead.
The argument may be a coroutine.
password (`str`, `callable`, optional):
The password for 2 Factor Authentication (2FA).
This is only required if it is enabled in your account.
The argument may be a coroutine.
bot_token (`str`):
Bot Token obtained by `@BotFather <https://t.me/BotFather>`_
to log in as a bot. Cannot be specified with ``phone`` (only
one of either allowed).
force_sms (`bool`, optional):
Whether to force sending the code request as SMS.
This only makes sense when signing in with a `phone`.
code_callback (`callable`, optional):
A callable that will be used to retrieve the Telegram
login code. Defaults to `input()`.
The argument may be a coroutine.
first_name (`str`, optional):
The first name to be used if signing up. This has no
effect if the account already exists and you sign in.
last_name (`str`, optional):
Similar to the first name, but for the last. Optional.
max_attempts (`int`, optional):
How many times the code/password callback should be
retried, or how many times to switch between signing in and signing up.
Returns:
This `TelegramClient`, so initialization
can be chained with ``.start()``.
"""
if code_callback is None:
def code_callback():
return input('Please enter the code you received: ')
elif not callable(code_callback):
raise ValueError(
'The code_callback parameter needs to be a callable '
'function that returns the code you received by Telegram.'
)
if not phone and not bot_token:
raise ValueError('No phone number or bot token provided.')
if phone and bot_token and not callable(phone):
raise ValueError('Both a phone and a bot token provided, '
'must only provide one of either')
coro = self._start(
phone=phone,
password=password,
bot_token=bot_token,
force_sms=force_sms,
code_callback=code_callback,
first_name=first_name,
last_name=last_name,
max_attempts=max_attempts
)
return (
coro if self.loop.is_running()
else self.loop.run_until_complete(coro)
) | Convenience method to interactively connect and sign in if required,
also taking into consideration that 2FA may be enabled in the account.
If the phone doesn't belong to an existing account (and will hence
`sign_up` for a new one), **you are agreeing to Telegram's
Terms of Service. This is required and your account
will be banned otherwise.** See https://telegram.org/tos
and https://core.telegram.org/api/terms.
Example usage:
>>> client = ...
>>> client.start(phone)
Please enter the code you received: 12345
Please enter your password: *******
(You are now logged in)
If the event loop is already running, this method returns a
coroutine that you should await in your own code; otherwise
the loop is run until said coroutine completes.
Args:
phone (`str` | `int` | `callable`):
The phone (or callable without arguments to get it)
to which the code will be sent. If a bot-token-like
string is given, it will be used as such instead.
The argument may be a coroutine.
password (`str`, `callable`, optional):
The password for 2 Factor Authentication (2FA).
This is only required if it is enabled in your account.
The argument may be a coroutine.
bot_token (`str`):
Bot Token obtained by `@BotFather <https://t.me/BotFather>`_
to log in as a bot. Cannot be specified with ``phone`` (only
one of either allowed).
force_sms (`bool`, optional):
Whether to force sending the code request as SMS.
This only makes sense when signing in with a `phone`.
code_callback (`callable`, optional):
A callable that will be used to retrieve the Telegram
login code. Defaults to `input()`.
The argument may be a coroutine.
first_name (`str`, optional):
The first name to be used if signing up. This has no
effect if the account already exists and you sign in.
last_name (`str`, optional):
Similar to the first name, but for the last. Optional.
max_attempts (`int`, optional):
How many times the code/password callback should be
retried, or how many times to switch between signing in and signing up.
Returns:
This `TelegramClient`, so initialization
can be chained with ``.start()``. | Below is the the instruction that describes the task:
### Input:
Convenience method to interactively connect and sign in if required,
also taking into consideration that 2FA may be enabled in the account.
If the phone doesn't belong to an existing account (and will hence
`sign_up` for a new one), **you are agreeing to Telegram's
Terms of Service. This is required and your account
will be banned otherwise.** See https://telegram.org/tos
and https://core.telegram.org/api/terms.
Example usage:
>>> client = ...
>>> client.start(phone)
Please enter the code you received: 12345
Please enter your password: *******
(You are now logged in)
If the event loop is already running, this method returns a
coroutine that you should await in your own code; otherwise
the loop is run until said coroutine completes.
Args:
phone (`str` | `int` | `callable`):
The phone (or callable without arguments to get it)
to which the code will be sent. If a bot-token-like
string is given, it will be used as such instead.
The argument may be a coroutine.
password (`str`, `callable`, optional):
The password for 2 Factor Authentication (2FA).
This is only required if it is enabled in your account.
The argument may be a coroutine.
bot_token (`str`):
Bot Token obtained by `@BotFather <https://t.me/BotFather>`_
to log in as a bot. Cannot be specified with ``phone`` (only
one of either allowed).
force_sms (`bool`, optional):
Whether to force sending the code request as SMS.
This only makes sense when signing in with a `phone`.
code_callback (`callable`, optional):
A callable that will be used to retrieve the Telegram
login code. Defaults to `input()`.
The argument may be a coroutine.
first_name (`str`, optional):
The first name to be used if signing up. This has no
effect if the account already exists and you sign in.
last_name (`str`, optional):
Similar to the first name, but for the last. Optional.
max_attempts (`int`, optional):
How many times the code/password callback should be
retried, or how many times to switch between signing in and signing up.
Returns:
This `TelegramClient`, so initialization
can be chained with ``.start()``.
### Response:
def start(
self,
phone=lambda: input('Please enter your phone (or bot token): '),
password=lambda: getpass.getpass('Please enter your password: '),
*,
bot_token=None, force_sms=False, code_callback=None,
first_name='New User', last_name='', max_attempts=3):
"""
Convenience method to interactively connect and sign in if required,
also taking into consideration that 2FA may be enabled in the account.
If the phone doesn't belong to an existing account (and will hence
`sign_up` for a new one), **you are agreeing to Telegram's
Terms of Service. This is required and your account
will be banned otherwise.** See https://telegram.org/tos
and https://core.telegram.org/api/terms.
Example usage:
>>> client = ...
>>> client.start(phone)
Please enter the code you received: 12345
Please enter your password: *******
(You are now logged in)
If the event loop is already running, this method returns a
coroutine that you should await in your own code; otherwise
the loop is run until said coroutine completes.
Args:
phone (`str` | `int` | `callable`):
The phone (or callable without arguments to get it)
to which the code will be sent. If a bot-token-like
string is given, it will be used as such instead.
The argument may be a coroutine.
password (`str`, `callable`, optional):
The password for 2 Factor Authentication (2FA).
This is only required if it is enabled in your account.
The argument may be a coroutine.
bot_token (`str`):
Bot Token obtained by `@BotFather <https://t.me/BotFather>`_
to log in as a bot. Cannot be specified with ``phone`` (only
one of either allowed).
force_sms (`bool`, optional):
Whether to force sending the code request as SMS.
This only makes sense when signing in with a `phone`.
code_callback (`callable`, optional):
A callable that will be used to retrieve the Telegram
login code. Defaults to `input()`.
The argument may be a coroutine.
first_name (`str`, optional):
The first name to be used if signing up. This has no
effect if the account already exists and you sign in.
last_name (`str`, optional):
Similar to the first name, but for the last. Optional.
max_attempts (`int`, optional):
How many times the code/password callback should be
retried, or how many times to switch between signing in and signing up.
Returns:
This `TelegramClient`, so initialization
can be chained with ``.start()``.
"""
if code_callback is None:
def code_callback():
return input('Please enter the code you received: ')
elif not callable(code_callback):
raise ValueError(
'The code_callback parameter needs to be a callable '
'function that returns the code you received by Telegram.'
)
if not phone and not bot_token:
raise ValueError('No phone number or bot token provided.')
if phone and bot_token and not callable(phone):
raise ValueError('Both a phone and a bot token provided, '
'must only provide one of either')
coro = self._start(
phone=phone,
password=password,
bot_token=bot_token,
force_sms=force_sms,
code_callback=code_callback,
first_name=first_name,
last_name=last_name,
max_attempts=max_attempts
)
return (
coro if self.loop.is_running()
else self.loop.run_until_complete(coro)
) |
def render_homepage(config, env):
"""Render the homepage.jinja template."""
template = env.get_template('homepage.jinja')
rendered_page = template.render(
config=config)
return rendered_page | Render the homepage.jinja template. | Below is the the instruction that describes the task:
### Input:
Render the homepage.jinja template.
### Response:
def render_homepage(config, env):
"""Render the homepage.jinja template."""
template = env.get_template('homepage.jinja')
rendered_page = template.render(
config=config)
return rendered_page |
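A hypothetical caller for render_homepage; the templates directory and config contents are illustrative.

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("templates"))  # directory must contain homepage.jinja
config = {"site_name": "Example Site"}
with open("index.html", "w") as f:
    f.write(render_homepage(config, env))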
def dynamic_part_name(raml_resource, route_name, pk_field):
""" Generate a dynamic part for a resource :raml_resource:.
A dynamic part is generated using 2 parts: :route_name: of the
resource and the dynamic part of first dynamic child resources. If
:raml_resource: has no dynamic child resources, 'id' is used as the
2nd part.
E.g. if your dynamic part on route 'stories' is named 'superId' then
the dynamic part will be 'stories_superId'.
:param raml_resource: Instance of ramlfications.raml.ResourceNode for
which dynamic part name is being generated.
:param route_name: Cleaned name of :raml_resource:
:param pk_field: Model Primary Key field name.
"""
subresources = get_resource_children(raml_resource)
dynamic_uris = [res.path for res in subresources
if is_dynamic_uri(res.path)]
if dynamic_uris:
dynamic_part = extract_dynamic_part(dynamic_uris[0])
else:
dynamic_part = pk_field
return '_'.join([route_name, dynamic_part]) | Generate a dynamic part for a resource :raml_resource:.
A dynamic part is generated using 2 parts: :route_name: of the
resource and the dynamic part of first dynamic child resources. If
:raml_resource: has no dynamic child resources, 'id' is used as the
2nd part.
E.g. if your dynamic part on route 'stories' is named 'superId' then
the dynamic part will be 'stories_superId'.
:param raml_resource: Instance of ramlfications.raml.ResourceNode for
which dynamic part name is being generated.
:param route_name: Cleaned name of :raml_resource:
:param pk_field: Model Primary Key field name. | Below is the the instruction that describes the task:
### Input:
Generate a dynamic part for a resource :raml_resource:.
A dynamic part is generated using 2 parts: :route_name: of the
resource and the dynamic part of first dynamic child resources. If
:raml_resource: has no dynamic child resources, 'id' is used as the
2nd part.
E.g. if your dynamic part on route 'stories' is named 'superId' then
the dynamic part will be 'stories_superId'.
:param raml_resource: Instance of ramlfications.raml.ResourceNode for
which dynamic part name is being generated.
:param route_name: Cleaned name of :raml_resource:
:param pk_field: Model Primary Key field name.
### Response:
def dynamic_part_name(raml_resource, route_name, pk_field):
""" Generate a dynamic part for a resource :raml_resource:.
A dynamic part is generated using 2 parts: :route_name: of the
resource and the dynamic part of first dynamic child resources. If
:raml_resource: has no dynamic child resources, 'id' is used as the
2nd part.
E.g. if your dynamic part on route 'stories' is named 'superId' then
the dynamic part will be 'stories_superId'.
:param raml_resource: Instance of ramlfications.raml.ResourceNode for
which dynamic part name is being generated.
:param route_name: Cleaned name of :raml_resource:
:param pk_field: Model Primary Key field name.
"""
subresources = get_resource_children(raml_resource)
dynamic_uris = [res.path for res in subresources
if is_dynamic_uri(res.path)]
if dynamic_uris:
dynamic_part = extract_dynamic_part(dynamic_uris[0])
else:
dynamic_part = pk_field
return '_'.join([route_name, dynamic_part]) |
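A self-contained illustration of the naming rule; the regex helper below is a stand-in for the module's extract_dynamic_part, not its actual implementation.

import re

def _extract_dynamic_part_demo(uri):
    match = re.search(r"\{(\w+)\}", uri)  # pull the name out of a '{...}' segment
    return match.group(1) if match else None

route_name = "stories"
child_uri = "/stories/{superId}"
print("_".join([route_name, _extract_dynamic_part_demo(child_uri)]))  # stories_superId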
def correct(tokens, term_freq):
"""
Correct a list of tokens, according to the term_freq
"""
log = []
output = []
for token in tokens:
corrected = _correct(token, term_freq)
if corrected != token:
log.append((token, corrected))
output.append(corrected)
return output, log | Correct a list of tokens, according to the term_freq | Below is the the instruction that describes the task:
### Input:
Correct a list of tokens, according to the term_freq
### Response:
def correct(tokens, term_freq):
"""
Correct a list of tokens, according to the term_freq
"""
log = []
output = []
for token in tokens:
corrected = _correct(token, term_freq)
if corrected != token:
log.append((token, corrected))
output.append(corrected)
return output, log |
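Hypothetical usage; the shape of term_freq (a token-to-frequency mapping) is inferred from the _correct helper's name, and the exact corrections depend on its implementation.

term_freq = {"hello": 120, "world": 95, "python": 80}
tokens = ["helo", "world", "pythn"]
output, log = correct(tokens, term_freq)
print(output)  # e.g. ['hello', 'world', 'python']
print(log)     # e.g. [('helo', 'hello'), ('pythn', 'python')]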
def create_equipamento_roteiro(self):
"""Get an instance of equipamento_roteiro services facade."""
return EquipamentoRoteiro(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) | Get an instance of equipamento_roteiro services facade. | Below is the the instruction that describes the task:
### Input:
Get an instance of equipamento_roteiro services facade.
### Response:
def create_equipamento_roteiro(self):
"""Get an instance of equipamento_roteiro services facade."""
return EquipamentoRoteiro(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) |
def _serialize(self, include_run_logs=False, strict_json=False):
""" Serialize a representation of this Job to a Python dict object. """
# return tasks in sorted order if graph is in a valid state
try:
topo_sorted = self.topological_sort()
t = [self.tasks[task]._serialize(include_run_logs=include_run_logs,
strict_json=strict_json)
for task in topo_sorted]
except:
t = [task._serialize(include_run_logs=include_run_logs,
strict_json=strict_json)
for task in self.tasks.itervalues()]
dependencies = {}
for k, v in self.graph.iteritems():
dependencies[k] = list(v)
result = {'job_id': self.job_id,
'name': self.name,
'parent_id': self.parent.dagobah_id,
'tasks': t,
'dependencies': dependencies,
'status': self.state.status,
'cron_schedule': self.cron_schedule,
'next_run': self.next_run,
'notes': self.notes}
if strict_json:
result = json.loads(json.dumps(result, cls=StrictJSONEncoder))
return result | Serialize a representation of this Job to a Python dict object. | Below is the the instruction that describes the task:
### Input:
Serialize a representation of this Job to a Python dict object.
### Response:
def _serialize(self, include_run_logs=False, strict_json=False):
""" Serialize a representation of this Job to a Python dict object. """
# return tasks in sorted order if graph is in a valid state
try:
topo_sorted = self.topological_sort()
t = [self.tasks[task]._serialize(include_run_logs=include_run_logs,
strict_json=strict_json)
for task in topo_sorted]
except:
t = [task._serialize(include_run_logs=include_run_logs,
strict_json=strict_json)
for task in self.tasks.itervalues()]
dependencies = {}
for k, v in self.graph.iteritems():
dependencies[k] = list(v)
result = {'job_id': self.job_id,
'name': self.name,
'parent_id': self.parent.dagobah_id,
'tasks': t,
'dependencies': dependencies,
'status': self.state.status,
'cron_schedule': self.cron_schedule,
'next_run': self.next_run,
'notes': self.notes}
if strict_json:
result = json.loads(json.dumps(result, cls=StrictJSONEncoder))
return result |
def __setWildcardSymbol(self, value):
"""self.__wildcardSymbol variable setter"""
errors = []
if not isinstance(value, str) or not value.split():  # must be a non-empty string
errors.append('wildcardSymbol_ERROR : Symbol : must be char or string!')
else:
self.__wildcardSymbol = value
if errors:
view.Tli.showErrors('SymbolError', errors) | self.__wildcardSymbol variable setter | Below is the the instruction that describes the task:
### Input:
self.__wildcardSymbol variable setter
### Response:
def __setWildcardSymbol(self, value):
"""self.__wildcardSymbol variable setter"""
errors = []
if not isinstance(value, str) or not value.split():  # must be a non-empty string
errors.append('wildcardSymbol_ERROR : Symbol : must be char or string!')
else:
self.__wildcardSymbol = value
if errors:
view.Tli.showErrors('SymbolError', errors) |
def clone(self, opts):
'''
Create a new instance of this type with the specified options.
Args:
opts (dict): The type specific options for the new instance.
'''
topt = self.opts.copy()
topt.update(opts)
return self.__class__(self.modl, self.name, self.info, topt) | Create a new instance of this type with the specified options.
Args:
opts (dict): The type specific options for the new instance. | Below is the the instruction that describes the task:
### Input:
Create a new instance of this type with the specified options.
Args:
opts (dict): The type specific options for the new instance.
### Response:
def clone(self, opts):
'''
Create a new instance of this type with the specified options.
Args:
opts (dict): The type specific options for the new instance.
'''
topt = self.opts.copy()
topt.update(opts)
return self.__class__(self.modl, self.name, self.info, topt) |
def send_to_graphite(self, metric, value, timestamp=None):
"""
Internally store a new metric and flush to Graphite if the flush threshold is reached.
If no timestamp is provided, use the current time as the metric timestamp.
:param metric: metric name in dotted format
:type metric: str
:param value:
:type value: float
:param timestamp: metric timestamp
:type timestamp: int
"""
# Manage Graphite part
if not self.statsd_enabled or not self.carbon:
return
if timestamp is None:
timestamp = int(time.time())
self.my_metrics.append(('.'.join([self.statsd_prefix, self.name, metric]),
(timestamp, value)))
if self.metrics_count >= self.metrics_flush_count:
self.carbon.add_data_list(self.my_metrics)
self.flush() | Internally store a new metric and flush to Graphite if the flush threshold is reached.
If no timestamp is provided, use the current time as the metric timestamp.
:param metric: metric name in dotted format
:type metric: str
:param value:
:type value: float
:param timestamp: metric timestamp
:type timestamp: int | Below is the the instruction that describes the task:
### Input:
Internally store a new metric and flush to Graphite if the flush threshold is reached.
If no timestamp is provided, use the current time as the metric timestamp.
:param metric: metric name in dotted format
:type metric: str
:param value:
:type value: float
:param timestamp: metric timestamp
:type timestamp: int
### Response:
def send_to_graphite(self, metric, value, timestamp=None):
"""
Internally store a new metric and flush to Graphite if the flush threshold is reached.
If no timestamp is provided, use the current time as the metric timestamp.
:param metric: metric name in dotted format
:type metric: str
:param value:
:type value: float
:param timestamp: metric timestamp
:type timestamp: int
"""
# Manage Graphite part
if not self.statsd_enabled or not self.carbon:
return
if timestamp is None:
timestamp = int(time.time())
self.my_metrics.append(('.'.join([self.statsd_prefix, self.name, metric]),
(timestamp, value)))
if self.metrics_count >= self.metrics_flush_count:
self.carbon.add_data_list(self.my_metrics)
self.flush() |
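A self-contained sketch of the same buffer-and-flush pattern outside any broker class; the names and threshold are illustrative.

import time

class MetricBuffer:
    def __init__(self, prefix, flush_count, sender):
        self.prefix = prefix
        self.flush_count = flush_count
        self.sender = sender  # stand-in for the carbon client
        self.buffer = []

    def send(self, metric, value, timestamp=None):
        if timestamp is None:
            timestamp = int(time.time())
        self.buffer.append(("%s.%s" % (self.prefix, metric), (timestamp, value)))
        if len(self.buffer) >= self.flush_count:
            self.sender(self.buffer)  # hand the whole batch over
            self.buffer = []

buf = MetricBuffer("alignak.demo", 2, print)
buf.send("checks", 10)
buf.send("checks", 12)  # second call reaches the threshold and flushes both entries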
def latmio_dir(R, itr, D=None, seed=None):
'''
This function "latticizes" a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
R : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
rng = get_rng(seed)
n = len(R)
ind_rp = rng.permutation(n) # randomly reorder matrix
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
# create distance to diagonal matrix if not specified by user
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(R)
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1 = rng.randint(k)
e2 = rng.randint(k)
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])] # reverse random permutation
return Rlatt, R, ind_rp, eff | This function "latticizes" a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
R : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out | Below is the the instruction that describes the task:
### Input:
This function "latticizes" a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
R : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
### Response:
def latmio_dir(R, itr, D=None, seed=None):
'''
This function "latticizes" a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
R : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
D : np.ndarray | None
distance-to-diagonal matrix. Defaults to the actual distance matrix
if not specified.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
Rlatt : NxN np.ndarray
latticized network in original node ordering
Rrp : NxN np.ndarray
latticized network in node ordering used for latticization
ind_rp : Nx1 np.ndarray
node ordering used for latticization
eff : int
number of actual rewirings carried out
'''
rng = get_rng(seed)
n = len(R)
ind_rp = rng.permutation(n) # randomly reorder matrix
R = R.copy()
R = R[np.ix_(ind_rp, ind_rp)]
# create distance to diagonal matrix if not specified by user
if D is None:
D = np.zeros((n, n))
un = np.mod(range(1, n), n)
um = np.mod(range(n - 1, 0, -1), n)
u = np.append((0,), np.where(un < um, un, um))
for v in range(int(np.ceil(n / 2))):
D[n - v - 1, :] = np.append(u[v + 1:], u[:v + 1])
D[v, :] = D[n - v - 1, :][::-1]
i, j = np.where(R)
k = len(i)
itr *= k
# maximal number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(itr):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1 = rng.randint(k)
e2 = rng.randint(k)
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break
# rewiring condition
if not (R[a, d] or R[c, b]):
# lattice condition
if (D[a, b] * R[a, b] + D[c, d] * R[c, d] >= D[a, d] * R[a, b] + D[c, b] * R[c, d]):
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
Rlatt = R[np.ix_(ind_rp[::-1], ind_rp[::-1])] # reverse random permutation
return Rlatt, R, ind_rp, eff |
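Hypothetical usage: latticize a sparse random directed network. Since rewiring only moves existing edges, the edge count is preserved.

import numpy as np

n = 30
W = (np.random.rand(n, n) < 0.1).astype(float)
np.fill_diagonal(W, 0)  # no self-connections
Rlatt, Rrp, ind_rp, eff = latmio_dir(W, itr=5, seed=42)
assert W.sum() == Rlatt.sum()  # rewiring preserves the number of edges
print(eff, "rewirings carried out")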
def _set_mirror(self, v, load=False):
"""
Setter method for mirror, mapped from YANG variable /openflow_global/openflow/mirror (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mirror is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mirror() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mirror.mirror, is_container='container', presence=False, yang_name="mirror", rest_name="mirror", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Openflow Mirror interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mirror must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=mirror.mirror, is_container='container', presence=False, yang_name="mirror", rest_name="mirror", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Openflow Mirror interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""",
})
self.__mirror = t
if hasattr(self, '_set'):
self._set() | Setter method for mirror, mapped from YANG variable /openflow_global/openflow/mirror (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mirror is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mirror() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for mirror, mapped from YANG variable /openflow_global/openflow/mirror (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mirror is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mirror() directly.
### Response:
def _set_mirror(self, v, load=False):
"""
Setter method for mirror, mapped from YANG variable /openflow_global/openflow/mirror (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mirror is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mirror() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mirror.mirror, is_container='container', presence=False, yang_name="mirror", rest_name="mirror", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Openflow Mirror interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mirror must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=mirror.mirror, is_container='container', presence=False, yang_name="mirror", rest_name="mirror", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Openflow Mirror interface', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""",
})
self.__mirror = t
if hasattr(self, '_set'):
self._set() |
def init_app(self, app, datastore=None, register_blueprint=None, **kwargs):
"""Initializes the Flask-Security extension for the specified
application and datastore implementation.
:param app: The application.
:param datastore: An instance of a user datastore.
:param register_blueprint: to register the Security blueprint or not.
"""
self.app = app
if datastore is None:
datastore = self._datastore
if register_blueprint is None:
register_blueprint = self._register_blueprint
for key, value in self._kwargs.items():
kwargs.setdefault(key, value)
for key, value in _default_config.items():
app.config.setdefault('SECURITY_' + key, value)
for key, value in _default_messages.items():
app.config.setdefault('SECURITY_MSG_' + key, value)
identity_loaded.connect_via(app)(_on_identity_loaded)
self._state = state = _get_state(app, datastore, **kwargs)
if register_blueprint:
app.register_blueprint(create_blueprint(state, __name__))
app.context_processor(_context_processor)
@app.before_first_request
def _register_i18n():
if '_' not in app.jinja_env.globals:
app.jinja_env.globals['_'] = state.i18n_domain.gettext
state.render_template = self.render_template
state.send_mail = self.send_mail
app.extensions['security'] = state
if hasattr(app, 'cli'):
from .cli import users, roles
if state.cli_users_name:
app.cli.add_command(users, state.cli_users_name)
if state.cli_roles_name:
app.cli.add_command(roles, state.cli_roles_name)
return state | Initializes the Flask-Security extension for the specified
application and datastore implementation.
:param app: The application.
:param datastore: An instance of a user datastore.
:param register_blueprint: to register the Security blueprint or not. | Below is the the instruction that describes the task:
### Input:
Initializes the Flask-Security extension for the specified
application and datastore implementation.
:param app: The application.
:param datastore: An instance of a user datastore.
:param register_blueprint: to register the Security blueprint or not.
### Response:
def init_app(self, app, datastore=None, register_blueprint=None, **kwargs):
"""Initializes the Flask-Security extension for the specified
application and datastore implementation.
:param app: The application.
:param datastore: An instance of a user datastore.
:param register_blueprint: to register the Security blueprint or not.
"""
self.app = app
if datastore is None:
datastore = self._datastore
if register_blueprint is None:
register_blueprint = self._register_blueprint
for key, value in self._kwargs.items():
kwargs.setdefault(key, value)
for key, value in _default_config.items():
app.config.setdefault('SECURITY_' + key, value)
for key, value in _default_messages.items():
app.config.setdefault('SECURITY_MSG_' + key, value)
identity_loaded.connect_via(app)(_on_identity_loaded)
self._state = state = _get_state(app, datastore, **kwargs)
if register_blueprint:
app.register_blueprint(create_blueprint(state, __name__))
app.context_processor(_context_processor)
@app.before_first_request
def _register_i18n():
if '_' not in app.jinja_env.globals:
app.jinja_env.globals['_'] = state.i18n_domain.gettext
state.render_template = self.render_template
state.send_mail = self.send_mail
app.extensions['security'] = state
if hasattr(app, 'cli'):
from .cli import users, roles
if state.cli_users_name:
app.cli.add_command(users, state.cli_users_name)
if state.cli_roles_name:
app.cli.add_command(roles, state.cli_roles_name)
return state |
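A typical two-phase (application-factory) setup for the method above; db, User and Role are assumed SQLAlchemy models and are not defined here.

from flask import Flask
from flask_security import Security, SQLAlchemyUserDatastore

security = Security()

def create_app():
    app = Flask(__name__)
    app.config["SECRET_KEY"] = "change-me"
    user_datastore = SQLAlchemyUserDatastore(db, User, Role)  # assumed models
    security.init_app(app, user_datastore)
    return app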
def parse_datetime(s: str) -> datetime.date:
"""Try to parse a datetime object from a standard datetime format or date format."""
for fmt in (CREATION_DATE_FMT, PUBLISHED_DATE_FMT, PUBLISHED_DATE_FMT_2):
try:
dt = datetime.strptime(s, fmt)
except ValueError:
pass
else:
return dt
raise ValueError('Incorrect datetime format for {}'.format(s)) | Try to parse a datetime object from a standard datetime format or date format. | Below is the the instruction that describes the task:
### Input:
Try to parse a datetime object from a standard datetime format or date format.
### Response:
def parse_datetime(s: str) -> datetime.date:
"""Try to parse a datetime object from a standard datetime format or date format."""
for fmt in (CREATION_DATE_FMT, PUBLISHED_DATE_FMT, PUBLISHED_DATE_FMT_2):
try:
dt = datetime.strptime(s, fmt)
except ValueError:
pass
else:
return dt
raise ValueError('Incorrect datetime format for {}'.format(s)) |
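A self-contained variant with example format strings for quick experimentation; the three FMT constants in the real module may differ.

from datetime import datetime

FORMATS = ("%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d")

def parse_datetime_demo(s):
    for fmt in FORMATS:
        try:
            return datetime.strptime(s, fmt)
        except ValueError:
            continue
    raise ValueError("Incorrect datetime format for {}".format(s))

print(parse_datetime_demo("2021-03-14"))  # 2021-03-14 00:00:00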
def first_or_create(self, _attributes=None, **attributes):
"""
Get the first related record matching the attributes or create it.
:param attributes: The attributes
:type attributes: dict
:rtype: Model
"""
if _attributes is not None:
attributes.update(_attributes)
instance = self.where(attributes).first()
if instance is None:
instance = self.create(**attributes)
return instance | Get the first related record matching the attributes or create it.
:param attributes: The attributes
:type attributes: dict
:rtype: Model | Below is the the instruction that describes the task:
### Input:
Get the first related record matching the attributes or create it.
:param attributes: The attributes
:type attributes: dict
:rtype: Model
### Response:
def first_or_create(self, _attributes=None, **attributes):
"""
Get the first related record matching the attributes or create it.
:param attributes: The attributes
:type attributes: dict
:rtype: Model
"""
if _attributes is not None:
attributes.update(_attributes)
instance = self.where(attributes).first()
if instance is None:
instance = self.create(**attributes)
return instance |
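Hypothetical usage on an Orator-style relation; the model and field names are illustrative.

role = user.roles().first_or_create(name="admin")        # keyword form
role = user.roles().first_or_create({"name": "admin"})   # dict form via _attributes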
def tokenize(s, locale, update=None):
"""
Tokenize `s` according to corresponding locale dictionary.
Don't use this for serious text processing.
"""
zhdict = getdict(locale)
pfset = pfsdict[locale]
if update:
zhdict = zhdict.copy()
zhdict.update(update)
newset = set()
for word in update:
for ch in range(len(word)):
newset.add(word[:ch+1])
pfset = pfset | newset
ch = []
N = len(s)
pos = 0
while pos < N:
i = pos
frag = s[pos]
maxword = None
maxpos = 0
while i < N and frag in pfset:
if frag in zhdict:
maxword = frag
maxpos = i
i += 1
frag = s[pos:i+1]
if maxword is None:
maxword = s[pos]
pos += 1
else:
pos = maxpos + 1
ch.append(maxword)
return ch | Tokenize `s` according to corresponding locale dictionary.
Don't use this for serious text processing. | Below is the the instruction that describes the task:
### Input:
Tokenize `s` according to corresponding locale dictionary.
Don't use this for serious text processing.
### Response:
def tokenize(s, locale, update=None):
"""
Tokenize `s` according to corresponding locale dictionary.
Don't use this for serious text processing.
"""
zhdict = getdict(locale)
pfset = pfsdict[locale]
if update:
zhdict = zhdict.copy()
zhdict.update(update)
newset = set()
for word in update:
for ch in range(len(word)):
newset.add(word[:ch+1])
pfset = pfset | newset
ch = []
N = len(s)
pos = 0
while pos < N:
i = pos
frag = s[pos]
maxword = None
maxpos = 0
while i < N and frag in pfset:
if frag in zhdict:
maxword = frag
maxpos = i
i += 1
frag = s[pos:i+1]
if maxword is None:
maxword = s[pos]
pos += 1
else:
pos = maxpos + 1
ch.append(maxword)
return ch |
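A toy, self-contained version of the maximal-forward-matching loop above, with a hand-built word set and prefix set standing in for getdict/pfsdict:

words = {"中国", "中国人", "人民"}
prefixes = {w[:i + 1] for w in words for i in range(len(w))}

def tokenize_demo(s):
    out, pos = [], 0
    while pos < len(s):
        i, frag, best, best_end = pos, s[pos], None, 0
        while i < len(s) and frag in prefixes:
            if frag in words:
                best, best_end = frag, i  # remember the longest match so far
            i += 1
            frag = s[pos:i + 1]
        if best is None:
            out.append(s[pos])
            pos += 1
        else:
            out.append(best)
            pos = best_end + 1
    return out

print(tokenize_demo("中国人民"))  # ['中国人', '民'] (greedy, so 人民 is missed)

The greedy longest match is fast but, as the output shows, it can cut across a better segmentation, which is why the docstring warns against using it for serious text processing.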
def fulfill_lock_reward_condition(event, agreement_id, price, consumer_account):
"""
Fulfill the lock reward condition.
:param event: AttributeDict with the event data.
:param agreement_id: id of the agreement, hex str
:param price: Asset price, int
:param consumer_account: Account instance of the consumer
"""
logger.debug(f"about to lock reward after event {event}.")
keeper = Keeper.get_instance()
tx_hash = None
try:
keeper.token.token_approve(keeper.lock_reward_condition.address, price, consumer_account)
tx_hash = keeper.lock_reward_condition.fulfill(
agreement_id, keeper.escrow_reward_condition.address, price, consumer_account
)
process_tx_receipt(
tx_hash,
keeper.lock_reward_condition.FULFILLED_EVENT,
'LockRewardCondition.Fulfilled'
)
except Exception as e:
logger.debug(f'error locking reward: {e}')
if not tx_hash:
raise e | Fulfill the lock reward condition.
:param event: AttributeDict with the event data.
:param agreement_id: id of the agreement, hex str
:param price: Asset price, int
:param consumer_account: Account instance of the consumer | Below is the the instruction that describes the task:
### Input:
Fulfill the lock reward condition.
:param event: AttributeDict with the event data.
:param agreement_id: id of the agreement, hex str
:param price: Asset price, int
:param consumer_account: Account instance of the consumer
### Response:
def fulfill_lock_reward_condition(event, agreement_id, price, consumer_account):
"""
Fulfill the lock reward condition.
:param event: AttributeDict with the event data.
:param agreement_id: id of the agreement, hex str
:param price: Asset price, int
:param consumer_account: Account instance of the consumer
"""
logger.debug(f"about to lock reward after event {event}.")
keeper = Keeper.get_instance()
tx_hash = None
try:
keeper.token.token_approve(keeper.lock_reward_condition.address, price, consumer_account)
tx_hash = keeper.lock_reward_condition.fulfill(
agreement_id, keeper.escrow_reward_condition.address, price, consumer_account
)
process_tx_receipt(
tx_hash,
keeper.lock_reward_condition.FULFILLED_EVENT,
'LockRewardCondition.Fulfilled'
)
except Exception as e:
logger.debug(f'error locking reward: {e}')
if not tx_hash:
raise e |
def from_data(data):
"""
Construct a PrettyTable from a list of rows.
"""
if len(data) == 0: # pragma: no cover
return None
else:
ptable = PrettyTable()
ptable.field_names = data[0].keys()
for row in data:
ptable.add_row(list(row.values()))  # add_row expects a sequence of cell values, not a dict
return ptable | Construct a PrettyTable from a list of rows. | Below is the the instruction that describes the task:
### Input:
Construct a PrettyTable from a list of rows.
### Response:
def from_data(data):
"""
Construct a PrettyTable from a list of rows.
"""
if len(data) == 0: # pragma: no cover
return None
else:
ptable = PrettyTable()
ptable.field_names = data[0].keys()
for row in data:
ptable.add_row(row)
return ptable |
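Hypothetical usage with a small list of dict rows sharing the same keys:

rows = [
    {"name": "alpha", "count": 3},
    {"name": "beta", "count": 7},
]
table = from_data(rows)
print(table)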
def abort(self, exception=exc.ConnectError):
"""
Aborts a connection and puts all pending futures into an error state.
If ``sys.exc_info()`` is set (i.e. this is being called in an exception
handler) then pending futures will have that exc info set. Otherwise
the given ``exception`` parameter is used (defaults to
``ConnectError``).
"""
log.warning("Aborting connection to %s:%s", self.host, self.port)
def abort_pending(f):
exc_info = sys.exc_info()
# TODO
log.debug('Abort pending: {}'.format(f))
if False and any(exc_info):
f.set_exc_info(exc_info)
else:
f.set_exception(exception(self.host, self.port))
for pending in self.drain_all_pending():
if pending.done() or pending.cancelled():
continue
abort_pending(pending) | Aborts a connection and puts all pending futures into an error state.
If ``sys.exc_info()`` is set (i.e. this is being called in an exception
handler) then pending futures will have that exc info set. Otherwise
the given ``exception`` parameter is used (defaults to
``ConnectError``). | Below is the the instruction that describes the task:
### Input:
Aborts a connection and puts all pending futures into an error state.
If ``sys.exc_info()`` is set (i.e. this is being called in an exception
handler) then pending futures will have that exc info set. Otherwise
the given ``exception`` parameter is used (defaults to
``ConnectError``).
### Response:
def abort(self, exception=exc.ConnectError):
"""
Aborts a connection and puts all pending futures into an error state.
If ``sys.exc_info()`` is set (i.e. this is being called in an exception
handler) then pending futures will have that exc info set. Otherwise
the given ``exception`` parameter is used (defaults to
``ConnectError``).
"""
log.warning("Aborting connection to %s:%s", self.host, self.port)
def abort_pending(f):
exc_info = sys.exc_info()
# TODO
log.debug('Abort pending: {}'.format(f))
if False and any(exc_info):
f.set_exc_info(exc_info)
else:
f.set_exception(exception(self.host, self.port))
for pending in self.drain_all_pending():
if pending.done() or pending.cancelled():
continue
abort_pending(pending) |
def connect(self, *, db=None):
"""
Attempt to connect to device. If unable, attempt to connect to a controller database
(so the user can use previously saved data).
"""
if not self.properties.network:
self.new_state(DeviceFromDB)
else:
try:
name = self.properties.network.read(
"{} device {} objectName".format(
self.properties.address, self.properties.device_id
)
)
segmentation = self.properties.network.read(
"{} device {} segmentationSupported".format(
self.properties.address, self.properties.device_id
)
)
if not self.segmentation_supported or segmentation not in (
"segmentedTransmit",
"segmentedBoth",
):
segmentation_supported = False
self._log.debug("Segmentation not supported")
else:
segmentation_supported = True
if name:
if segmentation_supported:
self.new_state(RPMDeviceConnected)
else:
self.new_state(RPDeviceConnected)
except SegmentationNotSupported:
self.segmentation_supported = False
self._log.warning(
"Segmentation not supported.... expect slow responses."
)
self.new_state(RPDeviceConnected)
except (NoResponseFromController, AttributeError) as error:
if self.properties.db_name:
self.new_state(DeviceFromDB)
else:
self._log.warning(
"Offline: provide database name to load stored data."
)
self._log.warning("Ex. controller.connect(db = 'backup')") | Attempt to connect to device. If unable, attempt to connect to a controller database
(so the user can use previously saved data). | Below is the the instruction that describes the task:
### Input:
Attempt to connect to device. If unable, attempt to connect to a controller database
(so the user can use previously saved data).
### Response:
def connect(self, *, db=None):
"""
Attempt to connect to device. If unable, attempt to connect to a controller database
(so the user can use previously saved data).
"""
if not self.properties.network:
self.new_state(DeviceFromDB)
else:
try:
name = self.properties.network.read(
"{} device {} objectName".format(
self.properties.address, self.properties.device_id
)
)
segmentation = self.properties.network.read(
"{} device {} segmentationSupported".format(
self.properties.address, self.properties.device_id
)
)
if not self.segmentation_supported or segmentation not in (
"segmentedTransmit",
"segmentedBoth",
):
segmentation_supported = False
self._log.debug("Segmentation not supported")
else:
segmentation_supported = True
if name:
if segmentation_supported:
self.new_state(RPMDeviceConnected)
else:
self.new_state(RPDeviceConnected)
except SegmentationNotSupported:
self.segmentation_supported = False
self._log.warning(
"Segmentation not supported.... expect slow responses."
)
self.new_state(RPDeviceConnected)
except (NoResponseFromController, AttributeError) as error:
if self.properties.db_name:
self.new_state(DeviceFromDB)
else:
self._log.warning(
"Offline: provide database name to load stored data."
)
self._log.warning("Ex. controller.connect(db = 'backup')") |
def constraint(self):
"""Constraint string"""
constraint_arr = []
if self._not_null:
constraint_arr.append("PRIMARY KEY" if self._pk else "NOT NULL")
if self._unique:
constraint_arr.append("UNIQUE")
return " ".join(constraint_arr) | Constraint string | Below is the the instruction that describes the task:
### Input:
Constraint string
### Response:
def constraint(self):
"""Constraint string"""
constraint_arr = []
if self._not_null:
constraint_arr.append("PRIMARY KEY" if self._pk else "NOT NULL")
if self._unique:
constraint_arr.append("UNIQUE")
return " ".join(constraint_arr) |
def overlay_gateway_monitor_direction(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
monitor = ET.SubElement(overlay_gateway, "monitor")
session_key = ET.SubElement(monitor, "session")
session_key.text = kwargs.pop('session')
direction = ET.SubElement(monitor, "direction")
direction.text = kwargs.pop('direction')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def overlay_gateway_monitor_direction(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
monitor = ET.SubElement(overlay_gateway, "monitor")
session_key = ET.SubElement(monitor, "session")
session_key.text = kwargs.pop('session')
direction = ET.SubElement(monitor, "direction")
direction.text = kwargs.pop('direction')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def as_dict(self, default=None):
"""
Returns a ``SettingDict`` object for this queryset.
"""
settings = SettingDict(queryset=self, default=default)
        return settings | Returns a ``SettingDict`` object for this queryset. | Below is the instruction that describes the task:
### Input:
Returns a ``SettingDict`` object for this queryset.
### Response:
def as_dict(self, default=None):
"""
Returns a ``SettingDict`` object for this queryset.
"""
settings = SettingDict(queryset=self, default=default)
return settings |
def patches_until(self, patch):
""" Returns a list of patches before patch from the patches list
including the provided patch
"""
return [line.get_patch() for line in self._patchlines_until(patch) if
line.get_patch()] | Returns a list of patches before patch from the patches list
    including the provided patch | Below is the instruction that describes the task:
### Input:
Returns a list of patches before patch from the patches list
including the provided patch
### Response:
def patches_until(self, patch):
""" Returns a list of patches before patch from the patches list
including the provided patch
"""
return [line.get_patch() for line in self._patchlines_until(patch) if
line.get_patch()] |
def _validate_props(self):
"""Assert that all the properties are valid on validate()"""
for key, prop in iteritems(self._props):
try:
value = self._get(key)
err_msg = 'Invalid value for property {}: {}'.format(key, value)
if value is not None:
change = dict(name=key, previous=value, value=value,
mode='validate')
self._notify(change)
if not prop.equal(value, change['value']):
raise utils.ValidationError(err_msg, 'invalid',
prop.name, self)
if not prop.assert_valid(self):
raise utils.ValidationError(err_msg, 'invalid',
prop.name, self)
except utils.ValidationError as val_err:
if getattr(self, '_validation_error_tuples', None) is not None:
self._validation_error_tuples += val_err.error_tuples
else:
raise
        return True | Assert that all the properties are valid on validate() | Below is the instruction that describes the task:
### Input:
Assert that all the properties are valid on validate()
### Response:
def _validate_props(self):
"""Assert that all the properties are valid on validate()"""
for key, prop in iteritems(self._props):
try:
value = self._get(key)
err_msg = 'Invalid value for property {}: {}'.format(key, value)
if value is not None:
change = dict(name=key, previous=value, value=value,
mode='validate')
self._notify(change)
if not prop.equal(value, change['value']):
raise utils.ValidationError(err_msg, 'invalid',
prop.name, self)
if not prop.assert_valid(self):
raise utils.ValidationError(err_msg, 'invalid',
prop.name, self)
except utils.ValidationError as val_err:
if getattr(self, '_validation_error_tuples', None) is not None:
self._validation_error_tuples += val_err.error_tuples
else:
raise
return True |
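The accumulate-or-raise branch at the end is the part worth isolating; this standalone sketch reproduces that pattern with invented check functions (the real properties framework is not shown).

# Sketch of the accumulate-vs-raise validation pattern used above.
class ValidationError(Exception):
    pass

def validate_all(values, checks, error_list=None):
    """Run every check; collect failures if error_list is given, else raise."""
    for name, check in checks.items():
        try:
            if not check(values.get(name)):
                raise ValidationError('Invalid value for property {}: {}'
                                      .format(name, values.get(name)))
        except ValidationError as err:
            if error_list is not None:
                error_list.append(err)  # keep validating the remaining props
            else:
                raise                   # fail fast on the first bad prop
    return True

errors = []
validate_all({'a': 1, 'b': -2}, {'a': lambda v: v > 0, 'b': lambda v: v > 0}, errors)
print(len(errors))  # 1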
def __recognize_union(self, node: yaml.Node,
expected_type: Type) -> RecResult:
"""Recognize a node that we expect to be one of a union of types.
Args:
node: The node to recognize.
expected_type: Union[...something...]
Returns:
The specific type that was recognized, multiple, or none.
"""
logger.debug('Recognizing as a union')
recognized_types = []
message = ''
union_types = generic_type_args(expected_type)
logger.debug('Union types {}'.format(union_types))
for possible_type in union_types:
recognized_type, msg = self.recognize(node, possible_type)
if len(recognized_type) == 0:
message += msg
recognized_types.extend(recognized_type)
recognized_types = list(set(recognized_types))
if bool in recognized_types and bool_union_fix in recognized_types:
recognized_types.remove(bool_union_fix)
if len(recognized_types) == 0:
return recognized_types, message
elif len(recognized_types) > 1:
message = ('{}{}Could not determine which of the following types'
' this is: {}').format(node.start_mark, os.linesep,
recognized_types)
return recognized_types, message
return recognized_types, '' | Recognize a node that we expect to be one of a union of types.
Args:
node: The node to recognize.
expected_type: Union[...something...]
Returns:
        The specific type that was recognized, multiple, or none. | Below is the instruction that describes the task:
### Input:
Recognize a node that we expect to be one of a union of types.
Args:
node: The node to recognize.
expected_type: Union[...something...]
Returns:
The specific type that was recognized, multiple, or none.
### Response:
def __recognize_union(self, node: yaml.Node,
expected_type: Type) -> RecResult:
"""Recognize a node that we expect to be one of a union of types.
Args:
node: The node to recognize.
expected_type: Union[...something...]
Returns:
The specific type that was recognized, multiple, or none.
"""
logger.debug('Recognizing as a union')
recognized_types = []
message = ''
union_types = generic_type_args(expected_type)
logger.debug('Union types {}'.format(union_types))
for possible_type in union_types:
recognized_type, msg = self.recognize(node, possible_type)
if len(recognized_type) == 0:
message += msg
recognized_types.extend(recognized_type)
recognized_types = list(set(recognized_types))
if bool in recognized_types and bool_union_fix in recognized_types:
recognized_types.remove(bool_union_fix)
if len(recognized_types) == 0:
return recognized_types, message
elif len(recognized_types) > 1:
message = ('{}{}Could not determine which of the following types'
' this is: {}').format(node.start_mark, os.linesep,
recognized_types)
return recognized_types, message
return recognized_types, '' |
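On Python 3.8+ the same disambiguation idea can be sketched with typing.get_args standing in for generic_type_args; the isinstance checks below are toy recognizers, not yatiml's node-based ones.

# Toy sketch: pick the single matching member type of a Union.
from typing import Union, get_args

def recognize_union(value, union_type):
    recognized = list({t for t in get_args(union_type) if isinstance(value, t)})
    if len(recognized) != 1:
        raise ValueError('Could not determine which of the following types'
                         ' this is: {}'.format(recognized or get_args(union_type)))
    return recognized[0]

print(recognize_union(3, Union[int, str]))    # <class 'int'>
print(recognize_union('x', Union[int, str]))  # <class 'str'>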
def step(self, input_token):
"""Performs a step to establish the context as an acceptor.
This method should be called in a loop and fed input tokens from the initiator, and its
output tokens should be sent to the initiator, until this context's :attr:`established`
attribute is True.
:param input_token: The input token from the initiator (required).
:type input_token: bytes
:returns: either a byte string with the next token to send to the initiator,
or None if there is no further token to send to the initiator.
:raises: :exc:`~gssapi.error.GSSException` if there is an error establishing the context.
"""
minor_status = ffi.new('OM_uint32[1]')
input_token_buffer = ffi.new('gss_buffer_desc[1]')
input_token_buffer[0].length = len(input_token)
c_str_import_token = ffi.new('char[]', input_token)
input_token_buffer[0].value = c_str_import_token
mech_type = ffi.new('gss_OID[1]')
output_token_buffer = ffi.new('gss_buffer_desc[1]')
src_name_handle = ffi.new('gss_name_t[1]')
actual_flags = ffi.new('OM_uint32[1]')
time_rec = ffi.new('OM_uint32[1]')
delegated_cred_handle = ffi.new('gss_cred_id_t[1]')
if self._cred_object is not None:
cred = self._cred_object._cred[0]
else:
cred = ffi.cast('gss_cred_id_t', C.GSS_C_NO_CREDENTIAL)
retval = C.gss_accept_sec_context(
minor_status,
self._ctx,
cred,
input_token_buffer,
self._channel_bindings,
src_name_handle,
mech_type,
output_token_buffer,
actual_flags,
time_rec,
delegated_cred_handle
)
if src_name_handle[0]:
src_name = MechName(src_name_handle, mech_type[0]) # make sure src_name is GC'd
try:
if output_token_buffer[0].length != 0:
out_token = _buf_to_str(output_token_buffer[0])
else:
out_token = None
if GSS_ERROR(retval):
if minor_status[0] and mech_type[0]:
raise _exception_for_status(retval, minor_status[0], mech_type[0], out_token)
else:
raise _exception_for_status(retval, minor_status[0], None, out_token)
self.established = not (retval & C.GSS_S_CONTINUE_NEEDED)
self.flags = actual_flags[0]
if (self.flags & C.GSS_C_DELEG_FLAG):
self.delegated_cred = Credential(delegated_cred_handle)
if mech_type[0]:
self.mech_type = OID(mech_type[0][0])
if src_name_handle[0]:
src_name._mech_type = self.mech_type
self.peer_name = src_name
return out_token
except:
if self._ctx:
C.gss_delete_sec_context(
minor_status,
self._ctx,
ffi.cast('gss_buffer_t', C.GSS_C_NO_BUFFER)
)
self._reset_flags()
raise
finally:
if output_token_buffer[0].length != 0:
C.gss_release_buffer(minor_status, output_token_buffer)
# if self.delegated_cred is present, it will handle gss_release_cred:
if delegated_cred_handle[0] and not self.delegated_cred:
C.gss_release_cred(minor_status, delegated_cred_handle) | Performs a step to establish the context as an acceptor.
This method should be called in a loop and fed input tokens from the initiator, and its
output tokens should be sent to the initiator, until this context's :attr:`established`
attribute is True.
:param input_token: The input token from the initiator (required).
:type input_token: bytes
:returns: either a byte string with the next token to send to the initiator,
or None if there is no further token to send to the initiator.
        :raises: :exc:`~gssapi.error.GSSException` if there is an error establishing the context. | Below is the instruction that describes the task:
### Input:
Performs a step to establish the context as an acceptor.
This method should be called in a loop and fed input tokens from the initiator, and its
output tokens should be sent to the initiator, until this context's :attr:`established`
attribute is True.
:param input_token: The input token from the initiator (required).
:type input_token: bytes
:returns: either a byte string with the next token to send to the initiator,
or None if there is no further token to send to the initiator.
:raises: :exc:`~gssapi.error.GSSException` if there is an error establishing the context.
### Response:
def step(self, input_token):
"""Performs a step to establish the context as an acceptor.
This method should be called in a loop and fed input tokens from the initiator, and its
output tokens should be sent to the initiator, until this context's :attr:`established`
attribute is True.
:param input_token: The input token from the initiator (required).
:type input_token: bytes
:returns: either a byte string with the next token to send to the initiator,
or None if there is no further token to send to the initiator.
:raises: :exc:`~gssapi.error.GSSException` if there is an error establishing the context.
"""
minor_status = ffi.new('OM_uint32[1]')
input_token_buffer = ffi.new('gss_buffer_desc[1]')
input_token_buffer[0].length = len(input_token)
c_str_import_token = ffi.new('char[]', input_token)
input_token_buffer[0].value = c_str_import_token
mech_type = ffi.new('gss_OID[1]')
output_token_buffer = ffi.new('gss_buffer_desc[1]')
src_name_handle = ffi.new('gss_name_t[1]')
actual_flags = ffi.new('OM_uint32[1]')
time_rec = ffi.new('OM_uint32[1]')
delegated_cred_handle = ffi.new('gss_cred_id_t[1]')
if self._cred_object is not None:
cred = self._cred_object._cred[0]
else:
cred = ffi.cast('gss_cred_id_t', C.GSS_C_NO_CREDENTIAL)
retval = C.gss_accept_sec_context(
minor_status,
self._ctx,
cred,
input_token_buffer,
self._channel_bindings,
src_name_handle,
mech_type,
output_token_buffer,
actual_flags,
time_rec,
delegated_cred_handle
)
if src_name_handle[0]:
src_name = MechName(src_name_handle, mech_type[0]) # make sure src_name is GC'd
try:
if output_token_buffer[0].length != 0:
out_token = _buf_to_str(output_token_buffer[0])
else:
out_token = None
if GSS_ERROR(retval):
if minor_status[0] and mech_type[0]:
raise _exception_for_status(retval, minor_status[0], mech_type[0], out_token)
else:
raise _exception_for_status(retval, minor_status[0], None, out_token)
self.established = not (retval & C.GSS_S_CONTINUE_NEEDED)
self.flags = actual_flags[0]
if (self.flags & C.GSS_C_DELEG_FLAG):
self.delegated_cred = Credential(delegated_cred_handle)
if mech_type[0]:
self.mech_type = OID(mech_type[0][0])
if src_name_handle[0]:
src_name._mech_type = self.mech_type
self.peer_name = src_name
return out_token
except:
if self._ctx:
C.gss_delete_sec_context(
minor_status,
self._ctx,
ffi.cast('gss_buffer_t', C.GSS_C_NO_BUFFER)
)
self._reset_flags()
raise
finally:
if output_token_buffer[0].length != 0:
C.gss_release_buffer(minor_status, output_token_buffer)
# if self.delegated_cred is present, it will handle gss_release_cred:
if delegated_cred_handle[0] and not self.delegated_cred:
C.gss_release_cred(minor_status, delegated_cred_handle) |
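A sketch of the acceptor loop the docstring describes; recv_token and send_token are placeholders for whatever transport carries tokens between the peers.

# Sketch only: drive an acceptor context to completion.
def accept_loop(ctx, recv_token, send_token):
    while not ctx.established:
        in_token = recv_token()         # token produced by the initiator
        out_token = ctx.step(in_token)  # may raise GSSException on failure
        if out_token:
            send_token(out_token)       # relay our reply, if any
    return ctx.peer_name                # authenticated initiator's name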
def setup_console(output):
"""Console setup."""
global console
    # All console-related operations are handled via the ConsoleOutput class
console = ConsoleOutput(output, streamlink)
console.json = args.json
# Handle SIGTERM just like SIGINT
    signal.signal(signal.SIGTERM, signal.default_int_handler) | Console setup. | Below is the instruction that describes the task:
### Input:
Console setup.
### Response:
def setup_console(output):
"""Console setup."""
global console
    # All console-related operations are handled via the ConsoleOutput class
console = ConsoleOutput(output, streamlink)
console.json = args.json
# Handle SIGTERM just like SIGINT
signal.signal(signal.SIGTERM, signal.default_int_handler) |
def tar_runner(self):
""" Returns a tar with the runner script.
"""
script_bytes = self.RUNNER.read_bytes()
tarstream = BytesIO()
tar = tarfile.TarFile(fileobj=tarstream, mode='w')
tarinfo = tarfile.TarInfo(name="runner.py")
tarinfo.size = len(script_bytes)
tarinfo.mtime = int(time.time())
tar.addfile(tarinfo, BytesIO(script_bytes))
tar.close()
        return tarstream.getvalue() | Returns a tar with the runner script. | Below is the instruction that describes the task:
### Input:
Returns a tar with the runner script.
### Response:
def tar_runner(self):
""" Returns a tar with the runner script.
"""
script_bytes = self.RUNNER.read_bytes()
tarstream = BytesIO()
tar = tarfile.TarFile(fileobj=tarstream, mode='w')
tarinfo = tarfile.TarInfo(name="runner.py")
tarinfo.size = len(script_bytes)
tarinfo.mtime = int(time.time())
tar.addfile(tarinfo, BytesIO(script_bytes))
tar.close()
return tarstream.getvalue() |
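The in-memory tar trick can be checked in isolation; this round-trip is standalone and does not depend on the RUNNER attribute.

# Standalone round-trip of the in-memory tar technique.
import tarfile
import time
from io import BytesIO

payload = b"print('hello')\n"
stream = BytesIO()
tar = tarfile.TarFile(fileobj=stream, mode='w')
info = tarfile.TarInfo(name="runner.py")
info.size = len(payload)
info.mtime = int(time.time())
tar.addfile(info, BytesIO(payload))
tar.close()

# Reading it back proves the archive is well formed.
back = tarfile.TarFile(fileobj=BytesIO(stream.getvalue()), mode='r')
print(back.extractfile("runner.py").read() == payload)  # True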
def operation_effects(self, op_id, cursor=None, order='asc', limit=10):
"""This endpoint represents all effects that occurred as a result of a
given operation.
`GET /operations/{id}/effects{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-operation.html>`_
:param int op_id: The operation ID to get effects on.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:return: A list of effects on the given operation.
:rtype: dict
"""
endpoint = '/operations/{op_id}/effects'.format(op_id=op_id)
params = self.__query_params(cursor=cursor, order=order, limit=limit)
return self.query(endpoint, params) | This endpoint represents all effects that occurred as a result of a
given operation.
`GET /operations/{id}/effects{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-operation.html>`_
:param int op_id: The operation ID to get effects on.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:return: A list of effects on the given operation.
        :rtype: dict | Below is the instruction that describes the task:
### Input:
This endpoint represents all effects that occurred as a result of a
given operation.
`GET /operations/{id}/effects{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-operation.html>`_
:param int op_id: The operation ID to get effects on.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:return: A list of effects on the given operation.
:rtype: dict
### Response:
def operation_effects(self, op_id, cursor=None, order='asc', limit=10):
"""This endpoint represents all effects that occurred as a result of a
given operation.
`GET /operations/{id}/effects{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-operation.html>`_
:param int op_id: The operation ID to get effects on.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:return: A list of effects on the given operation.
:rtype: dict
"""
endpoint = '/operations/{op_id}/effects'.format(op_id=op_id)
params = self.__query_params(cursor=cursor, order=order, limit=limit)
return self.query(endpoint, params) |
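The private __query_params helper presumably drops unset arguments before the request is built; this is a minimal standalone sketch of that assumed behavior, not the library's actual code.

# Sketch: build query params, dropping None values (assumed helper behavior).
def query_params(**kwargs):
    return {k: v for k, v in kwargs.items() if v is not None}

print(query_params(cursor=None, order='asc', limit=10))
# {'order': 'asc', 'limit': 10}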
def reset(self):
'''
Initialize this type for a new simulated history of K/L ratio.
Parameters
----------
None
Returns
-------
None
'''
self.initializeSim()
self.aLvlNow = self.kInit*np.ones(self.AgentCount) # Start simulation near SS
self.aNrmNow = self.aLvlNow/self.pLvlNow | Initialize this type for a new simulated history of K/L ratio.
Parameters
----------
None
Returns
-------
    None | Below is the instruction that describes the task:
### Input:
Initialize this type for a new simulated history of K/L ratio.
Parameters
----------
None
Returns
-------
None
### Response:
def reset(self):
'''
Initialize this type for a new simulated history of K/L ratio.
Parameters
----------
None
Returns
-------
None
'''
self.initializeSim()
self.aLvlNow = self.kInit*np.ones(self.AgentCount) # Start simulation near SS
self.aNrmNow = self.aLvlNow/self.pLvlNow |
def create_nic(self, datacenter_id, server_id, nic):
"""
Creates a NIC on the specified server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param nic: A NIC dict.
:type nic: ``dict``
"""
data = json.dumps(self._create_nic_dict(nic))
response = self._perform_request(
url='/datacenters/%s/servers/%s/nics' % (
datacenter_id,
server_id),
method='POST',
data=data)
return response | Creates a NIC on the specified server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param nic: A NIC dict.
        :type nic: ``dict`` | Below is the instruction that describes the task:
### Input:
Creates a NIC on the specified server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param nic: A NIC dict.
:type nic: ``dict``
### Response:
def create_nic(self, datacenter_id, server_id, nic):
"""
Creates a NIC on the specified server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
:param nic: A NIC dict.
:type nic: ``dict``
"""
data = json.dumps(self._create_nic_dict(nic))
response = self._perform_request(
url='/datacenters/%s/servers/%s/nics' % (
datacenter_id,
server_id),
method='POST',
data=data)
return response |
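A hedged usage sketch; the client class, credentials, UUIDs, and NIC fields below are illustrative placeholders, not values from the source.

# Hypothetical call; the IDs and NIC fields are invented for illustration.
nic = {
    'name': 'nic-example',
    'lan': 1,
    'ips': ['10.0.0.5'],
}
# client = ProfitBricksService(username='...', password='...')
# response = client.create_nic('dc-uuid', 'srv-uuid', nic)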
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation.
"""
line = "{sent_id} ||| {target} ||| {score:f} ||| {source} ||| {source_len:d} ||| {target_len:d}\n"
self.stream.write(line.format(sent_id=t_input.sentence_id,
target=" ".join(t_output.tokens),
score=t_output.score,
source=" ".join(t_input.tokens),
source_len=len(t_input.tokens),
target_len=len(t_output.tokens)))
attention_matrix = t_output.attention_matrix.T
for i in range(0, attention_matrix.shape[0]):
attention_vector = attention_matrix[i]
self.stream.write(" ".join(["%f" % value for value in attention_vector]))
self.stream.write("\n")
self.stream.write("\n")
self.stream.flush() | :param t_input: Translator input.
:param t_output: Translator output.
        :param t_walltime: Total wall-clock time for translation. | Below is the instruction that describes the task:
### Input:
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation.
### Response:
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation.
"""
line = "{sent_id} ||| {target} ||| {score:f} ||| {source} ||| {source_len:d} ||| {target_len:d}\n"
self.stream.write(line.format(sent_id=t_input.sentence_id,
target=" ".join(t_output.tokens),
score=t_output.score,
source=" ".join(t_input.tokens),
source_len=len(t_input.tokens),
target_len=len(t_output.tokens)))
attention_matrix = t_output.attention_matrix.T
for i in range(0, attention_matrix.shape[0]):
attention_vector = attention_matrix[i]
self.stream.write(" ".join(["%f" % value for value in attention_vector]))
self.stream.write("\n")
self.stream.write("\n")
self.stream.flush() |
def _disambiguate_sid_ksid( words_layer, text, scope=CLAUSES ):
''' Disambiguates verb forms based on existence of 2nd person pronoun ('sina') in given scope.
The scope could be either CLAUSES or SENTENCES.
'''
assert scope in [CLAUSES, SENTENCES], '(!) The scope should be either "clauses" or "sentences".'
group_indices = get_unique_clause_indices( text ) if scope==CLAUSES else get_unique_sentence_indices( text )
i = 0
gr_2nd_person_pron = {}
while i < len( words_layer ):
gr_index = group_indices[i]
if gr_index not in gr_2nd_person_pron:
# 1) Find out whether the current group (clause or sentence) contains "sina"
j = i
gr_2nd_person_pron_found = False
while j < len( words_layer ):
if group_indices[j] == gr_index:
forms = [ a[FORM] for a in words_layer[j][ANALYSIS] ]
lemmas = [ a[ROOT] for a in words_layer[j][ANALYSIS] ]
if 'sina' in lemmas and 'Sg Nom' in forms:
gr_2nd_person_pron_found = True
break
if group_indices[j] >= gr_index+10: # do not venture too far ...
break
j += 1
gr_2nd_person_pron[gr_index] = gr_2nd_person_pron_found
forms = [ a[FORM] for a in words_layer[i][ANALYSIS] ]
# 2) Disambiguate verb forms based on existence of 'sina' in the clause
if ('Pers Prt Ind Pl3 Aff' in forms and 'Pers Prt Ind Sg2 Aff' in forms): # -sid
if not gr_2nd_person_pron[ gr_index ]:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Ind Pl3 Aff'], ['Pers Prt Ind Pl3 Aff', 'Pers Prt Ind Sg2 Aff'] )
else:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Ind Sg2 Aff'], ['Pers Prt Ind Pl3 Aff', 'Pers Prt Ind Sg2 Aff'] )
if ('Pers Prs Cond Pl3 Aff' in forms and 'Pers Prs Cond Sg2 Aff' in forms): # -ksid
if not gr_2nd_person_pron[ gr_index ]:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prs Cond Pl3 Aff'], ['Pers Prs Cond Pl3 Aff', 'Pers Prs Cond Sg2 Aff'] )
else:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prs Cond Sg2 Aff'], ['Pers Prs Cond Pl3 Aff', 'Pers Prs Cond Sg2 Aff'] )
if ('Pers Prt Cond Pl3 Aff' in forms and 'Pers Prt Cond Sg2 Aff' in forms): # -nuksid
if not gr_2nd_person_pron[ gr_index ]:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Cond Pl3 Aff'], ['Pers Prt Cond Pl3 Aff', 'Pers Prt Cond Sg2 Aff'] )
else:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Cond Sg2 Aff'], ['Pers Prt Cond Pl3 Aff', 'Pers Prt Cond Sg2 Aff'] )
i += 1 | Disambiguates verb forms based on existence of 2nd person pronoun ('sina') in given scope.
        The scope could be either CLAUSES or SENTENCES. | Below is the instruction that describes the task:
### Input:
Disambiguates verb forms based on existence of 2nd person pronoun ('sina') in given scope.
The scope could be either CLAUSES or SENTENCES.
### Response:
def _disambiguate_sid_ksid( words_layer, text, scope=CLAUSES ):
''' Disambiguates verb forms based on existence of 2nd person pronoun ('sina') in given scope.
The scope could be either CLAUSES or SENTENCES.
'''
assert scope in [CLAUSES, SENTENCES], '(!) The scope should be either "clauses" or "sentences".'
group_indices = get_unique_clause_indices( text ) if scope==CLAUSES else get_unique_sentence_indices( text )
i = 0
gr_2nd_person_pron = {}
while i < len( words_layer ):
gr_index = group_indices[i]
if gr_index not in gr_2nd_person_pron:
# 1) Find out whether the current group (clause or sentence) contains "sina"
j = i
gr_2nd_person_pron_found = False
while j < len( words_layer ):
if group_indices[j] == gr_index:
forms = [ a[FORM] for a in words_layer[j][ANALYSIS] ]
lemmas = [ a[ROOT] for a in words_layer[j][ANALYSIS] ]
if 'sina' in lemmas and 'Sg Nom' in forms:
gr_2nd_person_pron_found = True
break
if group_indices[j] >= gr_index+10: # do not venture too far ...
break
j += 1
gr_2nd_person_pron[gr_index] = gr_2nd_person_pron_found
forms = [ a[FORM] for a in words_layer[i][ANALYSIS] ]
# 2) Disambiguate verb forms based on existence of 'sina' in the clause
if ('Pers Prt Ind Pl3 Aff' in forms and 'Pers Prt Ind Sg2 Aff' in forms): # -sid
if not gr_2nd_person_pron[ gr_index ]:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Ind Pl3 Aff'], ['Pers Prt Ind Pl3 Aff', 'Pers Prt Ind Sg2 Aff'] )
else:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Ind Sg2 Aff'], ['Pers Prt Ind Pl3 Aff', 'Pers Prt Ind Sg2 Aff'] )
if ('Pers Prs Cond Pl3 Aff' in forms and 'Pers Prs Cond Sg2 Aff' in forms): # -ksid
if not gr_2nd_person_pron[ gr_index ]:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prs Cond Pl3 Aff'], ['Pers Prs Cond Pl3 Aff', 'Pers Prs Cond Sg2 Aff'] )
else:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prs Cond Sg2 Aff'], ['Pers Prs Cond Pl3 Aff', 'Pers Prs Cond Sg2 Aff'] )
if ('Pers Prt Cond Pl3 Aff' in forms and 'Pers Prt Cond Sg2 Aff' in forms): # -nuksid
if not gr_2nd_person_pron[ gr_index ]:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Cond Pl3 Aff'], ['Pers Prt Cond Pl3 Aff', 'Pers Prt Cond Sg2 Aff'] )
else:
_keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Cond Sg2 Aff'], ['Pers Prt Cond Pl3 Aff', 'Pers Prt Cond Sg2 Aff'] )
i += 1 |
def _map_center(self, coord, val):
        ''' Identify the center of the image corresponding to one coordinate. '''
if self.ppd in [4, 8, 16, 32, 64]:
res = {'lat': 0, 'long': 360}
return res[coord] / 2.0
elif self.ppd in [128]:
res = {'lat': 90, 'long': 90}
return (val // res[coord] + 1) * res[coord] - res[coord] / 2.0
elif self.ppd in [256]:
res = {'lat': 60, 'long': 90}
        return (val // res[coord] + 1) * res[coord] - res[coord] / 2.0 | Identify the center of the image corresponding to one coordinate. | Below is the instruction that describes the task:
### Input:
Identify the center of the image corresponding to one coordinate.
### Response:
def _map_center(self, coord, val):
        ''' Identify the center of the image corresponding to one coordinate. '''
if self.ppd in [4, 8, 16, 32, 64]:
res = {'lat': 0, 'long': 360}
return res[coord] / 2.0
elif self.ppd in [128]:
res = {'lat': 90, 'long': 90}
return (val // res[coord] + 1) * res[coord] - res[coord] / 2.0
elif self.ppd in [256]:
res = {'lat': 60, 'long': 90}
return (val // res[coord] + 1) * res[coord] - res[coord] / 2.0 |
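The tile-center arithmetic is easiest to see with numbers; this standalone function mirrors the 128 ppd branch, where latitude and longitude tiles are both 90 degrees wide.

# Standalone check of the 128 ppd branch.
def map_center_128(coord, val):
    res = {'lat': 90, 'long': 90}
    return (val // res[coord] + 1) * res[coord] - res[coord] / 2.0

print(map_center_128('lat', 47))    # 45.0  -> tile covering 0..90
print(map_center_128('long', 100))  # 135.0 -> tile covering 90..180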
def _check_available(name):
'''
Returns boolean telling whether or not the named service is available
'''
_status = _systemctl_status(name)
sd_version = salt.utils.systemd.version(__context__)
if sd_version is not None and sd_version >= 231:
# systemd 231 changed the output of "systemctl status" for unknown
# services, and also made it return an exit status of 4. If we are on
# a new enough version, check the retcode, otherwise fall back to
# parsing the "systemctl status" output.
# See: https://github.com/systemd/systemd/pull/3385
# Also: https://github.com/systemd/systemd/commit/3dced37
return 0 <= _status['retcode'] < 4
out = _status['stdout'].lower()
if 'could not be found' in out:
# Catch cases where the systemd version is < 231 but the return code
# and output changes have been backported (e.g. RHEL 7.3).
return False
for line in salt.utils.itertools.split(out, '\n'):
match = re.match(r'\s+loaded:\s+(\S+)', line)
if match:
ret = match.group(1) != 'not-found'
break
else:
raise CommandExecutionError(
'Failed to get information on unit \'%s\'' % name
)
    return ret | Returns boolean telling whether or not the named service is available | Below is the instruction that describes the task:
### Input:
Returns boolean telling whether or not the named service is available
### Response:
def _check_available(name):
'''
Returns boolean telling whether or not the named service is available
'''
_status = _systemctl_status(name)
sd_version = salt.utils.systemd.version(__context__)
if sd_version is not None and sd_version >= 231:
# systemd 231 changed the output of "systemctl status" for unknown
# services, and also made it return an exit status of 4. If we are on
# a new enough version, check the retcode, otherwise fall back to
# parsing the "systemctl status" output.
# See: https://github.com/systemd/systemd/pull/3385
# Also: https://github.com/systemd/systemd/commit/3dced37
return 0 <= _status['retcode'] < 4
out = _status['stdout'].lower()
if 'could not be found' in out:
# Catch cases where the systemd version is < 231 but the return code
# and output changes have been backported (e.g. RHEL 7.3).
return False
for line in salt.utils.itertools.split(out, '\n'):
match = re.match(r'\s+loaded:\s+(\S+)', line)
if match:
ret = match.group(1) != 'not-found'
break
else:
raise CommandExecutionError(
'Failed to get information on unit \'%s\'' % name
)
return ret |
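For systemd older than 231 the function scrapes the `loaded:` line; here is a standalone demo of that regex against canned (already lowercased) output.

# Standalone demo of the fallback parsing of `systemctl status` output.
import re

out = """\
x unknown.service
   loaded: not-found (reason: no such file or directory)
   active: inactive (dead)
"""

for line in out.split('\n'):
    match = re.match(r'\s+loaded:\s+(\S+)', line)
    if match:
        print(match.group(1) != 'not-found')  # False -> unit unavailable
        break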
def select(self, space, key=None, **kwargs) -> _MethodRet:
"""
Select request coroutine.
Examples:
.. code-block:: pycon
>>> await conn.select('tester')
<Response sync=3 rowcount=2 data=[
<TarantoolTuple id=1 name='one'>,
<TarantoolTuple id=2 name='two'>
]>
>>> res = await conn.select('_space', ['tester'], index='name')
>>> res.data
[<TarantoolTuple id=512
owner=1
name='tester'
engine='memtx'
field_count=0
flags={}
format=[
{'name': 'id', 'type': 'unsigned'},
{'name': 'name', 'type': 'string'}
]>]
:param space: space id or space name.
:param key: key to select
:param offset: offset to use
:param limit: limit to use
:param index: index id or name
:param iterator: one of the following
* iterator id (int number),
* :class:`asynctnt.Iterator` object
* string with an iterator name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.select(space, key, **kwargs) | Select request coroutine.
Examples:
.. code-block:: pycon
>>> await conn.select('tester')
<Response sync=3 rowcount=2 data=[
<TarantoolTuple id=1 name='one'>,
<TarantoolTuple id=2 name='two'>
]>
>>> res = await conn.select('_space', ['tester'], index='name')
>>> res.data
[<TarantoolTuple id=512
owner=1
name='tester'
engine='memtx'
field_count=0
flags={}
format=[
{'name': 'id', 'type': 'unsigned'},
{'name': 'name', 'type': 'string'}
]>]
:param space: space id or space name.
:param key: key to select
:param offset: offset to use
:param limit: limit to use
:param index: index id or name
:param iterator: one of the following
* iterator id (int number),
* :class:`asynctnt.Iterator` object
* string with an iterator name
:param timeout: Request timeout
        :returns: :class:`asynctnt.Response` instance | Below is the instruction that describes the task:
### Input:
Select request coroutine.
Examples:
.. code-block:: pycon
>>> await conn.select('tester')
<Response sync=3 rowcount=2 data=[
<TarantoolTuple id=1 name='one'>,
<TarantoolTuple id=2 name='two'>
]>
>>> res = await conn.select('_space', ['tester'], index='name')
>>> res.data
[<TarantoolTuple id=512
owner=1
name='tester'
engine='memtx'
field_count=0
flags={}
format=[
{'name': 'id', 'type': 'unsigned'},
{'name': 'name', 'type': 'string'}
]>]
:param space: space id or space name.
:param key: key to select
:param offset: offset to use
:param limit: limit to use
:param index: index id or name
:param iterator: one of the following
* iterator id (int number),
* :class:`asynctnt.Iterator` object
* string with an iterator name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
### Response:
def select(self, space, key=None, **kwargs) -> _MethodRet:
"""
Select request coroutine.
Examples:
.. code-block:: pycon
>>> await conn.select('tester')
<Response sync=3 rowcount=2 data=[
<TarantoolTuple id=1 name='one'>,
<TarantoolTuple id=2 name='two'>
]>
>>> res = await conn.select('_space', ['tester'], index='name')
>>> res.data
[<TarantoolTuple id=512
owner=1
name='tester'
engine='memtx'
field_count=0
flags={}
format=[
{'name': 'id', 'type': 'unsigned'},
{'name': 'name', 'type': 'string'}
]>]
:param space: space id or space name.
:param key: key to select
:param offset: offset to use
:param limit: limit to use
:param index: index id or name
:param iterator: one of the following
* iterator id (int number),
* :class:`asynctnt.Iterator` object
* string with an iterator name
:param timeout: Request timeout
:returns: :class:`asynctnt.Response` instance
"""
return self._db.select(space, key, **kwargs) |
def with_variants(self, variants: Union['Variant', List['Variant']]) -> 'CentralDogma':
"""Create a new entity with the given variants.
:param variants: An optional variant or list of variants
Example Usage:
>>> app = Protein(name='APP', namespace='HGNC')
>>> ab42 = app.with_variants([Fragment(start=672, stop=713)])
>>> assert 'p(HGNC:APP, frag(672_713))' == ab42.as_bel()
"""
return self.__class__(
namespace=self.namespace,
name=self.name,
identifier=self.identifier,
variants=variants,
) | Create a new entity with the given variants.
:param variants: An optional variant or list of variants
Example Usage:
>>> app = Protein(name='APP', namespace='HGNC')
>>> ab42 = app.with_variants([Fragment(start=672, stop=713)])
        >>> assert 'p(HGNC:APP, frag(672_713))' == ab42.as_bel() | Below is the instruction that describes the task:
### Input:
Create a new entity with the given variants.
:param variants: An optional variant or list of variants
Example Usage:
>>> app = Protein(name='APP', namespace='HGNC')
>>> ab42 = app.with_variants([Fragment(start=672, stop=713)])
>>> assert 'p(HGNC:APP, frag(672_713))' == ab42.as_bel()
### Response:
def with_variants(self, variants: Union['Variant', List['Variant']]) -> 'CentralDogma':
"""Create a new entity with the given variants.
:param variants: An optional variant or list of variants
Example Usage:
>>> app = Protein(name='APP', namespace='HGNC')
>>> ab42 = app.with_variants([Fragment(start=672, stop=713)])
>>> assert 'p(HGNC:APP, frag(672_713))' == ab42.as_bel()
"""
return self.__class__(
namespace=self.namespace,
name=self.name,
identifier=self.identifier,
variants=variants,
) |
def get_course_video_ids_with_youtube_profile(course_ids=None, offset=None, limit=None):
"""
Returns a list that contains all the course ids and video ids with the youtube profile
Args:
course_ids (list): valid course ids
limit (int): batch records limit
offset (int): an offset for selecting a batch
Returns:
(list): Tuples of course_id, edx_video_id and youtube video url
"""
course_videos = (CourseVideo.objects.select_related('video')
.prefetch_related('video__encoded_videos', 'video__encoded_videos__profile')
.filter(video__encoded_videos__profile__profile_name='youtube')
.order_by('id')
.distinct())
if course_ids:
course_videos = course_videos.filter(course_id__in=course_ids)
course_videos = course_videos.values_list('course_id', 'video__edx_video_id')
if limit is not None and offset is not None:
course_videos = course_videos[offset: offset+limit]
course_videos_with_yt_profile = []
for course_id, edx_video_id in course_videos:
yt_profile = EncodedVideo.objects.filter(
video__edx_video_id=edx_video_id,
profile__profile_name='youtube'
).first()
if yt_profile:
course_videos_with_yt_profile.append((
course_id, edx_video_id, yt_profile.url
))
return course_videos_with_yt_profile | Returns a list that contains all the course ids and video ids with the youtube profile
Args:
course_ids (list): valid course ids
limit (int): batch records limit
offset (int): an offset for selecting a batch
Returns:
        (list): Tuples of course_id, edx_video_id and youtube video url | Below is the instruction that describes the task:
### Input:
Returns a list that contains all the course ids and video ids with the youtube profile
Args:
course_ids (list): valid course ids
limit (int): batch records limit
offset (int): an offset for selecting a batch
Returns:
(list): Tuples of course_id, edx_video_id and youtube video url
### Response:
def get_course_video_ids_with_youtube_profile(course_ids=None, offset=None, limit=None):
"""
Returns a list that contains all the course ids and video ids with the youtube profile
Args:
course_ids (list): valid course ids
limit (int): batch records limit
offset (int): an offset for selecting a batch
Returns:
(list): Tuples of course_id, edx_video_id and youtube video url
"""
course_videos = (CourseVideo.objects.select_related('video')
.prefetch_related('video__encoded_videos', 'video__encoded_videos__profile')
.filter(video__encoded_videos__profile__profile_name='youtube')
.order_by('id')
.distinct())
if course_ids:
course_videos = course_videos.filter(course_id__in=course_ids)
course_videos = course_videos.values_list('course_id', 'video__edx_video_id')
if limit is not None and offset is not None:
course_videos = course_videos[offset: offset+limit]
course_videos_with_yt_profile = []
for course_id, edx_video_id in course_videos:
yt_profile = EncodedVideo.objects.filter(
video__edx_video_id=edx_video_id,
profile__profile_name='youtube'
).first()
if yt_profile:
course_videos_with_yt_profile.append((
course_id, edx_video_id, yt_profile.url
))
return course_videos_with_yt_profile |
def is_whitelisted(self, request: AxesHttpRequest, credentials: dict = None) -> bool: # pylint: disable=unused-argument
"""
Checks if the request or given credentials are whitelisted for access.
"""
if is_client_ip_address_whitelisted(request):
return True
if is_client_method_whitelisted(request):
return True
        return False | Checks if the request or given credentials are whitelisted for access. | Below is the instruction that describes the task:
### Input:
Checks if the request or given credentials are whitelisted for access.
### Response:
def is_whitelisted(self, request: AxesHttpRequest, credentials: dict = None) -> bool: # pylint: disable=unused-argument
"""
Checks if the request or given credentials are whitelisted for access.
"""
if is_client_ip_address_whitelisted(request):
return True
if is_client_method_whitelisted(request):
return True
return False |
def handle_write(self):
"""get a PDU from the queue and send it."""
if _debug: UDPDirector._debug("handle_write")
try:
pdu = self.request.get()
sent = self.socket.sendto(pdu.pduData, pdu.pduDestination)
if _debug: UDPDirector._debug(" - sent %d octets to %s", sent, pdu.pduDestination)
        except socket.error as err:
if _debug: UDPDirector._debug(" - socket error: %s", err)
# get the peer
peer = self.peers.get(pdu.pduDestination, None)
if peer:
# let the actor handle the error
peer.handle_error(err)
else:
# let the director handle the error
                self.handle_error(err) | get a PDU from the queue and send it. | Below is the instruction that describes the task:
### Input:
get a PDU from the queue and send it.
### Response:
def handle_write(self):
"""get a PDU from the queue and send it."""
if _debug: UDPDirector._debug("handle_write")
try:
pdu = self.request.get()
sent = self.socket.sendto(pdu.pduData, pdu.pduDestination)
if _debug: UDPDirector._debug(" - sent %d octets to %s", sent, pdu.pduDestination)
        except socket.error as err:
if _debug: UDPDirector._debug(" - socket error: %s", err)
# get the peer
peer = self.peers.get(pdu.pduDestination, None)
if peer:
# let the actor handle the error
peer.handle_error(err)
else:
# let the director handle the error
self.handle_error(err) |
def process_trun(trun):
"""Goes through the trun and processes "run.log" """
trun["log_content"] = runlogs_to_html(trun["res_root"])
trun["aux_list"] = aux_listing(trun["aux_root"])
trun["hnames"] = extract_hook_names(trun)
    return True | Goes through the trun and processes "run.log" | Below is the instruction that describes the task:
### Input:
Goes through the trun and processes "run.log"
### Response:
def process_trun(trun):
"""Goes through the trun and processes "run.log" """
trun["log_content"] = runlogs_to_html(trun["res_root"])
trun["aux_list"] = aux_listing(trun["aux_root"])
trun["hnames"] = extract_hook_names(trun)
return True |
def save_account(self, account: Account) -> None:
""" Account was saved. """
person = account.person
if self._primary_group == 'institute':
lgroup = self._get_group(person.institute.group.name)
elif self._primary_group == 'default_project':
if account.default_project is None:
lgroup = self._get_group(self._default_primary_group)
else:
lgroup = self._get_group(account.default_project.group.name)
else:
raise RuntimeError("Unknown value of PRIMARY_GROUP.")
if account.default_project is None:
default_project = "none"
else:
default_project = account.default_project.pid
try:
luser = self._get_account(account.username)
changes = changeset(luser, {})
new_user = False
except ObjectDoesNotExist:
new_user = True
luser = self._account_class()
changes = changeset(luser, {
'uid': account.username
})
changes = changes.merge({
'gidNumber': lgroup['gidNumber'],
'givenName': person.first_name,
'sn': person.last_name,
'telephoneNumber': _str_or_none(person.telephone),
'mail': _str_or_none(person.email),
'title': _str_or_none(person.title),
'o': person.institute.name,
'cn': person.full_name,
'default_project': default_project,
'loginShell': account.shell,
'locked': account.is_locked()
})
save(changes, database=self._database)
if new_user:
# add all groups
for group in account.person.groups.all():
                self.add_account_to_group(account, group) | Account was saved. | Below is the instruction that describes the task:
### Input:
Account was saved.
### Response:
def save_account(self, account: Account) -> None:
""" Account was saved. """
person = account.person
if self._primary_group == 'institute':
lgroup = self._get_group(person.institute.group.name)
elif self._primary_group == 'default_project':
if account.default_project is None:
lgroup = self._get_group(self._default_primary_group)
else:
lgroup = self._get_group(account.default_project.group.name)
else:
raise RuntimeError("Unknown value of PRIMARY_GROUP.")
if account.default_project is None:
default_project = "none"
else:
default_project = account.default_project.pid
try:
luser = self._get_account(account.username)
changes = changeset(luser, {})
new_user = False
except ObjectDoesNotExist:
new_user = True
luser = self._account_class()
changes = changeset(luser, {
'uid': account.username
})
changes = changes.merge({
'gidNumber': lgroup['gidNumber'],
'givenName': person.first_name,
'sn': person.last_name,
'telephoneNumber': _str_or_none(person.telephone),
'mail': _str_or_none(person.email),
'title': _str_or_none(person.title),
'o': person.institute.name,
'cn': person.full_name,
'default_project': default_project,
'loginShell': account.shell,
'locked': account.is_locked()
})
save(changes, database=self._database)
if new_user:
# add all groups
for group in account.person.groups.all():
self.add_account_to_group(account, group) |
def tables(auth=None, eager=True):
"""Returns a list of tables for the given user."""
auth = auth or []
dynamodb = boto.connect_dynamodb(*auth)
    return [table(t, auth, eager=eager) for t in dynamodb.list_tables()] | Returns a list of tables for the given user. | Below is the instruction that describes the task:
### Input:
Returns a list of tables for the given user.
### Response:
def tables(auth=None, eager=True):
"""Returns a list of tables for the given user."""
auth = auth or []
dynamodb = boto.connect_dynamodb(*auth)
return [table(t, auth, eager=eager) for t in dynamodb.list_tables()] |
def pickle_data(data, picklefile):
"""Helper function to pickle `data` in `picklefile`."""
with open(picklefile, 'wb') as f:
        pickle.dump(data, f, protocol=2) | Helper function to pickle `data` in `picklefile`. | Below is the instruction that describes the task:
### Input:
Helper function to pickle `data` in `picklefile`.
### Response:
def pickle_data(data, picklefile):
"""Helper function to pickle `data` in `picklefile`."""
with open(picklefile, 'wb') as f:
pickle.dump(data, f, protocol=2) |
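A round-trip with an assumed unpickle counterpart; the load helper is not part of the source.

# Round-trip demo; unpickle_data is an assumed counterpart, not from the source.
import os
import pickle
import tempfile

def pickle_data(data, picklefile):
    with open(picklefile, 'wb') as f:
        pickle.dump(data, f, protocol=2)

def unpickle_data(picklefile):
    with open(picklefile, 'rb') as f:
        return pickle.load(f)

path = os.path.join(tempfile.gettempdir(), 'demo.pickle')
pickle_data({'answer': 42}, path)
print(unpickle_data(path))  # {'answer': 42}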
def heading(headingtext, headinglevel, lang='en'):
'''Make a new heading, return the heading element'''
lmap = {'en': 'Heading', 'it': 'Titolo'}
# Make our elements
paragraph = makeelement('p')
pr = makeelement('pPr')
pStyle = makeelement(
'pStyle', attributes={'val': lmap[lang]+str(headinglevel)})
run = makeelement('r')
text = makeelement('t', tagtext=headingtext)
# Add the text the run, and the run to the paragraph
pr.append(pStyle)
run.append(text)
paragraph.append(pr)
paragraph.append(run)
# Return the combined paragraph
    return paragraph | Make a new heading, return the heading element | Below is the instruction that describes the task:
### Input:
Make a new heading, return the heading element
### Response:
def heading(headingtext, headinglevel, lang='en'):
'''Make a new heading, return the heading element'''
lmap = {'en': 'Heading', 'it': 'Titolo'}
# Make our elements
paragraph = makeelement('p')
pr = makeelement('pPr')
pStyle = makeelement(
'pStyle', attributes={'val': lmap[lang]+str(headinglevel)})
run = makeelement('r')
text = makeelement('t', tagtext=headingtext)
# Add the text the run, and the run to the paragraph
pr.append(pStyle)
run.append(text)
paragraph.append(pr)
paragraph.append(run)
# Return the combined paragraph
return paragraph |
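The only locale-sensitive piece is the style-name lookup; a tiny standalone check of that mapping (extracted for illustration, not the module's own test).

# Standalone check of the heading style-name mapping.
lmap = {'en': 'Heading', 'it': 'Titolo'}

def style_name(headinglevel, lang='en'):
    return lmap[lang] + str(headinglevel)

print(style_name(2))        # Heading2
print(style_name(3, 'it'))  # Titolo3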
def get_conflicting_tools(self, request_only=False):
"""Returns tools of the same name provided by more than one package.
Args:
request_only: If True, only return the key from resolved packages
that were also present in the request.
Returns:
Dict of {tool-name: set([Variant])}.
"""
from collections import defaultdict
tool_sets = defaultdict(set)
tools_dict = self.get_tools(request_only=request_only)
for variant, tools in tools_dict.itervalues():
for tool in tools:
tool_sets[tool].add(variant)
conflicts = dict((k, v) for k, v in tool_sets.iteritems() if len(v) > 1)
return conflicts | Returns tools of the same name provided by more than one package.
Args:
request_only: If True, only return the key from resolved packages
that were also present in the request.
Returns:
        Dict of {tool-name: set([Variant])}. | Below is the instruction that describes the task:
### Input:
Returns tools of the same name provided by more than one package.
Args:
request_only: If True, only return the key from resolved packages
that were also present in the request.
Returns:
Dict of {tool-name: set([Variant])}.
### Response:
def get_conflicting_tools(self, request_only=False):
"""Returns tools of the same name provided by more than one package.
Args:
request_only: If True, only return the key from resolved packages
that were also present in the request.
Returns:
Dict of {tool-name: set([Variant])}.
"""
from collections import defaultdict
tool_sets = defaultdict(set)
tools_dict = self.get_tools(request_only=request_only)
for variant, tools in tools_dict.itervalues():
for tool in tools:
tool_sets[tool].add(variant)
conflicts = dict((k, v) for k, v in tool_sets.iteritems() if len(v) > 1)
return conflicts |
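The detection reduces to inverting a {variant: tools} mapping and keeping tools claimed by more than one variant; a Python 3 sketch with plain strings standing in for Variant objects.

# Standalone sketch of the tool-conflict detection.
from collections import defaultdict

tools_dict = {
    'maya-2020':  ('maya-2020',  ['maya', 'render']),
    'houdini-18': ('houdini-18', ['houdini', 'render']),
}

tool_sets = defaultdict(set)
for variant, tools in tools_dict.values():
    for tool in tools:
        tool_sets[tool].add(variant)

conflicts = {k: v for k, v in tool_sets.items() if len(v) > 1}
print(conflicts)  # {'render': {'maya-2020', 'houdini-18'}} (set order may vary)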
def course_key_is_valid(course_key):
"""
Course key object validation
"""
if course_key is None:
return False
try:
CourseKey.from_string(text_type(course_key))
except (InvalidKeyError, UnicodeDecodeError):
return False
    return True | Course key object validation | Below is the instruction that describes the task:
### Input:
Course key object validation
### Response:
def course_key_is_valid(course_key):
"""
Course key object validation
"""
if course_key is None:
return False
try:
CourseKey.from_string(text_type(course_key))
except (InvalidKeyError, UnicodeDecodeError):
return False
return True |
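Usage with opaque-keys' standard course key format, assuming the function and its imports are in scope; the course ID is an example, not taken from the source.

# Example inputs; requires opaque-keys' CourseKey as imported above.
print(course_key_is_valid(None))                               # False
print(course_key_is_valid('course-v1:edX+DemoX+Demo_Course'))  # True
print(course_key_is_valid('not a course key'))                 # False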
def show(context, id):
"""show(context, id)
Show a file.
>>> dcictl file-show [OPTIONS]
:param string id: ID of the file to show [required]
"""
content = file.content(context, id=id)
click.echo(content.text) | show(context, id)
Show a file.
>>> dcictl file-show [OPTIONS]
    :param string id: ID of the file to show [required] | Below is the instruction that describes the task:
### Input:
show(context, id)
Show a file.
>>> dcictl file-show [OPTIONS]
:param string id: ID of the file to show [required]
### Response:
def show(context, id):
"""show(context, id)
Show a file.
>>> dcictl file-show [OPTIONS]
:param string id: ID of the file to show [required]
"""
content = file.content(context, id=id)
click.echo(content.text) |
def unpack_from(cls, payload, expected_parts):
"""Unpack parts from payload"""
for num_part in iter_range(expected_parts):
hdr = payload.read(cls.header_size)
try:
part_header = PartHeader(*cls.header_struct.unpack(hdr))
except struct.error:
raise InterfaceError("No valid part header")
if part_header.payload_size % 8 != 0:
part_payload_size = part_header.payload_size + 8 - (part_header.payload_size % 8)
else:
part_payload_size = part_header.payload_size
pl = payload.read(part_payload_size)
part_payload = io.BytesIO(pl)
try:
_PartClass = PART_MAPPING[part_header.part_kind]
except KeyError:
raise InterfaceError("Unknown part kind %s" % part_header.part_kind)
debug('%s (%d/%d): %s', _PartClass.__name__, num_part+1, expected_parts, str(part_header))
debug('Read %d bytes payload for part %d', part_payload_size, num_part + 1)
init_arguments = _PartClass.unpack_data(part_header.argument_count, part_payload)
debug('Part data: %s', init_arguments)
part = _PartClass(*init_arguments)
part.header = part_header
part.attribute = part_header.part_attributes
part.source = 'server'
if pyhdb.tracing:
part.trace_header = humanhexlify(hdr[:part_header.payload_size])
part.trace_payload = humanhexlify(pl, 30)
            yield part | Unpack parts from payload | Below is the instruction that describes the task:
### Input:
Unpack parts from payload
### Response:
def unpack_from(cls, payload, expected_parts):
"""Unpack parts from payload"""
for num_part in iter_range(expected_parts):
hdr = payload.read(cls.header_size)
try:
part_header = PartHeader(*cls.header_struct.unpack(hdr))
except struct.error:
raise InterfaceError("No valid part header")
if part_header.payload_size % 8 != 0:
part_payload_size = part_header.payload_size + 8 - (part_header.payload_size % 8)
else:
part_payload_size = part_header.payload_size
pl = payload.read(part_payload_size)
part_payload = io.BytesIO(pl)
try:
_PartClass = PART_MAPPING[part_header.part_kind]
except KeyError:
raise InterfaceError("Unknown part kind %s" % part_header.part_kind)
debug('%s (%d/%d): %s', _PartClass.__name__, num_part+1, expected_parts, str(part_header))
debug('Read %d bytes payload for part %d', part_payload_size, num_part + 1)
init_arguments = _PartClass.unpack_data(part_header.argument_count, part_payload)
debug('Part data: %s', init_arguments)
part = _PartClass(*init_arguments)
part.header = part_header
part.attribute = part_header.part_attributes
part.source = 'server'
if pyhdb.tracing:
part.trace_header = humanhexlify(hdr[:part_header.payload_size])
part.trace_payload = humanhexlify(pl, 30)
yield part |
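The 8-byte alignment step is the subtle part of the header handling; here is a standalone check of the padding arithmetic.

# Standalone check of the 8-byte payload padding used above.
def padded_size(payload_size):
    if payload_size % 8 != 0:
        return payload_size + 8 - (payload_size % 8)
    return payload_size

for n in (0, 1, 7, 8, 9, 16):
    print(n, '->', padded_size(n))  # 0->0, 1->8, 7->8, 8->8, 9->16, 16->16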
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy) use an
        old safe_version, and so their version numbers can get mangled
        when converted to filenames (e.g., 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). These distributions will not be parsed
        properly downstream by Distribution and safe_version, so
take an extra step and try to get the version number from
the metadata file itself instead of the filename.
"""
md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
if md_version:
self._version = md_version
        return self | Packages installed by distutils (e.g. numpy or scipy) use an
        old safe_version, and so their version numbers can get mangled
        when converted to filenames (e.g., 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). These distributions will not be parsed
        properly downstream by Distribution and safe_version, so
        take an extra step and try to get the version number from
        the metadata file itself instead of the filename. | Below is the instruction that describes the task:
### Input:
Packages installed by distutils (e.g. numpy or scipy) use an
old safe_version, and so their version numbers can get mangled
when converted to filenames (e.g., 1.11.0.dev0+2329eae to
1.11.0.dev0_2329eae). These distributions will not be parsed
properly downstream by Distribution and safe_version, so
take an extra step and try to get the version number from
the metadata file itself instead of the filename.
### Response:
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy) use an
        old safe_version, and so their version numbers can get mangled
        when converted to filenames (e.g., 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). These distributions will not be parsed
        properly downstream by Distribution and safe_version, so
take an extra step and try to get the version number from
the metadata file itself instead of the filename.
"""
md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
if md_version:
self._version = md_version
return self |
def get_login_redirect(self, provider, account):
"""Return url to redirect authenticated users."""
info = self.model._meta.app_label, self.model._meta.model_name
# inline import to prevent circular imports.
from .admin import PRESERVED_FILTERS_SESSION_KEY
preserved_filters = self.request.session.get(PRESERVED_FILTERS_SESSION_KEY, None)
redirect_url = reverse('admin:%s_%s_changelist' % info)
if preserved_filters:
redirect_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': self.model._meta}, redirect_url)
        return redirect_url | Return url to redirect authenticated users. | Below is the instruction that describes the task:
### Input:
Return url to redirect authenticated users.
### Response:
def get_login_redirect(self, provider, account):
"""Return url to redirect authenticated users."""
info = self.model._meta.app_label, self.model._meta.model_name
# inline import to prevent circular imports.
from .admin import PRESERVED_FILTERS_SESSION_KEY
preserved_filters = self.request.session.get(PRESERVED_FILTERS_SESSION_KEY, None)
redirect_url = reverse('admin:%s_%s_changelist' % info)
if preserved_filters:
redirect_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': self.model._meta}, redirect_url)
return redirect_url |
def get_default_plot_params(plot_type=''):
"""
Return the default parameters used for figure generation.
Parameter
---------
plot_type : string-optional
Defines some plot-specific parameters to use by default
Returns
-------
default_plot_params : dictionary
Dictionary key-value pairs used to define plotting parameters
"""
default_plot_params = {
'autoclose': False,
'autoscale_y':True,
'axis_bg_color':'w',
'colormap':'summer',
'color_order': ['#0072b2','#d55e00', '#009e73', '#cc79a7', '#f0e442', '#56b4e9'], #Seaborn 'colorblind'
'figsize':(8,6), #Inches
'fontsize':14,
'grid':'on',
'interactive': True,
'line_color':'default',
'line_style':'-',
'line_width': 2.0,
'marker':None,
'output_fig_path':'test',
'output_fig_type':'png',
'overlay_average':False,
'overlay_average_color': '#424949',
'reverse_x': False,
'save_fig': False,
'suppress_fig':False,
'tick_fontsize':12,
'title':'',
'use_colormap':False,
'use_sci_format_xaxis':False,
'use_sci_format_yaxis':True,
'xlabel':'',
'xlim':None,
'ylabel':'',
'ylim':None,
}
# Some other good choices for color order!
# 'color_order': ['#4c72b0', '#55a868', '#c44e52', '#8172b2', '#ccb974', '#64b5cd'] #Based on a Seaborn 'deep' color palette
# 'color_order': ['#4878cf', '#6acc65', '#d65f5f', '#b47cc7', '#c4ad66', '#77bedb'] #Seaborn 'muted'
# 'color_order': ['#92c6ff', '#97f0aa', '#ff9f9a', '#d0bbff', '#fffea3', '#b0e0e6'] #Seaborn 'pastel'
# Add more default plot-type specific parameters to the dictionary
if plot_type is 'spectrum': # Best for plotting 1-5 lines on the same axis
default_plot_params.update({'reverse_x':True})
elif plot_type is 'fid': # Best for plotting 1-5 lines on the same axis
default_plot_params.update({'reverse_x':False})
elif plot_type == 'spectra': # Best for plotting >5 lines on the same axis
default_plot_params.update({'reverse_x':True,'line_width':1.0,
'overlay_average':True})
elif plot_type == 'fids': # Best for plotting >5 lines on the same axis
default_plot_params.update({'reverse_x':False,'line_width':1.0,
'overlay_average':True})
else:
# Note: Here is a good spot to add customized plotting parameters for specific kinds of plots
pass
return default_plot_params | Return the default parameters used for figure generation.
Parameters
----------
plot_type : str, optional
Defines some plot-specific parameters to use by default
Returns
-------
default_plot_params : dictionary
Dictionary key-value pairs used to define plotting parameters | Below is the instruction that describes the task:
### Input:
Return the default parameters used for figure generation.
Parameters
----------
plot_type : str, optional
Defines some plot-specific parameters to use by default
Returns
-------
default_plot_params : dictionary
Dictionary key-value pairs used to define plotting parameters
### Response:
def get_default_plot_params(plot_type=''):
"""
Return the default parameters used for figure generation.
Parameters
----------
plot_type : str, optional
Defines some plot-specific parameters to use by default
Returns
-------
default_plot_params : dictionary
Dictionary key-value pairs used to define plotting parameters
"""
default_plot_params = {
'autoclose': False,
'autoscale_y':True,
'axis_bg_color':'w',
'colormap':'summer',
'color_order': ['#0072b2','#d55e00', '#009e73', '#cc79a7', '#f0e442', '#56b4e9'], #Seaborn 'colorblind'
'figsize':(8,6), #Inches
'fontsize':14,
'grid':'on',
'interactive': True,
'line_color':'default',
'line_style':'-',
'line_width': 2.0,
'marker':None,
'output_fig_path':'test',
'output_fig_type':'png',
'overlay_average':False,
'overlay_average_color': '#424949',
'reverse_x': False,
'save_fig': False,
'suppress_fig':False,
'tick_fontsize':12,
'title':'',
'use_colormap':False,
'use_sci_format_xaxis':False,
'use_sci_format_yaxis':True,
'xlabel':'',
'xlim':None,
'ylabel':'',
'ylim':None,
}
# Some other good choices for color order!
# 'color_order': ['#4c72b0', '#55a868', '#c44e52', '#8172b2', '#ccb974', '#64b5cd'] #Based on a Seaborn 'deep' color palette
# 'color_order': ['#4878cf', '#6acc65', '#d65f5f', '#b47cc7', '#c4ad66', '#77bedb'] #Seaborn 'muted'
# 'color_order': ['#92c6ff', '#97f0aa', '#ff9f9a', '#d0bbff', '#fffea3', '#b0e0e6'] #Seaborn 'pastel'
# Add more default plot-type specific parameters to the dictionary
if plot_type == 'spectrum': # Best for plotting 1-5 lines on the same axis
default_plot_params.update({'reverse_x':True})
elif plot_type == 'fid': # Best for plotting 1-5 lines on the same axis
default_plot_params.update({'reverse_x':False})
elif plot_type == 'spectra': # Best for plotting >5 lines on the same axis
default_plot_params.update({'reverse_x':True,'line_width':1.0,
'overlay_average':True})
elif plot_type == 'fids': # Best for plotting >5 lines on the same axis
default_plot_params.update({'reverse_x':False,'line_width':1.0,
'overlay_average':True})
else:
# Note: Here is a good spot to add customized plotting parameters for specific kinds of plots
pass
return default_plot_params |
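A quick usage check, as a sketch (it assumes get_default_plot_params above is in scope): the 'spectra' preset reverses the x-axis, thins the lines, and enables the average overlay.

params = get_default_plot_params('spectra')
print(params['reverse_x'], params['line_width'], params['overlay_average'])  # True 1.0 True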
def reverse(cls, value, prop, visitor):
"""Like :py:meth:`normalize.visitor.VisitorPattern.apply` but called
for ``cast`` operations. The default implementation passes through but
squashes exceptions, just like apply.
"""
return (
None if isinstance(value, (AttributeError, KeyError)) else
value
) | Like :py:meth:`normalize.visitor.VisitorPattern.apply` but called
for ``cast`` operations. The default implementation passes through but
squashes exceptions, just like apply. | Below is the instruction that describes the task:
### Input:
Like :py:meth:`normalize.visitor.VisitorPattern.apply` but called
for ``cast`` operations. The default implementation passes through but
squashes exceptions, just like apply.
### Response:
def reverse(cls, value, prop, visitor):
"""Like :py:meth:`normalize.visitor.VisitorPattern.apply` but called
for ``cast`` operations. The default implementation passes through but
squashes exceptions, just like apply.
"""
return (
None if isinstance(value, (AttributeError, KeyError)) else
value
) |
def validate_protected_resource_request(self, uri, http_method='GET',
body=None, headers=None, realms=None):
"""Create a request token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param realms: A list of realms the resource is protected under.
This will be supplied to the ``validate_realms``
method of the request validator.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
"""
try:
request = self._create_request(uri, http_method, body, headers)
except errors.OAuth1Error:
return False, None
try:
self._check_transport_security(request)
self._check_mandatory_parameters(request)
except errors.OAuth1Error:
return False, request
if not request.resource_owner_key:
return False, request
if not self.request_validator.check_access_token(
request.resource_owner_key):
return False, request
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
access_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_access_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_access_token
# Note that `realm`_ is only used in authorization headers and how
# it should be interpreted is not included in the OAuth spec.
# However they could be seen as a scope or realm to which the
# client has access and as such every client should be checked
# to ensure it is authorized access to that scope or realm.
# .. _`realm`: https://tools.ietf.org/html/rfc2617#section-1.2
#
# Note that early exit would enable client realm access enumeration.
#
# The require_realm indicates this is the first step in the OAuth
# workflow where a client requests access to a specific realm.
# This first step (obtaining request token) need not require a realm
# and can then be identified by checking the require_resource_owner
# flag and absence of realm.
#
# Clients obtaining an access token will not supply a realm and it will
# not be checked. Instead the previously requested realm should be
# transferred from the request token to the access token.
#
# Access to protected resources will always validate the realm but note
# that the realm is now tied to the access token and not provided by
# the client.
valid_realm = self.request_validator.validate_realms(request.client_key,
request.resource_owner_key, request, uri=request.uri,
realms=realms)
valid_signature = self._check_signature(request)
# log the results to the validator_log
# this lets us handle internal reporting and analysis
request.validator_log['client'] = valid_client
request.validator_log['resource_owner'] = valid_resource_owner
request.validator_log['realm'] = valid_realm
request.validator_log['signature'] = valid_signature
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_realm,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s", valid_client)
log.info("Valid token: %s", valid_resource_owner)
log.info("Valid realm: %s", valid_realm)
log.info("Valid signature: %s", valid_signature)
return v, request | Validate a protected resource request.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param realms: A list of realms the resource is protected under.
This will be supplied to the ``validate_realms``
method of the request validator.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object. | Below is the instruction that describes the task:
### Input:
Validate a protected resource request.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param realms: A list of realms the resource is protected under.
This will be supplied to the ``validate_realms``
method of the request validator.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
### Response:
def validate_protected_resource_request(self, uri, http_method='GET',
body=None, headers=None, realms=None):
"""Create a request token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param realms: A list of realms the resource is protected under.
This will be supplied to the ``validate_realms``
method of the request validator.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
"""
try:
request = self._create_request(uri, http_method, body, headers)
except errors.OAuth1Error:
return False, None
try:
self._check_transport_security(request)
self._check_mandatory_parameters(request)
except errors.OAuth1Error:
return False, request
if not request.resource_owner_key:
return False, request
if not self.request_validator.check_access_token(
request.resource_owner_key):
return False, request
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
access_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_access_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_access_token
# Note that `realm`_ is only used in authorization headers and how
# it should be interpreted is not included in the OAuth spec.
# However they could be seen as a scope or realm to which the
# client has access and as such every client should be checked
# to ensure it is authorized access to that scope or realm.
# .. _`realm`: https://tools.ietf.org/html/rfc2617#section-1.2
#
# Note that early exit would enable client realm access enumeration.
#
# The require_realm indicates this is the first step in the OAuth
# workflow where a client requests access to a specific realm.
# This first step (obtaining request token) need not require a realm
# and can then be identified by checking the require_resource_owner
# flag and absence of realm.
#
# Clients obtaining an access token will not supply a realm and it will
# not be checked. Instead the previously requested realm should be
# transferred from the request token to the access token.
#
# Access to protected resources will always validate the realm but note
# that the realm is now tied to the access token and not provided by
# the client.
valid_realm = self.request_validator.validate_realms(request.client_key,
request.resource_owner_key, request, uri=request.uri,
realms=realms)
valid_signature = self._check_signature(request)
# log the results to the validator_log
# this lets us handle internal reporting and analysis
request.validator_log['client'] = valid_client
request.validator_log['resource_owner'] = valid_resource_owner
request.validator_log['realm'] = valid_realm
request.validator_log['signature'] = valid_signature
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_realm,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s", valid_client)
log.info("Valid token: %s", valid_resource_owner)
log.info("Valid realm: %s", valid_realm)
log.info("Valid signature: %s", valid_signature)
return v, request |
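The dummy-credential comments above describe a constant-time pattern worth seeing in isolation. Below is a minimal, self-contained sketch of the idea; CLIENTS, DUMMY_SECRET, and check() are illustrative inventions, not oauthlib API:

import hmac

CLIENTS = {'real-client': 'real-secret'}  # hypothetical client store
DUMMY_SECRET = 'dummy-secret'

def check(client_key, presented_secret):
    # Do the same comparison work whether or not the client exists,
    # and decide validity only at the very end (as the method above does).
    secret = CLIENTS.get(client_key, DUMMY_SECRET)
    valid_client = client_key in CLIENTS
    valid_secret = hmac.compare_digest(secret, presented_secret)
    return valid_client and valid_secret

print(check('real-client', 'real-secret'))   # True
print(check('no-such-client', 'anything'))   # False, via the same code path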
def people(self):
"""
Retrieve all people of the company
:return: list of people objects
:rtype: list
"""
return fields.ListField(name=HightonConstants.PEOPLE, init_class=Person).decode(
self.element_from_string(
self._get_request(
endpoint=self.ENDPOINT + '/' + str(self.id) + '/people',
).text
)
) | Retrieve all people of the company
:return: list of people objects
:rtype: list | Below is the instruction that describes the task:
### Input:
Retrieve all people of the company
:return: list of people objects
:rtype: list
### Response:
def people(self):
"""
Retrieve all people of the company
:return: list of people objects
:rtype: list
"""
return fields.ListField(name=HightonConstants.PEOPLE, init_class=Person).decode(
self.element_from_string(
self._get_request(
endpoint=self.ENDPOINT + '/' + str(self.id) + '/people',
).text
)
) |
def truncate_to(value: Decimal, currency: str) -> Decimal:
"""Truncates a value to the number of decimals corresponding to the currency"""
decimal_places = DECIMALS.get(currency.upper(), 2)
return truncate(value, decimal_places) | Truncates a value to the number of decimals corresponding to the currency | Below is the instruction that describes the task:
### Input:
Truncates a value to the number of decimals corresponding to the currency
### Response:
def truncate_to(value: Decimal, currency: str) -> Decimal:
"""Truncates a value to the number of decimals corresponding to the currency"""
decimal_places = DECIMALS.get(currency.upper(), 2)
return truncate(value, decimal_places) |
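A self-contained sketch of the same idea; DECIMALS and truncate are assumed helpers here, reconstructed for illustration only:

from decimal import Decimal, ROUND_DOWN

DECIMALS = {'BTC': 8, 'JPY': 0}  # hypothetical currency -> decimal-places map

def truncate(value: Decimal, places: int) -> Decimal:
    # Quantize toward zero, e.g. Decimal('1.239') at 2 places -> Decimal('1.23')
    return value.quantize(Decimal(1).scaleb(-places), rounding=ROUND_DOWN)

def truncate_to(value: Decimal, currency: str) -> Decimal:
    return truncate(value, DECIMALS.get(currency.upper(), 2))

print(truncate_to(Decimal('1.23987'), 'usd'))       # 1.23 (default 2 places)
print(truncate_to(Decimal('0.123456789'), 'BTC'))   # 0.12345678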
def find_column(t):
"""Get cursor position, based on previous newline"""
pos = t.lexer.lexpos
data = t.lexer.lexdata
last_cr = data.rfind('\n', 0, pos)
if last_cr < 0:
last_cr = -1
column = pos - last_cr
return column | Get cursor position, based on previous newline | Below is the instruction that describes the task:
### Input:
Get cursor position, based on previous newline
### Response:
def find_column(t):
"""Get cursor position, based on previous newline"""
pos = t.lexer.lexpos
data = t.lexer.lexdata
last_cr = data.rfind('\n', 0, pos)
if last_cr < 0:
last_cr = -1
column = pos - last_cr
return column |
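The column arithmetic stands alone; here is the same computation without a ply lexer, to show why the result is 1-based:

def column_at(data: str, pos: int) -> int:
    last_cr = data.rfind('\n', 0, pos)
    if last_cr < 0:
        last_cr = -1
    return pos - last_cr  # 1-based column

src = "abc\ndef"
print(column_at(src, 4))  # 1 -> 'd' starts line 2
print(column_at(src, 2))  # 3 -> 'c' is the third character of line 1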
def get_all_api_keys(self, **kwargs): # noqa: E501
"""Get all API keys # noqa: E501
An endpoint for retrieving API keys in an array, optionally filtered by the owner. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/api-keys -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_all_api_keys(asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:param str key__eq: API key filter.
:param str owner__eq: Owner name filter.
:return: ApiKeyInfoRespList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_all_api_keys_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_api_keys_with_http_info(**kwargs) # noqa: E501
return data | Get all API keys # noqa: E501
An endpoint for retrieving API keys in an array, optionally filtered by the owner. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/api-keys -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_all_api_keys(asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:param str key__eq: API key filter.
:param str owner__eq: Owner name filter.
:return: ApiKeyInfoRespList
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Get all API keys # noqa: E501
An endpoint for retrieving API keys in an array, optionally filtered by the owner. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/api-keys -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_all_api_keys(asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:param str key__eq: API key filter.
:param str owner__eq: Owner name filter.
:return: ApiKeyInfoRespList
If the method is called asynchronously,
returns the request thread.
### Response:
def get_all_api_keys(self, **kwargs): # noqa: E501
"""Get all API keys # noqa: E501
An endpoint for retrieving API keys in an array, optionally filtered by the owner. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/api-keys -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_all_api_keys(asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param int limit: The number of results to return (2-1000), default is 50.
:param str after: The entity ID to fetch after the given one.
:param str order: The order of the records based on creation time, ASC or DESC; by default ASC
:param str include: Comma separated additional data to return. Currently supported: total_count
:param str key__eq: API key filter.
:param str owner__eq: Owner name filter.
:return: ApiKeyInfoRespList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_all_api_keys_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_api_keys_with_http_info(**kwargs) # noqa: E501
return data |
def _create_factor_rule(tok):
"""
Simple helper method for creating factor node objects based on node name.
"""
if tok[0] == 'IPV4':
return IPV4Rule(tok[1])
if tok[0] == 'IPV6':
return IPV6Rule(tok[1])
if tok[0] == 'DATETIME':
return DatetimeRule(tok[1])
if tok[0] == 'TIMEDELTA':
return TimedeltaRule(tok[1])
if tok[0] == 'INTEGER':
return IntegerRule(tok[1])
if tok[0] == 'FLOAT':
return FloatRule(tok[1])
if tok[0] == 'VARIABLE':
return VariableRule(tok[1])
return ConstantRule(tok[1]) | Simple helper method for creating factor node objects based on node name. | Below is the instruction that describes the task:
### Input:
Simple helper method for creating factor node objects based on node name.
### Response:
def _create_factor_rule(tok):
"""
Simple helper method for creating factor node objects based on node name.
"""
if tok[0] == 'IPV4':
return IPV4Rule(tok[1])
if tok[0] == 'IPV6':
return IPV6Rule(tok[1])
if tok[0] == 'DATETIME':
return DatetimeRule(tok[1])
if tok[0] == 'TIMEDELTA':
return TimedeltaRule(tok[1])
if tok[0] == 'INTEGER':
return IntegerRule(tok[1])
if tok[0] == 'FLOAT':
return FloatRule(tok[1])
if tok[0] == 'VARIABLE':
return VariableRule(tok[1])
return ConstantRule(tok[1]) |
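The if-chain above maps one-to-one onto a dispatch table; a self-contained sketch of that alternative (the tiny Rule classes below stand in for the real ones, which live in the same module):

class _Rule:
    def __init__(self, value):
        self.value = value

class IPV4Rule(_Rule): pass
class IntegerRule(_Rule): pass
class ConstantRule(_Rule): pass

_FACTOR_RULES = {'IPV4': IPV4Rule, 'INTEGER': IntegerRule}  # extend per token type

def create_factor_rule(tok):
    return _FACTOR_RULES.get(tok[0], ConstantRule)(tok[1])

print(type(create_factor_rule(('INTEGER', 42))).__name__)  # IntegerRule
print(type(create_factor_rule(('STRING', 'x'))).__name__)  # ConstantRule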
def create_processing_context(feedback):
"""
Creates a default processing context
:param feedback: Linked processing feedback object
:type feedback: QgsProcessingFeedback
:return: Processing context
:rtype: QgsProcessingContext
"""
context = QgsProcessingContext()
context.setFeedback(feedback)
context.setProject(QgsProject.instance())
# skip Processing geometry checks - Inasafe has its own geometry validation
# routines which have already been used
context.setInvalidGeometryCheck(QgsFeatureRequest.GeometryNoCheck)
return context | Creates a default processing context
:param feedback: Linked processing feedback object
:type feedback: QgsProcessingFeedback
:return: Processing context
:rtype: QgsProcessingContext | Below is the instruction that describes the task:
### Input:
Creates a default processing context
:param feedback: Linked processing feedback object
:type feedback: QgsProcessingFeedback
:return: Processing context
:rtype: QgsProcessingContext
### Response:
def create_processing_context(feedback):
"""
Creates a default processing context
:param feedback: Linked processing feedback object
:type feedback: QgsProcessingFeedback
:return: Processing context
:rtype: QgsProcessingContext
"""
context = QgsProcessingContext()
context.setFeedback(feedback)
context.setProject(QgsProject.instance())
# skip Processing geometry checks - Inasafe has its own geometry validation
# routines which have already been used
context.setInvalidGeometryCheck(QgsFeatureRequest.GeometryNoCheck)
return context |
async def execute_method(self, method, **params):
"""Execute a specified Slack Web API method.
Arguments:
method (:py:class:`str`): The name of the method.
**params (:py:class:`dict`): Any additional parameters
required.
Returns:
:py:class:`dict`: The JSON data from the response.
Raises:
:py:class:`aiohttp.web_exceptions.HTTPException`: If the HTTP
request returns a code other than 200 (OK).
SlackApiError: If the Slack API is reached but the response
contains an error message.
"""
url = self.url_builder(method, url_params=params)
logger.info('Executing method %r', method)
response = await aiohttp.get(url)
logger.info('Status: %r', response.status)
if response.status == 200:
json = await response.json()
logger.debug('...with JSON %r', json)
if json.get('ok'):
return json
raise SlackApiError(json['error'])
else:
raise_for_status(response) | Execute a specified Slack Web API method.
Arguments:
method (:py:class:`str`): The name of the method.
**params (:py:class:`dict`): Any additional parameters
required.
Returns:
:py:class:`dict`: The JSON data from the response.
Raises:
:py:class:`aiohttp.web_exceptions.HTTPException`: If the HTTP
request returns a code other than 200 (OK).
SlackApiError: If the Slack API is reached but the response
contains an error message. | Below is the instruction that describes the task:
### Input:
Execute a specified Slack Web API method.
Arguments:
method (:py:class:`str`): The name of the method.
**params (:py:class:`dict`): Any additional parameters
required.
Returns:
:py:class:`dict`: The JSON data from the response.
Raises:
:py:class:`aiohttp.web_exceptions.HTTPException`: If the HTTP
request returns a code other than 200 (OK).
SlackApiError: If the Slack API is reached but the response
contains an error message.
### Response:
async def execute_method(self, method, **params):
"""Execute a specified Slack Web API method.
Arguments:
method (:py:class:`str`): The name of the method.
**params (:py:class:`dict`): Any additional parameters
required.
Returns:
:py:class:`dict`: The JSON data from the response.
Raises:
:py:class:`aiohttp.web_exceptions.HTTPException`: If the HTTP
request returns a code other than 200 (OK).
SlackApiError: If the Slack API is reached but the response
contains an error message.
"""
url = self.url_builder(method, url_params=params)
logger.info('Executing method %r', method)
response = await aiohttp.get(url)
logger.info('Status: %r', response.status)
if response.status == 200:
json = await response.json()
logger.debug('...with JSON %r', json)
if json.get('ok'):
return json
raise SlackApiError(json['error'])
else:
raise_for_status(response) |
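The ok/error envelope that execute_method unpacks is the interesting contract; a tiny synchronous sketch of just that unpacking (no HTTP, with SlackApiError stubbed and unpack() invented for illustration):

class SlackApiError(Exception):
    pass

def unpack(payload):
    # Slack wraps every response in {'ok': bool, ...}; failures carry 'error'.
    if payload.get('ok'):
        return payload
    raise SlackApiError(payload['error'])

print(unpack({'ok': True, 'channel': 'C123'}))
try:
    unpack({'ok': False, 'error': 'channel_not_found'})
except SlackApiError as err:
    print('API error:', err)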
def get_report_column_builders(self, request, model):
"""
Returns builders for column names and column values
for the elements, each element consisting of:
1. A header fetcher: will retrieve the title for the column.
2. A value fetcher: will retrieve the value for the column.
:param model: model to analyze and fetch.
:param request: current request being processed.
:return: A list of TrackingReportColumn pairs.
"""
meta = model._meta
field_names = set(field.name for field in meta.fields)
list_report = self.get_list_report(request) or field_names
_s = self._cell_value
def header(list_report_item):
if list_report_item in field_names:
return text_type(capfirst(meta.get_field(list_report_item).verbose_name))
else:
if isinstance(list_report_item, string_types):
# model member (method or property)
model_member = getattr(model, list_report_item, None)
# method check
if functions.is_method(model_member, functions.METHOD_UNBOUND|functions.METHOD_INSTANCE):
return text_type(capfirst(getattr(model_member, 'short_description',
list_report_item.replace('_', ' '))))
# property check
if isinstance(model_member, property):
if model_member.getter:
return text_type(capfirst(getattr(model_member, 'short_description',
list_report_item.replace('_', ' '))))
raise ValueError('Property item in `list_report` member, or returned by `get_list_report()` '
'must be readable')
# report member (method)
report_member = getattr(self, list_report_item, None)
if functions.is_method(report_member, functions.METHOD_UNBOUND|functions.METHOD_INSTANCE):
return text_type(capfirst(getattr(report_member, 'short_description',
list_report_item.replace('_', ' '))))
# regular callable
if callable(list_report_item):
return text_type(capfirst(getattr(list_report_item, 'short_description', None) or
getattr(list_report_item, '__name__', None) or '<unknown>'))
# invalid value
raise TypeError('Item in `list_report` member, or returned by `get_list_report()` must be a model '
'field name, or model instance method, current report\'s instance method, or '
'a regular callable')
def fetcher(list_report_item):
if list_report_item in field_names:
return lambda obj: _s(getattr(obj, list_report_item))
else:
if isinstance(list_report_item, string_types):
# model member (method or property)
model_member = getattr(model, list_report_item, None)
# method check
if functions.is_method(model_member, functions.METHOD_UNBOUND|functions.METHOD_INSTANCE):
return lambda obj: _s(getattr(obj, list_report_item)())
# property check
if isinstance(model_member, property):
if model_member.getter:
return lambda obj: _s(getattr(obj, list_report_item))
raise ValueError('Property item in `list_report` member, or returned by `get_list_report()` '
'must be readable')
# report member (method)
report_member = getattr(self, list_report_item, None)
if functions.is_method(report_member, functions.METHOD_UNBOUND|functions.METHOD_INSTANCE):
return lambda obj: _s(getattr(self, list_report_item)(obj))
# regular callable
if callable(list_report_item):
return lambda obj: _s(list_report_item(obj))
# invalid value
raise TypeError('Item in `list_report` member, or returned by `get_list_report()` must be a model '
'field name, or model instance method, current report\'s instance method, or '
'a regular callable')
return [TrackingReportColumn(header=header(item), fetcher=fetcher(item)) for item in list_report] | Returns builders for column names and column values
for the elements, each element consisting of:
1. A header fetcher: will retrieve the title for the column.
2. A value fetcher: will retrieve the value for the column.
:param model: model to analyze and fetch.
:param request: current request being processed.
:return: A list of TrackingReportColumn pairs. | Below is the instruction that describes the task:
### Input:
Returns builders for column names and column values
for the elements, each element consisting of:
1. A header fetcher: will retrieve the title for the column.
2. A value fetcher: will retrieve the value for the column.
:param model: model to analyze and fetch.
:param request: current request being processed.
:return: A list of TrackingReportColumn pairs.
### Response:
def get_report_column_builders(self, request, model):
"""
Returns builders for column names and column values
for the elements, each element consisting of:
1. A header fetcher: will retrieve the title for the column.
2. A value fetcher: will retrieve the value for the column.
:param model: model to analyze and fetch.
:param request: current request being processed.
:return: A list of TrackingReportColumn pairs.
"""
meta = model._meta
field_names = set(field.name for field in meta.fields)
list_report = self.get_list_report(request) or field_names
_s = self._cell_value
def header(list_report_item):
if list_report_item in field_names:
return text_type(capfirst(meta.get_field(list_report_item).verbose_name))
else:
if isinstance(list_report_item, string_types):
# model member (method or property)
model_member = getattr(model, list_report_item, None)
# method check
if functions.is_method(model_member, functions.METHOD_UNBOUND|functions.METHOD_INSTANCE):
return text_type(capfirst(getattr(model_member, 'short_description',
list_report_item.replace('_', ' '))))
# property check
if isinstance(model_member, property):
if model_member.getter:
return text_type(capfirst(getattr(model_member, 'short_description',
list_report_item.replace('_', ' '))))
raise ValueError('Property item in `list_report` member, or returned by `get_list_report()` '
'must be readable')
# report member (method)
report_member = getattr(self, list_report_item, None)
if functions.is_method(report_member, functions.METHOD_UNBOUND|functions.METHOD_INSTANCE):
return text_type(capfirst(getattr(report_member, 'short_description',
list_report_item.replace('_', ' '))))
# regular callable
if callable(list_report_item):
return text_type(capfirst(getattr(list_report_item, 'short_description', None) or
getattr(list_report_item, '__name__', None) or '<unknown>'))
# invalid value
raise TypeError('Item in `list_report` member, or returned by `get_list_report()` must be a model '
'field name, or model instance method, current report\'s instance method, or '
'a regular callable')
def fetcher(list_report_item):
if list_report_item in field_names:
return lambda obj: _s(getattr(obj, list_report_item))
else:
if isinstance(list_report_item, string_types):
# model member (method or property)
model_member = getattr(model, list_report_item, None)
# method check
if functions.is_method(model_member, functions.METHOD_UNBOUND|functions.METHOD_INSTANCE):
return lambda obj: _s(getattr(obj, list_report_item)())
# property check
if isinstance(model_member, property):
if model_member.getter:
return lambda obj: _s(getattr(obj, list_report_item))
raise ValueError('Property item in `list_report` member, or returned by `get_list_report()` '
'must be readable')
# report member (method)
report_member = getattr(self, list_report_item, None)
if functions.is_method(report_member, functions.METHOD_UNBOUND|functions.METHOD_INSTANCE):
return lambda obj: _s(getattr(self, list_report_item)(obj))
# regular callable
if callable(list_report_item):
return lambda obj: _s(list_report_item(obj))
# invalid value
raise TypeError('Item in `list_report` member, or returned by `get_list_report()` must be a model '
'field name, or model instance method, current report\'s instance method, or '
'a regular callable')
return [TrackingReportColumn(header=header(item), fetcher=fetcher(item)) for item in list_report] |
def prox_xline(x, step):
"""Projection onto line in x"""
if not np.isscalar(x):
x = x[0]
if x > 0.5:
return np.array([0.5])
else:
return np.array([x]) | Projection onto line in x | Below is the instruction that describes the task:
### Input:
Projection onto line in x
### Response:
def prox_xline(x, step):
"""Projection onto line in x"""
if not np.isscalar(x):
x = x[0]
if x > 0.5:
return np.array([0.5])
else:
return np.array([x]) |
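Usage of prox_xline as defined above (assuming numpy is imported as np): the first coordinate is clamped at 0.5, and a 1-element array always comes back.

import numpy as np
print(prox_xline(np.array([0.9, 0.2]), step=0.1))  # [0.5]
print(prox_xline(0.3, step=0.1))                   # [0.3]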
def distance_covariance_sqr(x, y, **kwargs):
"""
distance_covariance_sqr(x, y, *, exponent=1)
Computes the usual (biased) estimator for the squared distance covariance
between two random vectors.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
exponent: float
Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
Equivalently, it is twice the Hurst parameter of fractional Brownian
motion.
Returns
-------
numpy scalar
Biased estimator of the squared distance covariance.
See Also
--------
distance_covariance
u_distance_covariance_sqr
Notes
-----
The algorithm uses the fast distance covariance algorithm proposed in
:cite:`b-fast_distance_correlation` when possible.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> dcor.distance_covariance_sqr(a, a)
52.0
>>> dcor.distance_covariance_sqr(a, b)
1.0
>>> dcor.distance_covariance_sqr(b, b)
0.25
>>> dcor.distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
0.3705904...
"""
if _can_use_fast_algorithm(x, y, **kwargs):
return _distance_covariance_sqr_fast(x, y)
else:
return _distance_covariance_sqr_naive(x, y, **kwargs) | distance_covariance_sqr(x, y, *, exponent=1)
Computes the usual (biased) estimator for the squared distance covariance
between two random vectors.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
exponent: float
Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
Equivalently, it is twice the Hurst parameter of fractional Brownian
motion.
Returns
-------
numpy scalar
Biased estimator of the squared distance covariance.
See Also
--------
distance_covariance
u_distance_covariance_sqr
Notes
-----
The algorithm uses the fast distance covariance algorithm proposed in
:cite:`b-fast_distance_correlation` when possible.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> dcor.distance_covariance_sqr(a, a)
52.0
>>> dcor.distance_covariance_sqr(a, b)
1.0
>>> dcor.distance_covariance_sqr(b, b)
0.25
>>> dcor.distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
0.3705904... | Below is the instruction that describes the task:
### Input:
distance_covariance_sqr(x, y, *, exponent=1)
Computes the usual (biased) estimator for the squared distance covariance
between two random vectors.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
exponent: float
Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
Equivalently, it is twice the Hurst parameter of fractional Brownian
motion.
Returns
-------
numpy scalar
Biased estimator of the squared distance covariance.
See Also
--------
distance_covariance
u_distance_covariance_sqr
Notes
-----
The algorithm uses the fast distance covariance algorithm proposed in
:cite:`b-fast_distance_correlation` when possible.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> dcor.distance_covariance_sqr(a, a)
52.0
>>> dcor.distance_covariance_sqr(a, b)
1.0
>>> dcor.distance_covariance_sqr(b, b)
0.25
>>> dcor.distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
0.3705904...
### Response:
def distance_covariance_sqr(x, y, **kwargs):
"""
distance_covariance_sqr(x, y, *, exponent=1)
Computes the usual (biased) estimator for the squared distance covariance
between two random vectors.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
exponent: float
Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
Equivalently, it is twice the Hurst parameter of fractional Brownian
motion.
Returns
-------
numpy scalar
Biased estimator of the squared distance covariance.
See Also
--------
distance_covariance
u_distance_covariance_sqr
Notes
-----
The algorithm uses the fast distance covariance algorithm proposed in
:cite:`b-fast_distance_correlation` when possible.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> dcor.distance_covariance_sqr(a, a)
52.0
>>> dcor.distance_covariance_sqr(a, b)
1.0
>>> dcor.distance_covariance_sqr(b, b)
0.25
>>> dcor.distance_covariance_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
0.3705904...
"""
if _can_use_fast_algorithm(x, y, **kwargs):
return _distance_covariance_sqr_fast(x, y)
else:
return _distance_covariance_sqr_naive(x, y, **kwargs) |
def init(self, conn):
"""
Create the version table and run the base script on an empty database.
:param conn: a DB API 2 connection
"""
base = self.read_scripts()[0]['fname']
logging.info('Creating the initial schema from %s', base)
apply_sql_script(conn, os.path.join(self.upgrade_dir, base))
self.install_versioning(conn) | Create the version table and run the base script on an empty database.
:param conn: a DB API 2 connection | Below is the instruction that describes the task:
### Input:
Create the version table and run the base script on an empty database.
:param conn: a DB API 2 connection
### Response:
def init(self, conn):
"""
Create the version table and run the base script on an empty database.
:param conn: a DB API 2 connection
"""
base = self.read_scripts()[0]['fname']
logging.info('Creating the initial schema from %s', base)
apply_sql_script(conn, os.path.join(self.upgrade_dir, base))
self.install_versioning(conn) |
def ensure_new_style_deprecation(cli_ctx, kwargs, object_type):
""" Helper method to make the previous string-based deprecate_info kwarg
work with the new style. """
deprecate_info = kwargs.get('deprecate_info', None)
if isinstance(deprecate_info, Deprecated):
deprecate_info.object_type = object_type
elif isinstance(deprecate_info, STRING_TYPES):
deprecate_info = Deprecated(cli_ctx, redirect=deprecate_info, object_type=object_type)
kwargs['deprecate_info'] = deprecate_info
return deprecate_info | Helper method to make the previous string-based deprecate_info kwarg
work with the new style. | Below is the instruction that describes the task:
### Input:
Helper method to make the previous string-based deprecate_info kwarg
work with the new style.
### Response:
def ensure_new_style_deprecation(cli_ctx, kwargs, object_type):
""" Helper method to make the previous string-based deprecate_info kwarg
work with the new style. """
deprecate_info = kwargs.get('deprecate_info', None)
if isinstance(deprecate_info, Deprecated):
deprecate_info.object_type = object_type
elif isinstance(deprecate_info, STRING_TYPES):
deprecate_info = Deprecated(cli_ctx, redirect=deprecate_info, object_type=object_type)
kwargs['deprecate_info'] = deprecate_info
return deprecate_info |
def disable_signing(self):
'''disable MAVLink2 signing'''
self.mav.signing.secret_key = None
self.mav.signing.sign_outgoing = False
self.mav.signing.allow_unsigned_callback = None
self.mav.signing.link_id = 0
self.mav.signing.timestamp = 0 | disable MAVLink2 signing | Below is the instruction that describes the task:
### Input:
disable MAVLink2 signing
### Response:
def disable_signing(self):
'''disable MAVLink2 signing'''
self.mav.signing.secret_key = None
self.mav.signing.sign_outgoing = False
self.mav.signing.allow_unsigned_callback = None
self.mav.signing.link_id = 0
self.mav.signing.timestamp = 0 |
def get_beam(secret_uuid):
"""
Get a beam from the session with `secret_uuid`.
Parameters
----------
secret_uuid : str
Returns
-------
The beam object if it exists, otherwise `None`.
"""
beam_dir = get_beam_cache_directory()
beam_filename = os.path.join(beam_dir, secret_uuid)
if os.path.isfile(beam_filename):
with open(beam_filename, 'rb') as handle:
beam = pickle.load(handle)
return beam
else:
return None | Get a beam from the session with `secret_uuid`.
Parameters
----------
secret_uuid : str
Returns
-------
The beam object if it exists, otherwise `None`. | Below is the instruction that describes the task:
### Input:
Get a beam from the session with `secret_uuid`.
Parameters
----------
secret_uuid : str
Returns
-------
The beam object if it exists, otherwise `None`.
### Response:
def get_beam(secret_uuid):
"""
Get a beam from the session with `secret_uuid`.
Parameters
----------
secret_uuid : str
Returns
-------
The beam object if it exists, otherwise `None`.
"""
beam_dir = get_beam_cache_directory()
beam_filename = os.path.join(beam_dir, secret_uuid)
if os.path.isfile(beam_filename):
with open(beam_filename, 'rb') as handle:
beam = pickle.load(handle)
return beam
else:
return None |
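The cache layout get_beam reads is simply pickle files named by UUID; a minimal round-trip illustration, where the temp directory and the dict are stand-ins for get_beam_cache_directory() and a real beam object:

import os
import pickle
import tempfile

cache_dir = tempfile.mkdtemp()                      # stands in for the beam cache dir
path = os.path.join(cache_dir, 'my-secret-uuid')
with open(path, 'wb') as handle:
    pickle.dump({'energy_mev': 100.0}, handle)      # stands in for a beam object

with open(path, 'rb') as handle:
    beam = pickle.load(handle)
print(beam)  # {'energy_mev': 100.0}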
def _parse_auth_message(self, auth_message):
"""
Parse a message to see if we have ip addresses or users that we care about
:param auth_message: The auth message to parse
:return: Result
"""
result = {}
has_matched = False
for regex in REGEXES_INVALID_USER:
# Check for the invalid user/ip messages
m = re.search(regex, auth_message)
if m and not has_matched:
has_matched = True
# Save the username and IP
result['username'] = m.group('user')
result['ip'] = m.group('ip')
for regex in REGEXES_INVALID_IP:
# Check for the invalid ip messages
m = re.search(regex, auth_message)
if m and not has_matched:
has_matched = True
# Save the IP
result['ip'] = m.group('ip')
for regex in REGEXES_IGNORE:
# Check for messages we want to ignore
m = re.search(regex, auth_message)
if m and not has_matched:
has_matched = True
# If it's an ssh log and we don't know what it is, handle that
if not has_matched:
sys.stderr.write("Unhandled auth message: %s\n" % auth_message)
return result | Parse a message to see if we have ip addresses or users that we care about
:param auth_message: The auth message to parse
:return: Result | Below is the instruction that describes the task:
### Input:
Parse a message to see if we have ip addresses or users that we care about
:param auth_message: The auth message to parse
:return: Result
### Response:
def _parse_auth_message(self, auth_message):
"""
Parse a message to see if we have ip addresses or users that we care about
:param auth_message: The auth message to parse
:return: Result
"""
result = {}
has_matched = False
for regex in REGEXES_INVALID_USER:
# Check for the invalid user/ip messages
m = re.search(regex, auth_message)
if m and not has_matched:
has_matched = True
# Save the username and IP
result['username'] = m.group('user')
result['ip'] = m.group('ip')
for regex in REGEXES_INVALID_IP:
# Check for the invalid ip messages
m = re.search(regex, auth_message)
if m and not has_matched:
has_matched = True
# Save the IP
result['ip'] = m.group('ip')
for regex in REGEXES_IGNORE:
# Check for messages we want to ignore
m = re.search(regex, auth_message)
if m and not has_matched:
has_matched = True
# If it's an ssh log and we don't know what it is, handle that
if not has_matched:
sys.stderr.write("Unhandled auth message: %s\n" % auth_message)
return result |
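REGEXES_INVALID_USER and friends are module-level pattern lists; one representative pattern with the named groups the method relies on (the pattern itself is reconstructed for illustration, not the project's actual regex):

import re

REGEX_INVALID_USER = r'Invalid user (?P<user>\S+) from (?P<ip>\S+)'  # illustrative

m = re.search(REGEX_INVALID_USER, 'sshd[123]: Invalid user admin from 203.0.113.9')
print(m.group('user'), m.group('ip'))  # admin 203.0.113.9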
def _subscribe_resp(self, data):
""" Handle a subscribe response.
:param data: Payload.
:returns: State (ON/OFF)
"""
if _is_subscribe_response(data):
status = bytes([data[23]])
_LOGGER.debug("Successfully subscribed to %s, state: %s",
self.host, ord(status))
return status | Handle a subscribe response.
:param data: Payload.
:returns: State (ON/OFF) | Below is the instruction that describes the task:
### Input:
Handle a subscribe response.
:param data: Payload.
:returns: State (ON/OFF)
### Response:
def _subscribe_resp(self, data):
""" Handle a subscribe response.
:param data: Payload.
:returns: State (ON/OFF)
"""
if _is_subscribe_response(data):
status = bytes([data[23]])
_LOGGER.debug("Successfully subscribed to %s, state: %s",
self.host, ord(status))
return status |
def data_to_df(self, sysbase=False):
"""
Return a pandas.DataFrame of device parameters.
:param sysbase: save per unit values in system base
"""
p_dict_comp = self.data_to_dict(sysbase=sysbase)
self._check_pd()
self.param_df = pd.DataFrame(data=p_dict_comp).set_index('idx')
return self.param_df | Return a pandas.DataFrame of device parameters.
:param sysbase: save per unit values in system base | Below is the instruction that describes the task:
### Input:
Return a pandas.DataFrame of device parameters.
:param sysbase: save per unit values in system base
### Response:
def data_to_df(self, sysbase=False):
"""
Return a pandas.DataFrame of device parameters.
:param sysbase: save per unit values in system base
"""
p_dict_comp = self.data_to_dict(sysbase=sysbase)
self._check_pd()
self.param_df = pd.DataFrame(data=p_dict_comp).set_index('idx')
return self.param_df |
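The DataFrame step in isolation, with a toy p_dict_comp shaped the way the method expects (an 'idx' column plus per-device parameters):

import pandas as pd

p_dict_comp = {'idx': [1, 2], 'name': ['Bus1', 'Bus2'], 'Vn': [110.0, 220.0]}
param_df = pd.DataFrame(data=p_dict_comp).set_index('idx')
print(param_df)
#      name     Vn
# idx
# 1    Bus1  110.0
# 2    Bus2  220.0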
def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True):
"""
Save image by pypng module.
Args:
path (str): output filename
img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.
channel_first:
This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width).
Default value is False, which means the img shape is (height, width, channel)
as_uint16 (bool):
If True, save image as uint16.
auto_scale (bool) :
Whether to upscale pixel values or not.
If you want to save float image, this argument must be True.
In pypng backend, all below are supported.
- float ([0, 1]) to uint8 ([0, 255]) (if img.dtype==float and upscale==True and as_uint16==False)
- float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True)
- uint8 to uint16 are supported (if img.dtype==np.uint8 and upscale==True and as_uint16==True)
"""
img = _imsave_before(img, channel_first, auto_scale)
if auto_scale:
img = upscale_pixel_intensity(img, as_uint16)
img = check_type_and_cast_if_necessary(img, as_uint16)
bitdepth = 8 if img.dtype == np.uint8 else 16
grayscale = True if len(img.shape) == 2 or (
len(img.shape) == 3 and img.shape[-1] == 1) else False
writer = png.Writer(img.shape[1], img.shape[0],
greyscale=grayscale, bitdepth=bitdepth)
writer.write(open(path, "wb"), img.reshape(img.shape[0], -1)) | Save image by pypng module.
Args:
path (str): output filename
img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.
channel_first:
This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width).
Default value is False, which means the img shape is (height, width, channel)
as_uint16 (bool):
If True, save image as uint16.
auto_scale (bool) :
Whether to upscale pixel values or not.
If you want to save float image, this argument must be True.
In pypng backend, all below are supported.
- float ([0, 1]) to uint8 ([0, 255]) (if img.dtype==float and upscale==True and as_uint16==False)
- float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True)
- uint8 to uint16 are supported (if img.dtype==np.uint8 and upscale==True and as_uint16==True) | Below is the instruction that describes the task:
### Input:
Save image by pypng module.
Args:
path (str): output filename
img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.
channel_first:
This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width).
Default value is False, which means the img shape is (height, width, channel)
as_uint16 (bool):
If True, save image as uint16.
auto_scale (bool) :
Whether to upscale pixel values or not.
If you want to save float image, this argument must be True.
In pypng backend, all below are supported.
- float ([0, 1]) to uint8 ([0, 255]) (if img.dtype==float and upscale==True and as_uint16==False)
- float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True)
- uint8 to uint16 are supported (if img.dtype==np.uint8 and upscale==True and as_uint16==True)
### Response:
def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True):
"""
Save image by pypng module.
Args:
path (str): output filename
img (numpy.ndarray): Image array to save. Image shape is considered as (height, width, channel) by default.
channel_first:
This argument specifies whether the shape of img is (height, width, channel) or (channel, height, width).
Default value is False, which means the img shape is (height, width, channel)
as_uint16 (bool):
If True, save image as uint16.
auto_scale (bool) :
Whether to upscale pixel values or not.
If you want to save float image, this argument must be True.
In pypng backend, all below are supported.
- float ([0, 1]) to uint8 ([0, 255]) (if img.dtype==float and upscale==True and as_uint16==False)
- float to uint16 ([0, 65535]) (if img.dtype==float and upscale==True and as_uint16==True)
- uint8 to uint16 are supported (if img.dtype==np.uint8 and upscale==True and as_uint16==True)
"""
img = _imsave_before(img, channel_first, auto_scale)
if auto_scale:
img = upscale_pixel_intensity(img, as_uint16)
img = check_type_and_cast_if_necessary(img, as_uint16)
bitdepth = 8 if img.dtype == np.uint8 else 16
grayscale = True if len(img.shape) == 2 or (
len(img.shape) == 3 and img.shape[-1] == 1) else False
writer = png.Writer(img.shape[1], img.shape[0],
greyscale=grayscale, bitdepth=bitdepth)
writer.write(open(path, "wb"), img.reshape(img.shape[0], -1)) |
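The bitdepth/grayscale decision is easy to check in isolation:

import numpy as np

def describe(img):
    bitdepth = 8 if img.dtype == np.uint8 else 16
    grayscale = len(img.shape) == 2 or (len(img.shape) == 3 and img.shape[-1] == 1)
    return bitdepth, grayscale

print(describe(np.zeros((4, 4), dtype=np.uint8)))      # (8, True)
print(describe(np.zeros((4, 4, 3), dtype=np.uint16)))  # (16, False)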
def api(self, name, url, **kwargs):
"""Generic API method."""
if name not in self._apis:
raise ValueError('API name must be one of {0}, not {1!r}.'.format(
tuple(self._apis), name))
fields = kwargs.get('fields')
timeout = kwargs.get('timeout')
text = kwargs.get('text')
html = kwargs.get('html')
if text and html:
raise ValueError(u'Both `text` and `html` arguments provided!')
params = {'url': url, 'token': self._token}
if timeout:
params['timeout'] = timeout
if fields:
if not isinstance(fields, str):
fields = ','.join(sorted(fields))
params['fields'] = fields
url = self.endpoint(name)
if text or html:
content_type = html and 'text/html' or 'text/plain'
return self._post(url, text or html, content_type, params=params)
return self._get(url, params=params) | Generic API method. | Below is the instruction that describes the task:
### Input:
Generic API method.
### Response:
def api(self, name, url, **kwargs):
"""Generic API method."""
if name not in self._apis:
raise ValueError('API name must be one of {0}, not {1!r}.'.format(
tuple(self._apis), name))
fields = kwargs.get('fields')
timeout = kwargs.get('timeout')
text = kwargs.get('text')
html = kwargs.get('html')
if text and html:
raise ValueError(u'Both `text` and `html` arguments provided!')
params = {'url': url, 'token': self._token}
if timeout:
params['timeout'] = timeout
if fields:
if not isinstance(fields, str):
fields = ','.join(sorted(fields))
params['fields'] = fields
url = self.endpoint(name)
if text or html:
content_type = html and 'text/html' or 'text/plain'
return self._post(url, text or html, content_type, params=params)
return self._get(url, params=params) |
def sort_variants(vcf_handle):
"""Sort the variants of a vcf file
Args:
vcf_handle
Returns:
sorted_variants (Iterable): An iterable with sorted variants
"""
logger.debug("Creating temp file")
temp_file = NamedTemporaryFile(delete=False)
temp_file.close()
logger.debug("Opening temp file with codecs")
temp_file_handle = codecs.open(
temp_file.name,
mode='w',
encoding='utf-8',
errors='replace'
)
try:
with codecs.open(temp_file.name,mode='w',encoding='utf-8',errors='replace') as f:
for line in vcf_handle:
if not line.startswith('#'):
line = line.rstrip().split('\t')
chrom = line[0]
priority = get_chromosome_priority(chrom)
print_line = "{0}\t{1}\n".format(priority, '\t'.join(line))
f.write(print_line)
#Sort the variants
sort_variant_file(temp_file.name)
with codecs.open(temp_file.name,mode='r',encoding='utf-8',errors='replace') as f:
for line in f:
line = line.rstrip().split('\t')
yield '\t'.join(line[1:])
except Exception as err:
logger.error("Something went wrong")
logger.error(err)
finally:
logger.debug("Deleting temp file")
os.remove(temp_file.name)
logger.debug("Temp file deleted") | Sort the variants of a vcf file
Args:
vcf_handle
Returns:
sorted_variants (Iterable): An iterable with sorted variants | Below is the instruction that describes the task:
### Input:
Sort the variants of a vcf file
Args:
vcf_handle
Returns:
sorted_variants (Iterable): An iterable with sorted variants
### Response:
def sort_variants(vcf_handle):
"""Sort the variants of a vcf file
Args:
vcf_handle
Returns:
sorted_variants (Iterable): An iterable with sorted variants
"""
logger.debug("Creating temp file")
temp_file = NamedTemporaryFile(delete=False)
temp_file.close()
logger.debug("Opening temp file with codecs")
temp_file_handle = codecs.open(
temp_file.name,
mode='w',
encoding='utf-8',
errors='replace'
)
try:
with codecs.open(temp_file.name,mode='w',encoding='utf-8',errors='replace') as f:
for line in vcf_handle:
if not line.startswith('#'):
line = line.rstrip().split('\t')
chrom = line[0]
priority = get_chromosome_priority(chrom)
print_line = "{0}\t{1}\n".format(priority, '\t'.join(line))
f.write(print_line)
#Sort the variants
sort_variant_file(temp_file.name)
with codecs.open(temp_file.name,mode='r',encoding='utf-8',errors='replace') as f:
for line in f:
line = line.rstrip().split('\t')
yield '\t'.join(line[1:])
except Exception as err:
logger.error("Something went wrong")
logger.error(err)
finally:
logger.debug("Deleting temp file")
os.remove(temp_file.name)
logger.debug("Temp file deleted") |
def match(self, regex, flags=0):
"""
Matches the specified *regex* from the current character of the *scanner*
and returns the result. The Scanner's column and line numbers are updated
accordingly.
# Arguments
regex (str, Pattern): The regex to match.
flags (int): The flags to use when compiling the pattern.
"""
if isinstance(regex, str):
regex = re.compile(regex, flags)
match = regex.match(self.text, self.index)
if not match:
return None
start, end = match.start(), match.end()
lines = self.text.count('\n', start, end)
self.index = end
if lines:
self.colno = end - self.text.rfind('\n', start, end) - 1
self.lineno += lines
else:
self.colno += end - start
return match | Matches the specified *regex* from the current character of the *scanner*
and returns the result. The Scanner's column and line numbers are updated
accordingly.
# Arguments
regex (str, Pattern): The regex to match.
flags (int): The flags to use when compiling the pattern. | Below is the instruction that describes the task:
### Input:
Matches the specified *regex* from the current character of the *scanner*
and returns the result. The Scanner's column and line numbers are updated
accordingly.
# Arguments
regex (str, Pattern): The regex to match.
flags (int): The flags to use when compiling the pattern.
### Response:
def match(self, regex, flags=0):
"""
Matches the specified *regex* from the current character of the *scanner*
and returns the result. The Scanner's column and line numbers are updated
accordingly.
# Arguments
regex (str, Pattern): The regex to match.
flags (int): The flags to use when compiling the pattern.
"""
if isinstance(regex, str):
regex = re.compile(regex, flags)
match = regex.match(self.text, self.index)
if not match:
return None
start, end = match.start(), match.end()
lines = self.text.count('\n', start, end)
self.index = end
if lines:
self.colno = end - self.text.rfind('\n', start, end) - 1
self.lineno += lines
else:
self.colno += end - start
return match |
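A self-contained check of the bookkeeping above; MiniScanner is a hypothetical holder with just the state match() touches:

import re

class MiniScanner:
    def __init__(self, text):
        self.text = text
        self.index = 0
        self.lineno = 1
        self.colno = 0

    def match(self, regex, flags=0):
        # Same logic as the method above, restated so it runs standalone.
        if isinstance(regex, str):
            regex = re.compile(regex, flags)
        m = regex.match(self.text, self.index)
        if not m:
            return None
        start, end = m.start(), m.end()
        lines = self.text.count('\n', start, end)
        self.index = end
        if lines:
            self.colno = end - self.text.rfind('\n', start, end) - 1
            self.lineno += lines
        else:
            self.colno += end - start
        return m

s = MiniScanner("foo\nbar")
s.match(r"foo\n")
print(s.lineno, s.colno)  # 2 0 -> the cursor now sits at the start of line 2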