code | docstring | text |
---|---|---|
def register_pretty(type=None, predicate=None):
"""Returns a decorator that registers the decorated function
as the pretty printer for instances of ``type``.
:param type: the type to register the pretty printer for, or a ``str``
to indicate the module and name, e.g.: ``'collections.Counter'``.
:param predicate: a predicate function that takes one argument
and returns a boolean indicating if the value
should be handled by the registered pretty printer.
Only one of ``type`` and ``predicate`` may be supplied. That means
that ``predicate`` will be run on unregistered types only.
The decorated function must accept exactly two positional arguments:
- ``value`` to pretty print, and
- ``ctx``, a context value.
Here's an example of the pretty printer for OrderedDict:
.. code:: python
from collections import OrderedDict
from prettyprinter import register_pretty, pretty_call
@register_pretty(OrderedDict)
def pretty_ordereddict(value, ctx):
return pretty_call(ctx, OrderedDict, list(value.items()))
"""
if type is None and predicate is None:
raise ValueError(
"You must provide either the 'type' or 'predicate' argument."
)
if type is not None and predicate is not None:
raise ValueError(
"You must provide either the 'type' or 'predicate' argument,"
"but not both"
)
if predicate is not None:
if not callable(predicate):
raise ValueError(
"Expected a callable for 'predicate', got {}".format(
repr(predicate)
)
)
def decorator(fn):
sig = inspect.signature(fn)
value = None
ctx = None
try:
sig.bind(value, ctx)
except TypeError:
fnname = '{}.{}'.format(
fn.__module__,
fn.__qualname__
)
raise ValueError(
"Functions decorated with register_pretty must accept "
"exactly two positional parameters: 'value' and 'ctx'. "
"The function signature for {} was not compatible.".format(
fnname
)
)
if type:
if isinstance(type, str):
# We don't wrap this with _run_pretty,
# so that when we register this printer with an actual
# class, we can call register_pretty(cls)(fn)
_DEFERRED_DISPATCH_BY_NAME[type] = fn
else:
pretty_dispatch.register(type, partial(_run_pretty, fn))
else:
assert callable(predicate)
_PREDICATE_REGISTRY.append((predicate, fn))
return fn
return decorator | Returns a decorator that registers the decorated function
as the pretty printer for instances of ``type``.
:param type: the type to register the pretty printer for, or a ``str``
to indicate the module and name, e.g.: ``'collections.Counter'``.
:param predicate: a predicate function that takes one argument
and returns a boolean indicating if the value
should be handled by the registered pretty printer.
Only one of ``type`` and ``predicate`` may be supplied. That means
that ``predicate`` will be run on unregistered types only.
The decorated function must accept exactly two positional arguments:
- ``value`` to pretty print, and
- ``ctx``, a context value.
Here's an example of the pretty printer for OrderedDict:
.. code:: python
from collections import OrderedDict
from prettyprinter import register_pretty, pretty_call
@register_pretty(OrderedDict)
def pretty_ordereddict(value, ctx):
return pretty_call(ctx, OrderedDict, list(value.items())) | Below is the instruction that describes the task:
### Input:
Returns a decorator that registers the decorated function
as the pretty printer for instances of ``type``.
:param type: the type to register the pretty printer for, or a ``str``
to indicate the module and name, e.g.: ``'collections.Counter'``.
:param predicate: a predicate function that takes one argument
and returns a boolean indicating if the value
should be handled by the registered pretty printer.
Only one of ``type`` and ``predicate`` may be supplied. That means
that ``predicate`` will be run on unregistered types only.
The decorated function must accept exactly two positional arguments:
- ``value`` to pretty print, and
- ``ctx``, a context value.
Here's an example of the pretty printer for OrderedDict:
.. code:: python
from collections import OrderedDict
from prettyprinter import register_pretty, pretty_call
@register_pretty(OrderedDict)
def pretty_ordereddict(value, ctx):
return pretty_call(ctx, OrderedDict, list(value.items()))
### Response:
def register_pretty(type=None, predicate=None):
"""Returns a decorator that registers the decorated function
as the pretty printer for instances of ``type``.
:param type: the type to register the pretty printer for, or a ``str``
to indicate the module and name, e.g.: ``'collections.Counter'``.
:param predicate: a predicate function that takes one argument
and returns a boolean indicating if the value
should be handled by the registered pretty printer.
Only one of ``type`` and ``predicate`` may be supplied. That means
that ``predicate`` will be run on unregistered types only.
The decorated function must accept exactly two positional arguments:
- ``value`` to pretty print, and
- ``ctx``, a context value.
Here's an example of the pretty printer for OrderedDict:
.. code:: python
from collections import OrderedDict
from prettyprinter import register_pretty, pretty_call
@register_pretty(OrderedDict)
def pretty_ordereddict(value, ctx):
return pretty_call(ctx, OrderedDict, list(value.items()))
"""
if type is None and predicate is None:
raise ValueError(
"You must provide either the 'type' or 'predicate' argument."
)
if type is not None and predicate is not None:
raise ValueError(
"You must provide either the 'type' or 'predicate' argument,"
"but not both"
)
if predicate is not None:
if not callable(predicate):
raise ValueError(
"Expected a callable for 'predicate', got {}".format(
repr(predicate)
)
)
def decorator(fn):
sig = inspect.signature(fn)
value = None
ctx = None
try:
sig.bind(value, ctx)
except TypeError:
fnname = '{}.{}'.format(
fn.__module__,
fn.__qualname__
)
raise ValueError(
"Functions decorated with register_pretty must accept "
"exactly two positional parameters: 'value' and 'ctx'. "
"The function signature for {} was not compatible.".format(
fnname
)
)
if type:
if isinstance(type, str):
# We don't wrap this with _run_pretty,
# so that when we register this printer with an actual
# class, we can call register_pretty(cls)(fn)
_DEFERRED_DISPATCH_BY_NAME[type] = fn
else:
pretty_dispatch.register(type, partial(_run_pretty, fn))
else:
assert callable(predicate)
_PREDICATE_REGISTRY.append((predicate, fn))
return fn
return decorator |
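For illustration, a minimal sketch of registering a printer for a custom class through prettyprinter's public API (the Point class is made up for the example):

from prettyprinter import register_pretty, pretty_call, pformat

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

@register_pretty(Point)
def pretty_point(value, ctx):
    # Render instances as the constructor call Point(x, y).
    return pretty_call(ctx, Point, value.x, value.y)

print(pformat(Point(1, 2)))  # -> Point(1, 2)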
def update_ipsec_site_connection(self, ipsecsite_conn, body=None):
"""Updates an IPsecSiteConnection."""
return self.put(
self.ipsec_site_connection_path % (ipsecsite_conn), body=body
) | Updates an IPsecSiteConnection. | Below is the instruction that describes the task:
### Input:
Updates an IPsecSiteConnection.
### Response:
def update_ipsec_site_connection(self, ipsecsite_conn, body=None):
"""Updates an IPsecSiteConnection."""
return self.put(
self.ipsec_site_connection_path % (ipsecsite_conn), body=body
) |
def dictlist_convert_to_float(dict_list: Iterable[Dict], key: str) -> None:
"""
Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a float. If that fails, convert it to ``None``.
"""
for d in dict_list:
try:
d[key] = float(d[key])
except ValueError:
d[key] = None | Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a float. If that fails, convert it to ``None``. | Below is the instruction that describes the task:
### Input:
Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a float. If that fails, convert it to ``None``.
### Response:
def dictlist_convert_to_float(dict_list: Iterable[Dict], key: str) -> None:
"""
Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a float. If that fails, convert it to ``None``.
"""
for d in dict_list:
try:
d[key] = float(d[key])
except ValueError:
d[key] = None |
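For illustration, a quick sketch of the in-place conversion; note that only ValueError is caught, so the values are assumed to be strings or numbers:

rows = [{'bp': '120.5'}, {'bp': 'n/a'}]
dictlist_convert_to_float(rows, 'bp')
print(rows)  # [{'bp': 120.5}, {'bp': None}]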
def PlayWaveFile(filePath: str = r'C:\Windows\Media\notify.wav', isAsync: bool = False, isLoop: bool = False) -> bool:
"""
Call PlaySound from Win32.
filePath: str, if empty, stop playing the current sound.
isAsync: bool, if True, the sound is played asynchronously and returns immediately.
isLoop: bool, if True, the sound plays repeatedly until PlayWaveFile(None) is called again, must also set isAsync to True.
Return bool, True if it succeeds, otherwise False.
"""
if filePath:
SND_ASYNC = 0x0001
SND_NODEFAULT = 0x0002
SND_LOOP = 0x0008
SND_FILENAME = 0x20000
flags = SND_NODEFAULT | SND_FILENAME
if isAsync:
flags |= SND_ASYNC
if isLoop:
flags |= SND_LOOP
flags |= SND_ASYNC
return bool(ctypes.windll.winmm.PlaySoundW(ctypes.c_wchar_p(filePath), ctypes.c_void_p(0), flags))
else:
return bool(ctypes.windll.winmm.PlaySoundW(ctypes.c_wchar_p(0), ctypes.c_void_p(0), 0)) | Call PlaySound from Win32.
filePath: str, if empty, stop playing the current sound.
isAsync: bool, if True, the sound is played asynchronously and returns immediately.
isLoop: bool, if True, the sound plays repeatedly until PlayWaveFile(None) is called again, must also set isAsync to True.
Return bool, True if it succeeds, otherwise False. | Below is the instruction that describes the task:
### Input:
Call PlaySound from Win32.
filePath: str, if empty, stop playing the current sound.
isAsync: bool, if True, the sound is played asynchronously and returns immediately.
isLoop: bool, if True, the sound plays repeatedly until PlayWaveFile(None) is called again, must also set isAsync to True.
Return bool, True if it succeeds, otherwise False.
### Response:
def PlayWaveFile(filePath: str = r'C:\Windows\Media\notify.wav', isAsync: bool = False, isLoop: bool = False) -> bool:
"""
Call PlaySound from Win32.
filePath: str, if empty, stop playing the current sound.
isAsync: bool, if True, the sound is played asynchronously and returns immediately.
isLoop: bool, if True, the sound plays repeatedly until PlayWaveFile(None) is called again, must also set isAsync to True.
Return bool, True if it succeeds, otherwise False.
"""
if filePath:
SND_ASYNC = 0x0001
SND_NODEFAULT = 0x0002
SND_LOOP = 0x0008
SND_FILENAME = 0x20000
flags = SND_NODEFAULT | SND_FILENAME
if isAsync:
flags |= SND_ASYNC
if isLoop:
flags |= SND_LOOP
flags |= SND_ASYNC
return bool(ctypes.windll.winmm.PlaySoundW(ctypes.c_wchar_p(filePath), ctypes.c_void_p(0), flags))
else:
return bool(ctypes.windll.winmm.PlaySoundW(ctypes.c_wchar_p(0), ctypes.c_void_p(0), 0)) |
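A usage sketch (Windows only, since the function drives winmm through ctypes): start a looping sound asynchronously, then stop it by passing an empty path:

import time

PlayWaveFile(r'C:\Windows\Media\notify.wav', isAsync=True, isLoop=True)
time.sleep(3)     # let the loop play for a few seconds
PlayWaveFile('')  # an empty path stops the current sound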
def get_config(self):
"""Returns ownCloud config information
:returns: array of tuples (key, value) for each information
e.g. [('version', '1.7'), ('website', 'ownCloud'), ('host', 'cloud.example.com'),
('contact', ''), ('ssl', 'false')]
:raises: HTTPResponseError in case an HTTP error status was returned
"""
path = 'config'
res = self._make_ocs_request(
'GET',
'',
path
)
if res.status_code == 200:
tree = ET.fromstring(res.content)
self._check_ocs_status(tree)
values = []
element = tree.find('data')
if element is not None:
keys = ['version', 'website', 'host', 'contact', 'ssl']
for key in keys:
text = element.find(key).text or ''
values.append(text)
return zip(keys, values)
else:
return None
raise HTTPResponseError(res) | Returns ownCloud config information
:returns: array of tuples (key, value) for each piece of information
e.g. [('version', '1.7'), ('website', 'ownCloud'), ('host', 'cloud.example.com'),
('contact', ''), ('ssl', 'false')]
:raises: HTTPResponseError in case an HTTP error status was returned | Below is the instruction that describes the task:
### Input:
Returns ownCloud config information
:returns: array of tuples (key, value) for each piece of information
e.g. [('version', '1.7'), ('website', 'ownCloud'), ('host', 'cloud.example.com'),
('contact', ''), ('ssl', 'false')]
:raises: HTTPResponseError in case an HTTP error status was returned
### Response:
def get_config(self):
"""Returns ownCloud config information
:returns: array of tuples (key, value) for each piece of information
e.g. [('version', '1.7'), ('website', 'ownCloud'), ('host', 'cloud.example.com'),
('contact', ''), ('ssl', 'false')]
:raises: HTTPResponseError in case an HTTP error status was returned
"""
path = 'config'
res = self._make_ocs_request(
'GET',
'',
path
)
if res.status_code == 200:
tree = ET.fromstring(res.content)
self._check_ocs_status(tree)
values = []
element = tree.find('data')
if element is not None:
keys = ['version', 'website', 'host', 'contact', 'ssl']
for key in keys:
text = element.find(key).text or ''
values.append(text)
return zip(keys, values)
else:
return None
raise HTTPResponseError(res) |
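A usage sketch, assuming `oc` is an authenticated pyocclient Client instance (the variable name is illustrative); note that on Python 3 the method returns a zip iterator:

for key, value in oc.get_config():
    print('{}: {}'.format(key, value))
# version: 1.7
# website: ownCloud
# ...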
def _geom_type(self, source):
"""gets geometry type(s) of specified layer"""
if isinstance(source, AbstractLayer):
query = source.orig_query
else:
query = 'SELECT * FROM "{table}"'.format(table=source)
resp = self.sql_client.send(
utils.minify_sql((
'SELECT',
' CASE WHEN ST_GeometryType(the_geom)',
' in (\'ST_Point\', \'ST_MultiPoint\')',
' THEN \'point\'',
' WHEN ST_GeometryType(the_geom)',
' in (\'ST_LineString\', \'ST_MultiLineString\')',
' THEN \'line\'',
' WHEN ST_GeometryType(the_geom)',
' in (\'ST_Polygon\', \'ST_MultiPolygon\')',
' THEN \'polygon\'',
' ELSE null END AS geom_type,',
' count(*) as cnt',
'FROM ({query}) AS _wrap',
'WHERE the_geom IS NOT NULL',
'GROUP BY 1',
'ORDER BY 2 DESC',
)).format(query=query),
**DEFAULT_SQL_ARGS)
if resp['total_rows'] > 1:
warn('There are multiple geometry types in {query}: '
'{geoms}. Styling by `{common_geom}`, the most common'.format(
query=query,
geoms=','.join(g['geom_type'] for g in resp['rows']),
common_geom=resp['rows'][0]['geom_type']))
elif resp['total_rows'] == 0:
raise ValueError('No geometry for layer. Check all layer tables '
'and queries to ensure there are geometries.')
return resp['rows'][0]['geom_type'] | gets geometry type(s) of specified layer | Below is the instruction that describes the task:
### Input:
gets geometry type(s) of specified layer
### Response:
def _geom_type(self, source):
"""gets geometry type(s) of specified layer"""
if isinstance(source, AbstractLayer):
query = source.orig_query
else:
query = 'SELECT * FROM "{table}"'.format(table=source)
resp = self.sql_client.send(
utils.minify_sql((
'SELECT',
' CASE WHEN ST_GeometryType(the_geom)',
' in (\'ST_Point\', \'ST_MultiPoint\')',
' THEN \'point\'',
' WHEN ST_GeometryType(the_geom)',
' in (\'ST_LineString\', \'ST_MultiLineString\')',
' THEN \'line\'',
' WHEN ST_GeometryType(the_geom)',
' in (\'ST_Polygon\', \'ST_MultiPolygon\')',
' THEN \'polygon\'',
' ELSE null END AS geom_type,',
' count(*) as cnt',
'FROM ({query}) AS _wrap',
'WHERE the_geom IS NOT NULL',
'GROUP BY 1',
'ORDER BY 2 DESC',
)).format(query=query),
**DEFAULT_SQL_ARGS)
if resp['total_rows'] > 1:
warn('There are multiple geometry types in {query}: '
'{geoms}. Styling by `{common_geom}`, the most common'.format(
query=query,
geoms=','.join(g['geom_type'] for g in resp['rows']),
common_geom=resp['rows'][0]['geom_type']))
elif resp['total_rows'] == 0:
raise ValueError('No geometry for layer. Check all layer tables '
'and queries to ensure there are geometries.')
return resp['rows'][0]['geom_type'] |
def _check_1st_line(line, **kwargs):
"""First line check.
Check that the first line has a known component name followed by a colon
and then a short description of the commit.
:param line: first line
:type line: str
:param components: list of known component names
:type line: list
:param max_first_line: maximum length of the first line
:type max_first_line: int
:return: errors as in (code, line number, *args)
:rtype: list
"""
components = kwargs.get("components", ())
max_first_line = kwargs.get("max_first_line", 50)
errors = []
lineno = 1
if len(line) > max_first_line:
errors.append(("M190", lineno, max_first_line, len(line)))
if line.endswith("."):
errors.append(("M191", lineno))
if ':' not in line:
errors.append(("M110", lineno))
else:
component, msg = line.split(':', 1)
if component not in components:
errors.append(("M111", lineno, component))
return errors | First line check.
Check that the first line has a known component name followed by a colon
and then a short description of the commit.
:param line: first line
:type line: str
:param components: list of known component names
:type line: list
:param max_first_line: maximum length of the first line
:type max_first_line: int
:return: errors as in (code, line number, *args)
:rtype: list | Below is the instruction that describes the task:
### Input:
First line check.
Check that the first line has a known component name followed by a colon
and then a short description of the commit.
:param line: first line
:type line: str
:param components: list of known component names
:type line: list
:param max_first_line: maximum length of the first line
:type max_first_line: int
:return: errors as in (code, line number, *args)
:rtype: list
### Response:
def _check_1st_line(line, **kwargs):
"""First line check.
Check that the first line has a known component name followed by a colon
and then a short description of the commit.
:param line: first line
:type line: str
:param components: list of known component names
:type line: list
:param max_first_line: maximum length of the first line
:type max_first_line: int
:return: errors as in (code, line number, *args)
:rtype: list
"""
components = kwargs.get("components", ())
max_first_line = kwargs.get("max_first_line", 50)
errors = []
lineno = 1
if len(line) > max_first_line:
errors.append(("M190", lineno, max_first_line, len(line)))
if line.endswith("."):
errors.append(("M191", lineno))
if ':' not in line:
errors.append(("M110", lineno))
else:
component, msg = line.split(':', 1)
if component not in components:
errors.append(("M111", lineno, component))
return errors |
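For illustration, a minimal call showing the shape of the returned error tuples (the component list is made up):

errors = _check_1st_line(
    'docs: fix typo in installation guide.',
    components=('docs', 'build', 'tests'),
    max_first_line=50,
)
print(errors)  # [('M191', 1)] -- the summary line must not end with a period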
def describe(path):
"""
Return a textual description of the file pointed to by this path.
Options:
- "symbolic link"
- "directory"
- "'.' directory"
- "'..' directory"
- "regular file"
- "regular empty file"
- "non existent"
- "entry"
"""
if os.path.islink(path):
return 'symbolic link'
elif os.path.isdir(path):
if path == '.':
return 'directory'
elif path == '..':
return 'directory'
else:
if os.path.basename(path) == '.':
return "'.' directory"
elif os.path.basename(path) == '..':
return "'..' directory"
else:
return 'directory'
elif os.path.isfile(path):
if os.path.getsize(path) == 0:
return 'regular empty file'
else:
return 'regular file'
elif not os.path.exists(path):
return 'non existent'
else:
return 'entry' | Return a textual description of the file pointed to by this path.
Options:
- "symbolic link"
- "directory"
- "'.' directory"
- "'..' directory"
- "regular file"
- "regular empty file"
- "non existent"
- "entry" | Below is the the instruction that describes the task:
### Input:
Return a textual description of the file pointed to by this path.
Options:
- "symbolic link"
- "directory"
- "'.' directory"
- "'..' directory"
- "regular file"
- "regular empty file"
- "non existent"
- "entry"
### Response:
def describe(path):
"""
Return a textual description of the file pointed to by this path.
Options:
- "symbolic link"
- "directory"
- "'.' directory"
- "'..' directory"
- "regular file"
- "regular empty file"
- "non existent"
- "entry"
"""
if os.path.islink(path):
return 'symbolic link'
elif os.path.isdir(path):
if path == '.':
return 'directory'
elif path == '..':
return 'directory'
else:
if os.path.basename(path) == '.':
return "'.' directory"
elif os.path.basename(path) == '..':
return "'..' directory"
else:
return 'directory'
elif os.path.isfile(path):
if os.path.getsize(path) == 0:
return 'regular empty file'
else:
return 'regular file'
elif not os.path.exists(path):
return 'non existent'
else:
return 'entry' |
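A quick sketch of the possible return values:

import os

print(describe('.'))              # 'directory'
print(describe('/no/such/path'))  # 'non existent'
if os.path.isfile('setup.py'):    # 'setup.py' is illustrative
    print(describe('setup.py'))   # 'regular file' (or 'regular empty file')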
def get_pp_name(self):
'''Determine the pseudopotential names from the output'''
ppnames = []
# Find the number of atom types
natomtypes = int(self._get_line('number of atomic types', self.outputf).split()[5])
# Find the pseudopotential names
with open(self.outputf) as fp:
for line in fp:
if "PseudoPot. #" in line:
ppnames.append(Scalar(value=next(fp).split('/')[-1].rstrip()))
if len(ppnames) == natomtypes:
return Value(scalars=ppnames)
raise Exception('Could not find %i pseudopotential names'%natomtypes) | Determine the pseudopotential names from the output | Below is the instruction that describes the task:
### Input:
Determine the pseudopotential names from the output
### Response:
def get_pp_name(self):
'''Determine the pseudopotential names from the output'''
ppnames = []
# Find the number of atom types
natomtypes = int(self._get_line('number of atomic types', self.outputf).split()[5])
# Find the pseudopotential names
with open(self.outputf) as fp:
for line in fp:
if "PseudoPot. #" in line:
ppnames.append(Scalar(value=next(fp).split('/')[-1].rstrip()))
if len(ppnames) == natomtypes:
return Value(scalars=ppnames)
raise Exception('Could not find %i pseudopotential names'%natomtypes) |
def _ParseCmdItem(self, cmd_input, template_file=None):
"""Creates Texttable with output of command.
Args:
cmd_input: String, Device response.
template_file: File object, template to parse with.
Returns:
TextTable containing command output.
Raises:
CliTableError: A template was not found for the given command.
"""
# Build FSM machine from the template.
fsm = textfsm.TextFSM(template_file)
if not self._keys:
self._keys = set(fsm.GetValuesByAttrib('Key'))
# Pass raw data through FSM.
table = texttable.TextTable()
table.header = fsm.header
# Fill TextTable from record entries.
for record in fsm.ParseText(cmd_input):
table.Append(record)
return table | Creates Texttable with output of command.
Args:
cmd_input: String, Device response.
template_file: File object, template to parse with.
Returns:
TextTable containing command output.
Raises:
CliTableError: A template was not found for the given command. | Below is the instruction that describes the task:
### Input:
Creates Texttable with output of command.
Args:
cmd_input: String, Device response.
template_file: File object, template to parse with.
Returns:
TextTable containing command output.
Raises:
CliTableError: A template was not found for the given command.
### Response:
def _ParseCmdItem(self, cmd_input, template_file=None):
"""Creates Texttable with output of command.
Args:
cmd_input: String, Device response.
template_file: File object, template to parse with.
Returns:
TextTable containing command output.
Raises:
CliTableError: A template was not found for the given command.
"""
# Build FSM machine from the template.
fsm = textfsm.TextFSM(template_file)
if not self._keys:
self._keys = set(fsm.GetValuesByAttrib('Key'))
# Pass raw data through FSM.
table = texttable.TextTable()
table.header = fsm.header
# Fill TextTable from record entries.
for record in fsm.ParseText(cmd_input):
table.Append(record)
return table |
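For illustration, the underlying textfsm machinery can be exercised directly; the template below is a made-up example rather than one shipped with the library:

import io
import textfsm

template = io.StringIO(
    'Value Key Interface (\\S+)\n'
    'Value Status (up|down)\n'
    '\n'
    'Start\n'
    '  ^${Interface}\\s+${Status} -> Record\n')
fsm = textfsm.TextFSM(template)
print(fsm.header)                             # ['Interface', 'Status']
print(fsm.ParseText('eth0 up\neth1 down\n'))  # [['eth0', 'up'], ['eth1', 'down']]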
def tag(value):
"""
Add a tag with generated id.
:param value: everything working with the str() function
"""
rdict = load_feedback()
tests = rdict.setdefault("tests", {})
tests["*auto-tag-" + str(hash(str(value)))] = str(value)
save_feedback(rdict) | Add a tag with generated id.
:param value: everything working with the str() function | Below is the instruction that describes the task:
### Input:
Add a tag with generated id.
:param value: everything working with the str() function
### Response:
def tag(value):
"""
Add a tag with generated id.
:param value: everything working with the str() function
"""
rdict = load_feedback()
tests = rdict.setdefault("tests", {})
tests["*auto-tag-" + str(hash(str(value)))] = str(value)
save_feedback(rdict) |
def check_state(self, pair, state):
"""
Updates the state of a check.
"""
self.__log_info('Check %s %s -> %s', pair, pair.state, state)
pair.state = state | Updates the state of a check. | Below is the instruction that describes the task:
### Input:
Updates the state of a check.
### Response:
def check_state(self, pair, state):
"""
Updates the state of a check.
"""
self.__log_info('Check %s %s -> %s', pair, pair.state, state)
pair.state = state |
def get_existing_thumbnail(self, thumbnail_options, high_resolution=False):
"""
Return a ``ThumbnailFile`` containing an existing thumbnail for a set
of thumbnail options, or ``None`` if not found.
"""
thumbnail_options = self.get_options(thumbnail_options)
names = [
self.get_thumbnail_name(
thumbnail_options, transparent=False,
high_resolution=high_resolution)]
transparent_name = self.get_thumbnail_name(
thumbnail_options, transparent=True,
high_resolution=high_resolution)
if transparent_name not in names:
names.append(transparent_name)
for filename in names:
exists = self.thumbnail_exists(filename)
if exists:
thumbnail_file = ThumbnailFile(
name=filename, storage=self.thumbnail_storage,
thumbnail_options=thumbnail_options)
if settings.THUMBNAIL_CACHE_DIMENSIONS:
# If this wasn't local storage, exists will be a thumbnail
# instance so we can store the image dimensions now to save
# a future potential query.
thumbnail_file.set_image_dimensions(exists)
return thumbnail_file | Return a ``ThumbnailFile`` containing an existing thumbnail for a set
of thumbnail options, or ``None`` if not found. | Below is the instruction that describes the task:
### Input:
Return a ``ThumbnailFile`` containing an existing thumbnail for a set
of thumbnail options, or ``None`` if not found.
### Response:
def get_existing_thumbnail(self, thumbnail_options, high_resolution=False):
"""
Return a ``ThumbnailFile`` containing an existing thumbnail for a set
of thumbnail options, or ``None`` if not found.
"""
thumbnail_options = self.get_options(thumbnail_options)
names = [
self.get_thumbnail_name(
thumbnail_options, transparent=False,
high_resolution=high_resolution)]
transparent_name = self.get_thumbnail_name(
thumbnail_options, transparent=True,
high_resolution=high_resolution)
if transparent_name not in names:
names.append(transparent_name)
for filename in names:
exists = self.thumbnail_exists(filename)
if exists:
thumbnail_file = ThumbnailFile(
name=filename, storage=self.thumbnail_storage,
thumbnail_options=thumbnail_options)
if settings.THUMBNAIL_CACHE_DIMENSIONS:
# If this wasn't local storage, exists will be a thumbnail
# instance so we can store the image dimensions now to save
# a future potential query.
thumbnail_file.set_image_dimensions(exists)
return thumbnail_file |
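A usage sketch with easy-thumbnails in a Django project (`profile.photo` stands in for any ImageField):

from easy_thumbnails.files import get_thumbnailer

options = {'size': (100, 100), 'crop': True}
thumbnailer = get_thumbnailer(profile.photo)
thumb = thumbnailer.get_existing_thumbnail(options)
if thumb is None:
    # Nothing cached yet; generate and store the thumbnail.
    thumb = thumbnailer.get_thumbnail(options)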
def launch_background_job(self, job, on_error=None, on_success=None):
"""Launch the callable job in background thread.
Succes or failure are controlled by on_error and on_success
"""
if not self.main.mode_online:
self.sortie_erreur_GUI(
"Local mode activated. Can't run background task !")
self.reset()
return
on_error = on_error or self.sortie_erreur_GUI
on_success = on_success or self.sortie_standard_GUI
def thread_end(r):
on_success(r)
self.update()
def thread_error(r):
on_error(r)
self.reset()
logging.info(
f"Launching background task from interface {self.__class__.__name__} ...")
th = threads.worker(job, thread_error, thread_end)
self._add_thread(th) | Launch the callable job in background thread.
Success or failure are controlled by on_error and on_success | Below is the instruction that describes the task:
### Input:
Launch the callable job in background thread.
Success or failure are controlled by on_error and on_success
### Response:
def launch_background_job(self, job, on_error=None, on_success=None):
"""Launch the callable job in background thread.
Success or failure are controlled by on_error and on_success
"""
if not self.main.mode_online:
self.sortie_erreur_GUI(
"Local mode activated. Can't run background task !")
self.reset()
return
on_error = on_error or self.sortie_erreur_GUI
on_success = on_success or self.sortie_standard_GUI
def thread_end(r):
on_success(r)
self.update()
def thread_error(r):
on_error(r)
self.reset()
logging.info(
f"Launching background task from interface {self.__class__.__name__} ...")
th = threads.worker(job, thread_error, thread_end)
self._add_thread(th) |
def clear_trace_filter_cache():
'''
Clear the trace filter cache.
Call this after reloading.
'''
global should_trace_hook
try:
# Need to temporarily disable a hook because otherwise
# _filename_to_ignored_lines.clear() will never complete.
old_hook = should_trace_hook
should_trace_hook = None
# Clear the linecache
linecache.clearcache()
_filename_to_ignored_lines.clear()
finally:
should_trace_hook = old_hook | Clear the trace filter cache.
Call this after reloading. | Below is the instruction that describes the task:
### Input:
Clear the trace filter cache.
Call this after reloading.
### Response:
def clear_trace_filter_cache():
'''
Clear the trace filter cache.
Call this after reloading.
'''
global should_trace_hook
try:
# Need to temporarily disable a hook because otherwise
# _filename_to_ignored_lines.clear() will never complete.
old_hook = should_trace_hook
should_trace_hook = None
# Clear the linecache
linecache.clearcache()
_filename_to_ignored_lines.clear()
finally:
should_trace_hook = old_hook |
def remove_all(self, key):
"""
Transactional implementation of :func:`MultiMap.remove_all(key)
<hazelcast.proxy.multi_map.MultiMap.remove_all>`
:param key: (object), the key of the entries to remove.
:return: (list), the collection of the values associated with the key.
"""
check_not_none(key, "key can't be none")
return self._encode_invoke(transactional_multi_map_remove_codec, key=self._to_data(key)) | Transactional implementation of :func:`MultiMap.remove_all(key)
<hazelcast.proxy.multi_map.MultiMap.remove_all>`
:param key: (object), the key of the entries to remove.
:return: (list), the collection of the values associated with the key. | Below is the instruction that describes the task:
### Input:
Transactional implementation of :func:`MultiMap.remove_all(key)
<hazelcast.proxy.multi_map.MultiMap.remove_all>`
:param key: (object), the key of the entries to remove.
:return: (list), the collection of the values associated with the key.
### Response:
def remove_all(self, key):
"""
Transactional implementation of :func:`MultiMap.remove_all(key)
<hazelcast.proxy.multi_map.MultiMap.remove_all>`
:param key: (object), the key of the entries to remove.
:return: (list), the collection of the values associated with the key.
"""
check_not_none(key, "key can't be none")
return self._encode_invoke(transactional_multi_map_remove_codec, key=self._to_data(key)) |
def pretty_print_counters(counters):
"""print counters hierarchically.
Each counter is a pair of a string and a number.
The string can have slashes, meaning that the number also counts towards
each prefix. e.g. "parameters/trainable" counts towards both "parameters"
and "parameters/trainable".
Args:
counters: a list of (string, number) pairs
Returns:
a string
"""
totals = collections.defaultdict(int)
for (name, val) in counters:
prefixes = [name[:i] for i in xrange(len(name)) if name[i] == "/"] + [name]
for p in prefixes:
totals[p] += val
parts = []
for name, val in sorted(six.iteritems(totals)):
parts.append(" " * name.count("/") + "%s: %.3g" % (name, val))
return "\n".join(parts) | print counters hierarchically.
Each counter is a pair of a string and a number.
The string can have slashes, meaning that the number also counts towards
each prefix. e.g. "parameters/trainable" counts towards both "parameters"
and "parameters/trainable".
Args:
counters: a list of (string, number) pairs
Returns:
a string | Below is the instruction that describes the task:
### Input:
print counters hierarchically.
Each counter is a pair of a string and a number.
The string can have slashes, meaning that the number also counts towards
each prefix. e.g. "parameters/trainable" counts towards both "parameters"
and "parameters/trainable".
Args:
counters: a list of (string, number) pairs
Returns:
a string
### Response:
def pretty_print_counters(counters):
"""print counters hierarchically.
Each counter is a pair of a string and a number.
The string can have slashes, meaning that the number also counts towards
each prefix. e.g. "parameters/trainable" counts towards both "parameters"
and "parameters/trainable".
Args:
counters: a list of (string, number) pairs
Returns:
a string
"""
totals = collections.defaultdict(int)
for (name, val) in counters:
prefixes = [name[:i] for i in xrange(len(name)) if name[i] == "/"] + [name]
for p in prefixes:
totals[p] += val
parts = []
for name, val in sorted(six.iteritems(totals)):
parts.append(" " * name.count("/") + "%s: %.3g" % (name, val))
return "\n".join(parts) |
def fetch_reference_restriction(self, ):
"""Fetch whether referencing is restricted
:returns: True, if referencing is restricted
:rtype: :class:`bool`
:raises: None
"""
inter = self.get_refobjinter()
restricted = self.status() is not None
return restricted or inter.fetch_action_restriction(self, 'reference') | Fetch whether referencing is restricted
:returns: True, if referencing is restricted
:rtype: :class:`bool`
:raises: None | Below is the instruction that describes the task:
### Input:
Fetch whether referencing is restricted
:returns: True, if referencing is restricted
:rtype: :class:`bool`
:raises: None
### Response:
def fetch_reference_restriction(self, ):
"""Fetch whether referencing is restricted
:returns: True, if referencing is restricted
:rtype: :class:`bool`
:raises: None
"""
inter = self.get_refobjinter()
restricted = self.status() is not None
return restricted or inter.fetch_action_restriction(self, 'reference') |
def whoami(self,
note=None,
loglevel=logging.DEBUG):
"""Returns the current user by executing "whoami".
@param note: See send()
@return: the output of "whoami"
@rtype: string
"""
shutit = self.shutit
shutit.handle_note(note)
res = self.send_and_get_output(' command whoami',
echo=False,
loglevel=loglevel).strip()
if res == '':
res = self.send_and_get_output(' command id -u -n',
echo=False,
loglevel=loglevel).strip()
shutit.handle_note_after(note=note)
return res | Returns the current user by executing "whoami".
@param note: See send()
@return: the output of "whoami"
@rtype: string | Below is the instruction that describes the task:
### Input:
Returns the current user by executing "whoami".
@param note: See send()
@return: the output of "whoami"
@rtype: string
### Response:
def whoami(self,
note=None,
loglevel=logging.DEBUG):
"""Returns the current user by executing "whoami".
@param note: See send()
@return: the output of "whoami"
@rtype: string
"""
shutit = self.shutit
shutit.handle_note(note)
res = self.send_and_get_output(' command whoami',
echo=False,
loglevel=loglevel).strip()
if res == '':
res = self.send_and_get_output(' command id -u -n',
echo=False,
loglevel=loglevel).strip()
shutit.handle_note_after(note=note)
return res |
def _filesystems(config='/etc/filesystems', leading_key=True):
'''
Return the contents of the filesystems in an OrderedDict
config
File containing filesystem information
leading_key
True return dictionary keyed by 'name' and value as dictionary with other keys, values (name excluded)
OrderedDict({ '/dir' : OrderedDict({'dev': '/dev/hd8', .... }}))
False return dictionary keyed by 'name' and value as dictionary with all keys, values (name included)
OrderedDict({ '/dir' : OrderedDict({'name': '/dir', 'dev': '/dev/hd8', ... })})
'''
ret = OrderedDict()
lines = []
parsing_block = False
if not os.path.isfile(config) or 'AIX' not in __grains__['kernel']:
return ret
# read in block of filesystems, block starts with '/' till empty line
with salt.utils.files.fopen(config) as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line)
# skip till first entry
if not line.startswith('/') and not parsing_block:
continue
if line.startswith('/'):
parsing_block = True
lines.append(line)
elif not line.split():
parsing_block = False
try:
entry = _FileSystemsEntry.dict_from_lines(
lines,
_FileSystemsEntry.compatibility_keys)
lines = []
if 'opts' in entry:
entry['opts'] = entry['opts'].split(',')
while entry['name'] in ret:
entry['name'] += '_'
if leading_key:
ret[entry.pop('name')] = entry
else:
ret[entry['name']] = entry
except _FileSystemsEntry.ParseError:
pass
else:
lines.append(line)
return ret | Return the contents of the filesystems in an OrderedDict
config
File containing filesystem information
leading_key
True return dictionary keyed by 'name' and value as dictionary with other keys, values (name excluded)
OrderedDict({ '/dir' : OrderedDict({'dev': '/dev/hd8', .... }}))
False return dictionary keyed by 'name' and value as dictionary with all keys, values (name included)
OrderedDict({ '/dir' : OrderedDict({'name': '/dir', 'dev': '/dev/hd8', ... })}) | Below is the instruction that describes the task:
### Input:
Return the contents of the filesystems in an OrderedDict
config
File containing filesystem information
leading_key
True return dictionary keyed by 'name' and value as dictionary with other keys, values (name excluded)
OrderedDict({ '/dir' : OrderedDict({'dev': '/dev/hd8', .... }}))
False return dictionary keyed by 'name' and value as dictionary with all keys, values (name included)
OrderedDict({ '/dir' : OrderedDict({'name': '/dir', 'dev': '/dev/hd8', ... })})
### Response:
def _filesystems(config='/etc/filesystems', leading_key=True):
'''
Return the contents of the filesystems in an OrderedDict
config
File containing filesystem information
leading_key
True return dictionary keyed by 'name' and value as dictionary with other keys, values (name excluded)
OrderedDict({ '/dir' : OrderedDict({'dev': '/dev/hd8', .... }}))
False return dictionary keyed by 'name' and value as dictionary with all keys, values (name included)
OrderedDict({ '/dir' : OrderedDict({'name': '/dir', 'dev': '/dev/hd8', ... })})
'''
ret = OrderedDict()
lines = []
parsing_block = False
if not os.path.isfile(config) or 'AIX' not in __grains__['kernel']:
return ret
# read in block of filesystems, block starts with '/' till empty line
with salt.utils.files.fopen(config) as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line)
# skip till first entry
if not line.startswith('/') and not parsing_block:
continue
if line.startswith('/'):
parsing_block = True
lines.append(line)
elif not line.split():
parsing_block = False
try:
entry = _FileSystemsEntry.dict_from_lines(
lines,
_FileSystemsEntry.compatibility_keys)
lines = []
if 'opts' in entry:
entry['opts'] = entry['opts'].split(',')
while entry['name'] in ret:
entry['name'] += '_'
if leading_key:
ret[entry.pop('name')] = entry
else:
ret[entry['name']] = entry
except _FileSystemsEntry.ParseError:
pass
else:
lines.append(line)
return ret |
async def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
:param procname: ``str``, name of procedure to execute on server
:param args: sequence of parameters to use with procedure
:returns: the original args.
"""
conn = self._get_db()
if self._echo:
logger.info("CALL %s", procname)
logger.info("%r", args)
for index, arg in enumerate(args):
q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg))
await self._query(q)
await self.nextset()
_args = ','.join('@_%s_%d' % (procname, i) for i in range(len(args)))
q = "CALL %s(%s)" % (procname, _args)
await self._query(q)
self._executed = q
return args | Execute stored procedure procname with args
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
:param procname: ``str``, name of procedure to execute on server
:param args: sequence of parameters to use with procedure
:returns: the original args. | Below is the instruction that describes the task:
### Input:
Execute stored procedure procname with args
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
:param procname: ``str``, name of procedure to execute on server
:param args: sequence of parameters to use with procedure
:returns: the original args.
### Response:
async def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
:param procname: ``str``, name of procedure to execute on server
:param args: sequence of parameters to use with procedure
:returns: the original args.
"""
conn = self._get_db()
if self._echo:
logger.info("CALL %s", procname)
logger.info("%r", args)
for index, arg in enumerate(args):
q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg))
await self._query(q)
await self.nextset()
_args = ','.join('@_%s_%d' % (procname, i) for i in range(len(args)))
q = "CALL %s(%s)" % (procname, _args)
await self._query(q)
self._executed = q
return args |
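A usage sketch, to be run inside a coroutine, following the OUT-parameter protocol described above; the aiomysql-style cursor `cur` and the procedure add_user(IN name, OUT new_id) are illustrative:

await cur.callproc('add_user', ('alice', 0))
await cur.nextset()                       # drain the procedure's result sets
await cur.execute('SELECT @_add_user_1')  # OUT params live in @_procname_n
row = await cur.fetchone()
new_id = row[0]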
def indent(self, levels, first_line=None):
"""Increase indentation by ``levels`` levels."""
self._indentation_levels.append(levels)
self._indent_first_line.append(first_line) | Increase indentation by ``levels`` levels. | Below is the instruction that describes the task:
### Input:
Increase indentation by ``levels`` levels.
### Response:
def indent(self, levels, first_line=None):
"""Increase indentation by ``levels`` levels."""
self._indentation_levels.append(levels)
self._indent_first_line.append(first_line) |
def update(self, level):
"""Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
"""
sl = level + 1
# add sl frames to the scope starting with the
# most distant and overwriting with more current
# makes sure that we can capture variable scope
stack = inspect.stack()
try:
self._get_vars(stack[:sl], scopes=['locals'])
finally:
del stack[:], stack | Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None | Below is the instruction that describes the task:
### Input:
Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
### Response:
def update(self, level):
"""Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
"""
sl = level + 1
# add sl frames to the scope starting with the
# most distant and overwriting with more current
# makes sure that we can capture variable scope
stack = inspect.stack()
try:
self._get_vars(stack[:sl], scopes=['locals'])
finally:
del stack[:], stack |
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the PyOS_InputHook for PyQt4, which allows
the PyQt4 to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from IPython.lib.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app | Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the PyOS_InputHook for PyQt4, which allows
the PyQt4 to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv) | Below is the instruction that describes the task:
### Input:
Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the PyOS_InputHook for PyQt4, which allows
the PyQt4 to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
### Response:
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the PyOS_InputHook for PyQt4, which allows
the PyQt4 to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from IPython.lib.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app |
def links_to(self, other, tpe):
"""
adds a link from this thing to other thing
using type (is_a, has_a, uses, contains, part_of)
"""
if self.check_type(tpe):
self.links.append([other, tpe])
else:
raise Exception('aikif.core_data cannot process this object type') | adds a link from this thing to other thing
using type (is_a, has_a, uses, contains, part_of) | Below is the instruction that describes the task:
### Input:
adds a link from this thing to other thing
using type (is_a, has_a, uses, contains, part_of)
### Response:
def links_to(self, other, tpe):
"""
adds a link from this thing to other thing
using type (is_a, has_a, uses, contains, part_of)
"""
if self.check_type(tpe):
self.links.append([other, tpe])
else:
raise Exception('aikif.core_data cannot process this object type') |
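A minimal sketch, assuming `dog` and `animal` are aikif core_data objects exposing this method:

dog.links_to(animal, 'is_a')        # valid type, appended to dog.links
dog.links_to(animal, 'bogus_type')  # raises Exception from check_type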
def catch_gzip_errors(f):
"""
A decorator to handle gzip encoding errors which have been known to
happen during hydration.
"""
def new_f(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except requests.exceptions.ContentDecodingError as e:
log.warning("caught gzip error: %s", e)
self.connect()
return f(self, *args, **kwargs)
return new_f | A decorator to handle gzip encoding errors which have been known to
happen during hydration. | Below is the instruction that describes the task:
### Input:
A decorator to handle gzip encoding errors which have been known to
happen during hydration.
### Response:
def catch_gzip_errors(f):
"""
A decorator to handle gzip encoding errors which have been known to
happen during hydration.
"""
def new_f(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except requests.exceptions.ContentDecodingError as e:
log.warning("caught gzip error: %s", e)
self.connect()
return f(self, *args, **kwargs)
return new_f |
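For illustration, how the decorator might wrap an instance method; the Hydrator class is hypothetical but honors the self.connect() contract the decorator relies on:

import requests

class Hydrator:
    def connect(self):
        # Rebuild the HTTP session after a decoding failure.
        self.session = requests.Session()

    @catch_gzip_errors
    def fetch(self, url):
        return self.session.get(url).json()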
def yVal_xml(self):
"""
Return the ``<c:yVal>`` element for this series as unicode text. This
element contains the Y values for this series.
"""
return self._yVal_tmpl.format(**{
'nsdecls': '',
'numRef_xml': self.numRef_xml(
self._series.y_values_ref, self._series.number_format,
self._series.y_values
),
}) | Return the ``<c:yVal>`` element for this series as unicode text. This
element contains the Y values for this series. | Below is the instruction that describes the task:
### Input:
Return the ``<c:yVal>`` element for this series as unicode text. This
element contains the Y values for this series.
### Response:
def yVal_xml(self):
"""
Return the ``<c:yVal>`` element for this series as unicode text. This
element contains the Y values for this series.
"""
return self._yVal_tmpl.format(**{
'nsdecls': '',
'numRef_xml': self.numRef_xml(
self._series.y_values_ref, self._series.number_format,
self._series.y_values
),
}) |
def get_user_matches(session, user_id, from_timestamp=None, limit=None):
"""Get recent matches by user."""
return get_recent_matches(session, '{}{}/{}/Matches/games/matches/user/{}/0'.format(
session.auth.base_url, PROFILE_URL, user_id, user_id), from_timestamp, limit) | Get recent matches by user. | Below is the instruction that describes the task:
### Input:
Get recent matches by user.
### Response:
def get_user_matches(session, user_id, from_timestamp=None, limit=None):
"""Get recent matches by user."""
return get_recent_matches(session, '{}{}/{}/Matches/games/matches/user/{}/0'.format(
session.auth.base_url, PROFILE_URL, user_id, user_id), from_timestamp, limit) |
def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
"""
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
"""
return target.throw(*a) | if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb) | Below is the instruction that describes the task:
### Input:
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
### Response:
def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
"""
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
"""
return target.throw(*a) |
def bat_to_sh(file_path):
"""Convert honeybee .bat file to .sh file.
WARNING: This is a very simple function and doesn't handle any edge cases.
"""
sh_file = file_path[:-4] + '.sh'
with open(file_path, 'rb') as inf, open(sh_file, 'wb') as outf:
outf.write('#!/usr/bin/env bash\n\n')
for line in inf:
# pass the path lines, etc to get to the commands
if line.strip():
continue
else:
break
for line in inf:
if line.startswith('echo'):
continue
modified_line = line.replace('c:\\radiance\\bin\\', '').replace('\\', '/')
outf.write(modified_line)
print('bash file is created at:\n\t%s' % sh_file)
# Heroku - Make command.sh executable
st = os.stat(sh_file)
os.chmod(sh_file, st.st_mode | 0o111)
return sh_file | Convert honeybee .bat file to .sh file.
WARNING: This is a very simple function and doesn't handle any edge cases. | Below is the instruction that describes the task:
### Input:
Convert honeybee .bat file to .sh file.
WARNING: This is a very simple function and doesn't handle any edge cases.
### Response:
def bat_to_sh(file_path):
"""Convert honeybee .bat file to .sh file.
WARNING: This is a very simple function and doesn't handle any edge cases.
"""
sh_file = file_path[:-4] + '.sh'
with open(file_path, 'rb') as inf, open(sh_file, 'wb') as outf:
outf.write('#!/usr/bin/env bash\n\n')
for line in inf:
# pass the path lines, etc to get to the commands
if line.strip():
continue
else:
break
for line in inf:
if line.startswith('echo'):
continue
modified_line = line.replace('c:\\radiance\\bin\\', '').replace('\\', '/')
outf.write(modified_line)
print('bash file is created at:\n\t%s' % sh_file)
# Heroku - Make command.sh executable
st = os.stat(sh_file)
os.chmod(sh_file, st.st_mode | 0o111)
return sh_file |
def operations(nsteps):
'''Returns the number of operations needed for nsteps of GMRES'''
return {'A': 1 + nsteps,
'M': 2 + nsteps,
'Ml': 2 + nsteps,
'Mr': 1 + nsteps,
'ip_B': 2 + nsteps + nsteps*(nsteps+1)/2,
'axpy': 4 + 2*nsteps + nsteps*(nsteps+1)/2
} | Returns the number of operations needed for nsteps of GMRES | Below is the instruction that describes the task:
### Input:
Returns the number of operations needed for nsteps of GMRES
### Response:
def operations(nsteps):
'''Returns the number of operations needed for nsteps of GMRES'''
return {'A': 1 + nsteps,
'M': 2 + nsteps,
'Ml': 2 + nsteps,
'Mr': 1 + nsteps,
'ip_B': 2 + nsteps + nsteps*(nsteps+1)/2,
'axpy': 4 + 2*nsteps + nsteps*(nsteps+1)/2
} |
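For example, ten GMRES steps cost (Python 3's true division yields floats for the triangular terms):

>>> operations(10)
{'A': 11, 'M': 12, 'Ml': 12, 'Mr': 11, 'ip_B': 67.0, 'axpy': 79.0}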
def _run_select_solution(ploidy_outdirs, work_dir, data):
"""Select optimal
"""
out_file = os.path.join(work_dir, "optimalClusters.txt")
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
ploidy_inputs = " ".join(["--ploidyRun%s=%s" % (p, d) for p, d in ploidy_outdirs])
cmd = "titanCNA_selectSolution.R {ploidy_inputs} --outFile={tx_out_file}"
do.run(cmd.format(**locals()), "TitanCNA: select optimal solution")
return out_file | Select optimal | Below is the the instruction that describes the task:
### Input:
Select optimal
### Response:
def _run_select_solution(ploidy_outdirs, work_dir, data):
"""Select optimal
"""
out_file = os.path.join(work_dir, "optimalClusters.txt")
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
ploidy_inputs = " ".join(["--ploidyRun%s=%s" % (p, d) for p, d in ploidy_outdirs])
cmd = "titanCNA_selectSolution.R {ploidy_inputs} --outFile={tx_out_file}"
do.run(cmd.format(**locals()), "TitanCNA: select optimal solution")
return out_file |
def getFileAndName(self, *args, **kwargs):
'''
Given a requested page (note: the arguments for this call are forwarded to getpage()),
return the content at the target URL and the filename for the target content as a
2-tuple (pgctnt, hName) for the content at the target URL.
The filename specified in the content-disposition header is used, if present. Otherwise,
the last section of the url path segment is treated as the filename.
'''
pgctnt, hName, mime = self.getFileNameMime(*args, **kwargs)
return pgctnt, hName | Given a requested page (note: the arguments for this call are forwarded to getpage()),
return the content at the target URL and the filename for the target content as a
2-tuple (pgctnt, hName) for the content at the target URL.
The filename specified in the content-disposition header is used, if present. Otherwise,
the last section of the url path segment is treated as the filename. | Below is the instruction that describes the task:
### Input:
Given a requested page (note: the arguments for this call are forwarded to getpage()),
return the content at the target URL and the filename for that content as a
2-tuple (pgctnt, hName).
The filename specified in the content-disposition header is used, if present. Otherwise,
the last section of the url path segment is treated as the filename.
### Response:
def getFileAndName(self, *args, **kwargs):
'''
    Given a requested page (note: the arguments for this call are forwarded to getpage()),
    return the content at the target URL and the filename for that content as a
    2-tuple (pgctnt, hName).
The filename specified in the content-disposition header is used, if present. Otherwise,
the last section of the url path segment is treated as the filename.
'''
pgctnt, hName, mime = self.getFileNameMime(*args, **kwargs)
return pgctnt, hName |
def open_recruitment(self, n=1):
"""Return initial experiment URL list.
"""
logger.info("Multi recruitment running for {} participants".format(n))
recruitments = []
messages = {}
remaining = n
for recruiter, count in self.recruiters(n):
if not count:
break
if recruiter.nickname in messages:
result = recruiter.recruit(count)
recruitments.extend(result)
else:
result = recruiter.open_recruitment(count)
recruitments.extend(result["items"])
messages[recruiter.nickname] = result["message"]
remaining -= count
if remaining <= 0:
break
logger.info(
(
"Multi-recruited {} out of {} participants, " "using {} recruiters."
).format(n - remaining, n, len(messages))
)
return {"items": recruitments, "message": "\n".join(messages.values())} | Return initial experiment URL list. | Below is the the instruction that describes the task:
### Input:
Return initial experiment URL list.
### Response:
def open_recruitment(self, n=1):
"""Return initial experiment URL list.
"""
logger.info("Multi recruitment running for {} participants".format(n))
recruitments = []
messages = {}
remaining = n
for recruiter, count in self.recruiters(n):
if not count:
break
if recruiter.nickname in messages:
result = recruiter.recruit(count)
recruitments.extend(result)
else:
result = recruiter.open_recruitment(count)
recruitments.extend(result["items"])
messages[recruiter.nickname] = result["message"]
remaining -= count
if remaining <= 0:
break
logger.info(
(
"Multi-recruited {} out of {} participants, " "using {} recruiters."
).format(n - remaining, n, len(messages))
)
return {"items": recruitments, "message": "\n".join(messages.values())} |
def muc(clusters, mention_to_gold):
"""
Counts the mentions in each predicted cluster which need to be re-allocated in
order for each predicted cluster to be contained by the respective gold cluster.
<http://aclweb.org/anthology/M/M95/M95-1005.pdf>
"""
true_p, all_p = 0, 0
for cluster in clusters:
all_p += len(cluster) - 1
true_p += len(cluster)
linked = set()
for mention in cluster:
if mention in mention_to_gold:
linked.add(mention_to_gold[mention])
else:
true_p -= 1
true_p -= len(linked)
return true_p, all_p | Counts the mentions in each predicted cluster which need to be re-allocated in
order for each predicted cluster to be contained by the respective gold cluster.
<http://aclweb.org/anthology/M/M95/M95-1005.pdf> | Below is the instruction that describes the task:
### Input:
Counts the mentions in each predicted cluster which need to be re-allocated in
order for each predicted cluster to be contained by the respective gold cluster.
<http://aclweb.org/anthology/M/M95/M95-1005.pdf>
### Response:
def muc(clusters, mention_to_gold):
"""
Counts the mentions in each predicted cluster which need to be re-allocated in
order for each predicted cluster to be contained by the respective gold cluster.
<http://aclweb.org/anthology/M/M95/M95-1005.pdf>
"""
true_p, all_p = 0, 0
for cluster in clusters:
all_p += len(cluster) - 1
true_p += len(cluster)
linked = set()
for mention in cluster:
if mention in mention_to_gold:
linked.add(mention_to_gold[mention])
else:
true_p -= 1
true_p -= len(linked)
return true_p, all_p |
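A small worked example for the record above; the mention spans and gold labels are invented:
clusters = [[(0, 1), (2, 3), (5, 6)]]                      # one predicted cluster
mention_to_gold = {(0, 1): 'g1', (2, 3): 'g1', (5, 6): 'g2'}
true_p, all_p = muc(clusters, mention_to_gold)
assert (true_p, all_p) == (1, 2)  # 2 predicted links, only 1 survives the gold split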
def select_window(pymux, variables):
"""
Select a window. E.g: select-window -t :3
"""
window_id = variables['<target-window>']
def invalid_window():
raise CommandException('Invalid window: %s' % window_id)
if window_id.startswith(':'):
try:
number = int(window_id[1:])
except ValueError:
invalid_window()
else:
w = pymux.arrangement.get_window_by_index(number)
if w:
pymux.arrangement.set_active_window(w)
else:
invalid_window()
else:
        invalid_window() | Select a window. E.g: select-window -t :3 | Below is the instruction that describes the task:
### Input:
Select a window. E.g: select-window -t :3
### Response:
def select_window(pymux, variables):
"""
Select a window. E.g: select-window -t :3
"""
window_id = variables['<target-window>']
def invalid_window():
raise CommandException('Invalid window: %s' % window_id)
if window_id.startswith(':'):
try:
number = int(window_id[1:])
except ValueError:
invalid_window()
else:
w = pymux.arrangement.get_window_by_index(number)
if w:
pymux.arrangement.set_active_window(w)
else:
invalid_window()
else:
invalid_window() |
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling functionality per group.
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs) | Return a rolling grouper, providing rolling functionality per group. | Below is the the instruction that describes the task:
### Input:
Return a rolling grouper, providing rolling functionality per group.
### Response:
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling functionality per group.
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs) |
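A usage sketch with pandas (the column names are invented for illustration):
import pandas as pd
df = pd.DataFrame({'key': list('aabb'), 'x': [1.0, 2.0, 3.0, 4.0]})
rolled = df.groupby('key').rolling(window=2).mean()
# per-group windows: 'a' -> NaN, 1.5 and 'b' -> NaN, 3.5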
def get_search_results(portal_type=None, uid=None, **kw):
"""Search the catalog and return the results
:returns: Catalog search results
:rtype: iterable
"""
    # If we have a UID, return the object immediately
if uid is not None:
logger.info("UID '%s' found, returning the object immediately" % uid)
return u.to_list(get_object_by_uid(uid))
    # allow searching for the Plone Site with portal_type
include_portal = False
if u.to_string(portal_type) == "Plone Site":
include_portal = True
# The request may contain a list of portal_types, e.g.
# `?portal_type=Document&portal_type=Plone Site`
if "Plone Site" in u.to_list(req.get("portal_type")):
include_portal = True
# Build and execute a catalog query
results = search(portal_type=portal_type, uid=uid, **kw)
if include_portal:
results = list(results) + u.to_list(get_portal())
return results | Search the catalog and return the results
:returns: Catalog search results
:rtype: iterable | Below is the instruction that describes the task:
### Input:
Search the catalog and return the results
:returns: Catalog search results
:rtype: iterable
### Response:
def get_search_results(portal_type=None, uid=None, **kw):
"""Search the catalog and return the results
:returns: Catalog search results
:rtype: iterable
"""
    # If we have a UID, return the object immediately
if uid is not None:
logger.info("UID '%s' found, returning the object immediately" % uid)
return u.to_list(get_object_by_uid(uid))
    # allow searching for the Plone Site with portal_type
include_portal = False
if u.to_string(portal_type) == "Plone Site":
include_portal = True
# The request may contain a list of portal_types, e.g.
# `?portal_type=Document&portal_type=Plone Site`
if "Plone Site" in u.to_list(req.get("portal_type")):
include_portal = True
# Build and execute a catalog query
results = search(portal_type=portal_type, uid=uid, **kw)
if include_portal:
results = list(results) + u.to_list(get_portal())
return results |
def record_results(self, results):
"""
Record the results of this experiment, by updating the tag.
:param results: A dictionary containing the results of the experiment.
:type results: dict
"""
repository = Repo(self.__repository_directory, search_parent_directories=True)
for tag in repository.tags:
if tag.name == self.__tag_name:
tag_object = tag
break
else:
raise Exception("Experiment tag has been deleted since experiment started")
data = json.loads(tag_object.tag.message)
data["results"] = results
TagReference.create(repository, self.__tag_name, message=json.dumps(data),
ref=tag_object.tag.object, force=True)
self.__results_recorded = True | Record the results of this experiment, by updating the tag.
:param results: A dictionary containing the results of the experiment.
:type results: dict | Below is the instruction that describes the task:
### Input:
Record the results of this experiment, by updating the tag.
:param results: A dictionary containing the results of the experiment.
:type results: dict
### Response:
def record_results(self, results):
"""
Record the results of this experiment, by updating the tag.
:param results: A dictionary containing the results of the experiment.
:type results: dict
"""
repository = Repo(self.__repository_directory, search_parent_directories=True)
for tag in repository.tags:
if tag.name == self.__tag_name:
tag_object = tag
break
else:
raise Exception("Experiment tag has been deleted since experiment started")
data = json.loads(tag_object.tag.message)
data["results"] = results
TagReference.create(repository, self.__tag_name, message=json.dumps(data),
ref=tag_object.tag.object, force=True)
self.__results_recorded = True |
def get_datatype_str(self, element, length):
'''get_datatype_str
High-level api: Produce a string that indicates the data type of a node.
Parameters
----------
element : `Element`
A node in model tree.
length : `int`
String length that has been consumed.
Returns
-------
str
A string that indicates the data type of a node.
'''
spaces = ' '*(self.get_width(element) - length)
type_info = element.get('type')
ret = ''
if type_info == 'anyxml' or type_info == 'anydata':
ret = spaces + '<{}>'.format(type_info)
elif element.get('datatype') is not None:
ret = spaces + element.get('datatype')
if element.get('if-feature') is not None:
return ret + ' {' + element.get('if-feature') + '}?'
else:
return ret | get_datatype_str
High-level api: Produce a string that indicates the data type of a node.
Parameters
----------
element : `Element`
A node in model tree.
length : `int`
String length that has been consumed.
Returns
-------
str
    A string that indicates the data type of a node. | Below is the instruction that describes the task:
### Input:
get_datatype_str
High-level api: Produce a string that indicates the data type of a node.
Parameters
----------
element : `Element`
A node in model tree.
length : `int`
String length that has been consumed.
Returns
-------
str
A string that indicates the data type of a node.
### Response:
def get_datatype_str(self, element, length):
'''get_datatype_str
High-level api: Produce a string that indicates the data type of a node.
Parameters
----------
element : `Element`
A node in model tree.
length : `int`
String length that has been consumed.
Returns
-------
str
A string that indicates the data type of a node.
'''
spaces = ' '*(self.get_width(element) - length)
type_info = element.get('type')
ret = ''
if type_info == 'anyxml' or type_info == 'anydata':
ret = spaces + '<{}>'.format(type_info)
elif element.get('datatype') is not None:
ret = spaces + element.get('datatype')
if element.get('if-feature') is not None:
return ret + ' {' + element.get('if-feature') + '}?'
else:
return ret |
def EnableNetworkInterfaces(
self, interfaces, logger, dhclient_script=None):
"""Enable the list of network interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
"""
interfaces_to_up = [i for i in interfaces if i != 'eth0']
if interfaces_to_up:
logger.info('Enabling the Ethernet interfaces %s.', interfaces_to_up)
self._WriteIfcfg(interfaces_to_up, logger)
self._Ifup(interfaces_to_up, logger) | Enable the list of network interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
  dhclient_script: string, the path to a dhclient script used by dhclient. | Below is the instruction that describes the task:
### Input:
Enable the list of network interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
### Response:
def EnableNetworkInterfaces(
self, interfaces, logger, dhclient_script=None):
"""Enable the list of network interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
"""
interfaces_to_up = [i for i in interfaces if i != 'eth0']
if interfaces_to_up:
logger.info('Enabling the Ethernet interfaces %s.', interfaces_to_up)
self._WriteIfcfg(interfaces_to_up, logger)
self._Ifup(interfaces_to_up, logger) |
def fileopenbox(msg=None, title=None, default='*', filetypes=None, multiple=False):
"""
A dialog to get a file name.
**About the "default" argument**
The "default" argument specifies a filepath that (normally)
contains one or more wildcards.
fileopenbox will display only files that match the default filepath.
If omitted, defaults to "\*" (all files in the current directory).
WINDOWS EXAMPLE::
...default="c:/myjunk/*.py"
will open in directory c:\\myjunk\\ and show all Python files.
WINDOWS EXAMPLE::
...default="c:/myjunk/test*.py"
will open in directory c:\\myjunk\\ and show all Python files
whose names begin with "test".
Note that on Windows, fileopenbox automatically changes the path
separator to the Windows path separator (backslash).
**About the "filetypes" argument**
If specified, it should contain a list of items,
where each item is either:
- a string containing a filemask # e.g. "\*.txt"
- a list of strings, where all of the strings except the last one
are filemasks (each beginning with "\*.",
such as "\*.txt" for text files, "\*.py" for Python files, etc.).
and the last string contains a filetype description
EXAMPLE::
filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ]
.. note:: If the filetypes list does not contain ("All files","*"), it will be added.
If the filetypes list does not contain a filemask that includes
the extension of the "default" argument, it will be added.
For example, if default="\*abc.py"
and no filetypes argument was specified, then
"\*.py" will automatically be added to the filetypes argument.
:param str msg: the msg to be displayed.
:param str title: the window title
:param str default: filepath with wildcards
:param object filetypes: filemasks that a user can choose, e.g. "\*.txt"
:param bool multiple: If true, more than one file can be selected
:return: the name of a file, or None if user chose to cancel
"""
localRoot = Tk()
localRoot.withdraw()
initialbase, initialfile, initialdir, filetypes = fileboxSetup(
default, filetypes)
# ------------------------------------------------------------
# if initialfile contains no wildcards; we don't want an
# initial file. It won't be used anyway.
# Also: if initialbase is simply "*", we don't want an
# initialfile; it is not doing any useful work.
# ------------------------------------------------------------
if (initialfile.find("*") < 0) and (initialfile.find("?") < 0):
initialfile = None
elif initialbase == "*":
initialfile = None
func = ut.tk_FileDialog.askopenfilenames if multiple else ut.tk_FileDialog.askopenfilename
    ret_val = func(parent=localRoot, title=getFileDialogTitle(msg, title),
                   initialdir=initialdir, initialfile=initialfile,
                   filetypes=filetypes)
if multiple:
f = [os.path.normpath(x) for x in localRoot.tk.splitlist(ret_val)]
else:
f = os.path.normpath(ret_val)
localRoot.destroy()
if not f:
return None
return f | A dialog to get a file name.
**About the "default" argument**
The "default" argument specifies a filepath that (normally)
contains one or more wildcards.
fileopenbox will display only files that match the default filepath.
If omitted, defaults to "\*" (all files in the current directory).
WINDOWS EXAMPLE::
...default="c:/myjunk/*.py"
will open in directory c:\\myjunk\\ and show all Python files.
WINDOWS EXAMPLE::
...default="c:/myjunk/test*.py"
will open in directory c:\\myjunk\\ and show all Python files
whose names begin with "test".
Note that on Windows, fileopenbox automatically changes the path
separator to the Windows path separator (backslash).
**About the "filetypes" argument**
If specified, it should contain a list of items,
where each item is either:
- a string containing a filemask # e.g. "\*.txt"
- a list of strings, where all of the strings except the last one
are filemasks (each beginning with "\*.",
such as "\*.txt" for text files, "\*.py" for Python files, etc.).
and the last string contains a filetype description
EXAMPLE::
filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ]
.. note:: If the filetypes list does not contain ("All files","*"), it will be added.
If the filetypes list does not contain a filemask that includes
the extension of the "default" argument, it will be added.
For example, if default="\*abc.py"
and no filetypes argument was specified, then
"\*.py" will automatically be added to the filetypes argument.
:param str msg: the msg to be displayed.
:param str title: the window title
:param str default: filepath with wildcards
:param object filetypes: filemasks that a user can choose, e.g. "\*.txt"
:param bool multiple: If true, more than one file can be selected
:return: the name of a file, or None if user chose to cancel | Below is the instruction that describes the task:
### Input:
A dialog to get a file name.
**About the "default" argument**
The "default" argument specifies a filepath that (normally)
contains one or more wildcards.
fileopenbox will display only files that match the default filepath.
If omitted, defaults to "\*" (all files in the current directory).
WINDOWS EXAMPLE::
...default="c:/myjunk/*.py"
will open in directory c:\\myjunk\\ and show all Python files.
WINDOWS EXAMPLE::
...default="c:/myjunk/test*.py"
will open in directory c:\\myjunk\\ and show all Python files
whose names begin with "test".
Note that on Windows, fileopenbox automatically changes the path
separator to the Windows path separator (backslash).
**About the "filetypes" argument**
If specified, it should contain a list of items,
where each item is either:
- a string containing a filemask # e.g. "\*.txt"
- a list of strings, where all of the strings except the last one
are filemasks (each beginning with "\*.",
such as "\*.txt" for text files, "\*.py" for Python files, etc.).
and the last string contains a filetype description
EXAMPLE::
filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ]
.. note:: If the filetypes list does not contain ("All files","*"), it will be added.
If the filetypes list does not contain a filemask that includes
the extension of the "default" argument, it will be added.
For example, if default="\*abc.py"
and no filetypes argument was specified, then
"\*.py" will automatically be added to the filetypes argument.
:param str msg: the msg to be displayed.
:param str title: the window title
:param str default: filepath with wildcards
:param object filetypes: filemasks that a user can choose, e.g. "\*.txt"
:param bool multiple: If true, more than one file can be selected
:return: the name of a file, or None if user chose to cancel
### Response:
def fileopenbox(msg=None, title=None, default='*', filetypes=None, multiple=False):
"""
A dialog to get a file name.
**About the "default" argument**
The "default" argument specifies a filepath that (normally)
contains one or more wildcards.
fileopenbox will display only files that match the default filepath.
If omitted, defaults to "\*" (all files in the current directory).
WINDOWS EXAMPLE::
...default="c:/myjunk/*.py"
will open in directory c:\\myjunk\\ and show all Python files.
WINDOWS EXAMPLE::
...default="c:/myjunk/test*.py"
will open in directory c:\\myjunk\\ and show all Python files
whose names begin with "test".
Note that on Windows, fileopenbox automatically changes the path
separator to the Windows path separator (backslash).
**About the "filetypes" argument**
If specified, it should contain a list of items,
where each item is either:
- a string containing a filemask # e.g. "\*.txt"
- a list of strings, where all of the strings except the last one
are filemasks (each beginning with "\*.",
such as "\*.txt" for text files, "\*.py" for Python files, etc.).
and the last string contains a filetype description
EXAMPLE::
filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ]
.. note:: If the filetypes list does not contain ("All files","*"), it will be added.
If the filetypes list does not contain a filemask that includes
the extension of the "default" argument, it will be added.
For example, if default="\*abc.py"
and no filetypes argument was specified, then
"\*.py" will automatically be added to the filetypes argument.
:param str msg: the msg to be displayed.
:param str title: the window title
:param str default: filepath with wildcards
:param object filetypes: filemasks that a user can choose, e.g. "\*.txt"
:param bool multiple: If true, more than one file can be selected
:return: the name of a file, or None if user chose to cancel
"""
localRoot = Tk()
localRoot.withdraw()
initialbase, initialfile, initialdir, filetypes = fileboxSetup(
default, filetypes)
# ------------------------------------------------------------
# if initialfile contains no wildcards; we don't want an
# initial file. It won't be used anyway.
# Also: if initialbase is simply "*", we don't want an
# initialfile; it is not doing any useful work.
# ------------------------------------------------------------
if (initialfile.find("*") < 0) and (initialfile.find("?") < 0):
initialfile = None
elif initialbase == "*":
initialfile = None
func = ut.tk_FileDialog.askopenfilenames if multiple else ut.tk_FileDialog.askopenfilename
    ret_val = func(parent=localRoot, title=getFileDialogTitle(msg, title),
                   initialdir=initialdir, initialfile=initialfile,
                   filetypes=filetypes)
if multiple:
f = [os.path.normpath(x) for x in localRoot.tk.splitlist(ret_val)]
else:
f = os.path.normpath(ret_val)
localRoot.destroy()
if not f:
return None
return f |
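For instance (interactive: this opens a native Tk dialog; the mask list mirrors the docstring's own example):
path = fileopenbox(msg="Pick a page or stylesheet",
                   filetypes=["*.css", ["*.htm", "*.html", "HTML files"]])
if path is not None:
    print("chosen: %s" % path)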
def setCovariance(self, cov):
""" makes lowrank approximation of cov """
assert cov.shape[0]==self.dim, 'Dimension mismatch.'
S, U = la.eigh(cov)
U = U[:,::-1]
S = S[::-1]
_X = U[:, :self.rank] * sp.sqrt(S[:self.rank])
self.X = _X | makes lowrank approximation of cov | Below is the the instruction that describes the task:
### Input:
makes lowrank approximation of cov
### Response:
def setCovariance(self, cov):
""" makes lowrank approximation of cov """
assert cov.shape[0]==self.dim, 'Dimension mismatch.'
S, U = la.eigh(cov)
U = U[:,::-1]
S = S[::-1]
_X = U[:, :self.rank] * sp.sqrt(S[:self.rank])
self.X = _X |
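The same eigendecomposition trick in plain numpy, with the rank fixed at 2 for illustration (the method itself uses scipy.linalg as `la` and scipy as `sp`):
import numpy as np
cov = np.cov(np.random.randn(5, 200))   # a 5x5 sample covariance
S, U = np.linalg.eigh(cov)
U, S = U[:, ::-1], S[::-1]              # sort eigenpairs in descending order
X = U[:, :2] * np.sqrt(S[:2])           # X @ X.T is the rank-2 approximation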
def channel(layer, n_channel, batch=None):
"""Visualize a single channel"""
if batch is None:
return lambda T: tf.reduce_mean(T(layer)[..., n_channel])
else:
        return lambda T: tf.reduce_mean(T(layer)[batch, ..., n_channel]) | Visualize a single channel | Below is the instruction that describes the task:
### Input:
Visualize a single channel
### Response:
def channel(layer, n_channel, batch=None):
"""Visualize a single channel"""
if batch is None:
return lambda T: tf.reduce_mean(T(layer)[..., n_channel])
else:
return lambda T: tf.reduce_mean(T(layer)[batch, ..., n_channel]) |
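A hedged sketch of calling the objective; the dict below stands in for a lucid-style T function, and the layer name/shape are invented (TF1-style placeholder):
import tensorflow as tf
layers = {"mixed4a": tf.placeholder(tf.float32, [1, 8, 8, 64])}  # fake activations
loss = channel("mixed4a", 42)(layers.__getitem__)  # scalar mean of channel 42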
def adjust_for_length(self, key, r, kwargs):
"""
Converts the response to a string and compares its length to a max
length specified in settings. If the response is too long, an error is
logged, and an abbreviated response is returned instead.
"""
length = len(str(kwargs))
if length > settings.defaults["max_detail_length"]:
self._log_length_error(key, length)
r["max_detail_length_error"] = length
return r
return kwargs | Converts the response to a string and compares its length to a max
length specified in settings. If the response is too long, an error is
logged, and an abbreviated response is returned instead. | Below is the instruction that describes the task:
### Input:
Converts the response to a string and compares its length to a max
length specified in settings. If the response is too long, an error is
logged, and an abbreviated response is returned instead.
### Response:
def adjust_for_length(self, key, r, kwargs):
"""
Converts the response to a string and compares its length to a max
length specified in settings. If the response is too long, an error is
logged, and an abbreviated response is returned instead.
"""
length = len(str(kwargs))
if length > settings.defaults["max_detail_length"]:
self._log_length_error(key, length)
r["max_detail_length_error"] = length
return r
return kwargs |
def _convertPointsToSegments(points, willBeReversed=False):
"""
Compile points into InputSegment objects.
"""
# get the last on curve
previousOnCurve = None
for point in reversed(points):
if point.segmentType is not None:
previousOnCurve = point.coordinates
break
assert previousOnCurve is not None
# gather the segments
offCurves = []
segments = []
for point in points:
# off curve, hold.
if point.segmentType is None:
offCurves.append(point)
else:
segment = InputSegment(
points=offCurves + [point],
previousOnCurve=previousOnCurve,
willBeReversed=willBeReversed
)
segments.append(segment)
offCurves = []
previousOnCurve = point.coordinates
assert not offCurves
    return segments | Compile points into InputSegment objects. | Below is the instruction that describes the task:
### Input:
Compile points into InputSegment objects.
### Response:
def _convertPointsToSegments(points, willBeReversed=False):
"""
Compile points into InputSegment objects.
"""
# get the last on curve
previousOnCurve = None
for point in reversed(points):
if point.segmentType is not None:
previousOnCurve = point.coordinates
break
assert previousOnCurve is not None
# gather the segments
offCurves = []
segments = []
for point in points:
# off curve, hold.
if point.segmentType is None:
offCurves.append(point)
else:
segment = InputSegment(
points=offCurves + [point],
previousOnCurve=previousOnCurve,
willBeReversed=willBeReversed
)
segments.append(segment)
offCurves = []
previousOnCurve = point.coordinates
assert not offCurves
return segments |
def OnApprove(self, event):
"""File approve event handler"""
if not self.main_window.safe_mode:
return
msg = _(u"You are going to approve and trust a file that\n"
u"you have not created yourself.\n"
u"After proceeding, the file is executed.\n \n"
u"It may harm your system as any program can.\n"
u"Please check all cells thoroughly before\nproceeding.\n \n"
u"Proceed and sign this file as trusted?")
short_msg = _("Security warning")
if self.main_window.interfaces.get_warning_choice(msg, short_msg):
# Leave safe mode
self.main_window.grid.actions.leave_safe_mode()
# Display safe mode end in status bar
statustext = _("Safe mode deactivated.")
post_command_event(self.main_window, self.main_window.StatusBarMsg,
                               text=statustext) | File approve event handler | Below is the instruction that describes the task:
### Input:
File approve event handler
### Response:
def OnApprove(self, event):
"""File approve event handler"""
if not self.main_window.safe_mode:
return
msg = _(u"You are going to approve and trust a file that\n"
u"you have not created yourself.\n"
u"After proceeding, the file is executed.\n \n"
u"It may harm your system as any program can.\n"
u"Please check all cells thoroughly before\nproceeding.\n \n"
u"Proceed and sign this file as trusted?")
short_msg = _("Security warning")
if self.main_window.interfaces.get_warning_choice(msg, short_msg):
# Leave safe mode
self.main_window.grid.actions.leave_safe_mode()
# Display safe mode end in status bar
statustext = _("Safe mode deactivated.")
post_command_event(self.main_window, self.main_window.StatusBarMsg,
text=statustext) |
def merge_overlaps(self, data_reducer=None, data_initializer=None, strict=True):
"""
Finds all intervals with overlapping ranges and merges them
into a single interval. If provided, uses data_reducer and
data_initializer with similar semantics to Python's built-in
reduce(reducer_func[, initializer]), as follows:
If data_reducer is set to a function, combines the data
fields of the Intervals with
current_reduced_data = data_reducer(current_reduced_data, new_data)
If data_reducer is None, the merged Interval's data
field will be set to None, ignoring all the data fields
of the merged Intervals.
On encountering the first Interval to merge, if
data_initializer is None (default), uses the first
Interval's data field as the first value for
current_reduced_data. If data_initializer is not None,
current_reduced_data is set to a shallow copy of
data_initializer created with copy.copy(data_initializer).
If strict is True (default), intervals are only merged if
their ranges actually overlap; adjacent, touching intervals
will not be merged. If strict is False, intervals are merged
even if they are only end-to-end adjacent.
Completes in O(n*logn).
"""
if not self:
return
sorted_intervals = sorted(self.all_intervals) # get sorted intervals
merged = []
# use mutable object to allow new_series() to modify it
current_reduced = [None]
higher = None # iterating variable, which new_series() needs access to
def new_series():
if data_initializer is None:
current_reduced[0] = higher.data
merged.append(higher)
return
else: # data_initializer is not None
current_reduced[0] = copy(data_initializer)
current_reduced[0] = data_reducer(current_reduced[0], higher.data)
merged.append(Interval(higher.begin, higher.end, current_reduced[0]))
for higher in sorted_intervals:
if merged: # series already begun
lower = merged[-1]
if (higher.begin < lower.end or
not strict and higher.begin == lower.end): # should merge
upper_bound = max(lower.end, higher.end)
if data_reducer is not None:
current_reduced[0] = data_reducer(current_reduced[0], higher.data)
else: # annihilate the data, since we don't know how to merge it
current_reduced[0] = None
merged[-1] = Interval(lower.begin, upper_bound, current_reduced[0])
else:
new_series()
else: # not merged; is first of Intervals to merge
new_series()
self.__init__(merged) | Finds all intervals with overlapping ranges and merges them
into a single interval. If provided, uses data_reducer and
data_initializer with similar semantics to Python's built-in
reduce(reducer_func[, initializer]), as follows:
If data_reducer is set to a function, combines the data
fields of the Intervals with
current_reduced_data = data_reducer(current_reduced_data, new_data)
If data_reducer is None, the merged Interval's data
field will be set to None, ignoring all the data fields
of the merged Intervals.
On encountering the first Interval to merge, if
data_initializer is None (default), uses the first
Interval's data field as the first value for
current_reduced_data. If data_initializer is not None,
current_reduced_data is set to a shallow copy of
data_initializer created with copy.copy(data_initializer).
If strict is True (default), intervals are only merged if
their ranges actually overlap; adjacent, touching intervals
will not be merged. If strict is False, intervals are merged
even if they are only end-to-end adjacent.
Completes in O(n*logn). | Below is the instruction that describes the task:
### Input:
Finds all intervals with overlapping ranges and merges them
into a single interval. If provided, uses data_reducer and
data_initializer with similar semantics to Python's built-in
reduce(reducer_func[, initializer]), as follows:
If data_reducer is set to a function, combines the data
fields of the Intervals with
current_reduced_data = data_reducer(current_reduced_data, new_data)
If data_reducer is None, the merged Interval's data
field will be set to None, ignoring all the data fields
of the merged Intervals.
On encountering the first Interval to merge, if
data_initializer is None (default), uses the first
Interval's data field as the first value for
current_reduced_data. If data_initializer is not None,
current_reduced_data is set to a shallow copy of
data_initializer created with copy.copy(data_initializer).
If strict is True (default), intervals are only merged if
their ranges actually overlap; adjacent, touching intervals
will not be merged. If strict is False, intervals are merged
even if they are only end-to-end adjacent.
Completes in O(n*logn).
### Response:
def merge_overlaps(self, data_reducer=None, data_initializer=None, strict=True):
"""
Finds all intervals with overlapping ranges and merges them
into a single interval. If provided, uses data_reducer and
data_initializer with similar semantics to Python's built-in
reduce(reducer_func[, initializer]), as follows:
If data_reducer is set to a function, combines the data
fields of the Intervals with
current_reduced_data = data_reducer(current_reduced_data, new_data)
If data_reducer is None, the merged Interval's data
field will be set to None, ignoring all the data fields
of the merged Intervals.
On encountering the first Interval to merge, if
data_initializer is None (default), uses the first
Interval's data field as the first value for
current_reduced_data. If data_initializer is not None,
current_reduced_data is set to a shallow copy of
data_initializer created with copy.copy(data_initializer).
If strict is True (default), intervals are only merged if
their ranges actually overlap; adjacent, touching intervals
will not be merged. If strict is False, intervals are merged
even if they are only end-to-end adjacent.
Completes in O(n*logn).
"""
if not self:
return
sorted_intervals = sorted(self.all_intervals) # get sorted intervals
merged = []
# use mutable object to allow new_series() to modify it
current_reduced = [None]
higher = None # iterating variable, which new_series() needs access to
def new_series():
if data_initializer is None:
current_reduced[0] = higher.data
merged.append(higher)
return
else: # data_initializer is not None
current_reduced[0] = copy(data_initializer)
current_reduced[0] = data_reducer(current_reduced[0], higher.data)
merged.append(Interval(higher.begin, higher.end, current_reduced[0]))
for higher in sorted_intervals:
if merged: # series already begun
lower = merged[-1]
if (higher.begin < lower.end or
not strict and higher.begin == lower.end): # should merge
upper_bound = max(lower.end, higher.end)
if data_reducer is not None:
current_reduced[0] = data_reducer(current_reduced[0], higher.data)
else: # annihilate the data, since we don't know how to merge it
current_reduced[0] = None
merged[-1] = Interval(lower.begin, upper_bound, current_reduced[0])
else:
new_series()
else: # not merged; is first of Intervals to merge
new_series()
self.__init__(merged) |
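Worked on a tiny tree, assuming the enclosing intervaltree-style IntervalTree/Interval classes:
t = IntervalTree([Interval(0, 5, 1), Interval(3, 8, 2), Interval(8, 10, 3)])
t.merge_overlaps(data_reducer=lambda acc, new: acc + new)
# strict=True keeps the merely touching [8, 10) apart:
# -> {Interval(0, 8, 3), Interval(8, 10, 3)}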
def regex_lexer(regex_pat):
"""
    Build a lexer function from a regex string, a compiled pattern, or a tuple of patterns.
"""
if isinstance(regex_pat, str):
regex_pat = re.compile(regex_pat)
def f(inp_str, pos):
m = regex_pat.match(inp_str, pos)
return m.group() if m else None
elif hasattr(regex_pat, 'match'):
def f(inp_str, pos):
m = regex_pat.match(inp_str, pos)
return m.group() if m else None
else:
regex_pats = tuple(re.compile(e) for e in regex_pat)
def f(inp_str, pos):
for each_pat in regex_pats:
m = each_pat.match(inp_str, pos)
if m:
return m.group()
    return f | Build a lexer function from a regex string, a compiled pattern, or a tuple of patterns. | Below is the instruction that describes the task:
### Input:
Build a lexer function from a regex string, a compiled pattern, or a tuple of patterns.
### Response:
def regex_lexer(regex_pat):
"""
    Build a lexer function from a regex string, a compiled pattern, or a tuple of patterns.
"""
if isinstance(regex_pat, str):
regex_pat = re.compile(regex_pat)
def f(inp_str, pos):
m = regex_pat.match(inp_str, pos)
return m.group() if m else None
elif hasattr(regex_pat, 'match'):
def f(inp_str, pos):
m = regex_pat.match(inp_str, pos)
return m.group() if m else None
else:
regex_pats = tuple(re.compile(e) for e in regex_pat)
def f(inp_str, pos):
for each_pat in regex_pats:
m = each_pat.match(inp_str, pos)
if m:
return m.group()
return f |
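A quick check of the three accepted pattern forms (assumes `re` is imported by the enclosing module, as the function requires):
lex_word = regex_lexer(r'[A-Za-z_]\w*')
lex_many = regex_lexer((r'\d+', r'[A-Za-z_]\w*'))
assert lex_word('foo42 bar', 0) == 'foo42'
assert lex_word('42', 0) is None
assert lex_many('42abc', 0) == '42'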
def entity_list(args):
""" List entities in a workspace. """
r = fapi.get_entities_with_type(args.project, args.workspace)
fapi._check_response_code(r, 200)
    return [ '{0}\t{1}'.format(e['entityType'], e['name']) for e in r.json() ] | List entities in a workspace. | Below is the instruction that describes the task:
### Input:
List entities in a workspace.
### Response:
def entity_list(args):
""" List entities in a workspace. """
r = fapi.get_entities_with_type(args.project, args.workspace)
fapi._check_response_code(r, 200)
return [ '{0}\t{1}'.format(e['entityType'], e['name']) for e in r.json() ] |
def listdir(search_base, followlinks=False, filter='*',
relpath=False, bestprefix=False, system=NIST):
"""This is a generator which recurses the directory tree
`search_base`, yielding 2-tuples of:
* The absolute/relative path to a discovered file
* A bitmath instance representing the "apparent size" of the file.
- `search_base` - The directory to begin walking down.
- `followlinks` - Whether or not to follow symbolic links to directories
- `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
(default: ``*``, everything)
- `relpath` - ``True`` to return the relative path from `pwd` or
``False`` (default) to return the fully qualified path
- ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
instances back instead.
- `system` - Provide a preferred unit system by setting `system`
to either ``bitmath.NIST`` (default) or ``bitmath.SI``.
.. note:: This function does NOT return tuples for directory entities.
.. note:: Symlinks to **files** are followed automatically
"""
for root, dirs, files in os.walk(search_base, followlinks=followlinks):
for name in fnmatch.filter(files, filter):
_path = os.path.join(root, name)
if relpath:
# RELATIVE path
_return_path = os.path.relpath(_path, '.')
else:
# REAL path
_return_path = os.path.realpath(_path)
if followlinks:
yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
else:
if os.path.isdir(_path) or os.path.islink(_path):
pass
else:
yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system)) | This is a generator which recurses the directory tree
`search_base`, yielding 2-tuples of:
* The absolute/relative path to a discovered file
* A bitmath instance representing the "apparent size" of the file.
- `search_base` - The directory to begin walking down.
- `followlinks` - Whether or not to follow symbolic links to directories
- `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
(default: ``*``, everything)
- `relpath` - ``True`` to return the relative path from `pwd` or
``False`` (default) to return the fully qualified path
- ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
instances back instead.
- `system` - Provide a preferred unit system by setting `system`
to either ``bitmath.NIST`` (default) or ``bitmath.SI``.
.. note:: This function does NOT return tuples for directory entities.
.. note:: Symlinks to **files** are followed automatically | Below is the instruction that describes the task:
### Input:
This is a generator which recurses the directory tree
`search_base`, yielding 2-tuples of:
* The absolute/relative path to a discovered file
* A bitmath instance representing the "apparent size" of the file.
- `search_base` - The directory to begin walking down.
- `followlinks` - Whether or not to follow symbolic links to directories
- `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
(default: ``*``, everything)
- `relpath` - ``True`` to return the relative path from `pwd` or
``False`` (default) to return the fully qualified path
- ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
instances back instead.
- `system` - Provide a preferred unit system by setting `system`
to either ``bitmath.NIST`` (default) or ``bitmath.SI``.
.. note:: This function does NOT return tuples for directory entities.
.. note:: Symlinks to **files** are followed automatically
### Response:
def listdir(search_base, followlinks=False, filter='*',
relpath=False, bestprefix=False, system=NIST):
"""This is a generator which recurses the directory tree
`search_base`, yielding 2-tuples of:
* The absolute/relative path to a discovered file
* A bitmath instance representing the "apparent size" of the file.
- `search_base` - The directory to begin walking down.
- `followlinks` - Whether or not to follow symbolic links to directories
- `filter` - A glob (see :py:mod:`fnmatch`) to filter results with
(default: ``*``, everything)
- `relpath` - ``True`` to return the relative path from `pwd` or
``False`` (default) to return the fully qualified path
- ``bestprefix`` - set to ``False`` to get ``bitmath.Byte``
instances back instead.
- `system` - Provide a preferred unit system by setting `system`
to either ``bitmath.NIST`` (default) or ``bitmath.SI``.
.. note:: This function does NOT return tuples for directory entities.
.. note:: Symlinks to **files** are followed automatically
"""
for root, dirs, files in os.walk(search_base, followlinks=followlinks):
for name in fnmatch.filter(files, filter):
_path = os.path.join(root, name)
if relpath:
# RELATIVE path
_return_path = os.path.relpath(_path, '.')
else:
# REAL path
_return_path = os.path.realpath(_path)
if followlinks:
yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system))
else:
if os.path.isdir(_path) or os.path.islink(_path):
pass
else:
yield (_return_path, getsize(_path, bestprefix=bestprefix, system=system)) |
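e.g., walking the current directory for Python files (illustrative; `bestprefix=True` yields human-friendly NIST units):
for path, size in listdir('.', filter='*.py', relpath=True, bestprefix=True):
    print('{0}\t{1}'.format(path, size))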
def size(self):
"""The size of the element."""
size = {}
if self._w3c:
size = self._execute(Command.GET_ELEMENT_RECT)['value']
else:
size = self._execute(Command.GET_ELEMENT_SIZE)['value']
new_size = {"height": size["height"],
"width": size["width"]}
    return new_size | The size of the element. | Below is the instruction that describes the task:
### Input:
The size of the element.
### Response:
def size(self):
"""The size of the element."""
size = {}
if self._w3c:
size = self._execute(Command.GET_ELEMENT_RECT)['value']
else:
size = self._execute(Command.GET_ELEMENT_SIZE)['value']
new_size = {"height": size["height"],
"width": size["width"]}
return new_size |
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
"""
if isinstance(other, SparseSeries):
other = other.to_dense()
dense_combined = self.to_dense().combine_first(other)
return dense_combined.to_sparse(fill_value=self.fill_value) | Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series | Below is the instruction that describes the task:
### Input:
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
### Response:
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
"""
if isinstance(other, SparseSeries):
other = other.to_dense()
dense_combined = self.to_dense().combine_first(other)
return dense_combined.to_sparse(fill_value=self.fill_value) |
def decrypt(self, text, appid):
"""对解密后的明文进行补位删除
@param text: 密文
@return: 删除填充补位后的明文
"""
try:
cryptor = AES.new(self.key, self.mode, self.key[:16])
        # Base64-decode the ciphertext, then decrypt with AES-CBC
plain_text = cryptor.decrypt(base64.b64decode(text))
except Exception as e:
raise DecryptAESError(e)
try:
if six.PY2:
pad = ord(plain_text[-1])
else:
pad = plain_text[-1]
        # strip the padding string
# pkcs7 = PKCS7Encoder()
# plain_text = pkcs7.encode(plain_text)
        # strip the 16-byte random prefix
content = plain_text[16:-pad]
xml_len = socket.ntohl(struct.unpack("I", content[: 4])[0])
xml_content = content[4: xml_len + 4]
from_appid = content[xml_len + 4:]
except Exception as e:
raise IllegalBuffer(e)
if from_appid != appid:
raise ValidateAppIDError()
    return xml_content | Remove the padding from the decrypted plaintext.
@param text: ciphertext
@return: plaintext with the padding removed | Below is the instruction that describes the task:
### Input:
Remove the padding from the decrypted plaintext.
@param text: ciphertext
@return: plaintext with the padding removed
### Response:
def decrypt(self, text, appid):
"""对解密后的明文进行补位删除
@param text: 密文
@return: 删除填充补位后的明文
"""
try:
cryptor = AES.new(self.key, self.mode, self.key[:16])
        # Base64-decode the ciphertext, then decrypt with AES-CBC
plain_text = cryptor.decrypt(base64.b64decode(text))
except Exception as e:
raise DecryptAESError(e)
try:
if six.PY2:
pad = ord(plain_text[-1])
else:
pad = plain_text[-1]
        # strip the padding string
# pkcs7 = PKCS7Encoder()
# plain_text = pkcs7.encode(plain_text)
        # strip the 16-byte random prefix
content = plain_text[16:-pad]
xml_len = socket.ntohl(struct.unpack("I", content[: 4])[0])
xml_content = content[4: xml_len + 4]
from_appid = content[xml_len + 4:]
except Exception as e:
raise IllegalBuffer(e)
if from_appid != appid:
raise ValidateAppIDError()
return xml_content |
def _rpartition(entity, sep):
"""Python2.4 doesn't have an rpartition method so we provide
our own that mimics str.rpartition from later releases.
Split the string at the last occurrence of sep, and return a
3-tuple containing the part before the separator, the separator
itself, and the part after the separator. If the separator is not
found, return a 3-tuple containing two empty strings, followed
by the string itself.
"""
idx = entity.rfind(sep)
if idx == -1:
return '', '', entity
return entity[:idx], sep, entity[idx + 1:] | Python2.4 doesn't have an rpartition method so we provide
our own that mimics str.rpartition from later releases.
Split the string at the last occurrence of sep, and return a
3-tuple containing the part before the separator, the separator
itself, and the part after the separator. If the separator is not
found, return a 3-tuple containing two empty strings, followed
by the string itself. | Below is the instruction that describes the task:
### Input:
Python2.4 doesn't have an rpartition method so we provide
our own that mimics str.rpartition from later releases.
Split the string at the last occurrence of sep, and return a
3-tuple containing the part before the separator, the separator
itself, and the part after the separator. If the separator is not
found, return a 3-tuple containing two empty strings, followed
by the string itself.
### Response:
def _rpartition(entity, sep):
"""Python2.4 doesn't have an rpartition method so we provide
our own that mimics str.rpartition from later releases.
Split the string at the last occurrence of sep, and return a
3-tuple containing the part before the separator, the separator
itself, and the part after the separator. If the separator is not
found, return a 3-tuple containing two empty strings, followed
by the string itself.
"""
idx = entity.rfind(sep)
if idx == -1:
return '', '', entity
return entity[:idx], sep, entity[idx + 1:] |
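Behaviour matches str.rpartition on later Pythons:
assert _rpartition('a.b.c', '.') == ('a.b', '.', 'c')
assert _rpartition('abc', '.') == ('', '', 'abc')
assert 'a.b.c'.rpartition('.') == ('a.b', '.', 'c')  # built-in equivalent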
def get_int(errmsg, arg, default=1, cmdname=None):
"""If arg is an int, use that otherwise take default."""
if arg:
try:
# eval() is used so we will allow arithmetic expressions,
# variables etc.
default = int(eval(arg))
except (SyntaxError, NameError, ValueError):
if cmdname:
errmsg("Command '%s' expects an integer; got: %s." %
(cmdname, str(arg)))
else:
errmsg('Expecting an integer, got: %s.' % str(arg))
pass
raise ValueError
    return default | If arg is an int, use that; otherwise take default. | Below is the instruction that describes the task:
### Input:
If arg is an int, use that; otherwise take default.
### Response:
def get_int(errmsg, arg, default=1, cmdname=None):
"""If arg is an int, use that otherwise take default."""
if arg:
try:
# eval() is used so we will allow arithmetic expressions,
# variables etc.
default = int(eval(arg))
except (SyntaxError, NameError, ValueError):
if cmdname:
errmsg("Command '%s' expects an integer; got: %s." %
(cmdname, str(arg)))
else:
errmsg('Expecting an integer, got: %s.' % str(arg))
pass
raise ValueError
return default |
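e.g., with a trivial errmsg callback (Python 3 print syntax assumed in this sketch):
import sys
def errmsg(msg):
    print(msg, file=sys.stderr)
assert get_int(errmsg, '2 + 3') == 5           # arithmetic allowed via eval()
assert get_int(errmsg, None, default=7) == 7   # empty arg falls back to default
try:
    get_int(errmsg, 'oops', cmdname='step')
except ValueError:
    pass                                       # the error message was printed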
def parse_date(dt, ignoretz=True, as_tz=None):
"""
:param dt: string datetime to convert into datetime object.
:return: date object if the string can be parsed into a date. Otherwise,
return None.
:see: http://labix.org/python-dateutil
Examples:
>>> parse_date('2011-12-30')
datetime.date(2011, 12, 30)
>>> parse_date('12/30/2011')
datetime.date(2011, 12, 30)
"""
dttm = parse_datetime(dt, ignoretz=ignoretz)
return None if dttm is None else dttm.date() | :param dt: string datetime to convert into datetime object.
:return: date object if the string can be parsed into a date. Otherwise,
return None.
:see: http://labix.org/python-dateutil
Examples:
>>> parse_date('2011-12-30')
datetime.date(2011, 12, 30)
>>> parse_date('12/30/2011')
datetime.date(2011, 12, 30) | Below is the instruction that describes the task:
### Input:
:param dt: string datetime to convert into datetime object.
:return: date object if the string can be parsed into a date. Otherwise,
return None.
:see: http://labix.org/python-dateutil
Examples:
>>> parse_date('2011-12-30')
datetime.date(2011, 12, 30)
>>> parse_date('12/30/2011')
datetime.date(2011, 12, 30)
### Response:
def parse_date(dt, ignoretz=True, as_tz=None):
"""
:param dt: string datetime to convert into datetime object.
:return: date object if the string can be parsed into a date. Otherwise,
return None.
:see: http://labix.org/python-dateutil
Examples:
>>> parse_date('2011-12-30')
datetime.date(2011, 12, 30)
>>> parse_date('12/30/2011')
datetime.date(2011, 12, 30)
"""
dttm = parse_datetime(dt, ignoretz=ignoretz)
return None if dttm is None else dttm.date() |
def delete_user_avatar(self, username, avatar):
"""Delete a user's avatar.
:param username: the user to delete the avatar from
:param avatar: ID of the avatar to remove
"""
params = {'username': username}
url = self._get_url('user/avatar/' + avatar)
return self._session.delete(url, params=params) | Delete a user's avatar.
:param username: the user to delete the avatar from
:param avatar: ID of the avatar to remove | Below is the instruction that describes the task:
### Input:
Delete a user's avatar.
:param username: the user to delete the avatar from
:param avatar: ID of the avatar to remove
### Response:
def delete_user_avatar(self, username, avatar):
"""Delete a user's avatar.
:param username: the user to delete the avatar from
:param avatar: ID of the avatar to remove
"""
params = {'username': username}
url = self._get_url('user/avatar/' + avatar)
return self._session.delete(url, params=params) |
def dbus_readBytesFD(self, fd, byte_count):
"""
Reads byte_count bytes from fd and returns them.
"""
f = os.fdopen(fd, 'rb')
result = f.read(byte_count)
f.close()
    return bytearray(result) | Reads byte_count bytes from fd and returns them. | Below is the instruction that describes the task:
### Input:
Reads byte_count bytes from fd and returns them.
### Response:
def dbus_readBytesFD(self, fd, byte_count):
"""
Reads byte_count bytes from fd and returns them.
"""
f = os.fdopen(fd, 'rb')
result = f.read(byte_count)
f.close()
return bytearray(result) |
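The fd handling in isolation, using a pipe as the descriptor source (the D-Bus wrapper itself is omitted):
import os
r_fd, w_fd = os.pipe()
os.write(w_fd, b'hello world')
os.close(w_fd)
f = os.fdopen(r_fd, 'rb')
assert bytearray(f.read(5)) == bytearray(b'hello')
f.close()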
def peek(self, n=None, constructor=list):
"""
Sees/peeks the next few items in the Stream, without removing them.
    Besides that this function keeps the Stream items, it's the same as the
``Stream.take()`` method.
See Also
--------
Stream.take :
Returns the n first elements from the Stream, removing them.
Note
----
When applied in a StreamTeeHub, this method doesn't consume a copy.
Data evaluation is done only once, i.e., after peeking the data is simply
stored to be yielded again when asked for.
"""
return self.copy().take(n=n, constructor=constructor) | Sees/peeks the next few items in the Stream, without removing them.
Besides that this function keeps the Stream items, it's the same as the
``Stream.take()`` method.
See Also
--------
Stream.take :
Returns the n first elements from the Stream, removing them.
Note
----
When applied in a StreamTeeHub, this method doesn't consume a copy.
Data evaluation is done only once, i.e., after peeking the data is simply
stored to be yielded again when asked for. | Below is the instruction that describes the task:
### Input:
Sees/peeks the next few items in the Stream, without removing them.
Besides that this function keeps the Stream items, it's the same as the
``Stream.take()`` method.
See Also
--------
Stream.take :
Returns the n first elements from the Stream, removing them.
Note
----
When applied in a StreamTeeHub, this method doesn't consume a copy.
Data evaluation is done only once, i.e., after peeking the data is simply
stored to be yielded again when asked for.
### Response:
def peek(self, n=None, constructor=list):
"""
Sees/peeks the next few items in the Stream, without removing them.
Besides that this functions keeps the Stream items, it's the same to the
``Stream.take()`` method.
See Also
--------
Stream.take :
Returns the n first elements from the Stream, removing them.
Note
----
When applied in a StreamTeeHub, this method doesn't consume a copy.
Data evaluation is done only once, i.e., after peeking the data is simply
stored to be yielded again when asked for.
"""
return self.copy().take(n=n, constructor=constructor) |
def solve(self, graph, timeout, debug=False, anim=None):
"""Solves the CVRP problem using Clarke and Wright Savings methods
Parameters
----------
graph: :networkx:`NetworkX Graph Obj< >`
        A NetworkX graph is used.
timeout: int
max processing time in seconds
debug: bool, defaults to False
If True, information is printed while routing
anim: AnimationDing0
Returns
-------
SavingsSolution
A solution
"""
savings_list = self.compute_savings_list(graph)
solution = SavingsSolution(graph)
start = time.time()
for i, j in savings_list[:]:
if solution.is_complete():
break
if solution.can_process((i, j)):
solution, inserted = solution.process((i, j))
if inserted:
savings_list.remove((i, j))
if anim:
solution.draw_network(anim)
if time.time() - start > timeout:
break
return solution | Solves the CVRP problem using Clarke and Wright Savings methods
Parameters
----------
graph: :networkx:`NetworkX Graph Obj< >`
    A NetworkX graph is used.
timeout: int
max processing time in seconds
debug: bool, defaults to False
If True, information is printed while routing
anim: AnimationDing0
Returns
-------
SavingsSolution
    A solution | Below is the instruction that describes the task:
### Input:
Solves the CVRP problem using Clarke and Wright Savings methods
Parameters
----------
graph: :networkx:`NetworkX Graph Obj< >`
A NetworkX graaph is used.
timeout: int
max processing time in seconds
debug: bool, defaults to False
If True, information is printed while routing
anim: AnimationDing0
Returns
-------
SavingsSolution
A solution
### Response:
def solve(self, graph, timeout, debug=False, anim=None):
"""Solves the CVRP problem using Clarke and Wright Savings methods
Parameters
----------
graph: :networkx:`NetworkX Graph Obj< >`
        A NetworkX graph is used.
timeout: int
max processing time in seconds
debug: bool, defaults to False
If True, information is printed while routing
anim: AnimationDing0
Returns
-------
SavingsSolution
A solution
"""
savings_list = self.compute_savings_list(graph)
solution = SavingsSolution(graph)
start = time.time()
for i, j in savings_list[:]:
if solution.is_complete():
break
if solution.can_process((i, j)):
solution, inserted = solution.process((i, j))
if inserted:
savings_list.remove((i, j))
if anim:
solution.draw_network(anim)
if time.time() - start > timeout:
break
return solution |
def replaceWith(self, *nodes: Union[AbstractNode, str]) -> None:
"""Replace this node with nodes.
If nodes contains ``str``, it will be converted to Text node.
"""
if self.parentNode:
node = _to_node_list(nodes)
self.parentNode.replaceChild(node, self) | Replace this node with nodes.
If nodes contains ``str``, it will be converted to Text node. | Below is the instruction that describes the task:
### Input:
Replace this node with nodes.
If nodes contains ``str``, it will be converted to Text node.
### Response:
def replaceWith(self, *nodes: Union[AbstractNode, str]) -> None:
"""Replace this node with nodes.
If nodes contains ``str``, it will be converted to Text node.
"""
if self.parentNode:
node = _to_node_list(nodes)
self.parentNode.replaceChild(node, self) |
def _set_alignment(self, group_size, bit_offset=0, auto_align=False):
""" Sets the alignment of the ``Decimal`` field.
:param int group_size: size of the aligned `Field` group in bytes,
can be between ``1`` and ``8``.
:param int bit_offset: bit offset of the `Decimal` field within the
aligned `Field` group, can be between ``0`` and ``63``.
:param bool auto_align: if ``True`` the `Decimal` field aligns itself
to the next matching byte size according to the *size* of the
`Decimal` field.
"""
# Field alignment offset
field_offset = int(bit_offset)
# Auto alignment
if auto_align:
# Field alignment size
field_size, bit_offset = divmod(field_offset, 8)
        if bit_offset != 0:
field_size += 1
field_size = max(field_size, 1)
# No auto alignment
else:
# Field alignment size
field_size = int(group_size)
# Field alignment
alignment = Alignment(field_size, field_offset)
# Invalid field alignment size
    if field_size not in range(1, 9):
raise FieldAlignmentError(self, self.index, alignment)
# Invalid field alignment offset
if not (0 <= field_offset <= 63):
raise FieldAlignmentError(self, self.index, alignment)
# Invalid field alignment
if field_offset >= field_size * 8:
raise FieldAlignmentError(self, self.index, alignment)
# Set field alignment
self._align_to_byte_size = alignment.byte_size
self._align_to_bit_offset = alignment.bit_offset | Sets the alignment of the ``Decimal`` field.
:param int group_size: size of the aligned `Field` group in bytes,
can be between ``1`` and ``8``.
:param int bit_offset: bit offset of the `Decimal` field within the
aligned `Field` group, can be between ``0`` and ``63``.
:param bool auto_align: if ``True`` the `Decimal` field aligns itself
to the next matching byte size according to the *size* of the
`Decimal` field. | Below is the the instruction that describes the task:
### Input:
Sets the alignment of the ``Decimal`` field.
:param int group_size: size of the aligned `Field` group in bytes,
can be between ``1`` and ``8``.
:param int bit_offset: bit offset of the `Decimal` field within the
aligned `Field` group, can be between ``0`` and ``63``.
:param bool auto_align: if ``True`` the `Decimal` field aligns itself
to the next matching byte size according to the *size* of the
`Decimal` field.
### Response:
def _set_alignment(self, group_size, bit_offset=0, auto_align=False):
""" Sets the alignment of the ``Decimal`` field.
:param int group_size: size of the aligned `Field` group in bytes,
can be between ``1`` and ``8``.
:param int bit_offset: bit offset of the `Decimal` field within the
aligned `Field` group, can be between ``0`` and ``63``.
:param bool auto_align: if ``True`` the `Decimal` field aligns itself
to the next matching byte size according to the *size* of the
`Decimal` field.
"""
# Field alignment offset
field_offset = int(bit_offset)
# Auto alignment
if auto_align:
# Field alignment size
field_size, bit_offset = divmod(field_offset, 8)
        if bit_offset != 0:
field_size += 1
field_size = max(field_size, 1)
# No auto alignment
else:
# Field alignment size
field_size = int(group_size)
# Field alignment
alignment = Alignment(field_size, field_offset)
# Invalid field alignment size
    if field_size not in range(1, 9):
raise FieldAlignmentError(self, self.index, alignment)
# Invalid field alignment offset
if not (0 <= field_offset <= 63):
raise FieldAlignmentError(self, self.index, alignment)
# Invalid field alignment
if field_offset >= field_size * 8:
raise FieldAlignmentError(self, self.index, alignment)
# Set field alignment
self._align_to_byte_size = alignment.byte_size
self._align_to_bit_offset = alignment.bit_offset |
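A worked example of the auto-align rounding above may help; this standalone sketch (illustrative names, not part of the class) mirrors the divmod branch:

def auto_align_size(bit_offset):
    # ceil(bit_offset / 8), but never less than one byte,
    # exactly as the auto_align branch above computes it
    size, rem = divmod(bit_offset, 8)
    if rem != 0:
        size += 1
    return max(size, 1)

assert auto_align_size(0) == 1   # fits in the first byte
assert auto_align_size(8) == 1   # a whole number of bytes, no rounding
assert auto_align_size(9) == 2   # one spare bit forces a second byte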
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema | Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType` | Below is the the instruction that describes the task:
### Input:
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
### Response:
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema |
def analyse_cache_dir(self, jobhandler=None, batchsize=1, **kwargs):
"""
Scan the cache directory and launch analysis for all unscored alignments
using the associated task handler. Kwargs are passed to the tree-calculating
task managed by the TaskInterface in self.task_interface.
Example kwargs:
TreeCollectionTaskInterface: scale=1, guide_tree=None,
niters=10, keep_topology=False
RaxmlTaskInterface: -------- partition_files=None, model=None, threads=1
FastTreeTaskInterface: ----- No kwargs
"""
if jobhandler is None:
jobhandler = SequentialJobHandler()
files = glob.glob(os.path.join(self.cache_dir, '*.phy'))
#logger.debug('Files - {}'.format(files))
records = []
outfiles = []
dna = self.collection[0].is_dna() # THIS IS ONLY A GUESS AT SEQ TYPE!!
for infile in files:
id_ = fileIO.strip_extensions(infile)
outfile = self.get_result_file(id_)
#logger.debug('Looking for {}: {}'.format(outfile, os.path.exists(outfile)))
if not os.path.exists(outfile):
record = Alignment(infile, 'phylip', True)
records.append(record)
outfiles.append(outfile)
if len(records) == 0:
return []
args, to_delete = self.task_interface.scrape_args(records, outfiles=outfiles, **kwargs)
# logger.debug('Args - {}'.format(args))
with fileIO.TempFileList(to_delete):
result = jobhandler(self.task_interface.get_task(), args, 'Cache dir analysis', batchsize)
for (out, res) in zip(outfiles, result):
if not os.path.exists(out) and res:
with open(out, 'w') as outfl:
json.dump(res, outfl)
return result | Scan the cache directory and launch analysis for all unscored alignments
using the associated task handler. Kwargs are passed to the tree-calculating
task managed by the TaskInterface in self.task_interface.
Example kwargs:
TreeCollectionTaskInterface: scale=1, guide_tree=None,
niters=10, keep_topology=False
RaxmlTaskInterface: -------- partition_files=None, model=None, threads=1
FastTreeTaskInterface: ----- No kwargs | Below is the the instruction that describes the task:
### Input:
Scan the cache directory and launch analysis for all unscored alignments
using the associated task handler. Kwargs are passed to the tree-calculating
task managed by the TaskInterface in self.task_interface.
Example kwargs:
TreeCollectionTaskInterface: scale=1, guide_tree=None,
niters=10, keep_topology=False
RaxmlTaskInterface: -------- partition_files=None, model=None, threads=1
FastTreeTaskInterface: ----- No kwargs
### Response:
def analyse_cache_dir(self, jobhandler=None, batchsize=1, **kwargs):
"""
Scan the cache directory and launch analysis for all unscored alignments
using the associated task handler. Kwargs are passed to the tree-calculating
task managed by the TaskInterface in self.task_interface.
Example kwargs:
TreeCollectionTaskInterface: scale=1, guide_tree=None,
niters=10, keep_topology=False
RaxmlTaskInterface: -------- partition_files=None, model=None, threads=1
FastTreeTaskInterface: ----- No kwargs
"""
if jobhandler is None:
jobhandler = SequentialJobHandler()
files = glob.glob(os.path.join(self.cache_dir, '*.phy'))
#logger.debug('Files - {}'.format(files))
records = []
outfiles = []
dna = self.collection[0].is_dna() # THIS IS ONLY A GUESS AT SEQ TYPE!!
for infile in files:
id_ = fileIO.strip_extensions(infile)
outfile = self.get_result_file(id_)
#logger.debug('Looking for {}: {}'.format(outfile, os.path.exists(outfile)))
if not os.path.exists(outfile):
record = Alignment(infile, 'phylip', True)
records.append(record)
outfiles.append(outfile)
if len(records) == 0:
return []
args, to_delete = self.task_interface.scrape_args(records, outfiles=outfiles, **kwargs)
# logger.debug('Args - {}'.format(args))
with fileIO.TempFileList(to_delete):
result = jobhandler(self.task_interface.get_task(), args, 'Cache dir analysis', batchsize)
for (out, res) in zip(outfiles, result):
if not os.path.exists(out) and res:
with open(out, 'w') as outfl:
json.dump(res, outfl)
return result |
def loads(data, validate=False, **kwargs):
"""Load a PPMP message from the JSON-formatted string in `data`. When
`validate` is set, raise `ValidationError`. Additional keyword
arguments are the same as those accepted by `json.loads`,
e.g. `object_hook` to customize decoding.
"""
d = json.loads(data, **kwargs)
content_spec = d["content-spec"]
Payload = CONTENT_SPECS[content_spec]
payload = Payload.load(d)
if validate:
errors = payload.problems()
if errors:
raise ValidationError(errors)
return payload | Load a PPMP message from the JSON-formatted string in `data`. When
`validate` is set, raise `ValidationError`. Additional keyword
arguments are the same as those accepted by `json.loads`,
e.g. `object_hook` to customize decoding. | Below is the the instruction that describes the task:
### Input:
Load a PPMP message from the JSON-formatted string in `data`. When
`validate` is set, raise `ValidationError`. Additional keyword
arguments are the same as those accepted by `json.loads`,
e.g. `object_hook` to customize decoding.
### Response:
def loads(data, validate=False, **kwargs):
"""Load a PPMP message from the JSON-formatted string in `data`. When
`validate` is set, raise `ValidationError`. Additional keyword
arguments are the same as those accepted by `json.loads`,
e.g. `object_hook` to customize decoding.
"""
d = json.loads(data, **kwargs)
content_spec = d["content-spec"]
Payload = CONTENT_SPECS[content_spec]
payload = Payload.load(d)
if validate:
errors = payload.problems()
if errors:
raise ValidationError(errors)
return payload |
def confirm(self, question, default=False, true_answer_regex="(?i)^y"):
"""
Confirm a question with the user.
"""
return self._io.confirm(question, default, true_answer_regex) | Confirm a question with the user. | Below is the the instruction that describes the task:
### Input:
Confirm a question with the user.
### Response:
def confirm(self, question, default=False, true_answer_regex="(?i)^y"):
"""
Confirm a question with the user.
"""
return self._io.confirm(question, default, true_answer_regex) |
def set_euid():
"""
Set settings.DROPLET_USER effective UID for the current process
This adds some security, but nothing magic, an attacker can still
gain root access, but at least we only elevate privileges when needed
See root context manager
"""
current = os.geteuid()
logger.debug("Current EUID is %s" % current)
if settings.DROPLET_USER is None:
logger.info("Not changing EUID, DROPLET_USER is None")
return
uid = int(pwd.getpwnam(settings.DROPLET_USER).pw_uid)
if current != uid:
try:
os.seteuid(uid)
logger.info("Set EUID to %s (%s)" %
(settings.DROPLET_USER, os.geteuid()))
        except OSError:
current_user = pwd.getpwuid(os.getuid()).pw_name
logger.error("Failed to set '%s' EUID, running as '%s'" %
(settings.DROPLET_USER, current_user))
else:
logger.debug("Didn't set EUID, it was already correct") | Set settings.DROPLET_USER effective UID for the current process
This adds some security, but nothing magic, an attacker can still
gain root access, but at least we only elevate privileges when needed
See root context manager | Below is the the instruction that describes the task:
### Input:
Set settings.DROPLET_USER effective UID for the current process
This adds some security, but nothing magic, an attacker can still
gain root access, but at least we only elevate privileges when needed
See root context manager
### Response:
def set_euid():
"""
Set settings.DROPLET_USER effective UID for the current process
This adds some security, but nothing magic, an attacker can still
gain root access, but at least we only elevate privileges when needed
See root context manager
"""
current = os.geteuid()
logger.debug("Current EUID is %s" % current)
if settings.DROPLET_USER is None:
logger.info("Not changing EUID, DROPLET_USER is None")
return
uid = int(pwd.getpwnam(settings.DROPLET_USER).pw_uid)
if current != uid:
try:
os.seteuid(uid)
logger.info("Set EUID to %s (%s)" %
(settings.DROPLET_USER, os.geteuid()))
        except OSError:
current_user = pwd.getpwuid(os.getuid()).pw_name
logger.error("Failed to set '%s' EUID, running as '%s'" %
(settings.DROPLET_USER, current_user))
else:
logger.debug("Didn't set EUID, it was already correct") |
def _instance_callable(obj):
"""Given an object, return True if the object is callable.
For classes, return True if instances would be callable."""
if not isinstance(obj, ClassTypes):
# already an instance
return getattr(obj, '__call__', None) is not None
klass = obj
# uses __bases__ instead of __mro__ so that we work with old style classes
if klass.__dict__.get('__call__') is not None:
return True
for base in klass.__bases__:
if _instance_callable(base):
return True
return False | Given an object, return True if the object is callable.
For classes, return True if instances would be callable. | Below is the the instruction that describes the task:
### Input:
Given an object, return True if the object is callable.
For classes, return True if instances would be callable.
### Response:
def _instance_callable(obj):
"""Given an object, return True if the object is callable.
For classes, return True if instances would be callable."""
if not isinstance(obj, ClassTypes):
# already an instance
return getattr(obj, '__call__', None) is not None
klass = obj
# uses __bases__ instead of __mro__ so that we work with old style classes
if klass.__dict__.get('__call__') is not None:
return True
for base in klass.__bases__:
if _instance_callable(base):
return True
return False |
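To illustrate the class-versus-instance distinction the helper draws, a small sketch (the two classes are hypothetical):

class WithCall(object):
    def __call__(self):
        return 'called'

class Plain(object):
    pass

assert _instance_callable(WithCall) is True    # instances would be callable
assert _instance_callable(Plain) is False      # instances would not be
assert _instance_callable(len) is True         # an instance that has __call__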
def read_version(version_file):
"Read the `(version-string, version-info)` from `version_file`."
vars = {}
with open(version_file) as f:
exec(f.read(), {}, vars)
return (vars['__version__'], vars['__version_info__']) | Read the `(version-string, version-info)` from `version_file`. | Below is the the instruction that describes the task:
### Input:
Read the `(version-string, version-info)` from `version_file`.
### Response:
def read_version(version_file):
"Read the `(version-string, version-info)` from `version_file`."
vars = {}
with open(version_file) as f:
exec(f.read(), {}, vars)
return (vars['__version__'], vars['__version_info__']) |
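Since the version file is exec'd, it just needs to assign both names; for a hypothetical _version.py the call behaves like this:

# _version.py (hypothetical):
#     __version__ = '1.2.3'
#     __version_info__ = (1, 2, 3)
version, version_info = read_version('_version.py')
assert version == '1.2.3'
assert version_info == (1, 2, 3)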
def set_state_data(cls, entity, data):
"""
Sets the state data for the given entity to the given data.
This also works for unmanaged entities.
"""
attr_names = get_domain_class_attribute_names(type(entity))
nested_items = []
for attr, new_attr_value in iteritems_(data):
        if attr.entity_attr not in attr_names:
raise ValueError('Can not set attribute "%s" for entity '
'"%s".' % (attr.entity_attr, entity))
if '.' in attr.entity_attr:
nested_items.append((attr, new_attr_value))
continue
else:
setattr(entity, attr.entity_attr, new_attr_value)
for attr, new_attr_value in nested_items:
try:
set_nested_attribute(entity, attr.entity_attr, new_attr_value)
except AttributeError as exc:
            if new_attr_value is not None:
raise exc | Sets the state data for the given entity to the given data.
This also works for unmanaged entities. | Below is the the instruction that describes the task:
### Input:
Sets the state data for the given entity to the given data.
This also works for unmanaged entities.
### Response:
def set_state_data(cls, entity, data):
"""
Sets the state data for the given entity to the given data.
This also works for unmanaged entities.
"""
attr_names = get_domain_class_attribute_names(type(entity))
nested_items = []
for attr, new_attr_value in iteritems_(data):
        if attr.entity_attr not in attr_names:
raise ValueError('Can not set attribute "%s" for entity '
'"%s".' % (attr.entity_attr, entity))
if '.' in attr.entity_attr:
nested_items.append((attr, new_attr_value))
continue
else:
setattr(entity, attr.entity_attr, new_attr_value)
for attr, new_attr_value in nested_items:
try:
set_nested_attribute(entity, attr.entity_attr, new_attr_value)
except AttributeError as exc:
            if new_attr_value is not None:
raise exc |
def check_classes(self, scope=-1):
""" Check if pending identifiers are defined or not. If not,
returns a syntax error. If no scope is given, the current
one is checked.
"""
for entry in self[scope].values():
if entry.class_ is None:
syntax_error(entry.lineno, "Unknown identifier '%s'" % entry.name) | Check if pending identifiers are defined or not. If not,
reports a syntax error. If no scope is given, the current
one is checked. | Below is the the instruction that describes the task:
### Input:
Check if pending identifiers are defined or not. If not,
reports a syntax error. If no scope is given, the current
one is checked.
### Response:
def check_classes(self, scope=-1):
""" Check if pending identifiers are defined or not. If not,
reports a syntax error. If no scope is given, the current
one is checked.
"""
for entry in self[scope].values():
if entry.class_ is None:
syntax_error(entry.lineno, "Unknown identifier '%s'" % entry.name) |
async def get_participant(self, p_id: int, force_update=False) -> Participant:
""" get a participant by its id
|methcoro|
Args:
p_id: participant id
force_update (default=False): True to force an update to the Challonge API
Returns:
Participant: None if not found
Raises:
APIException
"""
found_p = self._find_participant(p_id)
if force_update or found_p is None:
await self.get_participants()
found_p = self._find_participant(p_id)
return found_p | get a participant by its id
|methcoro|
Args:
p_id: participant id
force_update (default=False): True to force an update to the Challonge API
Returns:
Participant: None if not found
Raises:
APIException | Below is the the instruction that describes the task:
### Input:
get a participant by its id
|methcoro|
Args:
p_id: participant id
force_update (default=False): True to force an update to the Challonge API
Returns:
Participant: None if not found
Raises:
APIException
### Response:
async def get_participant(self, p_id: int, force_update=False) -> Participant:
""" get a participant by its id
|methcoro|
Args:
p_id: participant id
force_update (default=False): True to force an update to the Challonge API
Returns:
Participant: None if not found
Raises:
APIException
"""
found_p = self._find_participant(p_id)
if force_update or found_p is None:
await self.get_participants()
found_p = self._find_participant(p_id)
return found_p |
def import_object(object_name):
"""Import an object from its Fully Qualified Name."""
package, name = object_name.rsplit('.', 1)
return getattr(importlib.import_module(package), name) | Import an object from its Fully Qualified Name. | Below is the the instruction that describes the task:
### Input:
Import an object from its Fully Qualified Name.
### Response:
def import_object(object_name):
"""Import an object from its Fully Qualified Name."""
package, name = object_name.rsplit('.', 1)
return getattr(importlib.import_module(package), name) |
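A quick sanity check of the FQN lookup, using a standard-library name:

from collections import OrderedDict
assert import_object('collections.OrderedDict') is OrderedDict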
def capture_reset_password_requests(reset_password_sent_at=None):
"""Testing utility for capturing password reset requests.
:param reset_password_sent_at: An optional datetime object to set the
user's `reset_password_sent_at` to
"""
reset_requests = []
def _on(app, **data):
reset_requests.append(data)
reset_password_instructions_sent.connect(_on)
try:
yield reset_requests
finally:
reset_password_instructions_sent.disconnect(_on) | Testing utility for capturing password reset requests.
:param reset_password_sent_at: An optional datetime object to set the
user's `reset_password_sent_at` to | Below is the the instruction that describes the task:
### Input:
Testing utility for capturing password reset requests.
:param reset_password_sent_at: An optional datetime object to set the
user's `reset_password_sent_at` to
### Response:
def capture_reset_password_requests(reset_password_sent_at=None):
"""Testing utility for capturing password reset requests.
:param reset_password_sent_at: An optional datetime object to set the
user's `reset_password_sent_at` to
"""
reset_requests = []
def _on(app, **data):
reset_requests.append(data)
reset_password_instructions_sent.connect(_on)
try:
yield reset_requests
finally:
reset_password_instructions_sent.disconnect(_on) |
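The generator is presumably wrapped with contextlib.contextmanager where it is defined, since it is meant to be used as a context; a usage sketch against a hypothetical Flask test client:

with capture_reset_password_requests() as reset_requests:
    client.post('/reset', data=dict(email='user@example.com'))  # hypothetical test client
assert len(reset_requests) == 1  # one signal payload captured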
def _send_start_ok(self, frame_in):
"""Send Start OK frame.
:param specification.Connection.Start frame_in: Amqp frame.
:return:
"""
mechanisms = try_utf8_decode(frame_in.mechanisms)
if 'EXTERNAL' in mechanisms:
mechanism = 'EXTERNAL'
credentials = '\0\0'
elif 'PLAIN' in mechanisms:
mechanism = 'PLAIN'
credentials = self._plain_credentials()
else:
exception = AMQPConnectionError(
'Unsupported Security Mechanism(s): %s' %
frame_in.mechanisms
)
self._connection.exceptions.append(exception)
return
start_ok_frame = specification.Connection.StartOk(
mechanism=mechanism,
client_properties=self._client_properties(),
response=credentials,
locale=LOCALE
)
self._write_frame(start_ok_frame) | Send Start OK frame.
:param specification.Connection.Start frame_in: Amqp frame.
:return: | Below is the the instruction that describes the task:
### Input:
Send Start OK frame.
:param specification.Connection.Start frame_in: Amqp frame.
:return:
### Response:
def _send_start_ok(self, frame_in):
"""Send Start OK frame.
:param specification.Connection.Start frame_in: Amqp frame.
:return:
"""
mechanisms = try_utf8_decode(frame_in.mechanisms)
if 'EXTERNAL' in mechanisms:
mechanism = 'EXTERNAL'
credentials = '\0\0'
elif 'PLAIN' in mechanisms:
mechanism = 'PLAIN'
credentials = self._plain_credentials()
else:
exception = AMQPConnectionError(
'Unsupported Security Mechanism(s): %s' %
frame_in.mechanisms
)
self._connection.exceptions.append(exception)
return
start_ok_frame = specification.Connection.StartOk(
mechanism=mechanism,
client_properties=self._client_properties(),
response=credentials,
locale=LOCALE
)
self._write_frame(start_ok_frame) |
def valid_return_codes(self, *codes):
""" Sets codes which are considered valid when returned from
command modules. The default is (0, ).
Should be used as a context::
with api.valid_return_codes(0, 1):
api.shell('test -e /tmp/log && rm /tmp/log')
"""
previous_codes = self._valid_return_codes
self._valid_return_codes = codes
yield
self._valid_return_codes = previous_codes | Sets codes which are considered valid when returned from
command modules. The default is (0, ).
Should be used as a context::
with api.valid_return_codes(0, 1):
api.shell('test -e /tmp/log && rm /tmp/log') | Below is the the instruction that describes the task:
### Input:
Sets codes which are considered valid when returned from
command modules. The default is (0, ).
Should be used as a context::
with api.valid_return_codes(0, 1):
api.shell('test -e /tmp/log && rm /tmp/log')
### Response:
def valid_return_codes(self, *codes):
""" Sets codes which are considered valid when returned from
command modules. The default is (0, ).
Should be used as a context::
with api.valid_return_codes(0, 1):
api.shell('test -e /tmp/log && rm /tmp/log')
"""
previous_codes = self._valid_return_codes
self._valid_return_codes = codes
yield
self._valid_return_codes = previous_codes |
def matches(self, tokenJson):
'''Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate. If the rules describe tokenJson[ANALYSIS], it is
required that at least one item in the list tokenJson[ANALYSIS] satisfies
all the rules (but it is not required that all the items should satisfy).
Returns a boolean value.
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token;
'''
    if self.otherRules is not None:
        otherMatches = []
        for field in self.otherRules:
            match = field in tokenJson and ((self.otherRules[field]).match(tokenJson[field]) is not None)
            otherMatches.append( match )
        if not otherMatches or not all(otherMatches):
            return False
        elif self.analysisRules is None and all(otherMatches):
            return True
    if self.analysisRules is not None:
assert ANALYSIS in tokenJson, "No ANALYSIS found within token: "+str(tokenJson)
totalMatches = []
for analysis in tokenJson[ANALYSIS]:
# Check whether this analysis satisfies all the rules
# (if not, discard the analysis)
matches = []
for field in self.analysisRules:
value = analysis[field] if field in analysis else ""
                match = (self.analysisRules[field]).match(value) is not None
matches.append( match )
if not match:
break
totalMatches.append( all(matches) )
# Return True iff there was at least one analysis that
# satisfied all the rules;
return any(totalMatches)
return False | Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate. If the rules describe tokenJson[ANALYSIS], it is
required that at least one item in the list tokenJson[ANALYSIS] satisfies
all the rules (but it is not required that all the items should satisfy).
Returns a boolean value.
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token; | Below is the the instruction that describes the task:
### Input:
Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate. If the rules describe tokenJson[ANALYSIS], it is
required that at least one item in the list tokenJson[ANALYSIS] satisfies
all the rules (but it is not required that all the items should satisfy).
Returns a boolean value.
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token;
### Response:
def matches(self, tokenJson):
'''Determines whether given token (tokenJson) satisfies all the rules listed
in the WordTemplate. If the rules describe tokenJson[ANALYSIS], it is
required that at least one item in the list tokenJson[ANALYSIS] satisfies
all the rules (but it is not required that all the items should satisfy).
Returns a boolean value.
Parameters
----------
tokenJson: pyvabamorf's analysis of a single word token;
'''
    if self.otherRules is not None:
        otherMatches = []
        for field in self.otherRules:
            match = field in tokenJson and ((self.otherRules[field]).match(tokenJson[field]) is not None)
            otherMatches.append( match )
        if not otherMatches or not all(otherMatches):
            return False
        elif self.analysisRules is None and all(otherMatches):
            return True
    if self.analysisRules is not None:
assert ANALYSIS in tokenJson, "No ANALYSIS found within token: "+str(tokenJson)
totalMatches = []
for analysis in tokenJson[ANALYSIS]:
# Check whether this analysis satisfies all the rules
# (if not, discard the analysis)
matches = []
for field in self.analysisRules:
value = analysis[field] if field in analysis else ""
                match = (self.analysisRules[field]).match(value) is not None
matches.append( match )
if not match:
break
totalMatches.append( all(matches) )
# Return True iff there was at least one analysis that
# satisfied all the rules;
return any(totalMatches)
return False |
def is_conditional(self, include_loop=True):
"""
Check if the node is a conditional node
A conditional node is either an IF, a require/assert, or a RETURN bool
Returns:
bool: True if the node is a conditional node
"""
if self.contains_if(include_loop) or self.contains_require_or_assert():
return True
if self.irs:
last_ir = self.irs[-1]
if last_ir:
if isinstance(last_ir, Return):
for r in last_ir.read:
if r.type == ElementaryType('bool'):
return True
return False | Check if the node is a conditional node
A conditional node is either an IF, a require/assert, or a RETURN bool
Returns:
bool: True if the node is a conditional node | Below is the the instruction that describes the task:
### Input:
Check if the node is a conditional node
A conditional node is either an IF, a require/assert, or a RETURN bool
Returns:
bool: True if the node is a conditional node
### Response:
def is_conditional(self, include_loop=True):
"""
Check if the node is a conditional node
A conditional node is either an IF, a require/assert, or a RETURN bool
Returns:
bool: True if the node is a conditional node
"""
if self.contains_if(include_loop) or self.contains_require_or_assert():
return True
if self.irs:
last_ir = self.irs[-1]
if last_ir:
if isinstance(last_ir, Return):
for r in last_ir.read:
if r.type == ElementaryType('bool'):
return True
return False |
def search(self, query, category=None, orientation=None, page=1, per_page=10):
"""
Get a single page from a photo search.
Optionally limit your search to a set of categories by supplying the category IDs.
Note: If supplying multiple category IDs,
the resulting photos will be those that match all of the given categories,
not ones that match any category.
:param query [string]: Search terms.
:param category [string]: Category ID(s) to filter search. If multiple, comma-separated. (deprecated)
:param orientation [string]: Filter search results by photo orientation.
Valid values are landscape, portrait, and squarish.
:param page [integer]: Page number to retrieve. (Optional; default: 1)
:param per_page [integer]: Number of items per page. (Optional; default: 10)
:return: [Array]: A single page of the curated Photo list.
:raise UnsplashError: If the given orientation is not in the default orientation values.
"""
if orientation and orientation not in self.orientation_values:
raise Exception()
params = {
"query": query,
"category": category,
"orientation": orientation,
"page": page,
"per_page": per_page
}
url = "/photos/search"
result = self._get(url, params=params)
return PhotoModel.parse_list(result) | Get a single page from a photo search.
Optionally limit your search to a set of categories by supplying the category IDs.
Note: If supplying multiple category IDs,
the resulting photos will be those that match all of the given categories,
not ones that match any category.
:param query [string]: Search terms.
:param category [string]: Category ID(s) to filter search. If multiple, comma-separated. (deprecated)
:param orientation [string]: Filter search results by photo orientation.
Valid values are landscape, portrait, and squarish.
:param page [integer]: Page number to retrieve. (Optional; default: 1)
:param per_page [integer]: Number of items per page. (Optional; default: 10)
:return: [Array]: A single page of the curated Photo list.
:raise UnsplashError: If the given orientation is not in the default orientation values. | Below is the the instruction that describes the task:
### Input:
Get a single page from a photo search.
Optionally limit your search to a set of categories by supplying the category IDs.
Note: If supplying multiple category IDs,
the resulting photos will be those that match all of the given categories,
not ones that match any category.
:param query [string]: Search terms.
:param category [string]: Category ID(s) to filter search. If multiple, comma-separated. (deprecated)
:param orientation [string]: Filter search results by photo orientation.
Valid values are landscape, portrait, and squarish.
:param page [integer]: Page number to retrieve. (Optional; default: 1)
:param per_page [integer]: Number of items per page. (Optional; default: 10)
:return: [Array]: A single page of the curated Photo list.
:raise UnsplashError: If the given orientation is not in the default orientation values.
### Response:
def search(self, query, category=None, orientation=None, page=1, per_page=10):
"""
Get a single page from a photo search.
Optionally limit your search to a set of categories by supplying the category IDs.
Note: If supplying multiple category IDs,
the resulting photos will be those that match all of the given categories,
not ones that match any category.
:param query [string]: Search terms.
:param category [string]: Category ID(s) to filter search. If multiple, comma-separated. (deprecated)
:param orientation [string]: Filter search results by photo orientation.
Valid values are landscape, portrait, and squarish.
:param page [integer]: Page number to retrieve. (Optional; default: 1)
:param per_page [integer]: Number of items per page. (Optional; default: 10)
:return: [Array]: A single page of the curated Photo list.
:raise UnsplashError: If the given orientation is not in the default orientation values.
"""
if orientation and orientation not in self.orientation_values:
raise Exception()
params = {
"query": query,
"category": category,
"orientation": orientation,
"page": page,
"per_page": per_page
}
url = "/photos/search"
result = self._get(url, params=params)
return PhotoModel.parse_list(result) |
def set(self, key, val, timeout=None, using=None):
"""
Set will be using the generational key, so if another thread
bumps this key, the localstore version will still be invalid.
If the key is bumped during a transaction it will be new
to the global cache on commit, so it will still be a bump.
"""
if timeout is None:
timeout = self.timeout
if self.is_managed(using=using) and self._patched_var:
self.local[key] = val
else:
self.cache_backend.set(key, val, timeout) | Set will be using the generational key, so if another thread
bumps this key, the localstore version will still be invalid.
If the key is bumped during a transaction it will be new
to the global cache on commit, so it will still be a bump. | Below is the the instruction that describes the task:
### Input:
Set will be using the generational key, so if another thread
bumps this key, the localstore version will still be invalid.
If the key is bumped during a transaction it will be new
to the global cache on commit, so it will still be a bump.
### Response:
def set(self, key, val, timeout=None, using=None):
"""
Set will be using the generational key, so if another thread
bumps this key, the localstore version will still be invalid.
If the key is bumped during a transaction it will be new
to the global cache on commit, so it will still be a bump.
"""
if timeout is None:
timeout = self.timeout
if self.is_managed(using=using) and self._patched_var:
self.local[key] = val
else:
self.cache_backend.set(key, val, timeout) |
def user_saw_task(self, username, courseid, taskid):
""" Set in the database that the user has viewed this task """
self._database.user_tasks.update({"username": username, "courseid": courseid, "taskid": taskid},
{"$setOnInsert": {"username": username, "courseid": courseid, "taskid": taskid,
"tried": 0, "succeeded": False, "grade": 0.0, "submissionid": None, "state": ""}},
upsert=True) | Set in the database that the user has viewed this task | Below is the the instruction that describes the task:
### Input:
Set in the database that the user has viewed this task
### Response:
def user_saw_task(self, username, courseid, taskid):
""" Set in the database that the user has viewed this task """
self._database.user_tasks.update({"username": username, "courseid": courseid, "taskid": taskid},
{"$setOnInsert": {"username": username, "courseid": courseid, "taskid": taskid,
"tried": 0, "succeeded": False, "grade": 0.0, "submissionid": None, "state": ""}},
upsert=True) |
def client_list_entries_multi_project(
client, to_delete
): # pylint: disable=unused-argument
"""List entries via client across multiple projects."""
# [START client_list_entries_multi_project]
PROJECT_IDS = ["one-project", "another-project"]
for entry in client.list_entries(project_ids=PROJECT_IDS): # API call(s)
do_something_with(entry) | List entries via client across multiple projects. | Below is the the instruction that describes the task:
### Input:
List entries via client across multiple projects.
### Response:
def client_list_entries_multi_project(
client, to_delete
): # pylint: disable=unused-argument
"""List entries via client across multiple projects."""
# [START client_list_entries_multi_project]
PROJECT_IDS = ["one-project", "another-project"]
for entry in client.list_entries(project_ids=PROJECT_IDS): # API call(s)
do_something_with(entry) |
def is_oriented(self):
"""
Returns whether or not the current box is rotated at all.
"""
if util.is_shape(self.primitive.transform, (4, 4)):
return not np.allclose(self.primitive.transform[
0:3, 0:3], np.eye(3))
else:
return False | Returns whether or not the current box is rotated at all. | Below is the the instruction that describes the task:
### Input:
Returns whether or not the current box is rotated at all.
### Response:
def is_oriented(self):
"""
Returns whether or not the current box is rotated at all.
"""
if util.is_shape(self.primitive.transform, (4, 4)):
return not np.allclose(self.primitive.transform[
0:3, 0:3], np.eye(3))
else:
return False |
def import_(self, data):
"""Read JSON from `data`."""
return self.__import(json.loads(data, **self.kwargs)) | Read JSON from `data`. | Below is the the instruction that describes the task:
### Input:
Read JSON from `data`.
### Response:
def import_(self, data):
"""Read JSON from `data`."""
return self.__import(json.loads(data, **self.kwargs)) |
def expand(self, local_search=False):
'''Create successors.'''
new_nodes = []
for action in self.problem.actions(self.state):
new_state = self.problem.result(self.state, action)
cost = self.problem.cost(self.state,
action,
new_state)
nodefactory = self.__class__
new_nodes.append(nodefactory(state=new_state,
parent=None if local_search else self,
problem=self.problem,
action=action,
cost=self.cost + cost,
depth=self.depth + 1))
return new_nodes | Create successors. | Below is the the instruction that describes the task:
### Input:
Create successors.
### Response:
def expand(self, local_search=False):
'''Create successors.'''
new_nodes = []
for action in self.problem.actions(self.state):
new_state = self.problem.result(self.state, action)
cost = self.problem.cost(self.state,
action,
new_state)
nodefactory = self.__class__
new_nodes.append(nodefactory(state=new_state,
parent=None if local_search else self,
problem=self.problem,
action=action,
cost=self.cost + cost,
depth=self.depth + 1))
return new_nodes |
def dump(self):
"""Dump the image data"""
scan_lines = bytearray()
for y in range(self.height):
scan_lines.append(0) # filter type 0 (None)
scan_lines.extend(
self.canvas[(y * self.width * 4):((y + 1) * self.width * 4)]
)
# image represented as RGBA tuples, no interlacing
return SIGNATURE + \
self.pack_chunk(b'IHDR', struct.pack(b"!2I5B",
self.width, self.height,
8, 6, 0, 0, 0)) + \
self.pack_chunk(b'IDAT', zlib.compress(bytes(scan_lines), 9)) + \
self.pack_chunk(b'IEND', b'') | Dump the image data | Below is the the instruction that describes the task:
### Input:
Dump the image data
### Response:
def dump(self):
"""Dump the image data"""
scan_lines = bytearray()
for y in range(self.height):
scan_lines.append(0) # filter type 0 (None)
scan_lines.extend(
self.canvas[(y * self.width * 4):((y + 1) * self.width * 4)]
)
# image represented as RGBA tuples, no interlacing
return SIGNATURE + \
self.pack_chunk(b'IHDR', struct.pack(b"!2I5B",
self.width, self.height,
8, 6, 0, 0, 0)) + \
self.pack_chunk(b'IDAT', zlib.compress(bytes(scan_lines), 9)) + \
self.pack_chunk(b'IEND', b'') |
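dump depends on a pack_chunk helper that is not shown in this snippet. Per the PNG specification a chunk is a big-endian 4-byte length, the chunk type, the data, and a CRC-32 over type plus data, so a plausible implementation would be:

import struct
import zlib

def pack_chunk(self, chunk_type, data):
    # 4-byte big-endian length of the data, then type, data, and
    # CRC-32 computed over the chunk type and data fields
    crc = zlib.crc32(chunk_type + data) & 0xffffffff
    return struct.pack('!I', len(data)) + chunk_type + data + struct.pack('!I', crc)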
def list_descriptors(self):
"""Return list of GATT descriptors that have been discovered for this
characteristic.
"""
paths = self._props.Get(_CHARACTERISTIC_INTERFACE, 'Descriptors')
return map(BluezGattDescriptor,
get_provider()._get_objects_by_path(paths)) | Return list of GATT descriptors that have been discovered for this
characteristic. | Below is the the instruction that describes the task:
### Input:
Return list of GATT descriptors that have been discovered for this
characteristic.
### Response:
def list_descriptors(self):
"""Return list of GATT descriptors that have been discovered for this
characteristic.
"""
paths = self._props.Get(_CHARACTERISTIC_INTERFACE, 'Descriptors')
return map(BluezGattDescriptor,
get_provider()._get_objects_by_path(paths)) |
def get_apps_api():
"""
Create instance of Apps V1 API of kubernetes:
https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/AppsV1Api.md
:return: instance of client
"""
global apps_api
if apps_api is None:
config.load_kube_config()
if API_KEY is not None:
# Configure API key authorization: BearerToken
configuration = client.Configuration()
configuration.api_key['authorization'] = API_KEY
configuration.api_key_prefix['authorization'] = 'Bearer'
apps_api = client.AppsV1Api(client.ApiClient(configuration))
else:
apps_api = client.AppsV1Api()
return apps_api | Create instance of Apps V1 API of kubernetes:
https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/AppsV1Api.md
:return: instance of client | Below is the the instruction that describes the task:
### Input:
Create instance of Apps V1 API of kubernetes:
https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/AppsV1Api.md
:return: instance of client
### Response:
def get_apps_api():
"""
Create instance of Apps V1 API of kubernetes:
https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/AppsV1Api.md
:return: instance of client
"""
global apps_api
if apps_api is None:
config.load_kube_config()
if API_KEY is not None:
# Configure API key authorization: BearerToken
configuration = client.Configuration()
configuration.api_key['authorization'] = API_KEY
configuration.api_key_prefix['authorization'] = 'Bearer'
apps_api = client.AppsV1Api(client.ApiClient(configuration))
else:
apps_api = client.AppsV1Api()
return apps_api |
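Typical use, assuming a reachable cluster and a valid kubeconfig:

api = get_apps_api()
for deployment in api.list_deployment_for_all_namespaces().items:
    print(deployment.metadata.namespace, deployment.metadata.name)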
def interpolate_sysenv(line, defaults={}):
'''
Format line with system environment variables + defaults
'''
map = ChainMap(os.environ, defaults)
    return line.format(**map) | Format line with system environment variables + defaults | Below is the the instruction that describes the task:
### Input:
Format line with system environment variables + defaults
### Response:
def interpolate_sysenv(line, defaults={}):
'''
Format line with system environment variables + defaults
'''
map = ChainMap(os.environ, defaults)
return line.format(**map) |
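Because the ChainMap consults os.environ first, a set environment variable overrides the fallback; for example:

import os
os.environ.pop('PGHOST', None)  # make sure the variables are unset
os.environ.pop('PGPORT', None)
line = interpolate_sysenv('{PGHOST}:{PGPORT}', {'PGHOST': 'localhost', 'PGPORT': '5432'})
assert line == 'localhost:5432'  # defaults fill in for unset variables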
def container_query(self, query, quiet=False):
'''search for a specific container.
This function would likely be similar to the above, but have different
filter criteria from the user (based on the query)
'''
results = self._list_containers()
matches = []
for result in results:
for key,val in result.metadata.items():
if query in val and result not in matches:
matches.append(result)
if not quiet:
bot.info("[gs://%s] Found %s containers" %(self._bucket_name,len(matches)))
for image in matches:
size = round(image.size / (1024*1024.0))
bot.custom(prefix=image.name, color="CYAN")
bot.custom(prefix='id: ', message=image.id)
bot.custom(prefix='uri: ', message=image.metadata['name'])
bot.custom(prefix='updated:', message=image.updated)
bot.custom(prefix='size: ', message=' %s MB' %(size))
bot.custom(prefix='md5: ', message=image.md5_hash)
if "public_url" in image.metadata:
public_url = image.metadata['public_url']
bot.custom(prefix='url: ', message=public_url)
bot.newline()
return matches | search for a specific container.
This function would likely be similar to the above, but have different
filter criteria from the user (based on the query) | Below is the the instruction that describes the task:
### Input:
search for a specific container.
This function would likely be similar to the above, but have different
filter criteria from the user (based on the query)
### Response:
def container_query(self, query, quiet=False):
'''search for a specific container.
This function would likely be similar to the above, but have different
filter criteria from the user (based on the query)
'''
results = self._list_containers()
matches = []
for result in results:
for key,val in result.metadata.items():
if query in val and result not in matches:
matches.append(result)
if not quiet:
bot.info("[gs://%s] Found %s containers" %(self._bucket_name,len(matches)))
for image in matches:
size = round(image.size / (1024*1024.0))
bot.custom(prefix=image.name, color="CYAN")
bot.custom(prefix='id: ', message=image.id)
bot.custom(prefix='uri: ', message=image.metadata['name'])
bot.custom(prefix='updated:', message=image.updated)
bot.custom(prefix='size: ', message=' %s MB' %(size))
bot.custom(prefix='md5: ', message=image.md5_hash)
if "public_url" in image.metadata:
public_url = image.metadata['public_url']
bot.custom(prefix='url: ', message=public_url)
bot.newline()
return matches |
def notify_created(room, event, user):
"""Notifies about the creation of a chatroom.
:param room: the chatroom
:param event: the event
:param user: the user performing the action
"""
tpl = get_plugin_template_module('emails/created.txt', chatroom=room, event=event, user=user)
_send(event, tpl) | Notifies about the creation of a chatroom.
:param room: the chatroom
:param event: the event
:param user: the user performing the action | Below is the the instruction that describes the task:
### Input:
Notifies about the creation of a chatroom.
:param room: the chatroom
:param event: the event
:param user: the user performing the action
### Response:
def notify_created(room, event, user):
"""Notifies about the creation of a chatroom.
:param room: the chatroom
:param event: the event
:param user: the user performing the action
"""
tpl = get_plugin_template_module('emails/created.txt', chatroom=room, event=event, user=user)
_send(event, tpl) |
def chains(xs, labels=None, truths=None, truth_color=u"#4682b4", burn=None,
alpha=0.5, fig=None):
"""
Create a plot showing the walker values for each parameter at every step.
:param xs:
The samples. This should be a 3D :class:`numpy.ndarray` of size
(``n_walkers``, ``n_steps``, ``n_parameters``).
:type xs:
:class:`numpy.ndarray`
:param labels: [optional]
Labels for all the parameters.
:type labels:
iterable of strings or None
:param truths: [optional]
Reference values to indicate on the plots.
:type truths:
iterable of floats or None
:param truth_color: [optional]
A ``matplotlib`` style color for the ``truths`` markers.
:param burn: [optional]
Reference step to indicate on the plots.
:type burn:
integer or None
:param alpha: [optional]
Transparency of individual walker lines between zero and one.
:type alpha:
float
:param fig: [optional]
Overplot onto the provided figure object.
:type fig:
:class:`matplotlib.Figure` or None
:raises ValueError:
If a ``fig`` is provided with the incorrect number of axes.
:returns:
The chain figure.
:rtype:
:class:`matplotlib.Figure`
"""
n_walkers, n_steps, K = xs.shape
if labels is not None:
assert len(labels) == K
if truths is not None:
assert len(truths) == K
factor = 2.0
lbdim = 0.5 * factor
trdim = 0.2 * factor
whspace = 0.10
width = 15.
height = factor*K + factor * (K - 1.) * whspace
dimy = lbdim + height + trdim
dimx = lbdim + width + trdim
if fig is None:
fig, axes = plt.subplots(K, 1, figsize=(dimx, dimy))
else:
try:
axes = np.array(fig.axes).reshape((1, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"parameters K={1}".format(len(fig.axes), K))
lm = lbdim / dimx
bm = lbdim / dimy
trm = (lbdim + height) / dimy
fig.subplots_adjust(left=lm, bottom=bm, right=trm, top=trm,
wspace=whspace, hspace=whspace)
if K == 1:
axes = [axes]
for k, ax in enumerate(axes):
for walker in range(n_walkers):
ax.plot(xs[walker, :, k], color="k", alpha=alpha)
if burn is not None:
ax.axvline(burn, color="k", linestyle=":")
if truths is not None:
ax.axhline(truths[k], color=truth_color, lw=2)
ax.set_xlim(0, n_steps)
if k < K - 1:
ax.set_xticklabels([])
else:
ax.set_xlabel("Step")
ax.yaxis.set_major_locator(MaxNLocator(4))
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[k])
ax.yaxis.set_label_coords(-0.05, 0.5)
return fig | Create a plot showing the walker values for each parameter at every step.
:param xs:
The samples. This should be a 3D :class:`numpy.ndarray` of size
(``n_walkers``, ``n_steps``, ``n_parameters``).
:type xs:
:class:`numpy.ndarray`
:param labels: [optional]
Labels for all the parameters.
:type labels:
iterable of strings or None
:param truths: [optional]
Reference values to indicate on the plots.
:type truths:
iterable of floats or None
:param truth_color: [optional]
A ``matplotlib`` style color for the ``truths`` markers.
:param burn: [optional]
Reference step to indicate on the plots.
:type burn:
integer or None
:param alpha: [optional]
Transparency of individual walker lines between zero and one.
:type alpha:
float
:param fig: [optional]
Overplot onto the provided figure object.
:type fig:
:class:`matplotlib.Figure` or None
:raises ValueError:
If a ``fig`` is provided with the incorrect number of axes.
:returns:
The chain figure.
:rtype:
:class:`matplotlib.Figure` | Below is the the instruction that describes the task:
### Input:
Create a plot showing the walker values for each parameter at every step.
:param xs:
The samples. This should be a 3D :class:`numpy.ndarray` of size
(``n_walkers``, ``n_steps``, ``n_parameters``).
:type xs:
:class:`numpy.ndarray`
:param labels: [optional]
Labels for all the parameters.
:type labels:
iterable of strings or None
:param truths: [optional]
Reference values to indicate on the plots.
:type truths:
iterable of floats or None
:param truth_color: [optional]
A ``matplotlib`` style color for the ``truths`` markers.
:param burn: [optional]
Reference step to indicate on the plots.
:type burn:
integer or None
:param alpha: [optional]
Transparency of individual walker lines between zero and one.
:type alpha:
float
:param fig: [optional]
Overplot onto the provided figure object.
:type fig:
:class:`matplotlib.Figure` or None
:raises ValueError:
If a ``fig`` is provided with the incorrect number of axes.
:returns:
The chain figure.
:rtype:
:class:`matplotlib.Figure`
### Response:
def chains(xs, labels=None, truths=None, truth_color=u"#4682b4", burn=None,
alpha=0.5, fig=None):
"""
Create a plot showing the walker values for each parameter at every step.
:param xs:
The samples. This should be a 3D :class:`numpy.ndarray` of size
(``n_walkers``, ``n_steps``, ``n_parameters``).
:type xs:
:class:`numpy.ndarray`
:param labels: [optional]
Labels for all the parameters.
:type labels:
iterable of strings or None
:param truths: [optional]
Reference values to indicate on the plots.
:type truths:
iterable of floats or None
:param truth_color: [optional]
A ``matplotlib`` style color for the ``truths`` markers.
:param burn: [optional]
Reference step to indicate on the plots.
:type burn:
integer or None
:param alpha: [optional]
Transparency of individual walker lines between zero and one.
:type alpha:
float
:param fig: [optional]
Overplot onto the provided figure object.
:type fig:
:class:`matplotlib.Figure` or None
:raises ValueError:
If a ``fig`` is provided with the incorrect number of axes.
:returns:
The chain figure.
:rtype:
:class:`matplotlib.Figure`
"""
n_walkers, n_steps, K = xs.shape
if labels is not None:
assert len(labels) == K
if truths is not None:
assert len(truths) == K
factor = 2.0
lbdim = 0.5 * factor
trdim = 0.2 * factor
whspace = 0.10
width = 15.
height = factor*K + factor * (K - 1.) * whspace
dimy = lbdim + height + trdim
dimx = lbdim + width + trdim
if fig is None:
fig, axes = plt.subplots(K, 1, figsize=(dimx, dimy))
else:
try:
axes = np.array(fig.axes).reshape((1, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"parameters K={1}".format(len(fig.axes), K))
lm = lbdim / dimx
bm = lbdim / dimy
trm = (lbdim + height) / dimy
fig.subplots_adjust(left=lm, bottom=bm, right=trm, top=trm,
wspace=whspace, hspace=whspace)
if K == 1:
axes = [axes]
for k, ax in enumerate(axes):
for walker in range(n_walkers):
ax.plot(xs[walker, :, k], color="k", alpha=alpha)
if burn is not None:
ax.axvline(burn, color="k", linestyle=":")
if truths is not None:
ax.axhline(truths[k], color=truth_color, lw=2)
ax.set_xlim(0, n_steps)
if k < K - 1:
ax.set_xticklabels([])
else:
ax.set_xlabel("Step")
ax.yaxis.set_major_locator(MaxNLocator(4))
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[k])
ax.yaxis.set_label_coords(-0.05, 0.5)
return fig |
def read_code(self, address, size=1):
"""
Read size bytes from the bytecode.
If fewer than size bytes are available, the result will be padded with \x00
"""
assert address < len(self.bytecode)
value = self.bytecode[address:address + size]
if len(value) < size:
value += '\x00' * (size - len(value)) # pad with null (spec)
    return value | Read size bytes from the bytecode.
If fewer than size bytes are available, the result will be padded with \x00 | Below is the the instruction that describes the task:
### Input:
Read size bytes from the bytecode.
If fewer than size bytes are available, the result will be padded with \x00
### Response:
def read_code(self, address, size=1):
"""
Read size bytes from the bytecode.
If fewer than size bytes are available, the result will be padded with \x00
"""
assert address < len(self.bytecode)
value = self.bytecode[address:address + size]
if len(value) < size:
value += '\x00' * (size - len(value)) # pad with null (spec)
return value |
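Reading past the end therefore always yields a window of exactly size bytes; a tiny harness shows the padding (the class here is only for demonstration):

class _Demo(object):
    bytecode = 'ABC'
    read_code = read_code  # reuse the function above as a method

assert _Demo().read_code(1, 4) == 'BC\x00\x00'  # two real bytes, two NUL pads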
def pad(text, bits=32):
"""
Pads the inputted text to ensure it fits the proper block length
for encryption.
:param text | <str>
bits | <int>
:return <str>
"""
return text + (bits - len(text) % bits) * chr(bits - len(text) % bits) | Pads the inputted text to ensure it fits the proper block length
for encryption.
:param text | <str>
bits | <int>
:return <str> | Below is the the instruction that describes the task:
### Input:
Pads the inputted text to ensure it fits the proper block length
for encryption.
:param text | <str>
bits | <int>
:return <str>
### Response:
def pad(text, bits=32):
"""
Pads the inputted text to ensure it fits the proper block length
for encryption.
:param text | <str>
bits | <int>
:return <str>
"""
return text + (bits - len(text) % bits) * chr(bits - len(text) % bits) |
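This is PKCS#7-style padding generalized to an arbitrary block size: the pad character itself encodes the pad length, so stripping is symmetric. A round-trip sketch:

padded = pad('secret', bits=16)
assert len(padded) % 16 == 0
assert padded[:-ord(padded[-1])] == 'secret'  # last char tells how much to strip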
def tree(string, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):
""" Transforms the output of parse() into a Text object.
The token parameter lists the order of tags in each token in the input string.
"""
return Text(string, token) | Transforms the output of parse() into a Text object.
The token parameter lists the order of tags in each token in the input string. | Below is the the instruction that describes the task:
### Input:
Transforms the output of parse() into a Text object.
The token parameter lists the order of tags in each token in the input string.
### Response:
def tree(string, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]):
""" Transforms the output of parse() into a Text object.
The token parameter lists the order of tags in each token in the input string.
"""
return Text(string, token) |
def url_to_tile(url):
"""
Extracts tile name, date and AWS index from tile url on AWS.
:param url: class input parameter 'metafiles'
:type url: str
:return: Name of tile, date and AWS index which uniquely identifies tile on AWS
:rtype: (str, str, int)
"""
info = url.strip('/').split('/')
name = ''.join(info[-7: -4])
date = '-'.join(info[-4: -1])
return name, date, int(info[-1]) | Extracts tile name, date and AWS index from tile url on AWS.
:param url: class input parameter 'metafiles'
:type url: str
:return: Name of tile, date and AWS index which uniquely identifies tile on AWS
:rtype: (str, str, int) | Below is the the instruction that describes the task:
### Input:
Extracts tile name, date and AWS index from tile url on AWS.
:param url: class input parameter 'metafiles'
:type url: str
:return: Name of tile, date and AWS index which uniquely identifies tile on AWS
:rtype: (str, str, int)
### Response:
def url_to_tile(url):
"""
Extracts tile name, date and AWS index from tile url on AWS.
:param url: class input parameter 'metafiles'
:type url: str
:return: Name of tile, date and AWS index which uniquely identifies tile on AWS
:rtype: (str, str, int)
"""
info = url.strip('/').split('/')
name = ''.join(info[-7: -4])
date = '-'.join(info[-4: -1])
return name, date, int(info[-1]) |
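A usage sketch with a representative (made-up) Sentinel-2 L1C tile URL following the standard tiles/<UTM zone>/<latitude band>/<grid square>/<year>/<month>/<day>/<AWS index> bucket layout:

url = ('http://sentinel-s2-l1c.s3.amazonaws.com/'
       'tiles/10/S/DG/2015/12/7/0/')
name, date, aws_index = url_to_tile(url)
print(name, date, aws_index)  # 10SDG 2015-12-7 0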
def split_unescaped(char, string, include_empty_strings=False):
'''
:param char: The character on which to split the string
:type char: string
:param string: The string to split
:type string: string
:returns: List of substrings of *string*
:rtype: list of strings
Splits *string* whenever *char* appears without an odd number of
backslashes ('\\') preceding it, discarding any empty string
elements.
'''
words = []
pos = len(string)
lastpos = pos
while pos >= 0:
pos = get_last_pos_of_char(char, string[:lastpos])
if pos >= 0:
if pos + 1 != lastpos or include_empty_strings:
words.append(string[pos + 1: lastpos])
lastpos = pos
if lastpos != 0 or include_empty_strings:
words.append(string[:lastpos])
words.reverse()
return words | :param char: The character on which to split the string
:type char: string
:param string: The string to split
:type string: string
:returns: List of substrings of *string*
:rtype: list of strings
Splits *string* whenever *char* appears without an odd number of
backslashes ('\\') preceding it, discarding any empty string
elements. | Below is the instruction that describes the task:
### Input:
:param char: The character on which to split the string
:type char: string
:param string: The string to split
:type string: string
:returns: List of substrings of *string*
:rtype: list of strings
Splits *string* whenever *char* appears without an odd number of
backslashes ('\\') preceding it, discarding any empty string
elements.
### Response:
def split_unescaped(char, string, include_empty_strings=False):
'''
:param char: The character on which to split the string
:type char: string
:param string: The string to split
:type string: string
:returns: List of substrings of *string*
:rtype: list of strings
Splits *string* whenever *char* appears without an odd number of
backslashes ('\\') preceding it, discarding any empty string
elements.
'''
words = []
pos = len(string)
lastpos = pos
while pos >= 0:
pos = get_last_pos_of_char(char, string[:lastpos])
if pos >= 0:
if pos + 1 != lastpos or include_empty_strings:
words.append(string[pos + 1: lastpos])
lastpos = pos
if lastpos != 0 or include_empty_strings:
words.append(string[:lastpos])
words.reverse()
return words |
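split_unescaped() calls a get_last_pos_of_char() helper defined elsewhere in its module; the stand-in below is an assumption matching the behaviour the docstring implies (last occurrence of char not preceded by an odd run of backslashes), included only so the example runs on its own. Passing include_empty_strings=True keeps the empty elements the docstring says are discarded:

def get_last_pos_of_char(char, string):
    # assumed re-implementation of the module's own helper
    pos = len(string)
    while pos > 0:
        pos = string.rfind(char, 0, pos)
        if pos < 0:
            return -1
        backslashes = 0
        i = pos - 1
        while i >= 0 and string[i] == '\\':
            backslashes += 1
            i -= 1
        if backslashes % 2 == 0:  # an even run means char is unescaped
            return pos
    return -1

print(split_unescaped(':', 'a:b\\:c:d'))  # ['a', 'b\\:c', 'd']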
def fast_sync_sign_snapshot( snapshot_path, private_key, first=False ):
"""
Append a signature to the end of a snapshot path
with the given private key.
If first is True, then don't expect the signature trailer.
Return True on success
Return False on error
"""
if not os.path.exists(snapshot_path):
log.error("No such file or directory: {}".format(snapshot_path))
return False
file_size = 0
payload_size = 0
write_offset = 0
try:
sb = os.stat(snapshot_path)
file_size = sb.st_size
assert file_size > 8
except Exception as e:
log.exception(e)
return False
num_sigs = 0
snapshot_hash = None
with open(snapshot_path, 'r+') as f:
if not first:
info = fast_sync_inspect(f)
if 'error' in info:
log.error("Failed to inspect {}: {}".format(snapshot_path, info['error']))
return False
num_sigs = len(info['signatures'])
write_offset = info['sig_append_offset']
payload_size = info['payload_size']
else:
# no one has signed yet.
write_offset = file_size
num_sigs = 0
payload_size = file_size
# hash the file and sign the (bin-encoded) hash
privkey_hex = keylib.ECPrivateKey(private_key).to_hex()
hash_hex = get_file_hash( f, hashlib.sha256, fd_len=payload_size )
sigb64 = sign_digest( hash_hex, privkey_hex, hashfunc=hashlib.sha256 )
if BLOCKSTACK_TEST:
log.debug("Signed {} with {} to make {}".format(hash_hex, keylib.ECPrivateKey(private_key).public_key().to_hex(), sigb64))
# append
f.seek(write_offset, os.SEEK_SET)
f.write(sigb64)
f.write('{:08x}'.format(len(sigb64)))
# append number of signatures
num_sigs += 1
f.write('{:08x}'.format(num_sigs))
f.flush()
os.fsync(f.fileno())
return True | Append a signature to the end of a snapshot path
with the given private key.
If first is True, then don't expect the signature trailer.
Return True on success
Return False on error | Below is the instruction that describes the task:
### Input:
Append a signature to the end of a snapshot path
with the given private key.
If first is True, then don't expect the signature trailer.
Return True on success
Return False on error
### Response:
def fast_sync_sign_snapshot( snapshot_path, private_key, first=False ):
"""
Append a signature to the end of a snapshot path
with the given private key.
If first is True, then don't expect the signature trailer.
Return True on success
Return False on error
"""
if not os.path.exists(snapshot_path):
log.error("No such file or directory: {}".format(snapshot_path))
return False
file_size = 0
payload_size = 0
write_offset = 0
try:
sb = os.stat(snapshot_path)
file_size = sb.st_size
assert file_size > 8
except Exception as e:
log.exception(e)
return False
num_sigs = 0
snapshot_hash = None
with open(snapshot_path, 'r+') as f:
if not first:
info = fast_sync_inspect(f)
if 'error' in info:
log.error("Failed to inspect {}: {}".format(snapshot_path, info['error']))
return False
num_sigs = len(info['signatures'])
write_offset = info['sig_append_offset']
payload_size = info['payload_size']
else:
# no one has signed yet.
write_offset = file_size
num_sigs = 0
payload_size = file_size
# hash the file and sign the (bin-encoded) hash
privkey_hex = keylib.ECPrivateKey(private_key).to_hex()
hash_hex = get_file_hash( f, hashlib.sha256, fd_len=payload_size )
sigb64 = sign_digest( hash_hex, privkey_hex, hashfunc=hashlib.sha256 )
if BLOCKSTACK_TEST:
log.debug("Signed {} with {} to make {}".format(hash_hex, keylib.ECPrivateKey(private_key).public_key().to_hex(), sigb64))
# append
f.seek(write_offset, os.SEEK_SET)
f.write(sigb64)
f.write('{:08x}'.format(len(sigb64)))
# append number of signatures
num_sigs += 1
f.write('{:08x}'.format(num_sigs))
f.flush()
os.fsync(f.fileno())
return True |
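A hedged usage sketch: the snapshot path is a placeholder, and keylib plus the module's own helpers (fast_sync_inspect, get_file_hash, sign_digest) must already be importable for any of this to run. Each successful call appends a base64 signature, an 8-hex-digit signature length, and an 8-hex-digit running signature count to the file:

import keylib  # same dependency the function itself uses

snapshot_path = '/tmp/snapshot.bsk'           # hypothetical path
signing_key = keylib.ECPrivateKey().to_hex()  # throwaway random key
# first=True: the file is still a bare payload with no trailer yet
if fast_sync_sign_snapshot(snapshot_path, signing_key, first=True):
    print('appended signature #1 to {}'.format(snapshot_path))
# later signers pass first=False so the existing trailer is re-read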
def skips(self, user):
"""
Skips for user. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/ticket_skips>`__.
"""
return self._get(self._build_url(self.endpoint.skips(id=user))) | Skips for user. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/ticket_skips>`__. | Below is the the instruction that describes the task:
### Input:
Skips for user. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/ticket_skips>`__.
### Response:
def skips(self, user):
"""
Skips for user. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/ticket_skips>`__.
"""
return self._get(self._build_url(self.endpoint.skips(id=user))) |
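This skips() method comes from the Zenpy Zendesk client. A sketch follows, where the credentials, the user ID, and the attribute path on the client are all placeholders rather than confirmed API surface; the Skip fields printed below follow the Zendesk ticket_skips documentation:

from zenpy import Zenpy  # assumes the Zenpy package is installed

client = Zenpy(subdomain='example', email='agent@example.com',
               token='API_TOKEN')
for skip in client.skips(user=12345):  # hypothetical agent ID
    print(skip.ticket_id, skip.reason)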