code | docstring | text
---|---|---|
def json_schema(self, schema_id=None, is_main_schema=True):
"""Generate a draft-07 JSON schema dict representing the Schema.
This method can only be called when the Schema's value is a dict.
This method must be called with a schema_id; calling it without one
is reserved for recursive calls on sub-schemas."""
Schema = self.__class__
s = self._schema
i = self._ignore_extra_keys
flavor = _priority(s)
if flavor != DICT and is_main_schema:
raise ValueError("The main schema must be a dict.")
if flavor == TYPE:
# Handle type
return {"type": {int: "integer", float: "number", bool: "boolean"}.get(s, "string")}
elif flavor == ITERABLE and len(s) == 1:
# Handle arrays of a single type or dict schema
return {"type": "array", "items": Schema(s[0]).json_schema(is_main_schema=False)}
elif isinstance(s, Or):
# Handle Or values
values = [Schema(or_key).json_schema(is_main_schema=False) for or_key in s._args]
any_of = []
for value in values:
if value not in any_of:
any_of.append(value)
return {"anyOf": any_of}
if flavor != DICT:
# If not handled, do not check
return {}
if is_main_schema and not schema_id:
raise ValueError("schema_id is required.")
# Handle dict
required_keys = []
expanded_schema = {}
for key in s:
if isinstance(key, Hook):
continue
if isinstance(s[key], Schema):
sub_schema = s[key]
else:
sub_schema = Schema(s[key], ignore_extra_keys=i)
sub_schema_json = sub_schema.json_schema(is_main_schema=False)
is_optional = False
if isinstance(key, Optional):
key = key._schema
is_optional = True
if isinstance(key, str):
if not is_optional:
required_keys.append(key)
expanded_schema[key] = sub_schema_json
elif isinstance(key, Or):
for or_key in key._args:
expanded_schema[or_key] = sub_schema_json
schema_dict = {
"type": "object",
"properties": expanded_schema,
"required": required_keys,
"additionalProperties": i,
}
if is_main_schema:
schema_dict.update({"id": schema_id, "$schema": "http://json-schema.org/draft-07/schema#"})
return schema_dict | Generate a draft-07 JSON schema dict representing the Schema.
This method can only be called when the Schema's value is a dict.
This method must be called with a schema_id; calling it without one
is reserved for recursive calls on sub-schemas. | Below is the instruction that describes the task:
### Input:
Generate a draft-07 JSON schema dict representing the Schema.
This method can only be called when the Schema's value is a dict.
This method must be called with a schema_id; calling it without one
is reserved for recursive calls on sub-schemas.
### Response:
def json_schema(self, schema_id=None, is_main_schema=True):
"""Generate a draft-07 JSON schema dict representing the Schema.
This method can only be called when the Schema's value is a dict.
This method must be called with a schema_id; calling it without one
is reserved for recursive calls on sub-schemas."""
Schema = self.__class__
s = self._schema
i = self._ignore_extra_keys
flavor = _priority(s)
if flavor != DICT and is_main_schema:
raise ValueError("The main schema must be a dict.")
if flavor == TYPE:
# Handle type
return {"type": {int: "integer", float: "number", bool: "boolean"}.get(s, "string")}
elif flavor == ITERABLE and len(s) == 1:
# Handle arrays of a single type or dict schema
return {"type": "array", "items": Schema(s[0]).json_schema(is_main_schema=False)}
elif isinstance(s, Or):
# Handle Or values
values = [Schema(or_key).json_schema(is_main_schema=False) for or_key in s._args]
any_of = []
for value in values:
if value not in any_of:
any_of.append(value)
return {"anyOf": any_of}
if flavor != DICT:
# If not handled, do not check
return {}
if is_main_schema and not schema_id:
raise ValueError("schema_id is required.")
# Handle dict
required_keys = []
expanded_schema = {}
for key in s:
if isinstance(key, Hook):
continue
if isinstance(s[key], Schema):
sub_schema = s[key]
else:
sub_schema = Schema(s[key], ignore_extra_keys=i)
sub_schema_json = sub_schema.json_schema(is_main_schema=False)
is_optional = False
if isinstance(key, Optional):
key = key._schema
is_optional = True
if isinstance(key, str):
if not is_optional:
required_keys.append(key)
expanded_schema[key] = sub_schema_json
elif isinstance(key, Or):
for or_key in key._args:
expanded_schema[or_key] = sub_schema_json
schema_dict = {
"type": "object",
"properties": expanded_schema,
"required": required_keys,
"additionalProperties": i,
}
if is_main_schema:
schema_dict.update({"id": schema_id, "$schema": "http://json-schema.org/draft-07/schema#"})
return schema_dict |
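A minimal usage sketch of the method above, assuming the `schema` package it appears to come from; the dict keys and the schema_id value are illustrative:

from schema import Schema, Optional

person = Schema({"name": str, Optional("age"): int})
result = person.json_schema(schema_id="http://example.com/person.json")
# Expected shape, per the code above:
# {"type": "object",
#  "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
#  "required": ["name"],
#  "additionalProperties": False,
#  "id": "http://example.com/person.json",
#  "$schema": "http://json-schema.org/draft-07/schema#"}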
def getOr(subject, predicate, *args, **kwargs):
""" Retrieve a metadata node or generate a new one
:param subject: Subject to which the metadata node should be connected
:param predicate: Predicate by which the metadata node should be connected
:return: Metadata for given node
:rtype: Metadata
"""
if (subject, predicate, None) in get_graph():
return Metadata(node=get_graph().objects(subject, predicate).__next__())
return Metadata(*args, **kwargs) | Retrieve a metadata node or generate a new one
:param subject: Subject to which the metadata node should be connected
:param predicate: Predicate by which the metadata node should be connected
:return: Metadata for given node
:rtype: Metadata | Below is the instruction that describes the task:
### Input:
Retrieve a metadata node or generate a new one
:param subject: Subject to which the metadata node should be connected
:param predicate: Predicate by which the metadata node should be connected
:return: Metadata for given node
:rtype: Metadata
### Response:
def getOr(subject, predicate, *args, **kwargs):
""" Retrieve a metadata node or generate a new one
:param subject: Subject to which the metadata node should be connected
:param predicate: Predicate by which the metadata node should be connected
:return: Metadata for given node
:rtype: Metadata
"""
if (subject, predicate, None) in get_graph():
return Metadata(node=get_graph().objects(subject, predicate).__next__())
return Metadata(*args, **kwargs) |
def pop(self, *args):
"""remove and return item at index (default last)."""
value = list.pop(self, *args)
index = self._dict.pop(value.id)
# If the pop occurred from a location other than the end of the list,
# we will need to subtract 1 from every entry afterwards
if len(args) == 0 or args == (-1,): # removing from the end of the list
return value
_dict = self._dict
for i, j in iteritems(_dict):
if j > index:
_dict[i] = j - 1
return value | remove and return item at index (default last). | Below is the instruction that describes the task:
### Input:
remove and return item at index (default last).
### Response:
def pop(self, *args):
"""remove and return item at index (default last)."""
value = list.pop(self, *args)
index = self._dict.pop(value.id)
# If the pop occurred from a location other than the end of the list,
# we will need to subtract 1 from every entry afterwards
if len(args) == 0 or args == (-1,): # removing from the end of the list
return value
_dict = self._dict
for i, j in iteritems(_dict):
if j > index:
_dict[i] = j - 1
return value |
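The dict maintained alongside the list maps each element's id to its position, so removing from anywhere but the tail forces a shift; the same re-indexing step in a self-contained sketch (the container and ids are illustrative):

class Item:
    def __init__(self, id_):
        self.id = id_

items = [Item("a"), Item("b"), Item("c")]
positions = {item.id: i for i, item in enumerate(items)}  # {"a": 0, "b": 1, "c": 2}
removed = items.pop(0)
index = positions.pop(removed.id)
for key, pos in positions.items():  # entries past the hole shift left by one
    if pos > index:
        positions[key] = pos - 1
# positions == {"b": 0, "c": 1}, matching the new list order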
def delete_disk(kwargs=None, conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Delete a specific disk associated with the account
CLI Examples:
.. code-block:: bash
salt-cloud -f delete_disk my-azure name=my_disk
salt-cloud -f delete_disk my-azure name=my_disk delete_vhd=True
'''
if call != 'function':
raise SaltCloudSystemExit(
'The delete_disk function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
if 'name' not in kwargs:
raise SaltCloudSystemExit('A name must be specified as "name"')
if not conn:
conn = get_conn()
try:
data = conn.delete_disk(kwargs['name'], kwargs.get('delete_vhd', False))
return {'Success': 'The disk was successfully deleted'}
except AzureMissingResourceHttpError as exc:
raise SaltCloudSystemExit('{0}: {1}'.format(kwargs['name'], exc.message)) | .. versionadded:: 2015.8.0
Delete a specific disk associated with the account
CLI Examples:
.. code-block:: bash
salt-cloud -f delete_disk my-azure name=my_disk
salt-cloud -f delete_disk my-azure name=my_disk delete_vhd=True | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2015.8.0
Delete a specific disk associated with the account
CLI Examples:
.. code-block:: bash
salt-cloud -f delete_disk my-azure name=my_disk
salt-cloud -f delete_disk my-azure name=my_disk delete_vhd=True
### Response:
def delete_disk(kwargs=None, conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Delete a specific disk associated with the account
CLI Examples:
.. code-block:: bash
salt-cloud -f delete_disk my-azure name=my_disk
salt-cloud -f delete_disk my-azure name=my_disk delete_vhd=True
'''
if call != 'function':
raise SaltCloudSystemExit(
'The delete_disk function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
if 'name' not in kwargs:
raise SaltCloudSystemExit('A name must be specified as "name"')
if not conn:
conn = get_conn()
try:
data = conn.delete_disk(kwargs['name'], kwargs.get('delete_vhd', False))
return {'Success': 'The disk was successfully deleted'}
except AzureMissingResourceHttpError as exc:
raise SaltCloudSystemExit('{0}: {1}'.format(kwargs['name'], exc.message)) |
def slices(self):
"""
The bounding box as a tuple of `slice` objects.
The slice tuple is in numpy axis order (i.e. ``(y, x)``) and
therefore can be used to slice numpy arrays.
"""
return (slice(self.iymin, self.iymax), slice(self.ixmin, self.ixmax)) | The bounding box as a tuple of `slice` objects.
The slice tuple is in numpy axis order (i.e. ``(y, x)``) and
therefore can be used to slice numpy arrays. | Below is the instruction that describes the task:
### Input:
The bounding box as a tuple of `slice` objects.
The slice tuple is in numpy axis order (i.e. ``(y, x)``) and
therefore can be used to slice numpy arrays.
### Response:
def slices(self):
"""
The bounding box as a tuple of `slice` objects.
The slice tuple is in numpy axis order (i.e. ``(y, x)``) and
therefore can be used to slice numpy arrays.
"""
return (slice(self.iymin, self.iymax), slice(self.ixmin, self.ixmax)) |
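A short example of consuming such a slice tuple with numpy; the bounds stand in for the ixmin/ixmax/iymin/iymax attributes of the bounding box:

import numpy as np

data = np.arange(100).reshape(10, 10)
iymin, iymax, ixmin, ixmax = 2, 5, 3, 7  # illustrative bbox bounds
slc = (slice(iymin, iymax), slice(ixmin, ixmax))  # what .slices returns
cutout = data[slc]  # y axis comes first, so the cutout has shape (3, 4)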
def get_installed_classes(cls):
"""
Iterates over installed plugins associated with the `entry_point` and
returns a dictionary of viable ones keyed off of their names.
A viable installed plugin is one that is both loadable *and* a subclass
of the Pluggable subclass in question.
"""
installed_classes = {}
for entry_point in pkg_resources.iter_entry_points(cls.entry_point):
try:
plugin = entry_point.load()
except ImportError as e:
logger.error(
"Could not load plugin %s: %s", entry_point.name, str(e)
)
continue
if not issubclass(plugin, cls):
logger.error(
"Could not load plugin %s:" +
" %s class is not subclass of %s",
entry_point.name, plugin.__name__, cls.__name__
)
continue
if not plugin.validate_dependencies():
logger.error(
"Could not load plugin %s:" +
" %s class dependencies not met",
entry_point.name, plugin.__name__
)
continue
installed_classes[entry_point.name] = plugin
return installed_classes | Iterates over installed plugins associated with the `entry_point` and
returns a dictionary of viable ones keyed off of their names.
A viable installed plugin is one that is both loadable *and* a subclass
of the Pluggable subclass in question. | Below is the instruction that describes the task:
### Input:
Iterates over installed plugins associated with the `entry_point` and
returns a dictionary of viable ones keyed off of their names.
A viable installed plugin is one that is both loadable *and* a subclass
of the Pluggable subclass in question.
### Response:
def get_installed_classes(cls):
"""
Iterates over installed plugins associated with the `entry_point` and
returns a dictionary of viable ones keyed off of their names.
A viable installed plugin is one that is both loadable *and* a subclass
of the Pluggable subclass in question.
"""
installed_classes = {}
for entry_point in pkg_resources.iter_entry_points(cls.entry_point):
try:
plugin = entry_point.load()
except ImportError as e:
logger.error(
"Could not load plugin %s: %s", entry_point.name, str(e)
)
continue
if not issubclass(plugin, cls):
logger.error(
"Could not load plugin %s:" +
" %s class is not subclass of %s",
entry_point.name, plugin.__name__, cls.__name__
)
continue
if not plugin.validate_dependencies():
logger.error(
"Could not load plugin %s:" +
" %s class dependencies not met",
entry_point.name, plugin.__name__
)
continue
installed_classes[entry_point.name] = plugin
return installed_classes |
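For context, plugins become discoverable by advertising themselves under an entry-point group in their packaging metadata; a hedged sketch with a hypothetical group name (the real group is whatever cls.entry_point holds):

import pkg_resources

# A plugin package would declare, in its setup():
#     entry_points={"myapp.plugins": ["csv = myapp_csv:CsvPlugin"]}
# after which the group can be enumerated:
for ep in pkg_resources.iter_entry_points("myapp.plugins"):
    print(ep.name, "->", ep.module_name)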
def configKeyButtons( self, enableButtons = [], bounceTime = DEF_BOUNCE_TIME_NORMAL, pullUpDown = GPIO.PUD_UP, event = GPIO.BOTH ):
"""!
\~english
Config multi key buttons IO and event on same time
@param enableButtons: an array of key button configs. eg. <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: Default set to DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: Default set to GPIO.PUD_UP
@param event: Default set to GPIO.BOTH. it can be: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~chinese
同时配置多个按键IO和事件
@param enableButtons: 组按键配置 例如: <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: 默认 DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: 默认 GPIO.PUD_UP
@param event: 默认 GPIO.BOTH 它可以是: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~ \n
@see DEF_BOUNCE_TIME_SHORT_MON (10ms)
@see DEF_BOUNCE_TIME_SHORT (50ms)
@see DEF_BOUNCE_TIME_NORMAL (100ms)
@see DEF_BOUNCE_TIME_LONG (200ms)
"""
for key in enableButtons:
self.setKeyButton( key["id"], key["callback"], bounceTime, pullUpDown, event )
pass | !
\~english
Config multi key buttons IO and event on same time
@param enableButtons: an array of key button configs. eg. <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: Default set to DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: Default set to GPIO.PUD_UP
@param event: Default set to GPIO.BOTH. it can be: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~chinese
同时配置多个按键IO和事件
@param enableButtons: 组按键配置 例如: <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: 默认 DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: 默认 GPIO.PUD_UP
@param event: 默认 GPIO.BOTH 它可以是: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~ \n
@see DEF_BOUNCE_TIME_SHORT_MON (10ms)
@see DEF_BOUNCE_TIME_SHORT (50ms)
@see DEF_BOUNCE_TIME_NORMAL (100ms)
@see DEF_BOUNCE_TIME_LONG (200ms) | Below is the instruction that describes the task:
### Input:
!
\~english
Config multi key buttons IO and event on same time
@param enableButtons: an array of key button configs. eg. <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: Default set to DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: Default set to GPIO.PUD_UP
@param event: Default set to GPIO.BOTH. it can be: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~chinese
同时配置多个按键IO和事件
@param enableButtons: 组按键配置 例如: <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: 默认 DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: 默认 GPIO.PUD_UP
@param event: 默认 GPIO.BOTH 它可以是: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~ \n
@see DEF_BOUNCE_TIME_SHORT_MON (10ms)
@see DEF_BOUNCE_TIME_SHORT (50ms)
@see DEF_BOUNCE_TIME_NORMAL (100ms)
@see DEF_BOUNCE_TIME_LONG (200ms)
### Response:
def configKeyButtons( self, enableButtons = [], bounceTime = DEF_BOUNCE_TIME_NORMAL, pullUpDown = GPIO.PUD_UP, event = GPIO.BOTH ):
"""!
\~english
Config multi key buttons IO and event on same time
@param enableButtons: an array of key button configs. eg. <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: Default set to DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: Default set to GPIO.PUD_UP
@param event: Default set to GPIO.BOTH. it can be: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~chinese
同时配置多个按键IO和事件
@param enableButtons: 组按键配置 例如: <br>
[{ "id":BUTTON_ACT_A, "callback": aCallbackFun }, ... ]
@param bounceTime: 默认 DEF_BOUNCE_TIME_NORMAL
@param pullUpDown: 默认 GPIO.PUD_UP
@param event: 默认 GPIO.BOTH 它可以是: { GPIO.RISING | GPIO.FALLING | GPIO.BOTH }
\~ \n
@see DEF_BOUNCE_TIME_SHORT_MON (10ms)
@see DEF_BOUNCE_TIME_SHORT (50ms)
@see DEF_BOUNCE_TIME_NORMAL (100ms)
@see DEF_BOUNCE_TIME_LONG (200ms)
"""
for key in enableButtons:
self.setKeyButton( key["id"], key["callback"], bounceTime, pullUpDown, event )
pass |
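A hedged call sketch; the board instance, the BUTTON_ACT_A/BUTTON_ACT_B ids and the bounce-time constant are assumed to come from the same library as configKeyButtons:

def on_button(channel):
    print("key event on GPIO channel", channel)

board.configKeyButtons(
    enableButtons=[
        {"id": BUTTON_ACT_A, "callback": on_button},
        {"id": BUTTON_ACT_B, "callback": on_button},
    ],
    bounceTime=DEF_BOUNCE_TIME_SHORT,
)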
def remove_all_listeners(self, event=None):
"""Remove all functions for all events, or one event if one is specifed.
:param event: Optional event you wish to remove all functions from
"""
if event is not None:
self._registered_events[event] = OrderedDict()
else:
self._registered_events = defaultdict(OrderedDict) | Remove all functions for all events, or one event if one is specified.
:param event: Optional event you wish to remove all functions from | Below is the instruction that describes the task:
### Input:
Remove all functions for all events, or one event if one is specified.
:param event: Optional event you wish to remove all functions from
### Response:
def remove_all_listeners(self, event=None):
"""Remove all functions for all events, or one event if one is specifed.
:param event: Optional event you wish to remove all functions from
"""
if event is not None:
self._registered_events[event] = OrderedDict()
else:
self._registered_events = defaultdict(OrderedDict) |
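The optional argument selects between the two branches above; a usage sketch with a hypothetical emitter instance:

emitter.remove_all_listeners("data")  # forget handlers for one event only
emitter.remove_all_listeners()        # forget handlers for every event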
def proc_polyline(self, tokens):
""" Returns the components of a polyline. """
pts = [(p["x"], p["y"]) for p in tokens["points"]]
component = Polyline(pen=self.pen, points=pts)
return component | Returns the components of a polyline. | Below is the instruction that describes the task:
### Input:
Returns the components of a polyline.
### Response:
def proc_polyline(self, tokens):
""" Returns the components of a polyline. """
pts = [(p["x"], p["y"]) for p in tokens["points"]]
component = Polyline(pen=self.pen, points=pts)
return component |
def set_wallpaper(image):
'''Set the desktop wallpaper.
Sets the desktop wallpaper to an image.
Args:
image (str): The path to the image to be set as wallpaper.
'''
desktop_env = system.get_name()
if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']:
uri = 'file://%s' % image
SCHEMA = 'org.gnome.desktop.background'
KEY = 'picture-uri'
if desktop_env == 'mate':
uri = image
SCHEMA = 'org.mate.background'
KEY = 'picture-filename'
try:
from gi.repository import Gio
gsettings = Gio.Settings.new(SCHEMA)
gsettings.set_string(KEY, uri)
except ImportError:
try:
gsettings_proc = sp.Popen(
['gsettings', 'set', SCHEMA, KEY, uri])
except: # MATE < 1.6
sp.Popen(['mateconftool-2',
'-t',
'string',
'--set',
'/desktop/mate/background/picture_filename',
'%s' % image],
stdout=sp.PIPE)
else: # only inspect the result if the gsettings call actually started
gsettings_proc.communicate()
if gsettings_proc.returncode != 0:
sp.Popen(['mateconftool-2',
'-t',
'string',
'--set',
'/desktop/mate/background/picture_filename',
'%s' % image])
elif desktop_env == 'gnome2':
sp.Popen(
['gconftool-2',
'-t',
'string',
'--set',
'/desktop/gnome/background/picture_filename',
image]
)
elif desktop_env == 'kde':
# This probably only works in Plasma 5+
kde_script = dedent(
'''\
var Desktops = desktops();
for (i=0;i<Desktops.length;i++) {{
d = Desktops[i];
d.wallpaperPlugin = "org.kde.image";
d.currentConfigGroup = Array("Wallpaper",
"org.kde.image",
"General");
d.writeConfig("Image", "file://{}")
}}
''').format(image)
sp.Popen(
['dbus-send',
'--session',
'--dest=org.kde.plasmashell',
'--type=method_call',
'/PlasmaShell',
'org.kde.PlasmaShell.evaluateScript',
'string:{}'.format(kde_script)]
)
elif desktop_env in ['kde3', 'trinity']:
args = 'dcop kdesktop KBackgroundIface setWallpaper 0 "%s" 6' % image
sp.Popen(args, shell=True)
elif desktop_env == 'xfce4':
# XFCE4's image property is not image-path but last-image (What?)
list_of_properties = system.get_cmd_out(
['xfconf-query',
'-R',
'-l',
'-c',
'xfce4-desktop',
'-p',
'/backdrop']
)
for i in list_of_properties.split('\n'):
if i.endswith('last-image'):
# The property given is a background property
sp.Popen(
['xfconf-query -c xfce4-desktop -p %s -s "%s"' %
(i, image)],
shell=True)
sp.Popen(['xfdesktop --reload'], shell=True)
elif desktop_env == 'razor-qt':
desktop_conf = configparser.ConfigParser()
# Development version
desktop_conf_file = os.path.join(
get_config_dir('razor')[0], 'desktop.conf')
if os.path.isfile(desktop_conf_file):
config_option = r'screens\1\desktops\1\wallpaper'
else:
desktop_conf_file = os.path.join(
os.path.expanduser('~'), '.razor/desktop.conf')
config_option = r'desktops\1\wallpaper'
desktop_conf.read(os.path.join(desktop_conf_file))
try:
if desktop_conf.has_option('razor', config_option):
desktop_conf.set('razor', config_option, image)
with codecs.open(desktop_conf_file, 'w', encoding='utf-8', errors='replace') as f:
desktop_conf.write(f)
except:
pass
elif desktop_env in ['fluxbox', 'jwm', 'openbox', 'afterstep', 'i3']:
try:
args = ['feh', '--bg-scale', image]
sp.Popen(args)
except:
sys.stderr.write('Error: Failed to set wallpaper with feh!\n')
sys.stderr.write('Please make sure that you have feh installed.\n')
elif desktop_env == 'icewm':
args = ['icewmbg', image]
sp.Popen(args)
elif desktop_env == 'blackbox':
args = ['bsetbg', '-full', image]
sp.Popen(args)
elif desktop_env == 'lxde':
args = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image
sp.Popen(args, shell=True)
elif desktop_env == 'lxqt':
args = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image
sp.Popen(args, shell=True)
elif desktop_env == 'windowmaker':
args = 'wmsetbg -s -u %s' % image
sp.Popen(args, shell=True)
elif desktop_env == 'enlightenment':
args = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image
sp.Popen(args, shell=True)
elif desktop_env == 'awesome':
with sp.Popen("awesome-client", stdin=sp.PIPE) as awesome_client:
command = ('local gears = require("gears"); for s = 1,'
' screen.count() do gears.wallpaper.maximized'
'("%s", s, true); end;') % image
awesome_client.communicate(input=bytes(command, 'UTF-8'))
elif desktop_env == 'windows':
WINDOWS_SCRIPT = dedent('''
reg add "HKEY_CURRENT_USER\Control Panel\Desktop" \
/v Wallpaper /t REG_SZ /d %s /f
rundll32.exe user32.dll,UpdatePerUserSystemParameters
''') % image
windows_script_file = os.path.join(
tempfile.gettempdir(), 'wallscript.bat')
with open(windows_script_file, 'w') as f:
f.write(WINDOWS_SCRIPT)
sp.Popen([windows_script_file], shell=True)
# Sometimes the method above works
# and sometimes the one below
SPI_SETDESKWALLPAPER = 20
ctypes.windll.user32.SystemParametersInfoA(
SPI_SETDESKWALLPAPER, 0, image, 0)
elif desktop_env == 'mac':
try:
from appscript import app, mactypes
app('Finder').desktop_picture.set(mactypes.File(image))
except ImportError:
OSX_SCRIPT = dedent(
'''tell application "System Events"
set desktopCount to count of desktops
repeat with desktopNumber from 1 to desktopCount
tell desktop desktopNumber
set picture to POSIX file "%s"
end tell
end repeat
end tell''') % image
sp.Popen(['osascript', OSX_SCRIPT])
else:
try:
sp.Popen(['feh', '--bg-scale', image])
# feh is nearly a catch-all for Linux WMs
except:
pass | Set the desktop wallpaper.
Sets the desktop wallpaper to an image.
Args:
image (str): The path to the image to be set as wallpaper. | Below is the instruction that describes the task:
### Input:
Set the desktop wallpaper.
Sets the desktop wallpaper to an image.
Args:
image (str): The path to the image to be set as wallpaper.
### Response:
def set_wallpaper(image):
'''Set the desktop wallpaper.
Sets the desktop wallpaper to an image.
Args:
image (str): The path to the image to be set as wallpaper.
'''
desktop_env = system.get_name()
if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']:
uri = 'file://%s' % image
SCHEMA = 'org.gnome.desktop.background'
KEY = 'picture-uri'
if desktop_env == 'mate':
uri = image
SCHEMA = 'org.mate.background'
KEY = 'picture-filename'
try:
from gi.repository import Gio
gsettings = Gio.Settings.new(SCHEMA)
gsettings.set_string(KEY, uri)
except ImportError:
try:
gsettings_proc = sp.Popen(
['gsettings', 'set', SCHEMA, KEY, uri])
except: # MATE < 1.6
sp.Popen(['mateconftool-2',
'-t',
'string',
'--set',
'/desktop/mate/background/picture_filename',
'%s' % image],
stdout=sp.PIPE)
else: # only inspect the result if the gsettings call actually started
gsettings_proc.communicate()
if gsettings_proc.returncode != 0:
sp.Popen(['mateconftool-2',
'-t',
'string',
'--set',
'/desktop/mate/background/picture_filename',
'%s' % image])
elif desktop_env == 'gnome2':
sp.Popen(
['gconftool-2',
'-t',
'string',
'--set',
'/desktop/gnome/background/picture_filename',
image]
)
elif desktop_env == 'kde':
# This probably only works in Plasma 5+
kde_script = dedent(
'''\
var Desktops = desktops();
for (i=0;i<Desktops.length;i++) {{
d = Desktops[i];
d.wallpaperPlugin = "org.kde.image";
d.currentConfigGroup = Array("Wallpaper",
"org.kde.image",
"General");
d.writeConfig("Image", "file://{}")
}}
''').format(image)
sp.Popen(
['dbus-send',
'--session',
'--dest=org.kde.plasmashell',
'--type=method_call',
'/PlasmaShell',
'org.kde.PlasmaShell.evaluateScript',
'string:{}'.format(kde_script)]
)
elif desktop_env in ['kde3', 'trinity']:
args = 'dcop kdesktop KBackgroundIface setWallpaper 0 "%s" 6' % image
sp.Popen(args, shell=True)
elif desktop_env == 'xfce4':
# XFCE4's image property is not image-path but last-image (What?)
list_of_properties = system.get_cmd_out(
['xfconf-query',
'-R',
'-l',
'-c',
'xfce4-desktop',
'-p',
'/backdrop']
)
for i in list_of_properties.split('\n'):
if i.endswith('last-image'):
# The property given is a background property
sp.Popen(
['xfconf-query -c xfce4-desktop -p %s -s "%s"' %
(i, image)],
shell=True)
sp.Popen(['xfdesktop --reload'], shell=True)
elif desktop_env == 'razor-qt':
desktop_conf = configparser.ConfigParser()
# Development version
desktop_conf_file = os.path.join(
get_config_dir('razor')[0], 'desktop.conf')
if os.path.isfile(desktop_conf_file):
config_option = r'screens\1\desktops\1\wallpaper'
else:
desktop_conf_file = os.path.join(
os.path.expanduser('~'), '.razor/desktop.conf')
config_option = r'desktops\1\wallpaper'
desktop_conf.read(os.path.join(desktop_conf_file))
try:
if desktop_conf.has_option('razor', config_option):
desktop_conf.set('razor', config_option, image)
with codecs.open(desktop_conf_file, 'w', encoding='utf-8', errors='replace') as f:
desktop_conf.write(f)
except:
pass
elif desktop_env in ['fluxbox', 'jwm', 'openbox', 'afterstep', 'i3']:
try:
args = ['feh', '--bg-scale', image]
sp.Popen(args)
except:
sys.stderr.write('Error: Failed to set wallpaper with feh!\n')
sys.stderr.write('Please make sure that you have feh installed.\n')
elif desktop_env == 'icewm':
args = ['icewmbg', image]
sp.Popen(args)
elif desktop_env == 'blackbox':
args = ['bsetbg', '-full', image]
sp.Popen(args)
elif desktop_env == 'lxde':
args = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image
sp.Popen(args, shell=True)
elif desktop_env == 'lxqt':
args = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image
sp.Popen(args, shell=True)
elif desktop_env == 'windowmaker':
args = 'wmsetbg -s -u %s' % image
sp.Popen(args, shell=True)
elif desktop_env == 'enlightenment':
args = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image
sp.Popen(args, shell=True)
elif desktop_env == 'awesome':
with sp.Popen("awesome-client", stdin=sp.PIPE) as awesome_client:
command = ('local gears = require("gears"); for s = 1,'
' screen.count() do gears.wallpaper.maximized'
'("%s", s, true); end;') % image
awesome_client.communicate(input=bytes(command, 'UTF-8'))
elif desktop_env == 'windows':
WINDOWS_SCRIPT = dedent('''
reg add "HKEY_CURRENT_USER\Control Panel\Desktop" \
/v Wallpaper /t REG_SZ /d %s /f
rundll32.exe user32.dll,UpdatePerUserSystemParameters
''') % image
windows_script_file = os.path.join(
tempfile.gettempdir(), 'wallscript.bat')
with open(windows_script_file, 'w') as f:
f.write(WINDOWS_SCRIPT)
sp.Popen([windows_script_file], shell=True)
# Sometimes the method above works
# and sometimes the one below
SPI_SETDESKWALLPAPER = 20
ctypes.windll.user32.SystemParametersInfoA(
SPI_SETDESKWALLPAPER, 0, image, 0)
elif desktop_env == 'mac':
try:
from appscript import app, mactypes
app('Finder').desktop_picture.set(mactypes.File(image))
except ImportError:
OSX_SCRIPT = dedent(
'''tell application "System Events"
set desktopCount to count of desktops
repeat with desktopNumber from 1 to desktopCount
tell desktop desktopNumber
set picture to POSIX file "%s"
end tell
end repeat
end tell''') % image
sp.Popen(['osascript', OSX_SCRIPT])
else:
try:
sp.Popen(['feh', '--bg-scale', image])
# feh is nearly a catch-all for Linux WMs
except:
pass |
def list_vnets(access_token, subscription_id):
'''List the VNETs in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of VNets list with properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Network/',
'/virtualNetworks?api-version=', NETWORK_API])
return do_get(endpoint, access_token) | List the VNETs in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of VNets list with properties. | Below is the instruction that describes the task:
### Input:
List the VNETs in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of VNets list with properties.
### Response:
def list_vnets(access_token, subscription_id):
'''List the VNETs in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of VNets list with properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Network/',
'/virtualNetworks?api-version=', NETWORK_API])
return do_get(endpoint, access_token) |
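A hedged usage sketch: the subscription id is a placeholder, and reading .json()['value'] assumes do_get returns a requests-style response whose body follows the Azure REST list convention:

response = list_vnets(access_token, "00000000-0000-0000-0000-000000000000")
for vnet in response.json().get("value", []):
    print(vnet["name"], vnet["location"])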
def get_resource_area_by_host(self, area_id, host_id):
"""GetResourceAreaByHost.
[Preview API]
:param str area_id:
:param str host_id:
:rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>`
"""
route_values = {}
if area_id is not None:
route_values['areaId'] = self._serialize.url('area_id', area_id, 'str')
query_parameters = {}
if host_id is not None:
query_parameters['hostId'] = self._serialize.query('host_id', host_id, 'str')
response = self._send(http_method='GET',
location_id='e81700f7-3be2-46de-8624-2eb35882fcaa',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ResourceAreaInfo', response) | GetResourceAreaByHost.
[Preview API]
:param str area_id:
:param str host_id:
:rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>` | Below is the instruction that describes the task:
### Input:
GetResourceAreaByHost.
[Preview API]
:param str area_id:
:param str host_id:
:rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>`
### Response:
def get_resource_area_by_host(self, area_id, host_id):
"""GetResourceAreaByHost.
[Preview API]
:param str area_id:
:param str host_id:
:rtype: :class:`<ResourceAreaInfo> <azure.devops.v5_0.location.models.ResourceAreaInfo>`
"""
route_values = {}
if area_id is not None:
route_values['areaId'] = self._serialize.url('area_id', area_id, 'str')
query_parameters = {}
if host_id is not None:
query_parameters['hostId'] = self._serialize.query('host_id', host_id, 'str')
response = self._send(http_method='GET',
location_id='e81700f7-3be2-46de-8624-2eb35882fcaa',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ResourceAreaInfo', response) |
def do_quit(self, _: argparse.Namespace) -> bool:
"""Exit this application"""
self._should_quit = True
return self._STOP_AND_EXIT | Exit this application | Below is the instruction that describes the task:
### Input:
Exit this application
### Response:
def do_quit(self, _: argparse.Namespace) -> bool:
"""Exit this application"""
self._should_quit = True
return self._STOP_AND_EXIT |
def cached(cls, timeout=60, cache_none=False):
""" Cache queries
:param timeout: cache timeout
:param cache_none: cache None result
Usage::
>>> Model.cached(60).query({...})
"""
return CachedModel(cls=cls, timeout=timeout, cache_none=cache_none) | Cache queries
:param timeout: cache timeout
:param cache_none: cache None result
Usage::
>>> Model.cached(60).query({...}) | Below is the instruction that describes the task:
### Input:
Cache queries
:param timeout: cache timeout
:param cache_none: cache None result
Usage::
>>> Model.cached(60).query({...})
### Response:
def cached(cls, timeout=60, cache_none=False):
""" Cache queries
:param timeout: cache timeout
:param cache_none: cache None result
Usage::
>>> Model.cached(60).query({...})
"""
return CachedModel(cls=cls, timeout=timeout, cache_none=cache_none) |
def setCamera(self, camera_name, bit_depth=16):
'''
Args:
camera_name (str): Name of the camera
bit_depth (int): depth (bit) of the camera sensor
'''
self.coeffs['name'] = camera_name
self.coeffs['depth'] = bit_depth | Args:
camera_name (str): Name of the camera
bit_depth (int): depth (bit) of the camera sensor | Below is the instruction that describes the task:
### Input:
Args:
camera_name (str): Name of the camera
bit_depth (int): depth (bit) of the camera sensor
### Response:
def setCamera(self, camera_name, bit_depth=16):
'''
Args:
camera_name (str): Name of the camera
bit_depth (int): depth (bit) of the camera sensor
'''
self.coeffs['name'] = camera_name
self.coeffs['depth'] = bit_depth |
def _cache_lookup(word, data_dir, native=False):
"""Checks if word is in cache.
Parameters
----------
word : str
Word to check in cache.
data_dir : pathlib.Path
Cache directory location.
Returns
-------
translation : str or None
Translation of given word.
"""
trans_dir = "translations"
if native:
trans_dir += "_native"
logger.debug("Cache lookup: %s", word)
filename = data_dir.joinpath(trans_dir, "{}.html".format(word))
if filename.is_file():
with open(filename, mode="r") as f:
logger.debug("Cache found: %s", word)
# TODO: not sure if we should parse data here
translation = _parse_cached(f.read())
return translation
logger.debug("Cache miss: %s", word)
return None | Checks if word is in cache.
Parameters
----------
word : str
Word to check in cache.
data_dir : pathlib.Path
Cache directory location.
Returns
-------
translation : str or None
Translation of given word. | Below is the instruction that describes the task:
### Input:
Checks if word is in cache.
Parameters
----------
word : str
Word to check in cache.
data_dir : pathlib.Path
Cache directory location.
Returns
-------
translation : str or None
Translation of given word.
### Response:
def _cache_lookup(word, data_dir, native=False):
"""Checks if word is in cache.
Parameters
----------
word : str
Word to check in cache.
data_dir : pathlib.Path
Cache directory location.
Returns
-------
translation : str or None
Translation of given word.
"""
trans_dir = "translations"
if native:
trans_dir += "_native"
logger.debug("Cache lookup: %s", word)
filename = data_dir.joinpath(trans_dir, "{}.html".format(word))
if filename.is_file():
with open(filename, mode="r") as f:
logger.debug("Cache found: %s", word)
# TODO: not sure if we should parse data here
translation = _parse_cached(f.read())
return translation
logger.debug("Cache miss: %s", word)
return None |
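A usage sketch; the cache root is illustrative, while the translations/<word>.html layout comes from the function itself:

from pathlib import Path

cache_root = Path.home() / ".cache" / "mydict"  # hypothetical data_dir
translation = _cache_lookup("apple", cache_root)
if translation is None:
    print("cache miss; fetch and store under", cache_root / "translations")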
def less_than_obs_constraints(self):
"""get the names of the observations that
are listed as less than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of observations that are non-zero weighted
less than constraints
"""
obs = self.observation_data
lt_obs = obs.loc[obs.apply(lambda x: self._is_less_const(x.obgnme) \
and x.weight != 0.0,axis=1),"obsnme"]
return lt_obs | get the names of the observations that
are listed as less than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of observations that are non-zero weighted
less than constraints | Below is the instruction that describes the task:
### Input:
get the names of the observations that
are listed as less than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of observations that are non-zero weighted
less than constraints
### Response:
def less_than_obs_constraints(self):
"""get the names of the observations that
are listed as less than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of observations that are non-zero weighted
less than constraints
"""
obs = self.observation_data
lt_obs = obs.loc[obs.apply(lambda x: self._is_less_const(x.obgnme) \
and x.weight != 0.0,axis=1),"obsnme"]
return lt_obs |
def get_review_id(self, id_):
""" Get a particular review id, independent from the user_id and
startup_id
"""
return _get_request(_REVIEW_ID.format(c_api=_C_API_BEGINNING,
api=_API_VERSION,
id_=id_,
at=self.access_token)) | Get a particular review id, independent of the user_id and
startup_id | Below is the instruction that describes the task:
### Input:
Get a particular review id, independent of the user_id and
startup_id
### Response:
def get_review_id(self, id_):
""" Get a particular review id, independent from the user_id and
startup_id
"""
return _get_request(_REVIEW_ID.format(c_api=_C_API_BEGINNING,
api=_API_VERSION,
id_=id_,
at=self.access_token)) |
def find(self, obj, filter_to_class=Ingredient, constructor=None):
"""
Find an Ingredient, optionally using the shelf.
:param obj: A string or Ingredient
:param filter_to_class: The Ingredient subclass that obj must be an
instance of
:param constructor: An optional callable for building Ingredients
from obj
:return: An Ingredient of subclass `filter_to_class`
"""
if callable(constructor):
obj = constructor(obj, shelf=self)
if isinstance(obj, basestring):
set_descending = obj.startswith('-')
if set_descending:
obj = obj[1:]
if obj not in self:
raise BadRecipe("{} doesn't exist on the shelf".format(obj))
ingredient = self[obj]
if not isinstance(ingredient, filter_to_class):
raise BadRecipe('{} is not a {}'.format(obj, filter_to_class))
if set_descending:
ingredient.ordering = 'desc'
return ingredient
elif isinstance(obj, filter_to_class):
return obj
else:
raise BadRecipe('{} is not a {}'.format(obj, filter_to_class)) | Find an Ingredient, optionally using the shelf.
:param obj: A string or Ingredient
:param filter_to_class: The Ingredient subclass that obj must be an
instance of
:param constructor: An optional callable for building Ingredients
from obj
:return: An Ingredient of subclass `filter_to_class` | Below is the instruction that describes the task:
### Input:
Find an Ingredient, optionally using the shelf.
:param obj: A string or Ingredient
:param filter_to_class: The Ingredient subclass that obj must be an
instance of
:param constructor: An optional callable for building Ingredients
from obj
:return: An Ingredient of subclass `filter_to_class`
### Response:
def find(self, obj, filter_to_class=Ingredient, constructor=None):
"""
Find an Ingredient, optionally using the shelf.
:param obj: A string or Ingredient
:param filter_to_class: The Ingredient subclass that obj must be an
instance of
:param constructor: An optional callable for building Ingredients
from obj
:return: An Ingredient of subclass `filter_to_class`
"""
if callable(constructor):
obj = constructor(obj, shelf=self)
if isinstance(obj, basestring):
set_descending = obj.startswith('-')
if set_descending:
obj = obj[1:]
if obj not in self:
raise BadRecipe("{} doesn't exist on the shelf".format(obj))
ingredient = self[obj]
if not isinstance(ingredient, filter_to_class):
raise BadRecipe('{} is not a {}'.format(obj, filter_to_class))
if set_descending:
ingredient.ordering = 'desc'
return ingredient
elif isinstance(obj, filter_to_class):
return obj
else:
raise BadRecipe('{} is not a {}'.format(obj, filter_to_class)) |
def subdevicenames(self) -> Tuple[str, ...]:
"""A |tuple| containing the device names."""
self: NetCDFVariableBase
return tuple(self.sequences.keys()) | A |tuple| containing the device names. | Below is the instruction that describes the task:
### Input:
A |tuple| containing the device names.
### Response:
def subdevicenames(self) -> Tuple[str, ...]:
"""A |tuple| containing the device names."""
self: NetCDFVariableBase
return tuple(self.sequences.keys()) |
def query_source(self, source):
"""
Query by source
"""
return self._get_repo_filter(Layer.objects).filter(url=source) | Query by source | Below is the instruction that describes the task:
### Input:
Query by source
### Response:
def query_source(self, source):
"""
Query by source
"""
return self._get_repo_filter(Layer.objects).filter(url=source) |
def migrate(move_data=True, update_alias=True):
"""
Upgrade function that creates a new index for the data. Optionally it also can
(and by default will) reindex previous copy of the data into the new index
(specify ``move_data=False`` to skip this step) and update the alias to
point to the latest index (set ``update_alias=False`` to skip).
Note that while this function is running the application can still perform
any and all searches without any loss of functionality. It should, however,
not perform any writes at this time as those might be lost.
"""
# construct a new index name by appending current timestamp
next_index = PATTERN.replace('*', datetime.now().strftime('%Y%m%d%H%M%S%f'))
# get the low level connection
es = connections.get_connection()
# create new index, it will use the settings from the template
es.indices.create(index=next_index)
if move_data:
# move data from current alias to the new index
es.reindex(
body={"source": {"index": ALIAS}, "dest": {"index": next_index}},
request_timeout=3600
)
# refresh the index to make the changes visible
es.indices.refresh(index=next_index)
if update_alias:
# repoint the alias to point to the newly created index
es.indices.update_aliases(body={
'actions': [
{"remove": {"alias": ALIAS, "index": PATTERN}},
{"add": {"alias": ALIAS, "index": next_index}},
]
}) | Upgrade function that creates a new index for the data. Optionally it also can
(and by default will) reindex previous copy of the data into the new index
(specify ``move_data=False`` to skip this step) and update the alias to
point to the latest index (set ``update_alias=False`` to skip).
Note that while this function is running the application can still perform
any and all searches without any loss of functionality. It should, however,
not perform any writes at this time as those might be lost. | Below is the instruction that describes the task:
### Input:
Upgrade function that creates a new index for the data. Optionally it also can
(and by default will) reindex previous copy of the data into the new index
(specify ``move_data=False`` to skip this step) and update the alias to
point to the latest index (set ``update_alias=False`` to skip).
Note that while this function is running the application can still perform
any and all searches without any loss of functionality. It should, however,
not perform any writes at this time as those might be lost.
### Response:
def migrate(move_data=True, update_alias=True):
"""
Upgrade function that creates a new index for the data. Optionally it also can
(and by default will) reindex previous copy of the data into the new index
(specify ``move_data=False`` to skip this step) and update the alias to
point to the latest index (set ``update_alias=False`` to skip).
Note that while this function is running the application can still perform
any and all searches without any loss of functionality. It should, however,
not perform any writes at this time as those might be lost.
"""
# construct a new index name by appending current timestamp
next_index = PATTERN.replace('*', datetime.now().strftime('%Y%m%d%H%M%S%f'))
# get the low level connection
es = connections.get_connection()
# create new index, it will use the settings from the template
es.indices.create(index=next_index)
if move_data:
# move data from current alias to the new index
es.reindex(
body={"source": {"index": ALIAS}, "dest": {"index": next_index}},
request_timeout=3600
)
# refresh the index to make the changes visible
es.indices.refresh(index=next_index)
if update_alias:
# repoint the alias to point to the newly created index
es.indices.update_aliases(body={
'actions': [
{"remove": {"alias": ALIAS, "index": PATTERN}},
{"add": {"alias": ALIAS, "index": next_index}},
]
}) |
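migrate() presupposes an index template covering PATTERN and, on first run, an index behind ALIAS; a hedged bootstrap sketch modeled on the elasticsearch-dsl alias-migration recipe this function follows (the document class and names are illustrative):

from elasticsearch_dsl import Document, Text

ALIAS = "blog"
PATTERN = ALIAS + "-*"

class BlogPost(Document):
    title = Text()

    class Index:
        name = ALIAS  # reads and writes go through the alias

def setup():
    # register a template so every PATTERN index inherits mapping and settings
    index_template = BlogPost._index.as_template(ALIAS, PATTERN)
    index_template.save()
    if not BlogPost._index.exists():  # first run: create the initial index
        migrate(move_data=False, update_alias=True)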
def platform_information(_linux_distribution=None):
""" detect platform information from remote host """
linux_distribution = _linux_distribution or platform.linux_distribution
distro, release, codename = linux_distribution()
if not distro:
distro, release, codename = parse_os_release()
if not codename and 'debian' in distro.lower(): # this could be an empty string in Debian
debian_codenames = {
'10': 'buster',
'9': 'stretch',
'8': 'jessie',
'7': 'wheezy',
'6': 'squeeze',
}
major_version = release.split('.')[0]
codename = debian_codenames.get(major_version, '')
# In order to support newer jessie/sid or wheezy/sid strings we test this
# if sid is buried in the minor, we should use sid anyway.
if not codename and '/' in release:
major, minor = release.split('/')
if minor == 'sid':
codename = minor
else:
codename = major
if not codename and 'oracle' in distro.lower(): # this could be an empty string in Oracle linux
codename = 'oracle'
if not codename and 'virtuozzo linux' in distro.lower(): # this could be an empty string in Virtuozzo linux
codename = 'virtuozzo'
if not codename and 'arch' in distro.lower(): # this could be an empty string in Arch linux
codename = 'arch'
return (
str(distro).rstrip(),
str(release).rstrip(),
str(codename).rstrip()
) | detect platform information from remote host | Below is the instruction that describes the task:
### Input:
detect platform information from remote host
### Response:
def platform_information(_linux_distribution=None):
""" detect platform information from remote host """
linux_distribution = _linux_distribution or platform.linux_distribution
distro, release, codename = linux_distribution()
if not distro:
distro, release, codename = parse_os_release()
if not codename and 'debian' in distro.lower(): # this could be an empty string in Debian
debian_codenames = {
'10': 'buster',
'9': 'stretch',
'8': 'jessie',
'7': 'wheezy',
'6': 'squeeze',
}
major_version = release.split('.')[0]
codename = debian_codenames.get(major_version, '')
# In order to support newer jessie/sid or wheezy/sid strings we test this
# if sid is buried in the minor, we should use sid anyway.
if not codename and '/' in release:
major, minor = release.split('/')
if minor == 'sid':
codename = minor
else:
codename = major
if not codename and 'oracle' in distro.lower(): # this could be an empty string in Oracle linux
codename = 'oracle'
if not codename and 'virtuozzo linux' in distro.lower(): # this could be an empty string in Virtuozzo linux
codename = 'virtuozzo'
if not codename and 'arch' in distro.lower(): # this could be an empty string in Arch linux
codename = 'arch'
return (
str(distro).rstrip(),
str(release).rstrip(),
str(codename).rstrip()
) |
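Debian-family systems sometimes report releases like 'wheezy/sid'; the fallback above in isolation (the sample strings are illustrative):

def codename_from_slashed_release(release):
    major, minor = release.split("/")
    return minor if minor == "sid" else major

assert codename_from_slashed_release("wheezy/sid") == "sid"
assert codename_from_slashed_release("stable/updates") == "stable"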
def update_remote_archive(self, save_uri, timeout=-1):
"""
Saves a backup of the appliance to a previously-configured remote location.
Args:
save_uri (dict): The URI for saving the backup to a previously configured location.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Backup details.
"""
return self._client.update_with_zero_body(uri=save_uri, timeout=timeout) | Saves a backup of the appliance to a previously-configured remote location.
Args:
save_uri (dict): The URI for saving the backup to a previously configured location.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Backup details. | Below is the instruction that describes the task:
### Input:
Saves a backup of the appliance to a previously-configured remote location.
Args:
save_uri (dict): The URI for saving the backup to a previously configured location.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Backup details.
### Response:
def update_remote_archive(self, save_uri, timeout=-1):
"""
Saves a backup of the appliance to a previously-configured remote location.
Args:
save_uri (dict): The URI for saving the backup to a previously configured location.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Backup details.
"""
return self._client.update_with_zero_body(uri=save_uri, timeout=timeout) |
def write_frames(filename, frames, compression=257, compression_level=6):
"""Write a list of frame objects to a file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
filename : `str`
path to write into
frames : `list` of `LDAStools.frameCPP.FrameH`
list of frames to write into file
compression : `int`, optional
enum value for compression scheme, default is ``GZIP``
compression_level : `int`, optional
compression level for given scheme
"""
from LDAStools import frameCPP
# open stream
stream = open_gwf(filename, 'w')
# write frames one-by-one
if isinstance(frames, frameCPP.FrameH):
frames = [frames]
for frame in frames:
stream.WriteFrame(frame, compression, compression_level) | Write a list of frame objects to a file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
filename : `str`
path to write into
frames : `list` of `LDAStools.frameCPP.FrameH`
list of frames to write into file
compression : `int`, optional
enum value for compression scheme, default is ``GZIP``
compression_level : `int`, optional
compression level for given scheme | Below is the instruction that describes the task:
### Input:
Write a list of frame objects to a file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
filename : `str`
path to write into
frames : `list` of `LDAStools.frameCPP.FrameH`
list of frames to write into file
compression : `int`, optional
enum value for compression scheme, default is ``GZIP``
compression_level : `int`, optional
compression level for given scheme
### Response:
def write_frames(filename, frames, compression=257, compression_level=6):
"""Write a list of frame objects to a file
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
filename : `str`
path to write into
frames : `list` of `LDAStools.frameCPP.FrameH`
list of frames to write into file
compression : `int`, optional
enum value for compression scheme, default is ``GZIP``
compression_level : `int`, optional
compression level for given scheme
"""
from LDAStools import frameCPP
# open stream
stream = open_gwf(filename, 'w')
# write frames one-by-one
if isinstance(frames, frameCPP.FrameH):
frames = [frames]
for frame in frames:
stream.WriteFrame(frame, compression, compression_level) |
def spell(self, word: str) -> List[str]:
"""
Return a list of possible words, according to edit distance of 1 and 2,
sorted by frequency of word occurrence in the spelling dictionary
:param str word: A word to check its spelling
"""
if not word:
return ""
candidates = (
self.known([word])
or self.known(_edits1(word))
or self.known(_edits2(word))
or [word]
)
candidates.sort(key=self.freq, reverse=True)
return candidates | Return a list of possible words, according to edit distance of 1 and 2,
sorted by frequency of word occurrence in the spelling dictionary
:param str word: A word to check its spelling | Below is the instruction that describes the task:
### Input:
Return a list of possible words, according to edit distance of 1 and 2,
sorted by frequency of word occurrence in the spelling dictionary
:param str word: A word to check its spelling
### Response:
def spell(self, word: str) -> List[str]:
"""
Return a list of possible words, according to edit distance of 1 and 2,
sorted by frequency of word occurrence in the spelling dictionary
:param str word: A word to check its spelling
"""
if not word:
return ""
candidates = (
self.known([word])
or self.known(_edits1(word))
or self.known(_edits2(word))
or [word]
)
candidates.sort(key=self.freq, reverse=True)
return candidates |
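The known/_edits1/_edits2 helpers are not shown; assuming they follow the classic Norvig spell-checker, a minimal sketch of an edit-distance-1 generator (the alphabet is illustrative and would be language-specific in practice):

def _edits1(word, alphabet="abcdefghijklmnopqrstuvwxyz"):
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces = [L + c + R[1:] for L, R in splits if R for c in alphabet]
    inserts = [L + c + R for L, R in splits for c in alphabet]
    return set(deletes + transposes + replaces + inserts)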
def getPlainText(self, identify=None):
"""
Convenience function for templates which want access
to the raw text, without XML tags.
"""
frags = getattr(self, 'frags', None)
if frags:
plains = []
for frag in frags:
if hasattr(frag, 'text'):
plains.append(frag.text)
return ''.join(plains)
elif identify:
text = getattr(self, 'text', None)
if text is None: text = repr(self)
return text
else:
return '' | Convenience function for templates which want access
to the raw text, without XML tags. | Below is the instruction that describes the task:
### Input:
Convenience function for templates which want access
to the raw text, without XML tags.
### Response:
def getPlainText(self, identify=None):
"""
Convenience function for templates which want access
to the raw text, without XML tags.
"""
frags = getattr(self, 'frags', None)
if frags:
plains = []
for frag in frags:
if hasattr(frag, 'text'):
plains.append(frag.text)
return ''.join(plains)
elif identify:
text = getattr(self, 'text', None)
if text is None: text = repr(self)
return text
else:
return '' |
def get_subtree(self, name): # noqa: D302
r"""
Get all node names in a sub-tree.
:param name: Sub-tree root node name
:type name: :ref:`NodeName`
:rtype: list of :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example, pprint
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> pprint.pprint(tobj.get_subtree('root.branch1'))
['root.branch1',
'root.branch1.leaf1',
'root.branch1.leaf1.subleaf1',
'root.branch1.leaf2',
'root.branch1.leaf2.subleaf2']
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return self._get_subtree(name) | r"""
Get all node names in a sub-tree.
:param name: Sub-tree root node name
:type name: :ref:`NodeName`
:rtype: list of :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example, pprint
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> pprint.pprint(tobj.get_subtree('root.branch1'))
['root.branch1',
'root.branch1.leaf1',
'root.branch1.leaf1.subleaf1',
'root.branch1.leaf2',
'root.branch1.leaf2.subleaf2'] | Below is the instruction that describes the task:
### Input:
r"""
Get all node names in a sub-tree.
:param name: Sub-tree root node name
:type name: :ref:`NodeName`
:rtype: list of :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example, pprint
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> pprint.pprint(tobj.get_subtree('root.branch1'))
['root.branch1',
'root.branch1.leaf1',
'root.branch1.leaf1.subleaf1',
'root.branch1.leaf2',
'root.branch1.leaf2.subleaf2']
### Response:
def get_subtree(self, name): # noqa: D302
r"""
Get all node names in a sub-tree.
:param name: Sub-tree root node name
:type name: :ref:`NodeName`
:rtype: list of :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example, pprint
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> pprint.pprint(tobj.get_subtree('root.branch1'))
['root.branch1',
'root.branch1.leaf1',
'root.branch1.leaf1.subleaf1',
'root.branch1.leaf2',
'root.branch1.leaf2.subleaf2']
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return self._get_subtree(name) |
def filename(self):
"""
Returns the provided data as a file location.
"""
if self._filename:
return self._filename
else:
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(self._bytes)
            return f.name | Returns the provided data as a file location. | Below is the instruction that describes the task:
### Input:
Returns the provided data as a file location.
### Response:
def filename(self):
"""
Returns the provided data as a file location.
"""
if self._filename:
return self._filename
else:
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(self._bytes)
return f.name |
def apply_to_segmentlist(self, seglist):
"""
Apply our low and high windows to the segments in a
segmentlist.
"""
for i, seg in enumerate(seglist):
seglist[i] = seg.__class__(seg[0] - self.low_window, seg[1] + self.high_window) | Apply our low and high windows to the segments in a
segmentlist. | Below is the instruction that describes the task:
### Input:
Apply our low and high windows to the segments in a
segmentlist.
### Response:
def apply_to_segmentlist(self, seglist):
"""
Apply our low and high windows to the segments in a
segmentlist.
"""
for i, seg in enumerate(seglist):
seglist[i] = seg.__class__(seg[0] - self.low_window, seg[1] + self.high_window) |
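To make the padding concrete, here is a minimal runnable sketch; the tuple subclass stands in for the real segment class (an assumption -- the actual type comes from the segments library):

class Seg(tuple):
    def __new__(cls, start, end):
        return super(Seg, cls).__new__(cls, (start, end))

low_window, high_window = 2, 3  # hypothetical window sizes
seglist = [Seg(10, 20), Seg(30, 40)]
for i, seg in enumerate(seglist):
    seglist[i] = seg.__class__(seg[0] - low_window, seg[1] + high_window)
print(seglist)  # [(8, 23), (28, 43)]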
def element_focus_should_be_set(self, locator):
"""Verifies the element identified by `locator` has focus.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |"""
self._info("Verifying element '%s' focus is set" % locator)
self._check_element_focus(True, locator) | Verifies the element identified by `locator` has focus.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id | | Below is the instruction that describes the task:
### Input:
Verifies the element identified by `locator` has focus.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
### Response:
def element_focus_should_be_set(self, locator):
"""Verifies the element identified by `locator` has focus.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |"""
self._info("Verifying element '%s' focus is set" % locator)
self._check_element_focus(True, locator) |
def _generate_version(base_version):
"""Generate a version with information about the git repository"""
pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if not _is_git_repo(pkg_dir) or not _have_git():
return base_version
if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
return base_version
return "{base_version}+{short_sha}{dirty}".format(
base_version=base_version,
short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
dirty=".mod" if _is_dirty(pkg_dir) else "",
    ) | Generate a version with information about the git repository | Below is the instruction that describes the task:
### Input:
Generate a version with information about the git repository
### Response:
def _generate_version(base_version):
"""Generate a version with information about the git repository"""
pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if not _is_git_repo(pkg_dir) or not _have_git():
return base_version
if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
return base_version
return "{base_version}+{short_sha}{dirty}".format(
base_version=base_version,
short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
dirty=".mod" if _is_dirty(pkg_dir) else "",
) |
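For illustration, the shape of the version string produced for a non-release, dirty checkout (all concrete values here are assumptions):

base_version = "1.2.0"  # hypothetical base version
short_sha = "a1b2c3"    # hypothetical first six hex chars of the git revision
print("{base_version}+{short_sha}{dirty}".format(
    base_version=base_version, short_sha=short_sha, dirty=".mod"))
# -> 1.2.0+a1b2c3.mod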
def merge(
self, reservation_order_id, sources=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Merges two `Reservation`s.
Merge the specified `Reservation`s into a new `Reservation`. The two
`Reservation`s being merged must have same properties.
:param reservation_order_id: Order Id of the reservation
:type reservation_order_id: str
:param sources: Format of the resource id should be
/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId}
:type sources: list[str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns list or
ClientRawResponse<list> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.reservations.models.ReservationResponse]]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.reservations.models.ReservationResponse]]]
:raises:
:class:`ErrorException<azure.mgmt.reservations.models.ErrorException>`
"""
raw_result = self._merge_initial(
reservation_order_id=reservation_order_id,
sources=sources,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('[ReservationResponse]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) | Merges two `Reservation`s.
Merge the specified `Reservation`s into a new `Reservation`. The two
`Reservation`s being merged must have same properties.
:param reservation_order_id: Order Id of the reservation
:type reservation_order_id: str
:param sources: Format of the resource id should be
/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId}
:type sources: list[str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns list or
ClientRawResponse<list> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.reservations.models.ReservationResponse]]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.reservations.models.ReservationResponse]]]
:raises:
:class:`ErrorException<azure.mgmt.reservations.models.ErrorException>` | Below is the instruction that describes the task:
### Input:
Merges two `Reservation`s.
Merge the specified `Reservation`s into a new `Reservation`. The two
`Reservation`s being merged must have same properties.
:param reservation_order_id: Order Id of the reservation
:type reservation_order_id: str
:param sources: Format of the resource id should be
/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId}
:type sources: list[str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns list or
ClientRawResponse<list> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.reservations.models.ReservationResponse]]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.reservations.models.ReservationResponse]]]
:raises:
:class:`ErrorException<azure.mgmt.reservations.models.ErrorException>`
### Response:
def merge(
self, reservation_order_id, sources=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Merges two `Reservation`s.
Merge the specified `Reservation`s into a new `Reservation`. The two
`Reservation`s being merged must have same properties.
:param reservation_order_id: Order Id of the reservation
:type reservation_order_id: str
:param sources: Format of the resource id should be
/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/reservations/{reservationId}
:type sources: list[str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns list or
ClientRawResponse<list> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.reservations.models.ReservationResponse]]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.reservations.models.ReservationResponse]]]
:raises:
:class:`ErrorException<azure.mgmt.reservations.models.ErrorException>`
"""
raw_result = self._merge_initial(
reservation_order_id=reservation_order_id,
sources=sources,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('[ReservationResponse]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) |
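A hedged usage sketch of the merge call; the client variable and the resource identifiers below are assumptions, not values from the source:

# poller = reservation_client.reservation.merge(
#     reservation_order_id="00000000-0000-0000-0000-000000000000",  # hypothetical
#     sources=[
#         "/providers/Microsoft.Capacity/reservationOrders/<orderId>/reservations/<id1>",
#         "/providers/Microsoft.Capacity/reservationOrders/<orderId>/reservations/<id2>",
#     ])
# merged = poller.result()  # blocks until the long-running operation completes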
def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):
"""
Method called for unsampled wrapped calls. This can e.g. be used to add traceparent headers to the
underlying http call for HTTP instrumentations.
:param module:
:param method:
:param wrapped:
:param instance:
:param args:
:param kwargs:
:param transaction:
:return:
"""
return args, kwargs | Method called for unsampled wrapped calls. This can e.g. be used to add traceparent headers to the
underlying http call for HTTP instrumentations.
:param module:
:param method:
:param wrapped:
:param instance:
:param args:
:param kwargs:
:param transaction:
:return: | Below is the instruction that describes the task:
### Input:
Method called for unsampled wrapped calls. This can e.g. be used to add traceparent headers to the
underlying http call for HTTP instrumentations.
:param module:
:param method:
:param wrapped:
:param instance:
:param args:
:param kwargs:
:param transaction:
:return:
### Response:
def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):
"""
Method called for unsampled wrapped calls. This can e.g. be used to add traceparent headers to the
underlying http call for HTTP instrumentations.
:param module:
:param method:
:param wrapped:
:param instance:
:param args:
:param kwargs:
:param transaction:
:return:
"""
return args, kwargs |
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for MonthDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
month_start_id = now.tm_mon
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = now.tm_mon
day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
if start_time > end_time:
month_start_id -= 1
if month_start_id < 1:
month_start_id = 12
self.syear -= 1
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if end_time < now_epoch:
month_end_id += 1
month_start_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
if month_start_id > 12:
month_start_id = 1
self.syear += 1
# For the start
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# For the end
day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
return (start_time, end_time) | Specific function to get start time and end time for MonthDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int) | Below is the instruction that describes the task:
### Input:
Specific function to get start time and end time for MonthDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
### Response:
def get_start_and_end_time(self, ref=None):
"""Specific function to get start time and end time for MonthDayDaterange
:param ref: time in seconds
:type ref: int
:return: tuple with start and end time
:rtype: tuple (int, int)
"""
now = time.localtime(ref)
if self.syear == 0:
self.syear = now.tm_year
month_start_id = now.tm_mon
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if self.eyear == 0:
self.eyear = now.tm_year
month_end_id = now.tm_mon
day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
now_epoch = time.mktime(now)
if start_time > end_time:
month_start_id -= 1
if month_start_id < 1:
month_start_id = 12
self.syear -= 1
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
if end_time < now_epoch:
month_end_id += 1
month_start_id += 1
if month_end_id > 12:
month_end_id = 1
self.eyear += 1
if month_start_id > 12:
month_start_id = 1
self.syear += 1
# For the start
day_start = find_day_by_offset(self.syear, month_start_id, self.smday)
start_time = get_start_of_day(self.syear, month_start_id, day_start)
# For the end
day_end = find_day_by_offset(self.eyear, month_end_id, self.emday)
end_time = get_end_of_day(self.eyear, month_end_id, day_end)
return (start_time, end_time) |
def get_previous_request(rid):
"""Return the last ceph broker request sent on a given relation
@param rid: Relation id to query for request
"""
request = None
broker_req = relation_get(attribute='broker_req', rid=rid,
unit=local_unit())
if broker_req:
request_data = json.loads(broker_req)
request = CephBrokerRq(api_version=request_data['api-version'],
request_id=request_data['request-id'])
request.set_ops(request_data['ops'])
return request | Return the last ceph broker request sent on a given relation
@param rid: Relation id to query for request | Below is the instruction that describes the task:
### Input:
Return the last ceph broker request sent on a given relation
@param rid: Relation id to query for request
### Response:
def get_previous_request(rid):
"""Return the last ceph broker request sent on a given relation
@param rid: Relation id to query for request
"""
request = None
broker_req = relation_get(attribute='broker_req', rid=rid,
unit=local_unit())
if broker_req:
request_data = json.loads(broker_req)
request = CephBrokerRq(api_version=request_data['api-version'],
request_id=request_data['request-id'])
request.set_ops(request_data['ops'])
return request |
def _validateurl(self, url):
"""assembles the server url"""
parsed = urlparse(url)
path = parsed.path.strip("/")
if path:
parts = path.split("/")
url_types = ("admin", "manager", "rest")
if any(i in parts for i in url_types):
            while parts.pop() not in url_types:
                pass
        elif "services" in parts:
            while parts.pop() != "services":
                pass
path = "/".join(parts)
else:
path = "arcgis"
self._adminUrl = "%s://%s/%s/admin" % (parsed.scheme, parsed.netloc, path)
return "%s://%s/%s/rest/services" % (parsed.scheme, parsed.netloc, path) | assembles the server url | Below is the the instruction that describes the task:
### Input:
assembles the server url
### Response:
def _validateurl(self, url):
"""assembles the server url"""
parsed = urlparse(url)
path = parsed.path.strip("/")
if path:
parts = path.split("/")
url_types = ("admin", "manager", "rest")
if any(i in parts for i in url_types):
            while parts.pop() not in url_types:
                pass
        elif "services" in parts:
            while parts.pop() != "services":
                pass
path = "/".join(parts)
else:
path = "arcgis"
self._adminUrl = "%s://%s/%s/admin" % (parsed.scheme, parsed.netloc, path)
return "%s://%s/%s/rest/services" % (parsed.scheme, parsed.netloc, path) |
def indicator(self, data):
"""Update the request URI to include the Indicator for specific indicator retrieval.
Args:
data (string): The indicator value
"""
# handle hashes in form md5 : sha1 : sha256
data = self.get_first_hash(data)
super(File, self).indicator(data) | Update the request URI to include the Indicator for specific indicator retrieval.
Args:
data (string): The indicator value | Below is the instruction that describes the task:
### Input:
Update the request URI to include the Indicator for specific indicator retrieval.
Args:
data (string): The indicator value
### Response:
def indicator(self, data):
"""Update the request URI to include the Indicator for specific indicator retrieval.
Args:
data (string): The indicator value
"""
# handle hashes in form md5 : sha1 : sha256
data = self.get_first_hash(data)
super(File, self).indicator(data) |
def verify_rank_integrity(self, tax_id, rank, parent_id, children):
"""Confirm that for each node the parent ranks and children ranks are
coherent
"""
def _lower(n1, n2):
return self.ranks.index(n1) < self.ranks.index(n2)
if rank not in self.ranks:
raise TaxonIntegrityError('rank "{}" is undefined'.format(rank))
parent_rank = self.rank(parent_id)
# undefined ranks can be placed anywhere in a lineage
if not _lower(rank, parent_rank) and rank != self.NO_RANK:
msg = ('New node "{}", rank "{}" has same or '
'higher rank than parent node "{}", rank "{}"')
msg = msg.format(tax_id, rank, parent_id, parent_rank)
raise TaxonIntegrityError(msg)
for child in children:
if not _lower(self.rank(child), rank):
msg = 'Child node {} has same or lower rank as new node {}'
            msg = msg.format(child, tax_id)
raise TaxonIntegrityError(msg)
return True | Confirm that for each node the parent ranks and children ranks are
coherent | Below is the instruction that describes the task:
### Input:
Confirm that for each node the parent ranks and children ranks are
coherent
### Response:
def verify_rank_integrity(self, tax_id, rank, parent_id, children):
"""Confirm that for each node the parent ranks and children ranks are
coherent
"""
def _lower(n1, n2):
return self.ranks.index(n1) < self.ranks.index(n2)
if rank not in self.ranks:
raise TaxonIntegrityError('rank "{}" is undefined'.format(rank))
parent_rank = self.rank(parent_id)
# undefined ranks can be placed anywhere in a lineage
if not _lower(rank, parent_rank) and rank != self.NO_RANK:
msg = ('New node "{}", rank "{}" has same or '
'higher rank than parent node "{}", rank "{}"')
msg = msg.format(tax_id, rank, parent_id, parent_rank)
raise TaxonIntegrityError(msg)
for child in children:
if not _lower(self.rank(child), rank):
msg = 'Child node {} has same or lower rank as new node {}'
            msg = msg.format(child, tax_id)
raise TaxonIntegrityError(msg)
return True |
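A sketch of the `_lower` comparison with a hypothetical `ranks` ordering; the checks above imply `self.ranks` runs from most specific to most general:

ranks = ["species", "genus", "family", "order"]  # assumed ordering, most specific first
def _lower(n1, n2):
    return ranks.index(n1) < ranks.index(n2)
print(_lower("genus", "family"))   # True: a genus node may sit under a family parent
print(_lower("family", "family"))  # False: same rank as the parent would be rejected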
def warn(self, message, *args, **kwargs):
"""alias to message at warning level"""
self.log("warn", message, *args, **kwargs) | alias to message at warning level | Below is the the instruction that describes the task:
### Input:
alias to message at warning level
### Response:
def warn(self, message, *args, **kwargs):
"""alias to message at warning level"""
self.log("warn", message, *args, **kwargs) |
def to_iso(dt):
'''
Format a date or datetime into an ISO-8601 string
Support dates before 1900.
'''
if isinstance(dt, datetime):
return to_iso_datetime(dt)
elif isinstance(dt, date):
return to_iso_date(dt) | Format a date or datetime into an ISO-8601 string
Support dates before 1900. | Below is the instruction that describes the task:
### Input:
Format a date or datetime into an ISO-8601 string
Support dates before 1900.
### Response:
def to_iso(dt):
'''
Format a date or datetime into an ISO-8601 string
Support dates before 1900.
'''
if isinstance(dt, datetime):
return to_iso_datetime(dt)
elif isinstance(dt, date):
return to_iso_date(dt) |
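A usage sketch, assuming `to_iso_date` and `to_iso_datetime` (defined elsewhere in the module) are in scope and produce standard ISO-8601 strings:

from datetime import date, datetime
print(to_iso(date(1869, 1, 2)))               # e.g. '1869-01-02'
print(to_iso(datetime(1869, 1, 2, 3, 4, 5)))  # e.g. '1869-01-02T03:04:05'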
def count_by_key_impl(sequence):
"""
Implementation for count_by_key_t
:param sequence: sequence of (key, value) pairs
:return: counts by key
"""
counter = collections.Counter()
for key, _ in sequence:
counter[key] += 1
return six.viewitems(counter) | Implementation for count_by_key_t
:param sequence: sequence of (key, value) pairs
:return: counts by key | Below is the instruction that describes the task:
### Input:
Implementation for count_by_key_t
:param sequence: sequence of (key, value) pairs
:return: counts by key
### Response:
def count_by_key_impl(sequence):
"""
Implementation for count_by_key_t
:param sequence: sequence of (key, value) pairs
:return: counts by key
"""
counter = collections.Counter()
for key, _ in sequence:
counter[key] += 1
return six.viewitems(counter) |
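The same counting logic as a standalone sketch, without the six dependency:

import collections
pairs = [("a", 1), ("b", 2), ("a", 3)]
counter = collections.Counter(key for key, _ in pairs)
print(dict(counter))  # {'a': 2, 'b': 1}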
def autogender(self, api_token=None, genderize_all=False):
"""Autocomplete gender information of unique identities.
Autocomplete unique identities gender using genderize.io
API. Only those unique identities without an assigned
gender will be updated unless `genderize_all` option is given.
"""
name_cache = {}
no_gender = not genderize_all
pattern = re.compile(r"(^\w+)\s\w+")
profiles = api.search_profiles(self.db, no_gender=no_gender)
for profile in profiles:
if not profile.name:
continue
name = profile.name.strip()
m = pattern.match(name)
if not m:
continue
firstname = m.group(1).lower()
if firstname in name_cache:
gender_data = name_cache[firstname]
else:
try:
gender, acc = genderize(firstname, api_token)
except (requests.exceptions.RequestException,
requests.exceptions.RetryError) as e:
msg = "Skipping '%s' name (%s) due to a connection error. Error: %s"
msg = msg % (firstname, profile.uuid, str(e))
self.warning(msg)
continue
gender_data = {
'gender': gender,
'gender_acc': acc
}
name_cache[firstname] = gender_data
if not gender_data['gender']:
continue
try:
api.edit_profile(self.db, profile.uuid, **gender_data)
self.display('autogender.tmpl',
uuid=profile.uuid, name=profile.name,
gender_data=gender_data)
except (NotFoundError, InvalidValueError) as e:
self.error(str(e))
return e.code
return CMD_SUCCESS | Autocomplete gender information of unique identities.
Autocomplete unique identities gender using genderize.io
API. Only those unique identities without an assigned
gender will be updated unless `genderize_all` option is given. | Below is the instruction that describes the task:
### Input:
Autocomplete gender information of unique identities.
Autocomplete unique identities gender using genderize.io
API. Only those unique identities without an assigned
gender will be updated unless `genderize_all` option is given.
### Response:
def autogender(self, api_token=None, genderize_all=False):
"""Autocomplete gender information of unique identities.
Autocomplete unique identities gender using genderize.io
API. Only those unique identities without an assigned
gender will be updated unless `genderize_all` option is given.
"""
name_cache = {}
no_gender = not genderize_all
pattern = re.compile(r"(^\w+)\s\w+")
profiles = api.search_profiles(self.db, no_gender=no_gender)
for profile in profiles:
if not profile.name:
continue
name = profile.name.strip()
m = pattern.match(name)
if not m:
continue
firstname = m.group(1).lower()
if firstname in name_cache:
gender_data = name_cache[firstname]
else:
try:
gender, acc = genderize(firstname, api_token)
except (requests.exceptions.RequestException,
requests.exceptions.RetryError) as e:
msg = "Skipping '%s' name (%s) due to a connection error. Error: %s"
msg = msg % (firstname, profile.uuid, str(e))
self.warning(msg)
continue
gender_data = {
'gender': gender,
'gender_acc': acc
}
name_cache[firstname] = gender_data
if not gender_data['gender']:
continue
try:
api.edit_profile(self.db, profile.uuid, **gender_data)
self.display('autogender.tmpl',
uuid=profile.uuid, name=profile.name,
gender_data=gender_data)
except (NotFoundError, InvalidValueError) as e:
self.error(str(e))
return e.code
return CMD_SUCCESS |
def reverse_deployments(deployments=None):
"""Reverse deployments and the modules/regions in them."""
if deployments is None:
deployments = []
reversed_deployments = []
for i in deployments[::-1]:
deployment = copy.deepcopy(i)
for config in ['modules', 'regions']:
if deployment.get(config):
deployment[config] = deployment[config][::-1]
reversed_deployments.append(deployment)
    return reversed_deployments | Reverse deployments and the modules/regions in them. | Below is the instruction that describes the task:
### Input:
Reverse deployments and the modules/regions in them.
### Response:
def reverse_deployments(deployments=None):
"""Reverse deployments and the modules/regions in them."""
if deployments is None:
deployments = []
reversed_deployments = []
for i in deployments[::-1]:
deployment = copy.deepcopy(i)
for config in ['modules', 'regions']:
if deployment.get(config):
deployment[config] = deployment[config][::-1]
reversed_deployments.append(deployment)
return reversed_deployments |
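A worked example of the double reversal (deployment order plus the lists inside each deployment); the input values are assumptions, and the call assumes the function and its `copy` import are in scope:

deployments = [
    {"modules": ["m1", "m2"], "regions": ["us-east-1", "us-west-2"]},
    {"modules": ["m3"]},
]
print(reverse_deployments(deployments))
# [{'modules': ['m3']},
#  {'modules': ['m2', 'm1'], 'regions': ['us-west-2', 'us-east-1']}]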
def apply(self, **kwexpr):
"""
Specify one or more projection expressions to add to each result
### Parameters
- **kwexpr**: One or more key-value pairs for a projection. The key is
the alias for the projection, and the value is the projection
expression itself, for example `apply(square_root="sqrt(@foo)")`
"""
for alias, expr in kwexpr.items():
self._projections.append([alias, expr])
return self | Specify one or more projection expressions to add to each result
### Parameters
- **kwexpr**: One or more key-value pairs for a projection. The key is
the alias for the projection, and the value is the projection
expression itself, for example `apply(square_root="sqrt(@foo)")` | Below is the instruction that describes the task:
### Input:
Specify one or more projection expressions to add to each result
### Parameters
- **kwexpr**: One or more key-value pairs for a projection. The key is
the alias for the projection, and the value is the projection
expression itself, for example `apply(square_root="sqrt(@foo)")`
### Response:
def apply(self, **kwexpr):
"""
Specify one or more projection expressions to add to each result
### Parameters
- **kwexpr**: One or more key-value pairs for a projection. The key is
the alias for the projection, and the value is the projection
expression itself, for example `apply(square_root="sqrt(@foo)")`
"""
for alias, expr in kwexpr.items():
self._projections.append([alias, expr])
return self |
def save(self, msg=None):
"""
Modify item data and commit to repo.
    Git objects are immutable; to save means adding a new item
:param msg: Commit message.
"""
if msg is None:
msg = 'Saving %s' % self.name
log.debug(msg)
self.repo.addItem(self, msg) | Modify item data and commit to repo.
Git objects are immutable; to save means adding a new item
:param msg: Commit message. | Below is the instruction that describes the task:
### Input:
Modify item data and commit to repo.
Git objects are immutable; to save means adding a new item
:param msg: Commit message.
### Response:
def save(self, msg=None):
"""
Modify item data and commit to repo.
    Git objects are immutable; to save means adding a new item
:param msg: Commit message.
"""
if msg is None:
msg = 'Saving %s' % self.name
log.debug(msg)
self.repo.addItem(self, msg) |
def matrix(
m, n, lst,
m_text: list=None,
n_text: list=None):
"""
m: row
n: column
lst: items
    >>> print(matrix(2, 3, [(1, 1), (2, 3)]))
|x| | |
| | |x|
"""
fmt = ""
if n_text:
fmt += " {}\n".format(" ".join(n_text))
for i in range(1, m+1):
if m_text:
fmt += "{:<4.4} ".format(m_text[i-1])
fmt += "|"
for j in range(1, n+1):
if (i, j) in lst:
fmt += "x|"
else:
fmt += " |"
fmt += "\n"
return fmt | m: row
n: column
lst: items
>>> print(matrix(2, 3, [(1, 1), (2, 3)]))
|x| | |
| | |x| | Below is the instruction that describes the task:
### Input:
m: row
n: column
lst: items
>>> print(matrix(2, 3, [(1, 1), (2, 3)]))
|x| | |
| | |x|
### Response:
def matrix(
m, n, lst,
m_text: list=None,
n_text: list=None):
"""
m: row
n: column
lst: items
    >>> print(matrix(2, 3, [(1, 1), (2, 3)]))
|x| | |
| | |x|
"""
fmt = ""
if n_text:
fmt += " {}\n".format(" ".join(n_text))
for i in range(1, m+1):
if m_text:
fmt += "{:<4.4} ".format(m_text[i-1])
fmt += "|"
for j in range(1, n+1):
if (i, j) in lst:
fmt += "x|"
else:
fmt += " |"
fmt += "\n"
return fmt |
def get_queryset(self):
"""
Check that the queryset is defined and call it.
"""
if self.queryset is None:
raise ImproperlyConfigured(
"'%s' must define 'queryset'" % self.__class__.__name__)
return self.queryset() | Check that the queryset is defined and call it. | Below is the the instruction that describes the task:
### Input:
Check that the queryset is defined and call it.
### Response:
def get_queryset(self):
"""
Check that the queryset is defined and call it.
"""
if self.queryset is None:
raise ImproperlyConfigured(
"'%s' must define 'queryset'" % self.__class__.__name__)
return self.queryset() |
def check_domain(self, service_id, version_number, name):
"""Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly setup to use Fastly."""
content = self._fetch("/service/%s/version/%d/domain/%s/check" % (service_id, version_number, name))
    return FastlyDomainCheck(self, content) | Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly set up to use Fastly. | Below is the instruction that describes the task:
### Input:
Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly set up to use Fastly.
### Response:
def check_domain(self, service_id, version_number, name):
"""Checks the status of a domain's DNS record. Returns an array of 3 items. The first is the details for the domain. The second is the current CNAME of the domain. The third is a boolean indicating whether or not it has been properly setup to use Fastly."""
content = self._fetch("/service/%s/version/%d/domain/%s/check" % (service_id, version_number, name))
return FastlyDomainCheck(self, content) |
async def get_pinstate_report(self, command):
"""
    This method retrieves a Firmata pin_state report for a pin.
See: http://firmata.org/wiki/Protocol#Pin_State_Query
:param command: {"method": "get_pin_state", "params": [PIN]}
:returns: {"method": "get_pin_state_reply", "params": [PIN_NUMBER, PIN_MODE, PIN_STATE]}
"""
pin = int(command[0])
value = await self.core.get_pin_state(pin)
if value:
reply = json.dumps({"method": "pin_state_reply", "params": value})
else:
reply = json.dumps({"method": "pin_state_reply", "params": "Unknown"})
    await self.websocket.send(reply) | This method retrieves a Firmata pin_state report for a pin.
See: http://firmata.org/wiki/Protocol#Pin_State_Query
:param command: {"method": "get_pin_state", "params": [PIN]}
:returns: {"method": "get_pin_state_reply", "params": [PIN_NUMBER, PIN_MODE, PIN_STATE]} | Below is the the instruction that describes the task:
### Input:
This method retrieves a Firmata pin_state report for a pin.
See: http://firmata.org/wiki/Protocol#Pin_State_Query
:param command: {"method": "get_pin_state", "params": [PIN]}
:returns: {"method": "get_pin_state_reply", "params": [PIN_NUMBER, PIN_MODE, PIN_STATE]}
### Response:
async def get_pinstate_report(self, command):
"""
    This method retrieves a Firmata pin_state report for a pin.
See: http://firmata.org/wiki/Protocol#Pin_State_Query
:param command: {"method": "get_pin_state", "params": [PIN]}
:returns: {"method": "get_pin_state_reply", "params": [PIN_NUMBER, PIN_MODE, PIN_STATE]}
"""
pin = int(command[0])
value = await self.core.get_pin_state(pin)
if value:
reply = json.dumps({"method": "pin_state_reply", "params": value})
else:
reply = json.dumps({"method": "pin_state_reply", "params": "Unknown"})
await self.websocket.send(reply) |
def get_notebook_tab_title(notebook, page_num):
"""Helper function that gets a notebook's tab title given its page number
:param notebook: The GTK notebook
:param page_num: The page number of the tab, for which the title is required
:return: The title of the tab
"""
child = notebook.get_nth_page(page_num)
tab_label_eventbox = notebook.get_tab_label(child)
return get_widget_title(tab_label_eventbox.get_tooltip_text()) | Helper function that gets a notebook's tab title given its page number
:param notebook: The GTK notebook
:param page_num: The page number of the tab, for which the title is required
:return: The title of the tab | Below is the instruction that describes the task:
### Input:
Helper function that gets a notebook's tab title given its page number
:param notebook: The GTK notebook
:param page_num: The page number of the tab, for which the title is required
:return: The title of the tab
### Response:
def get_notebook_tab_title(notebook, page_num):
"""Helper function that gets a notebook's tab title given its page number
:param notebook: The GTK notebook
:param page_num: The page number of the tab, for which the title is required
:return: The title of the tab
"""
child = notebook.get_nth_page(page_num)
tab_label_eventbox = notebook.get_tab_label(child)
return get_widget_title(tab_label_eventbox.get_tooltip_text()) |
def selecttabindex(self, window_name, object_name, tab_index):
"""
Select tab based on index.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param tab_index: tab to select
    @type tab_index: integer
@return: 1 on success.
@rtype: integer
"""
children = self._get_tab_children(window_name, object_name)
length = len(children)
    if tab_index < 0 or tab_index >= length:
raise LdtpServerException(u"Invalid tab index %s" % tab_index)
tab_handle = children[tab_index]
if not tab_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
tab_handle.Press()
return 1 | Select tab based on index.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param tab_index: tab to select
@type tab_index: integer
@return: 1 on success.
@rtype: integer | Below is the instruction that describes the task:
### Input:
Select tab based on index.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param tab_index: tab to select
@type data: integer
@return: 1 on success.
@rtype: integer
### Response:
def selecttabindex(self, window_name, object_name, tab_index):
"""
Select tab based on index.
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param tab_index: tab to select
    @type tab_index: integer
@return: 1 on success.
@rtype: integer
"""
children = self._get_tab_children(window_name, object_name)
length = len(children)
    if tab_index < 0 or tab_index >= length:
raise LdtpServerException(u"Invalid tab index %s" % tab_index)
tab_handle = children[tab_index]
if not tab_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
tab_handle.Press()
return 1 |
def make_name(self, reserved=[]):
"""
Autogenerates a :attr:`name` from the :attr:`title`. If the auto-generated name is already
in use in this model, :meth:`make_name` tries again by suffixing numbers starting with 2
until an available name is found.
:param reserved: List or set of reserved names unavailable for use
"""
if self.title:
if inspect(self).has_identity:
def checkused(c):
return bool(c in reserved or c in self.reserved_names
or self.__class__.query.filter(self.__class__.id != self.id).filter_by(name=c).notempty())
else:
def checkused(c):
return bool(c in reserved or c in self.reserved_names
or self.__class__.query.filter_by(name=c).notempty())
with self.__class__.query.session.no_autoflush:
self.name = six.text_type(make_name(self.title_for_name, maxlength=self.__name_length__, checkused=checkused)) | Autogenerates a :attr:`name` from the :attr:`title`. If the auto-generated name is already
in use in this model, :meth:`make_name` tries again by suffixing numbers starting with 2
until an available name is found.
:param reserved: List or set of reserved names unavailable for use | Below is the instruction that describes the task:
### Input:
Autogenerates a :attr:`name` from the :attr:`title`. If the auto-generated name is already
in use in this model, :meth:`make_name` tries again by suffixing numbers starting with 2
until an available name is found.
:param reserved: List or set of reserved names unavailable for use
### Response:
def make_name(self, reserved=[]):
"""
Autogenerates a :attr:`name` from the :attr:`title`. If the auto-generated name is already
in use in this model, :meth:`make_name` tries again by suffixing numbers starting with 2
until an available name is found.
:param reserved: List or set of reserved names unavailable for use
"""
if self.title:
if inspect(self).has_identity:
def checkused(c):
return bool(c in reserved or c in self.reserved_names
or self.__class__.query.filter(self.__class__.id != self.id).filter_by(name=c).notempty())
else:
def checkused(c):
return bool(c in reserved or c in self.reserved_names
or self.__class__.query.filter_by(name=c).notempty())
with self.__class__.query.session.no_autoflush:
self.name = six.text_type(make_name(self.title_for_name, maxlength=self.__name_length__, checkused=checkused)) |
def accel_move_tab_left(self, *args):
# TODO KEYBINDINGS ONLY
""" Callback to move a tab to the left """
pos = self.get_notebook().get_current_page()
if pos != 0:
self.move_tab(pos, pos - 1)
        return True | Callback to move a tab to the left | Below is the instruction that describes the task:
### Input:
Callback to move a tab to the left
### Response:
def accel_move_tab_left(self, *args):
# TODO KEYBINDINGS ONLY
""" Callback to move a tab to the left """
pos = self.get_notebook().get_current_page()
if pos != 0:
self.move_tab(pos, pos - 1)
return True |
def infos(self, type=None, failed=False):
"""Get all infos created by the participants nodes.
Return a list of infos produced by nodes associated with the
participant. If specified, ``type`` filters by class. By default, failed
infos are excluded, to include only failed nodes use ``failed=True``,
for all nodes use ``failed=all``. Note that failed filters the infos,
not the nodes - infos from all nodes (whether failed or not) can be
returned.
"""
nodes = self.nodes(failed="all")
infos = []
for n in nodes:
infos.extend(n.infos(type=type, failed=failed))
    return infos | Get all infos created by the participant's nodes.
Return a list of infos produced by nodes associated with the
participant. If specified, ``type`` filters by class. By default, failed
infos are excluded, to include only failed nodes use ``failed=True``,
for all nodes use ``failed=all``. Note that failed filters the infos,
not the nodes - infos from all nodes (whether failed or not) can be
returned. | Below is the instruction that describes the task:
### Input:
Get all infos created by the participant's nodes.
Return a list of infos produced by nodes associated with the
participant. If specified, ``type`` filters by class. By default, failed
infos are excluded, to include only failed nodes use ``failed=True``,
for all nodes use ``failed=all``. Note that failed filters the infos,
not the nodes - infos from all nodes (whether failed or not) can be
returned.
### Response:
def infos(self, type=None, failed=False):
"""Get all infos created by the participants nodes.
Return a list of infos produced by nodes associated with the
participant. If specified, ``type`` filters by class. By default, failed
infos are excluded, to include only failed nodes use ``failed=True``,
for all nodes use ``failed=all``. Note that failed filters the infos,
not the nodes - infos from all nodes (whether failed or not) can be
returned.
"""
nodes = self.nodes(failed="all")
infos = []
for n in nodes:
infos.extend(n.infos(type=type, failed=failed))
return infos |
def _find_usage_cloudtrail(self):
"""Calculate current usage for CloudTrail related metrics"""
trail_list = self.conn.describe_trails()['trailList']
trail_count = len(trail_list) if trail_list else 0
for trail in trail_list:
data_resource_count = 0
if self.conn._client_config.region_name == trail['HomeRegion']:
response = self.conn.get_event_selectors(
TrailName=trail['Name']
)
event_selectors = response['EventSelectors']
for event_selector in event_selectors:
data_resource_count += len(
event_selector.get('DataResources', [])
)
self.limits['Event Selectors Per Trail']._add_current_usage(
len(event_selectors),
aws_type='AWS::CloudTrail::EventSelector',
resource_id=trail['Name']
)
self.limits['Data Resources Per Trail']._add_current_usage(
data_resource_count,
aws_type='AWS::CloudTrail::DataResource',
resource_id=trail['Name']
)
else:
logger.debug(
'Ignoring event selectors and data resources for '
'CloudTrail %s in non-home region' % trail['Name']
)
self.limits['Trails Per Region']._add_current_usage(
trail_count,
aws_type=self.aws_type
    ) | Calculate current usage for CloudTrail related metrics | Below is the instruction that describes the task:
### Input:
Calculate current usage for CloudTrail related metrics
### Response:
def _find_usage_cloudtrail(self):
"""Calculate current usage for CloudTrail related metrics"""
trail_list = self.conn.describe_trails()['trailList']
trail_count = len(trail_list) if trail_list else 0
for trail in trail_list:
data_resource_count = 0
if self.conn._client_config.region_name == trail['HomeRegion']:
response = self.conn.get_event_selectors(
TrailName=trail['Name']
)
event_selectors = response['EventSelectors']
for event_selector in event_selectors:
data_resource_count += len(
event_selector.get('DataResources', [])
)
self.limits['Event Selectors Per Trail']._add_current_usage(
len(event_selectors),
aws_type='AWS::CloudTrail::EventSelector',
resource_id=trail['Name']
)
self.limits['Data Resources Per Trail']._add_current_usage(
data_resource_count,
aws_type='AWS::CloudTrail::DataResource',
resource_id=trail['Name']
)
else:
logger.debug(
'Ignoring event selectors and data resources for '
'CloudTrail %s in non-home region' % trail['Name']
)
self.limits['Trails Per Region']._add_current_usage(
trail_count,
aws_type=self.aws_type
) |
def postprocess_result(morphresult, trim_phonetic, trim_compound):
"""Postprocess vabamorf wrapper output."""
word, analysis = morphresult
return {
'text': deconvert(word),
'analysis': [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis]
    } | Postprocess vabamorf wrapper output. | Below is the instruction that describes the task:
### Input:
Postprocess vabamorf wrapper output.
### Response:
def postprocess_result(morphresult, trim_phonetic, trim_compound):
"""Postprocess vabamorf wrapper output."""
word, analysis = morphresult
return {
'text': deconvert(word),
'analysis': [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis]
} |
def handle_split(self, asset, ratio):
"""
Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash.
"""
if self.asset != asset:
raise Exception("updating split with the wrong asset!")
# adjust the # of shares by the ratio
# (if we had 100 shares, and the ratio is 3,
# we now have 33 shares)
# (old_share_count / ratio = new_share_count)
# (old_price * ratio = new_price)
# e.g., 33.333
raw_share_count = self.amount / float(ratio)
# e.g., 33
full_share_count = np.floor(raw_share_count)
# e.g., 0.333
fractional_share_count = raw_share_count - full_share_count
# adjust the cost basis to the nearest cent, e.g., 60.0
new_cost_basis = round(self.cost_basis * ratio, 2)
self.cost_basis = new_cost_basis
self.amount = full_share_count
return_cash = round(float(fractional_share_count * new_cost_basis), 2)
log.info("after split: " + str(self))
log.info("returning cash: " + str(return_cash))
# return the leftover cash, which will be converted into cash
# (rounded to the nearest cent)
return return_cash | Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash. | Below is the instruction that describes the task:
### Input:
Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash.
### Response:
def handle_split(self, asset, ratio):
"""
Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash.
"""
if self.asset != asset:
raise Exception("updating split with the wrong asset!")
# adjust the # of shares by the ratio
# (if we had 100 shares, and the ratio is 3,
# we now have 33 shares)
# (old_share_count / ratio = new_share_count)
# (old_price * ratio = new_price)
# e.g., 33.333
raw_share_count = self.amount / float(ratio)
# e.g., 33
full_share_count = np.floor(raw_share_count)
# e.g., 0.333
fractional_share_count = raw_share_count - full_share_count
# adjust the cost basis to the nearest cent, e.g., 60.0
new_cost_basis = round(self.cost_basis * ratio, 2)
self.cost_basis = new_cost_basis
self.amount = full_share_count
return_cash = round(float(fractional_share_count * new_cost_basis), 2)
log.info("after split: " + str(self))
log.info("returning cash: " + str(return_cash))
# return the leftover cash, which will be converted into cash
# (rounded to the nearest cent)
return return_cash |
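Tracing the arithmetic with the numbers from the inline comments (a hypothetical 100-share position, 20.0 cost basis, split ratio 3):

import numpy as np
amount, cost_basis, ratio = 100, 20.0, 3.0                   # hypothetical position and ratio
raw_share_count = amount / float(ratio)                      # 33.333...
full_share_count = np.floor(raw_share_count)                 # 33.0
fractional_share_count = raw_share_count - full_share_count  # 0.333...
new_cost_basis = round(cost_basis * ratio, 2)                # 60.0
return_cash = round(float(fractional_share_count * new_cost_basis), 2)
print(full_share_count, new_cost_basis, return_cash)         # 33.0 60.0 20.0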
def flatten2d(d, key_as_tuple=True, delim='.',
list_of_dicts=None):
""" get nested dict as {key:dict,...},
where key is tuple/string of all-1 nested keys
NB: is same as flattennd(d,1,key_as_tuple,delim)
Parameters
----------
d : dict
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
delim : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}}
>>> pprint(flatten2d(d))
{(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}}
>>> pprint(flatten2d(d,key_as_tuple=False,delim=','))
{'1,2': {4: 'D'}, '1,2,3': {'b': 'B', 'c': 'C'}}
"""
return flattennd(d, 1, key_as_tuple, delim, list_of_dicts=list_of_dicts) | get nested dict as {key:dict,...},
where key is tuple/string of all-1 nested keys
NB: is same as flattennd(d,1,key_as_tuple,delim)
Parameters
----------
d : dict
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
delim : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}}
>>> pprint(flatten2d(d))
{(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}}
>>> pprint(flatten2d(d,key_as_tuple=False,delim=','))
{'1,2': {4: 'D'}, '1,2,3': {'b': 'B', 'c': 'C'}} | Below is the instruction that describes the task:
### Input:
get nested dict as {key:dict,...},
where key is tuple/string of all-1 nested keys
NB: is same as flattennd(d,1,key_as_tuple,delim)
Parameters
----------
d : dict
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
delim : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}}
>>> pprint(flatten2d(d))
{(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}}
>>> pprint(flatten2d(d,key_as_tuple=False,delim=','))
{'1,2': {4: 'D'}, '1,2,3': {'b': 'B', 'c': 'C'}}
### Response:
def flatten2d(d, key_as_tuple=True, delim='.',
list_of_dicts=None):
""" get nested dict as {key:dict,...},
where key is tuple/string of all-1 nested keys
NB: is same as flattennd(d,1,key_as_tuple,delim)
Parameters
----------
d : dict
key_as_tuple : bool
whether keys are list of nested keys or delimited string of nested keys
delim : str
if key_as_tuple=False, delimiter for keys
list_of_dicts: str or None
if not None, flatten lists of dicts using this prefix
Examples
--------
>>> from pprint import pprint
>>> d = {1:{2:{3:{'b':'B','c':'C'},4:'D'}}}
>>> pprint(flatten2d(d))
{(1, 2): {4: 'D'}, (1, 2, 3): {'b': 'B', 'c': 'C'}}
>>> pprint(flatten2d(d,key_as_tuple=False,delim=','))
{'1,2': {4: 'D'}, '1,2,3': {'b': 'B', 'c': 'C'}}
"""
return flattennd(d, 1, key_as_tuple, delim, list_of_dicts=list_of_dicts) |
def save_point(self) -> str:
"""
    Indexes the measured data with the current point as a key, and saves the
    current position to the 'actual points' vector once the 'Enter' key is
    pressed.
"""
if self._current_mount is left:
msg = self.save_mount_offset()
self._current_mount = right
elif self._current_mount is types.Mount.LEFT:
msg = self.save_mount_offset()
self._current_mount = types.Mount.RIGHT
else:
pos = self._position()[:-1]
self.actual_points[self._current_point] = pos
log.debug("Saving {} for point {}".format(
pos, self._current_point))
msg = 'saved #{}: {}'.format(
self._current_point, self.actual_points[self._current_point])
    return msg | Indexes the measured data with the current point as a key, and saves the
current position to the 'actual points' vector once the 'Enter' key is
pressed. | Below is the instruction that describes the task:
### Input:
Indexes the measured data with the current point as a key, and saves the
current position to the 'actual points' vector once the 'Enter' key is
pressed.
### Response:
def save_point(self) -> str:
"""
    Indexes the measured data with the current point as a key, and saves the
    current position to the 'actual points' vector once the 'Enter' key is
    pressed.
"""
if self._current_mount is left:
msg = self.save_mount_offset()
self._current_mount = right
elif self._current_mount is types.Mount.LEFT:
msg = self.save_mount_offset()
self._current_mount = types.Mount.RIGHT
else:
pos = self._position()[:-1]
self.actual_points[self._current_point] = pos
log.debug("Saving {} for point {}".format(
pos, self._current_point))
msg = 'saved #{}: {}'.format(
self._current_point, self.actual_points[self._current_point])
return msg |
def create_storage_policy(policy_name, policy_dict, service_instance=None):
'''
Creates a storage policy.
Supported capability types: scalar, set, range.
policy_name
Name of the policy to create.
The value of the argument will override any existing name in
``policy_dict``.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_storage_policy policy_name='policy name'
policy_dict="$policy_dict"
'''
log.trace('create storage policy \'%s\', dict = %s', policy_name, policy_dict)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec()
# Hardcode the storage profile resource type
policy_create_spec.resourceType = pbm.profile.ResourceType(
resourceType=pbm.profile.ResourceTypeEnum.STORAGE)
# Set name argument
policy_dict['name'] = policy_name
log.trace('Setting policy values in policy_update_spec')
_apply_policy_config(policy_create_spec, policy_dict)
salt.utils.pbm.create_storage_policy(profile_manager, policy_create_spec)
return {'create_storage_policy': True} | Creates a storage policy.
Supported capability types: scalar, set, range.
policy_name
Name of the policy to create.
The value of the argument will override any existing name in
``policy_dict``.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_storage_policy policy_name='policy name'
policy_dict="$policy_dict" | Below is the instruction that describes the task:
### Input:
Creates a storage policy.
Supported capability types: scalar, set, range.
policy_name
Name of the policy to create.
The value of the argument will override any existing name in
``policy_dict``.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_storage_policy policy_name='policy name'
policy_dict="$policy_dict"
### Response:
def create_storage_policy(policy_name, policy_dict, service_instance=None):
'''
Creates a storage policy.
Supported capability types: scalar, set, range.
policy_name
Name of the policy to create.
The value of the argument will override any existing name in
``policy_dict``.
policy_dict
Dictionary containing the changes to apply to the policy.
(example in salt.states.pbm)
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.create_storage_policy policy_name='policy name'
policy_dict="$policy_dict"
'''
log.trace('create storage policy \'%s\', dict = %s', policy_name, policy_dict)
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy_create_spec = pbm.profile.CapabilityBasedProfileCreateSpec()
# Hardcode the storage profile resource type
policy_create_spec.resourceType = pbm.profile.ResourceType(
resourceType=pbm.profile.ResourceTypeEnum.STORAGE)
# Set name argument
policy_dict['name'] = policy_name
log.trace('Setting policy values in policy_update_spec')
_apply_policy_config(policy_create_spec, policy_dict)
salt.utils.pbm.create_storage_policy(profile_manager, policy_create_spec)
return {'create_storage_policy': True} |
def extract_solvent_accessibility_dssp(in_dssp, path=True):
"""Uses DSSP to extract solvent accessibilty information on every residue.
Notes
-----
For more information on the solvent accessibility metrics used in dssp, see:
http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC
    In the dssp files, the value is labeled 'ACC'.
Parameters
----------
in_dssp : str
Path to DSSP file.
path : bool
Indicates if in_dssp is a path or a string.
Returns
-------
dssp_residues : list
Each internal list contains:
[0] int Residue number
[1] str Chain identifier
[2] str Residue type
            [3] int dssp solvent accessibility
"""
if path:
with open(in_dssp, 'r') as inf:
dssp_out = inf.read()
else:
dssp_out = in_dssp[:]
dssp_residues = []
go = False
for line in dssp_out.splitlines():
if go:
try:
res_num = int(line[5:10].strip())
chain = line[10:12].strip()
residue = line[13]
acc = int(line[35:38].strip())
# It is IMPORTANT that acc remains the final value of the
# returned list, due to its usage in
# isambard.ampal.base_ampal.tag_dssp_solvent_accessibility
dssp_residues.append([res_num, chain, residue, acc])
except ValueError:
pass
else:
if line[2] == '#':
go = True
    return dssp_residues | Uses DSSP to extract solvent accessibility information on every residue.
Notes
-----
For more information on the solvent accessibility metrics used in dssp, see:
http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC
    In the dssp files, this value is labeled 'ACC'.
Parameters
----------
in_dssp : str
Path to DSSP file.
path : bool
Indicates if in_dssp is a path or a string.
Returns
-------
dssp_residues : list
Each internal list contains:
[0] int Residue number
[1] str Chain identifier
[2] str Residue type
            [3] int  dssp solvent accessibility | Below is the instruction that describes the task:
### Input:
Uses DSSP to extract solvent accessibility information on every residue.
Notes
-----
For more information on the solvent accessibility metrics used in dssp, see:
http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC
In the dssp files, this value is labeled 'ACC'.
Parameters
----------
in_dssp : str
Path to DSSP file.
path : bool
Indicates if in_dssp is a path or a string.
Returns
-------
dssp_residues : list
Each internal list contains:
[0] int Residue number
[1] str Chain identifier
[2] str Residue type
        [3] int  dssp solvent accessibility
### Response:
def extract_solvent_accessibility_dssp(in_dssp, path=True):
"""Uses DSSP to extract solvent accessibilty information on every residue.
Notes
-----
For more information on the solvent accessibility metrics used in dssp, see:
http://swift.cmbi.ru.nl/gv/dssp/HTML/descrip.html#ACC
    In the dssp files, this value is labeled 'ACC'.
Parameters
----------
in_dssp : str
Path to DSSP file.
path : bool
Indicates if in_dssp is a path or a string.
Returns
-------
dssp_residues : list
Each internal list contains:
[0] int Residue number
[1] str Chain identifier
[2] str Residue type
            [3] int  dssp solvent accessibility
"""
if path:
with open(in_dssp, 'r') as inf:
dssp_out = inf.read()
else:
dssp_out = in_dssp[:]
dssp_residues = []
go = False
for line in dssp_out.splitlines():
if go:
try:
res_num = int(line[5:10].strip())
chain = line[10:12].strip()
residue = line[13]
acc = int(line[35:38].strip())
# It is IMPORTANT that acc remains the final value of the
# returned list, due to its usage in
# isambard.ampal.base_ampal.tag_dssp_solvent_accessibility
dssp_residues.append([res_num, chain, residue, acc])
except ValueError:
pass
else:
if line[2] == '#':
go = True
pass
return dssp_residues |
def allow(self, comment, content_object, request):
"""Moderates comments."""
POST = urlencode({
"blog": settings.AKISMET_BLOG.encode("utf-8"),
"user_ip": comment.ip_address,
"user_agent": request.META.get('HTTP_USER_AGENT', "").
encode("utf-8"),
"referrer": request.META.get('HTTP_REFERRER', "").
encode("utf-8"),
"comment_author": comment.user_name.encode("utf-8"),
"comment_author_email": comment.user_email.encode("utf-8"),
"comment_author_url": comment.user_url.encode("utf-8"),
"comment_content": comment.comment.encode("utf-8")})
connection = HTTPConnection(AKISMET_URL, AKISMET_PORT)
connection.request("POST", AKISMET_PATH, POST,
{"User-Agent": AKISMET_USERAGENT,
"Content-type":"application/x-www-form-urlencoded"
})
response = connection.getresponse()
status, result = response.status, response.read()
if result == "false":
return True
elif result == "true" and settings.DISCARD_SPAM:
return False
elif result == "true":
comment.is_removed = True
comment.is_public = False
return True
else:
        raise AkismetError(status, result) | Moderates comments. | Below is the instruction that describes the task:
### Input:
Moderates comments.
### Response:
def allow(self, comment, content_object, request):
"""Moderates comments."""
POST = urlencode({
"blog": settings.AKISMET_BLOG.encode("utf-8"),
"user_ip": comment.ip_address,
"user_agent": request.META.get('HTTP_USER_AGENT', "").
encode("utf-8"),
"referrer": request.META.get('HTTP_REFERRER', "").
encode("utf-8"),
"comment_author": comment.user_name.encode("utf-8"),
"comment_author_email": comment.user_email.encode("utf-8"),
"comment_author_url": comment.user_url.encode("utf-8"),
"comment_content": comment.comment.encode("utf-8")})
connection = HTTPConnection(AKISMET_URL, AKISMET_PORT)
connection.request("POST", AKISMET_PATH, POST,
{"User-Agent": AKISMET_USERAGENT,
"Content-type":"application/x-www-form-urlencoded"
})
response = connection.getresponse()
status, result = response.status, response.read()
if result == "false":
return True
elif result == "true" and settings.DISCARD_SPAM:
return False
elif result == "true":
comment.is_removed = True
comment.is_public = False
return True
else:
raise AkismetError(status, result) |
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):
"""Calculate accuracy for a set, given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
accuracy (scalar), weights
"""
with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]):
del weights_fn
predictions = tf.nn.sigmoid(logits)
labels = tf.argmax(labels, -1)
predictions = tf.argmax(predictions, -1)
_, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
return accuracy, tf.constant(1.0) | Calculate accuracy for a set, given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
    accuracy (scalar), weights | Below is the instruction that describes the task:
### Input:
Calculate accuracy for a set, given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
accuracy (scalar), weights
### Response:
def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None):
"""Calculate accuracy for a set, given one-hot labels and logits.
Args:
logits: Tensor of size [batch-size, o=1, p=1, num-classes]
labels: Tensor of size [batch-size, o=1, p=1, num-classes]
weights_fn: Function that takes in labels and weighs examples (unused)
Returns:
accuracy (scalar), weights
"""
with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]):
del weights_fn
predictions = tf.nn.sigmoid(logits)
labels = tf.argmax(labels, -1)
predictions = tf.argmax(predictions, -1)
_, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
return accuracy, tf.constant(1.0) |
def patch_for_specialized_compiler():
"""
Patch functions in distutils.msvc9compiler to use the standalone compiler
build for Python (Windows only). Fall back to original behavior when the
standalone compiler is not available.
"""
if 'distutils' not in globals():
# The module isn't available to be patched
return
if unpatched:
# Already patched
return
unpatched.update(vars(distutils.msvc9compiler))
distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
distutils.msvc9compiler.query_vcvarsall = query_vcvarsall | Patch functions in distutils.msvc9compiler to use the standalone compiler
build for Python (Windows only). Fall back to original behavior when the
    standalone compiler is not available. | Below is the instruction that describes the task:
### Input:
Patch functions in distutils.msvc9compiler to use the standalone compiler
build for Python (Windows only). Fall back to original behavior when the
standalone compiler is not available.
### Response:
def patch_for_specialized_compiler():
"""
Patch functions in distutils.msvc9compiler to use the standalone compiler
build for Python (Windows only). Fall back to original behavior when the
standalone compiler is not available.
"""
if 'distutils' not in globals():
# The module isn't available to be patched
return
if unpatched:
# Already patched
return
unpatched.update(vars(distutils.msvc9compiler))
distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
distutils.msvc9compiler.query_vcvarsall = query_vcvarsall |
def close_tab(self):
"""
Close active tab.
"""
if len(self.tab_pages) > 1: # Cannot close last tab.
del self.tab_pages[self.active_tab_index]
self.active_tab_index = max(0, self.active_tab_index - 1)
# Clean up buffers.
        self._auto_close_new_empty_buffers() | Close active tab. | Below is the instruction that describes the task:
### Input:
Close active tab.
### Response:
def close_tab(self):
"""
Close active tab.
"""
if len(self.tab_pages) > 1: # Cannot close last tab.
del self.tab_pages[self.active_tab_index]
self.active_tab_index = max(0, self.active_tab_index - 1)
# Clean up buffers.
self._auto_close_new_empty_buffers() |
def s_l(l, alpha):
"""
get sigma as a function of degree l from Constable and Parker (1988)
"""
a2 = alpha**2
c_a = 0.547
s_l = np.sqrt(old_div(((c_a**(2. * l)) * a2), ((l + 1.) * (2. * l + 1.))))
    return s_l | get sigma as a function of degree l from Constable and Parker (1988) | Below is the instruction that describes the task:
### Input:
get sigma as a function of degree l from Constable and Parker (1988)
### Response:
def s_l(l, alpha):
"""
get sigma as a function of degree l from Constable and Parker (1988)
"""
a2 = alpha**2
c_a = 0.547
s_l = np.sqrt(old_div(((c_a**(2. * l)) * a2), ((l + 1.) * (2. * l + 1.))))
return s_l |
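A minimal usage sketch for the s_l record above, assuming numpy is imported as np and that old_div (from the past package) reduces to plain division for these float operands; the alpha value is purely illustrative:
import numpy as np  # the function body also expects np and old_div in scope
alpha = 27.7  # illustrative spread parameter, not taken from the record
for l in range(1, 4):
    print(l, s_l(l, alpha))  # sigma shrinks as the degree l grows, since c_a < 1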
def interfaces(self):
"""Collect the available wlan interfaces."""
self._ifaces = []
wifi_ctrl = wifiutil.WifiUtil()
for interface in wifi_ctrl.interfaces():
iface = Interface(interface)
self._ifaces.append(iface)
self._logger.info("Get interface: %s", iface.name())
if not self._ifaces:
self._logger.error("Can't get wifi interface")
    return self._ifaces | Collect the available wlan interfaces. | Below is the instruction that describes the task:
### Input:
Collect the available wlan interfaces.
### Response:
def interfaces(self):
"""Collect the available wlan interfaces."""
self._ifaces = []
wifi_ctrl = wifiutil.WifiUtil()
for interface in wifi_ctrl.interfaces():
iface = Interface(interface)
self._ifaces.append(iface)
self._logger.info("Get interface: %s", iface.name())
if not self._ifaces:
self._logger.error("Can't get wifi interface")
return self._ifaces |
def get_hosts_by_explosion(self, hostgroups):
# pylint: disable=access-member-before-definition
"""
Get hosts of this group
:param hostgroups: Hostgroup object
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: list of hosts of this group
:rtype: list
"""
        # First we tag the hg so it will not be exploded
        # if a son of it has already called it
self.already_exploded = True
# Now the recursive part
        # rec_tag is set to False for every HG we explode
# so if True here, it must be a loop in HG
# calls... not GOOD!
if self.rec_tag:
logger.error("[hostgroup::%s] got a loop in hostgroup definition", self.get_name())
return self.get_hosts()
# Ok, not a loop, we tag it and continue
self.rec_tag = True
hg_mbrs = self.get_hostgroup_members()
for hg_mbr in hg_mbrs:
hostgroup = hostgroups.find_by_name(hg_mbr.strip())
if hostgroup is not None:
value = hostgroup.get_hosts_by_explosion(hostgroups)
if value is not None:
self.add_members(value)
return self.get_hosts() | Get hosts of this group
:param hostgroups: Hostgroup object
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: list of hosts of this group
    :rtype: list | Below is the instruction that describes the task:
### Input:
Get hosts of this group
:param hostgroups: Hostgroup object
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: list of hosts of this group
:rtype: list
### Response:
def get_hosts_by_explosion(self, hostgroups):
# pylint: disable=access-member-before-definition
"""
Get hosts of this group
:param hostgroups: Hostgroup object
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: list of hosts of this group
:rtype: list
"""
        # First we tag the hg so it will not be exploded
        # if a son of it has already called it
self.already_exploded = True
# Now the recursive part
        # rec_tag is set to False for every HG we explode
# so if True here, it must be a loop in HG
# calls... not GOOD!
if self.rec_tag:
logger.error("[hostgroup::%s] got a loop in hostgroup definition", self.get_name())
return self.get_hosts()
# Ok, not a loop, we tag it and continue
self.rec_tag = True
hg_mbrs = self.get_hostgroup_members()
for hg_mbr in hg_mbrs:
hostgroup = hostgroups.find_by_name(hg_mbr.strip())
if hostgroup is not None:
value = hostgroup.get_hosts_by_explosion(hostgroups)
if value is not None:
self.add_members(value)
return self.get_hosts() |
def user_has_group(user, group, superuser_skip=True):
"""
    Check if a user is in a certain group.
By default, the check is skipped for superusers.
"""
if user.is_superuser and superuser_skip:
return True
    return user.groups.filter(name=group).exists() | Check if a user is in a certain group.
    By default, the check is skipped for superusers. | Below is the instruction that describes the task:
### Input:
Check if a user is in a certain group.
By default, the check is skipped for superusers.
### Response:
def user_has_group(user, group, superuser_skip=True):
"""
    Check if a user is in a certain group.
By default, the check is skipped for superusers.
"""
if user.is_superuser and superuser_skip:
return True
return user.groups.filter(name=group).exists() |
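A hypothetical Django view sketch for the user_has_group record above; the view and the 'editors' group name are invented for illustration:
from django.http import HttpResponseForbidden
def edit_article(request, article_id):
    # Superusers pass automatically because superuser_skip defaults to True.
    if not user_has_group(request.user, 'editors'):
        return HttpResponseForbidden()
    ...  # proceed with the edit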
def attach(self, screen):
"""Adds a given screen to the listener queue.
:param pyte.screens.Screen screen: a screen to attach to.
"""
if self.listener is not None:
warnings.warn("As of version 0.6.0 the listener queue is "
"restricted to a single element. Existing "
"listener {0} will be replaced."
.format(self.listener), DeprecationWarning)
if self.strict:
for event in self.events:
if not hasattr(screen, event):
raise TypeError("{0} is missing {1}".format(screen, event))
self.listener = screen
self._parser = None
self._initialize_parser() | Adds a given screen to the listener queue.
    :param pyte.screens.Screen screen: a screen to attach to. | Below is the instruction that describes the task:
### Input:
Adds a given screen to the listener queue.
:param pyte.screens.Screen screen: a screen to attach to.
### Response:
def attach(self, screen):
"""Adds a given screen to the listener queue.
:param pyte.screens.Screen screen: a screen to attach to.
"""
if self.listener is not None:
warnings.warn("As of version 0.6.0 the listener queue is "
"restricted to a single element. Existing "
"listener {0} will be replaced."
.format(self.listener), DeprecationWarning)
if self.strict:
for event in self.events:
if not hasattr(screen, event):
raise TypeError("{0} is missing {1}".format(screen, event))
self.listener = screen
self._parser = None
self._initialize_parser() |
def from_json(self, fname):
'''
    Read contents of a JSON file containing a list of servers.
'''
with open(fname, 'rt') as fp:
for row in json.load(fp):
nn = ServerInfo.from_dict(row)
            self[str(nn)] = nn | Read contents of a JSON file containing a list of servers. | Below is the instruction that describes the task:
### Input:
Read contents of a JSON file containing a list of servers.
### Response:
def from_json(self, fname):
'''
    Read contents of a JSON file containing a list of servers.
'''
with open(fname, 'rt') as fp:
for row in json.load(fp):
nn = ServerInfo.from_dict(row)
self[str(nn)] = nn |
def bar_chart_mf(data, path_name):
"""Make a bar chart for data on MF quantities."""
N = len(data)
ind = np.arange(N) # the x locations for the groups
width = 0.8 # the width of the bars
fig, ax = pyplot.subplots()
rects1 = ax.bar(ind, data, width, color='g')
# add some text for labels, title and axes ticks
ax.set_ylabel('Population')
ax.set_xticks(ind+width/2)
    labs = ['m='+str(i) for i in range(-N//2+1, N//2+1)]
ax.set_xticklabels(labs)
def autolabel(rects):
# attach some text labels
for rect in rects:
rect.get_height()
autolabel(rects1)
pyplot.savefig(path_name)
    pyplot.close() | Make a bar chart for data on MF quantities. | Below is the instruction that describes the task:
### Input:
Make a bar chart for data on MF quantities.
### Response:
def bar_chart_mf(data, path_name):
"""Make a bar chart for data on MF quantities."""
N = len(data)
ind = np.arange(N) # the x locations for the groups
width = 0.8 # the width of the bars
fig, ax = pyplot.subplots()
rects1 = ax.bar(ind, data, width, color='g')
# add some text for labels, title and axes ticks
ax.set_ylabel('Population')
ax.set_xticks(ind+width/2)
    labs = ['m='+str(i) for i in range(-N//2+1, N//2+1)]
ax.set_xticklabels(labs)
def autolabel(rects):
# attach some text labels
for rect in rects:
rect.get_height()
autolabel(rects1)
pyplot.savefig(path_name)
pyplot.close() |
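A quick call sketch for the bar_chart_mf record above, assuming a non-interactive matplotlib backend and a writable output path (both hypothetical):
# len(data) == 4, so the labels run over range(-4//2+1, 4//2+1) == range(-1, 3),
# i.e. m=-1, m=0, m=1, m=2.
bar_chart_mf([3, 5, 2, 1], 'mf_populations.png')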
def add_state_editor(self, state_m):
"""Triggered whenever a state is selected.
:param state_m: The selected state model.
"""
state_identifier = self.get_state_identifier(state_m)
if state_identifier in self.closed_tabs:
state_editor_ctrl = self.closed_tabs[state_identifier]['controller']
state_editor_view = state_editor_ctrl.view
handler_id = self.closed_tabs[state_identifier]['source_code_changed_handler_id']
source_code_view_is_dirty = self.closed_tabs[state_identifier]['source_code_view_is_dirty']
del self.closed_tabs[state_identifier] # pages not in self.closed_tabs and self.tabs at the same time
else:
state_editor_view = StateEditorView()
if isinstance(state_m, LibraryStateModel):
state_editor_view['main_notebook_1'].set_current_page(
state_editor_view['main_notebook_1'].page_num(state_editor_view.page_dict["Data Linkage"]))
state_editor_ctrl = StateEditorController(state_m, state_editor_view)
self.add_controller(state_identifier, state_editor_ctrl)
if state_editor_ctrl.get_controller('source_ctrl') and state_m.state.get_next_upper_library_root_state() is None:
# observe changed to set the mark dirty flag
handler_id = state_editor_view.source_view.get_buffer().connect('changed', self.script_text_changed,
state_m)
self.view.get_top_widget().connect('draw', state_editor_view.source_view.on_draw)
else:
handler_id = None
source_code_view_is_dirty = False
(tab, inner_label, sticky_button) = create_tab_header('', self.on_tab_close_clicked,
self.on_toggle_sticky_clicked, state_m)
set_tab_label_texts(inner_label, state_m, source_code_view_is_dirty)
state_editor_view.get_top_widget().title_label = inner_label
state_editor_view.get_top_widget().sticky_button = sticky_button
page_content = state_editor_view.get_top_widget()
page_id = self.view.notebook.prepend_page(page_content, tab)
page = self.view.notebook.get_nth_page(page_id)
self.view.notebook.set_tab_reorderable(page, True)
page.show_all()
self.view.notebook.show()
self.tabs[state_identifier] = {'page': page, 'state_m': state_m,
'controller': state_editor_ctrl, 'sm_id': self.model.selected_state_machine_id,
'is_sticky': False,
'source_code_view_is_dirty': source_code_view_is_dirty,
'source_code_changed_handler_id': handler_id}
return page_id | Triggered whenever a state is selected.
    :param state_m: The selected state model. | Below is the instruction that describes the task:
### Input:
Triggered whenever a state is selected.
:param state_m: The selected state model.
### Response:
def add_state_editor(self, state_m):
"""Triggered whenever a state is selected.
:param state_m: The selected state model.
"""
state_identifier = self.get_state_identifier(state_m)
if state_identifier in self.closed_tabs:
state_editor_ctrl = self.closed_tabs[state_identifier]['controller']
state_editor_view = state_editor_ctrl.view
handler_id = self.closed_tabs[state_identifier]['source_code_changed_handler_id']
source_code_view_is_dirty = self.closed_tabs[state_identifier]['source_code_view_is_dirty']
del self.closed_tabs[state_identifier] # pages not in self.closed_tabs and self.tabs at the same time
else:
state_editor_view = StateEditorView()
if isinstance(state_m, LibraryStateModel):
state_editor_view['main_notebook_1'].set_current_page(
state_editor_view['main_notebook_1'].page_num(state_editor_view.page_dict["Data Linkage"]))
state_editor_ctrl = StateEditorController(state_m, state_editor_view)
self.add_controller(state_identifier, state_editor_ctrl)
if state_editor_ctrl.get_controller('source_ctrl') and state_m.state.get_next_upper_library_root_state() is None:
# observe changed to set the mark dirty flag
handler_id = state_editor_view.source_view.get_buffer().connect('changed', self.script_text_changed,
state_m)
self.view.get_top_widget().connect('draw', state_editor_view.source_view.on_draw)
else:
handler_id = None
source_code_view_is_dirty = False
(tab, inner_label, sticky_button) = create_tab_header('', self.on_tab_close_clicked,
self.on_toggle_sticky_clicked, state_m)
set_tab_label_texts(inner_label, state_m, source_code_view_is_dirty)
state_editor_view.get_top_widget().title_label = inner_label
state_editor_view.get_top_widget().sticky_button = sticky_button
page_content = state_editor_view.get_top_widget()
page_id = self.view.notebook.prepend_page(page_content, tab)
page = self.view.notebook.get_nth_page(page_id)
self.view.notebook.set_tab_reorderable(page, True)
page.show_all()
self.view.notebook.show()
self.tabs[state_identifier] = {'page': page, 'state_m': state_m,
'controller': state_editor_ctrl, 'sm_id': self.model.selected_state_machine_id,
'is_sticky': False,
'source_code_view_is_dirty': source_code_view_is_dirty,
'source_code_changed_handler_id': handler_id}
return page_id |
def Verify(self, public_key):
"""Verifies the certificate using the given key.
Args:
public_key: The public key to use.
Returns:
True: Everything went well.
Raises:
VerificationError: The certificate did not verify.
"""
# TODO(amoser): We have to do this manually for now since cryptography does
# not yet support cert verification. There is PR 2460:
# https://github.com/pyca/cryptography/pull/2460/files
# that will add it, once it's in we should switch to using this.
# Note that all times here are in UTC.
now = rdfvalue.RDFDatetime.Now().AsDatetime()
if now > self._value.not_valid_after:
raise VerificationError("Certificate expired!")
if now < self._value.not_valid_before:
raise VerificationError("Certificate not yet valid!")
public_key.Verify(
self._value.tbs_certificate_bytes,
self._value.signature,
hash_algorithm=self._value.signature_hash_algorithm)
return True | Verifies the certificate using the given key.
Args:
public_key: The public key to use.
Returns:
True: Everything went well.
Raises:
      VerificationError: The certificate did not verify. | Below is the instruction that describes the task:
### Input:
Verifies the certificate using the given key.
Args:
public_key: The public key to use.
Returns:
True: Everything went well.
Raises:
VerificationError: The certificate did not verify.
### Response:
def Verify(self, public_key):
"""Verifies the certificate using the given key.
Args:
public_key: The public key to use.
Returns:
True: Everything went well.
Raises:
VerificationError: The certificate did not verify.
"""
# TODO(amoser): We have to do this manually for now since cryptography does
# not yet support cert verification. There is PR 2460:
# https://github.com/pyca/cryptography/pull/2460/files
# that will add it, once it's in we should switch to using this.
# Note that all times here are in UTC.
now = rdfvalue.RDFDatetime.Now().AsDatetime()
if now > self._value.not_valid_after:
raise VerificationError("Certificate expired!")
if now < self._value.not_valid_before:
raise VerificationError("Certificate not yet valid!")
public_key.Verify(
self._value.tbs_certificate_bytes,
self._value.signature,
hash_algorithm=self._value.signature_hash_algorithm)
return True |
def _context(self):
"""
Return the context to pass to the template.
The context is a dict of the form:
{
'css_url': CSS_URL,
'report_name': REPORT_NAME,
'diff_name': DIFF_NAME,
'src_stats': {SRC_PATH: {
'percent_covered': PERCENT_COVERED,
'violation_lines': [LINE_NUM, ...]
}, ... }
'total_num_lines': TOTAL_NUM_LINES,
'total_num_violations': TOTAL_NUM_VIOLATIONS,
'total_percent_covered': TOTAL_PERCENT_COVERED
}
"""
# Calculate the information to pass to the template
src_stats = {
src: self._src_path_stats(src) for src in self.src_paths()
}
# Include snippet style info if we're displaying
# source code snippets
if self.INCLUDE_SNIPPETS:
snippet_style = Snippet.style_defs()
else:
snippet_style = None
return {
'css_url': self.css_url,
'report_name': self.coverage_report_name(),
'diff_name': self.diff_report_name(),
'src_stats': src_stats,
'total_num_lines': self.total_num_lines(),
'total_num_violations': self.total_num_violations(),
'total_percent_covered': self.total_percent_covered(),
'snippet_style': snippet_style
} | Return the context to pass to the template.
The context is a dict of the form:
{
'css_url': CSS_URL,
'report_name': REPORT_NAME,
'diff_name': DIFF_NAME,
'src_stats': {SRC_PATH: {
'percent_covered': PERCENT_COVERED,
'violation_lines': [LINE_NUM, ...]
}, ... }
'total_num_lines': TOTAL_NUM_LINES,
'total_num_violations': TOTAL_NUM_VIOLATIONS,
'total_percent_covered': TOTAL_PERCENT_COVERED
        } | Below is the instruction that describes the task:
### Input:
Return the context to pass to the template.
The context is a dict of the form:
{
'css_url': CSS_URL,
'report_name': REPORT_NAME,
'diff_name': DIFF_NAME,
'src_stats': {SRC_PATH: {
'percent_covered': PERCENT_COVERED,
'violation_lines': [LINE_NUM, ...]
}, ... }
'total_num_lines': TOTAL_NUM_LINES,
'total_num_violations': TOTAL_NUM_VIOLATIONS,
'total_percent_covered': TOTAL_PERCENT_COVERED
}
### Response:
def _context(self):
"""
Return the context to pass to the template.
The context is a dict of the form:
{
'css_url': CSS_URL,
'report_name': REPORT_NAME,
'diff_name': DIFF_NAME,
'src_stats': {SRC_PATH: {
'percent_covered': PERCENT_COVERED,
'violation_lines': [LINE_NUM, ...]
}, ... }
'total_num_lines': TOTAL_NUM_LINES,
'total_num_violations': TOTAL_NUM_VIOLATIONS,
'total_percent_covered': TOTAL_PERCENT_COVERED
}
"""
# Calculate the information to pass to the template
src_stats = {
src: self._src_path_stats(src) for src in self.src_paths()
}
# Include snippet style info if we're displaying
# source code snippets
if self.INCLUDE_SNIPPETS:
snippet_style = Snippet.style_defs()
else:
snippet_style = None
return {
'css_url': self.css_url,
'report_name': self.coverage_report_name(),
'diff_name': self.diff_report_name(),
'src_stats': src_stats,
'total_num_lines': self.total_num_lines(),
'total_num_violations': self.total_num_violations(),
'total_percent_covered': self.total_percent_covered(),
'snippet_style': snippet_style
} |
def get_edge_annotations(self, u, v, key: str) -> Optional[AnnotationsDict]:
"""Get the annotations for a given edge."""
    return self._get_edge_attr(u, v, key, ANNOTATIONS) | Get the annotations for a given edge. | Below is the instruction that describes the task:
### Input:
Get the annotations for a given edge.
### Response:
def get_edge_annotations(self, u, v, key: str) -> Optional[AnnotationsDict]:
"""Get the annotations for a given edge."""
return self._get_edge_attr(u, v, key, ANNOTATIONS) |
def write_c_string( self, value ):
"""
    Write a zero terminated (C style) string
"""
self.file.write( value )
    self.file.write( b'\0' ) | Write a zero terminated (C style) string | Below is the instruction that describes the task:
### Input:
Write a zero terminated (C style) string
### Response:
def write_c_string( self, value ):
"""
    Write a zero terminated (C style) string
"""
self.file.write( value )
self.file.write( b'\0' ) |
def read_namespace_status(self, name, **kwargs): # noqa: E501
"""read_namespace_status # noqa: E501
read status of the specified Namespace # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespace_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Namespace (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Namespace
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespace_status_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.read_namespace_status_with_http_info(name, **kwargs) # noqa: E501
return data | read_namespace_status # noqa: E501
read status of the specified Namespace # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespace_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Namespace (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Namespace
If the method is called asynchronously,
        returns the request thread. | Below is the instruction that describes the task:
### Input:
read_namespace_status # noqa: E501
read status of the specified Namespace # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespace_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Namespace (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Namespace
If the method is called asynchronously,
returns the request thread.
### Response:
def read_namespace_status(self, name, **kwargs): # noqa: E501
"""read_namespace_status # noqa: E501
read status of the specified Namespace # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespace_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Namespace (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Namespace
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespace_status_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.read_namespace_status_with_http_info(name, **kwargs) # noqa: E501
return data |
def from_ISO_8601(cls, date_string, time_string, tz_string):
"""Sufficiently general ISO 8601 parser.
Inputs must be in "basic" format, i.e. no '-' or ':' separators.
See https://en.wikipedia.org/wiki/ISO_8601
"""
# parse tz_string
if tz_string:
tz_offset = (int(tz_string[1:3]) * 60) + int(tz_string[3:])
if tz_string[0] == '-':
tz_offset = -tz_offset
else:
tz_offset = None
if time_string == '000000':
# assume no time information
time_string = ''
tz_offset = None
datetime_string = date_string + time_string[:13]
precision = min((len(datetime_string) - 2) // 2, 7)
if precision <= 0:
return None
fmt = ''.join(('%Y', '%m', '%d', '%H', '%M', '%S', '.%f')[:precision])
return cls(
(datetime.strptime(datetime_string, fmt), precision, tz_offset)) | Sufficiently general ISO 8601 parser.
Inputs must be in "basic" format, i.e. no '-' or ':' separators.
    See https://en.wikipedia.org/wiki/ISO_8601 | Below is the instruction that describes the task:
### Input:
Sufficiently general ISO 8601 parser.
Inputs must be in "basic" format, i.e. no '-' or ':' separators.
See https://en.wikipedia.org/wiki/ISO_8601
### Response:
def from_ISO_8601(cls, date_string, time_string, tz_string):
"""Sufficiently general ISO 8601 parser.
Inputs must be in "basic" format, i.e. no '-' or ':' separators.
See https://en.wikipedia.org/wiki/ISO_8601
"""
# parse tz_string
if tz_string:
tz_offset = (int(tz_string[1:3]) * 60) + int(tz_string[3:])
if tz_string[0] == '-':
tz_offset = -tz_offset
else:
tz_offset = None
if time_string == '000000':
# assume no time information
time_string = ''
tz_offset = None
datetime_string = date_string + time_string[:13]
precision = min((len(datetime_string) - 2) // 2, 7)
if precision <= 0:
return None
fmt = ''.join(('%Y', '%m', '%d', '%H', '%M', '%S', '.%f')[:precision])
return cls(
(datetime.strptime(datetime_string, fmt), precision, tz_offset)) |
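A worked example for the from_ISO_8601 record above; MD stands in for whatever class defines the method (a hypothetical name):
md = MD.from_ISO_8601('20230415', '103000', '+0100')
# datetime_string == '20230415103000' (14 chars) -> precision == (14 - 2) // 2 == 6
# fmt == '%Y%m%d%H%M%S' and tz_offset == +60 minutes,
# so cls receives (datetime(2023, 4, 15, 10, 30), 6, 60)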
def parameters(self, params):
"""
Sets the list of search parameters to use.
:param params: list of AbstractSearchParameter objects
:type params: list
"""
array = JavaArray(jobject=JavaArray.new_instance("weka.core.setupgenerator.AbstractParameter", len(params)))
for idx, obj in enumerate(params):
array[idx] = obj.jobject
javabridge.call(self.jobject, "setParameters", "([Lweka/core/setupgenerator/AbstractParameter;)V", array.jobject) | Sets the list of search parameters to use.
:param params: list of AbstractSearchParameter objects
    :type params: list | Below is the instruction that describes the task:
### Input:
Sets the list of search parameters to use.
:param params: list of AbstractSearchParameter objects
:type params: list
### Response:
def parameters(self, params):
"""
Sets the list of search parameters to use.
:param params: list of AbstractSearchParameter objects
:type params: list
"""
array = JavaArray(jobject=JavaArray.new_instance("weka.core.setupgenerator.AbstractParameter", len(params)))
for idx, obj in enumerate(params):
array[idx] = obj.jobject
javabridge.call(self.jobject, "setParameters", "([Lweka/core/setupgenerator/AbstractParameter;)V", array.jobject) |
def O(self):
"""
    Pairwise strandedness matrix. Each cell indicates whether the i-th and
    j-th contigs have the same orientation (+1) or opposite orientations (-1).
"""
N = self.N
tig_to_idx = self.tig_to_idx
O = np.zeros((N, N), dtype=int)
for (at, bt), (strandedness, md, mh) in self.orientations.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
score = strandedness * md
O[ai, bi] = O[bi, ai] = score
    return O | Pairwise strandedness matrix. Each cell indicates whether the i-th and
    j-th contigs have the same orientation (+1) or opposite orientations (-1). | Below is the instruction that describes the task:
### Input:
Pairwise strandedness matrix. Each cell indicates whether the i-th and
j-th contigs have the same orientation (+1) or opposite orientations (-1).
### Response:
def O(self):
"""
    Pairwise strandedness matrix. Each cell indicates whether the i-th and
    j-th contigs have the same orientation (+1) or opposite orientations (-1).
"""
N = self.N
tig_to_idx = self.tig_to_idx
O = np.zeros((N, N), dtype=int)
for (at, bt), (strandedness, md, mh) in self.orientations.items():
if not (at in tig_to_idx and bt in tig_to_idx):
continue
ai = tig_to_idx[at]
bi = tig_to_idx[bt]
score = strandedness * md
O[ai, bi] = O[bi, ai] = score
return O |
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True | Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
    :param rulefactory: a :class:`Rule` or :class:`RuleFactory` | Below is the instruction that describes the task:
### Input:
Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
### Response:
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True |
def comparison(self):
"""
comparison: expr (('==' | '!=' | '<=' | '>=' | '<' | '>') expr)*
"""
node = self.expr()
while self.token.nature in (
Nature.EQ,
Nature.NE,
Nature.LE,
Nature.GE,
Nature.LT,
Nature.GT,
):
token = self.token
if token.nature == Nature.EQ:
self._process(Nature.EQ)
elif token.nature == Nature.NE:
self._process(Nature.NE)
elif token.nature == Nature.LE:
self._process(Nature.LE)
elif token.nature == Nature.GE:
self._process(Nature.GE)
elif token.nature == Nature.LT:
self._process(Nature.LT)
elif token.nature == Nature.GT:
self._process(Nature.GT)
else:
self.error()
node = BinaryOperation(left=node, op=token, right=self.expr())
    return node | comparison: expr (('==' | '!=' | '<=' | '>=' | '<' | '>') expr)* | Below is the instruction that describes the task:
### Input:
comparison: expr (('==' | '!=' | '<=' | '>=' | '<' | '>') expr)*
### Response:
def comparison(self):
"""
comparison: expr (('==' | '!=' | '<=' | '>=' | '<' | '>') expr)*
"""
node = self.expr()
while self.token.nature in (
Nature.EQ,
Nature.NE,
Nature.LE,
Nature.GE,
Nature.LT,
Nature.GT,
):
token = self.token
if token.nature == Nature.EQ:
self._process(Nature.EQ)
elif token.nature == Nature.NE:
self._process(Nature.NE)
elif token.nature == Nature.LE:
self._process(Nature.LE)
elif token.nature == Nature.GE:
self._process(Nature.GE)
elif token.nature == Nature.LT:
self._process(Nature.LT)
elif token.nature == Nature.GT:
self._process(Nature.GT)
else:
self.error()
node = BinaryOperation(left=node, op=token, right=self.expr())
return node |
def create_quiz_report(self, quiz_id, course_id, quiz_report_report_type, include=None, quiz_report_includes_all_versions=None):
"""
Create a quiz report.
Create and return a new report for this quiz. If a previously
generated report matches the arguments and is still current (i.e.
there have been no new submissions), it will be returned.
*Responses*
* <code>400 Bad Request</code> if the specified report type is invalid
* <code>409 Conflict</code> if a quiz report of the specified type is already being
generated
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - quiz_id
"""ID"""
path["quiz_id"] = quiz_id
# REQUIRED - quiz_report[report_type]
"""The type of report to be generated."""
self._validate_enum(quiz_report_report_type, ["student_analysis", "item_analysis"])
data["quiz_report[report_type]"] = quiz_report_report_type
# OPTIONAL - quiz_report[includes_all_versions]
"""Whether the report should consider all submissions or only the most
recent. Defaults to false, ignored for item_analysis."""
if quiz_report_includes_all_versions is not None:
data["quiz_report[includes_all_versions]"] = quiz_report_includes_all_versions
# OPTIONAL - include
"""Whether the output should include documents for the file and/or progress
objects associated with this report. (Note: JSON-API only)"""
if include is not None:
self._validate_enum(include, ["file", "progress"])
data["include"] = include
self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path), data=data, params=params, single_item=True) | Create a quiz report.
Create and return a new report for this quiz. If a previously
generated report matches the arguments and is still current (i.e.
there have been no new submissions), it will be returned.
*Responses*
* <code>400 Bad Request</code> if the specified report type is invalid
* <code>409 Conflict</code> if a quiz report of the specified type is already being
    generated | Below is the instruction that describes the task:
### Input:
Create a quiz report.
Create and return a new report for this quiz. If a previously
generated report matches the arguments and is still current (i.e.
there have been no new submissions), it will be returned.
*Responses*
* <code>400 Bad Request</code> if the specified report type is invalid
* <code>409 Conflict</code> if a quiz report of the specified type is already being
generated
### Response:
def create_quiz_report(self, quiz_id, course_id, quiz_report_report_type, include=None, quiz_report_includes_all_versions=None):
"""
Create a quiz report.
Create and return a new report for this quiz. If a previously
generated report matches the arguments and is still current (i.e.
there have been no new submissions), it will be returned.
*Responses*
* <code>400 Bad Request</code> if the specified report type is invalid
* <code>409 Conflict</code> if a quiz report of the specified type is already being
generated
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - quiz_id
"""ID"""
path["quiz_id"] = quiz_id
# REQUIRED - quiz_report[report_type]
"""The type of report to be generated."""
self._validate_enum(quiz_report_report_type, ["student_analysis", "item_analysis"])
data["quiz_report[report_type]"] = quiz_report_report_type
# OPTIONAL - quiz_report[includes_all_versions]
"""Whether the report should consider all submissions or only the most
recent. Defaults to false, ignored for item_analysis."""
if quiz_report_includes_all_versions is not None:
data["quiz_report[includes_all_versions]"] = quiz_report_includes_all_versions
# OPTIONAL - include
"""Whether the output should include documents for the file and/or progress
objects associated with this report. (Note: JSON-API only)"""
if include is not None:
self._validate_enum(include, ["file", "progress"])
data["include"] = include
self.logger.debug("POST /api/v1/courses/{course_id}/quizzes/{quiz_id}/reports with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/quizzes/{quiz_id}/reports".format(**path), data=data, params=params, single_item=True) |
def getinfo(self):
"""
Backwards-compatibility for 0.14 and later
"""
try:
old_getinfo = AuthServiceProxy(self.__service_url, 'getinfo', self.__timeout, self.__conn, True)
res = old_getinfo()
if 'error' not in res:
# 0.13 and earlier
return res
except JSONRPCException:
pass
network_info = self.getnetworkinfo()
blockchain_info = self.getblockchaininfo()
try:
wallet_info = self.getwalletinfo()
    except Exception:
wallet_info = {
'walletversion': None,
'balance': None,
'keypoololdest': None,
'keypoolsize': None,
'paytxfee': None,
}
res = {
'version': network_info['version'],
'protocolversion': network_info['protocolversion'],
'walletversion': wallet_info['walletversion'],
'balance': wallet_info['balance'],
'blocks': blockchain_info['blocks'],
'timeoffset': network_info['timeoffset'],
'connections': network_info['connections'],
'proxy': network_info['networks'],
'difficulty': blockchain_info['difficulty'],
'testnet': blockchain_info['chain'] == 'testnet',
'keypoololdest': wallet_info['keypoololdest'],
'keypoolsize': wallet_info['keypoolsize'],
'paytxfee': wallet_info['paytxfee'],
'errors': network_info['warnings'],
}
for k in ['unlocked_until', 'relayfee', 'paytxfee']:
        if k in wallet_info:
res[k] = wallet_info[k]
    return res | Backwards-compatibility for 0.14 and later | Below is the instruction that describes the task:
### Input:
Backwards-compatibility for 0.14 and later
### Response:
def getinfo(self):
"""
Backwards-compatibility for 0.14 and later
"""
try:
old_getinfo = AuthServiceProxy(self.__service_url, 'getinfo', self.__timeout, self.__conn, True)
res = old_getinfo()
if 'error' not in res:
# 0.13 and earlier
return res
except JSONRPCException:
pass
network_info = self.getnetworkinfo()
blockchain_info = self.getblockchaininfo()
try:
wallet_info = self.getwalletinfo()
    except Exception:
wallet_info = {
'walletversion': None,
'balance': None,
'keypoololdest': None,
'keypoolsize': None,
'paytxfee': None,
}
res = {
'version': network_info['version'],
'protocolversion': network_info['protocolversion'],
'walletversion': wallet_info['walletversion'],
'balance': wallet_info['balance'],
'blocks': blockchain_info['blocks'],
'timeoffset': network_info['timeoffset'],
'connections': network_info['connections'],
'proxy': network_info['networks'],
'difficulty': blockchain_info['difficulty'],
'testnet': blockchain_info['chain'] == 'testnet',
'keypoololdest': wallet_info['keypoololdest'],
'keypoolsize': wallet_info['keypoolsize'],
'paytxfee': wallet_info['paytxfee'],
'errors': network_info['warnings'],
}
for k in ['unlocked_until', 'relayfee', 'paytxfee']:
        if k in wallet_info:
res[k] = wallet_info[k]
return res |
def _get_randomized_range(val,
provided_range,
default_range):
"""
Helper to initialize by either value or a range
Returns a range to randomize from
"""
if val is None:
if provided_range is None:
return default_range
else:
return provided_range
else:
if provided_range is not None:
raise ValueError('Value {} overrides range {}'
.format(str(val), str(provided_range)))
return [val] | Helper to initialize by either value or a range
    Returns a range to randomize from | Below is the instruction that describes the task:
### Input:
Helper to initialize by either value or a range
Returns a range to randomize from
### Response:
def _get_randomized_range(val,
provided_range,
default_range):
"""
Helper to initialize by either value or a range
Returns a range to randomize from
"""
if val is None:
if provided_range is None:
return default_range
else:
return provided_range
else:
if provided_range is not None:
raise ValueError('Value {} overrides range {}'
.format(str(val), str(provided_range)))
return [val] |
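The decision table for the _get_randomized_range record above, worked through with illustrative values:
_get_randomized_range(5, None, [0, 10])       # explicit value wins       -> [5]
_get_randomized_range(None, [2, 8], [0, 10])  # caller-supplied range     -> [2, 8]
_get_randomized_range(None, None, [0, 10])    # falls back to the default -> [0, 10]
_get_randomized_range(5, [2, 8], [0, 10])     # conflict -> raises ValueError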
def find_newline(self, size=-1):
"""Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
      offset of newline char in buffer. -1 if it doesn't exist.
"""
if size < 0:
return self._buffer.find('\n', self._offset)
return self._buffer.find('\n', self._offset, self._offset + size) | Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
      offset of newline char in buffer. -1 if it doesn't exist. | Below is the instruction that describes the task:
### Input:
Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
  offset of newline char in buffer. -1 if it doesn't exist.
### Response:
def find_newline(self, size=-1):
"""Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
      offset of newline char in buffer. -1 if it doesn't exist.
"""
if size < 0:
return self._buffer.find('\n', self._offset)
return self._buffer.find('\n', self._offset, self._offset + size) |
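The str.find semantics behind the find_newline record above, sketched on standalone values (buf and offset stand in for self._buffer and self._offset):
buf, offset = 'ab\ncd\n', 1
buf.find('\n', offset)              # size < 0 path: search to the end -> 2
buf.find('\n', offset, offset + 1)  # bounded window [1, 2): no newline -> -1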
def get_rate(self, zipcode, city=None, state=None, multiple_rates=False):
"""
Finds sales tax for given info.
Returns Decimal of the tax rate, e.g. 8.750.
"""
data = self.make_request_data(zipcode, city, state)
r = requests.get(self.url, params=data)
resp = r.json()
return self.process_response(resp, multiple_rates) | Finds sales tax for given info.
    Returns Decimal of the tax rate, e.g. 8.750. | Below is the instruction that describes the task:
### Input:
Finds sales tax for given info.
Returns Decimal of the tax rate, e.g. 8.750.
### Response:
def get_rate(self, zipcode, city=None, state=None, multiple_rates=False):
"""
Finds sales tax for given info.
Returns Decimal of the tax rate, e.g. 8.750.
"""
data = self.make_request_data(zipcode, city, state)
r = requests.get(self.url, params=data)
resp = r.json()
return self.process_response(resp, multiple_rates) |
def load_data_old(self):
"""
Loads time series of 2D data grids from each opened file. The code
handles loading a full time series from one file or individual time steps
from multiple files. Missing files are supported.
"""
units = ""
if len(self.file_objects) == 1 and self.file_objects[0] is not None:
data = self.file_objects[0].variables[self.variable][self.forecast_hours]
if hasattr(self.file_objects[0].variables[self.variable], "units"):
units = self.file_objects[0].variables[self.variable].units
elif len(self.file_objects) > 1:
grid_shape = [len(self.file_objects), 1, 1]
for file_object in self.file_objects:
if file_object is not None:
if self.variable in file_object.variables.keys():
grid_shape = file_object.variables[self.variable].shape
elif self.variable.ljust(6, "_") in file_object.variables.keys():
grid_shape = file_object.variables[self.variable.ljust(6, "_")].shape
else:
print("{0} not found".format(self.variable))
raise KeyError
break
data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2]))
for f, file_object in enumerate(self.file_objects):
if file_object is not None:
if self.variable in file_object.variables.keys():
var_name = self.variable
elif self.variable.ljust(6, "_") in file_object.variables.keys():
var_name = self.variable.ljust(6, "_")
else:
print("{0} not found".format(self.variable))
raise KeyError
data[f] = file_object.variables[var_name][0]
if units == "" and hasattr(file_object.variables[var_name], "units"):
units = file_object.variables[var_name].units
else:
data = None
return data, units | Loads time series of 2D data grids from each opened file. The code
handles loading a full time series from one file or individual time steps
    from multiple files. Missing files are supported. | Below is the instruction that describes the task:
### Input:
Loads time series of 2D data grids from each opened file. The code
handles loading a full time series from one file or individual time steps
from multiple files. Missing files are supported.
### Response:
def load_data_old(self):
"""
Loads time series of 2D data grids from each opened file. The code
handles loading a full time series from one file or individual time steps
from multiple files. Missing files are supported.
"""
units = ""
if len(self.file_objects) == 1 and self.file_objects[0] is not None:
data = self.file_objects[0].variables[self.variable][self.forecast_hours]
if hasattr(self.file_objects[0].variables[self.variable], "units"):
units = self.file_objects[0].variables[self.variable].units
elif len(self.file_objects) > 1:
grid_shape = [len(self.file_objects), 1, 1]
for file_object in self.file_objects:
if file_object is not None:
if self.variable in file_object.variables.keys():
grid_shape = file_object.variables[self.variable].shape
elif self.variable.ljust(6, "_") in file_object.variables.keys():
grid_shape = file_object.variables[self.variable.ljust(6, "_")].shape
else:
print("{0} not found".format(self.variable))
raise KeyError
break
data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2]))
for f, file_object in enumerate(self.file_objects):
if file_object is not None:
if self.variable in file_object.variables.keys():
var_name = self.variable
elif self.variable.ljust(6, "_") in file_object.variables.keys():
var_name = self.variable.ljust(6, "_")
else:
print("{0} not found".format(self.variable))
raise KeyError
data[f] = file_object.variables[var_name][0]
if units == "" and hasattr(file_object.variables[var_name], "units"):
units = file_object.variables[var_name].units
else:
data = None
return data, units |
def to_range(obj, score=None, id=None, strand=None):
"""
Given a gffutils object, convert it to a range object
"""
from jcvi.utils.range import Range
if score or id:
_score = score if score else obj.score
_id = id if id else obj.id
return Range(seqid=obj.seqid, start=obj.start, end=obj.end, \
score=_score, id=_id)
elif strand:
return (obj.seqid, obj.start, obj.end, obj.strand)
    return (obj.seqid, obj.start, obj.end) | Given a gffutils object, convert it to a range object | Below is the instruction that describes the task:
### Input:
Given a gffutils object, convert it to a range object
### Response:
def to_range(obj, score=None, id=None, strand=None):
"""
Given a gffutils object, convert it to a range object
"""
from jcvi.utils.range import Range
if score or id:
_score = score if score else obj.score
_id = id if id else obj.id
return Range(seqid=obj.seqid, start=obj.start, end=obj.end, \
score=_score, id=_id)
elif strand:
return (obj.seqid, obj.start, obj.end, obj.strand)
return (obj.seqid, obj.start, obj.end) |
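A hedged usage sketch for the to_range record above: any object exposing seqid/start/end (plus score, id, strand as needed) qualifies, a gffutils feature being the usual case; the database path is hypothetical:
import gffutils
db = gffutils.FeatureDB('annotation.db')  # hypothetical, pre-built database
gene = next(db.features_of_type('gene'))
to_range(gene)                # -> (seqid, start, end)
to_range(gene, strand=True)   # -> (seqid, start, end, strand)
to_range(gene, id='gene0001') # -> Range(..., id='gene0001')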
def parse(inp, format=None, encoding='utf-8', force_types=True):
"""Parse input from file-like object, unicode string or byte string.
Args:
inp: file-like object, unicode string or byte string with the markup
format: explicitly override the guessed `inp` markup format
encoding: `inp` encoding, defaults to utf-8
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
Returns:
parsed input (dict or list) containing unicode values
Raises:
        AnyMarkupError if a problem occurs while parsing inp
"""
proper_inp = inp
if hasattr(inp, 'read'):
proper_inp = inp.read()
# if proper_inp is unicode, encode it
if isinstance(proper_inp, six.text_type):
proper_inp = proper_inp.encode(encoding)
# try to guess markup type
fname = None
if hasattr(inp, 'name'):
fname = inp.name
fmt = _get_format(format, fname, proper_inp)
# make it look like file-like bytes-yielding object
proper_inp = six.BytesIO(proper_inp)
try:
res = _do_parse(proper_inp, fmt, encoding, force_types)
except Exception as e:
# I wish there was only Python 3 and I could just use "raise ... from e"
raise AnyMarkupError(e, traceback.format_exc())
if res is None:
res = {}
return res | Parse input from file-like object, unicode string or byte string.
Args:
inp: file-like object, unicode string or byte string with the markup
format: explicitly override the guessed `inp` markup format
encoding: `inp` encoding, defaults to utf-8
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
Returns:
parsed input (dict or list) containing unicode values
Raises:
        AnyMarkupError if a problem occurs while parsing inp | Below is the instruction that describes the task:
### Input:
Parse input from file-like object, unicode string or byte string.
Args:
inp: file-like object, unicode string or byte string with the markup
format: explicitly override the guessed `inp` markup format
encoding: `inp` encoding, defaults to utf-8
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
Returns:
parsed input (dict or list) containing unicode values
Raises:
    AnyMarkupError if a problem occurs while parsing inp
### Response:
def parse(inp, format=None, encoding='utf-8', force_types=True):
"""Parse input from file-like object, unicode string or byte string.
Args:
inp: file-like object, unicode string or byte string with the markup
format: explicitly override the guessed `inp` markup format
encoding: `inp` encoding, defaults to utf-8
force_types:
if `True`, integers, floats, booleans and none/null
are recognized and returned as proper types instead of strings;
if `False`, everything is converted to strings
if `None`, backend return value is used
Returns:
parsed input (dict or list) containing unicode values
Raises:
        AnyMarkupError if a problem occurs while parsing inp
"""
proper_inp = inp
if hasattr(inp, 'read'):
proper_inp = inp.read()
# if proper_inp is unicode, encode it
if isinstance(proper_inp, six.text_type):
proper_inp = proper_inp.encode(encoding)
# try to guess markup type
fname = None
if hasattr(inp, 'name'):
fname = inp.name
fmt = _get_format(format, fname, proper_inp)
# make it look like file-like bytes-yielding object
proper_inp = six.BytesIO(proper_inp)
try:
res = _do_parse(proper_inp, fmt, encoding, force_types)
except Exception as e:
# I wish there was only Python 3 and I could just use "raise ... from e"
raise AnyMarkupError(e, traceback.format_exc())
if res is None:
res = {}
return res |
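A usage sketch for the parse record above; the guessed formats and outputs follow the documented behavior but are illustrative, not verified against every backend:
parse(b'a: 1\nitems: [x, y]\n')      # format guessed from content -> {'a': 1, 'items': ['x', 'y']}
parse('{"a": 1}', format='json')     # explicit format override -> {'a': 1}
parse(b'a: 1\n', force_types=False)  # everything stringified -> {'a': '1'}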
def list(self, size=1000, tree_depth=1):
""" Creates a random #list
@size: #int number of random values to include in each @tree_depth
@tree_depth: #int dict tree dimensions size, i.e.
1=|[value1, value2]|
2=|[[value1, value2], [value1, value2]]|
-> random #list
"""
if not tree_depth: return self._map_type()
return list(self.deque(size, tree_depth-1) for x in range(size)) | Creates a random #list
@size: #int number of random values to include in each @tree_depth
@tree_depth: #int dict tree dimensions size, i.e.
1=|[value1, value2]|
2=|[[value1, value2], [value1, value2]]|
        -> random #list | Below is the instruction that describes the task:
### Input:
Creates a random #list
@size: #int number of random values to include in each @tree_depth
@tree_depth: #int dict tree dimensions size, i.e.
1=|[value1, value2]|
2=|[[value1, value2], [value1, value2]]|
-> random #list
### Response:
def list(self, size=1000, tree_depth=1):
""" Creates a random #list
@size: #int number of random values to include in each @tree_depth
@tree_depth: #int dict tree dimensions size, i.e.
1=|[value1, value2]|
2=|[[value1, value2], [value1, value2]]|
-> random #list
"""
if not tree_depth: return self._map_type()
return list(self.deque(size, tree_depth-1) for x in range(size)) |
def printArray(self, node, printState: PrintState):
""" Prints out the array declaration in a format of Array class
object declaration. 'arrayName = Array(Type, [bounds])'
"""
if (
self.nameMapper[node["name"]] not in printState.definedVars
and self.nameMapper[node["name"]] not in printState.globalVars
):
printState.definedVars += [self.nameMapper[node["name"]]]
assert int(node["count"]) > 0
printState.definedVars += [node["name"]]
varType = ""
if node["type"].upper() == "INTEGER":
varType = "int"
elif node["type"].upper() in ("DOUBLE", "REAL"):
varType = "float"
elif node["type"].upper() == "CHARACTER":
varType = "str"
elif node["isDevTypeVar"]:
varType = node["type"].lower() + "()"
assert varType != ""
self.pyStrings.append(f"{node['name']} = Array({varType}, [")
for i in range(0, int(node["count"])):
loBound = node["low" + str(i + 1)]
upBound = node["up" + str(i + 1)]
dimensions = f"({loBound}, {upBound})"
if i < int(node["count"]) - 1:
self.pyStrings.append(f"{dimensions}, ")
else:
self.pyStrings.append(f"{dimensions}")
self.pyStrings.append("])")
if node["isDevTypeVar"]:
self.pyStrings.append(printState.sep)
# This may require updating later when we have to deal with the
# multi-dimensional derived type arrays
upBound = node["up1"]
self.pyStrings.append(
f"for z in range(1, {upBound}+1):" + printState.sep
)
self.pyStrings.append(
f" obj = {node['type']}()" + printState.sep
)
self.pyStrings.append(
f" {node['name']}.set_(z, obj)" + printState.sep
        ) | Prints out the array declaration in the format of an Array class
object declaration: 'arrayName = Array(Type, [bounds])' | Below is the instruction that describes the task:
### Input:
Prints out the array declaration in the format of an Array class
object declaration: 'arrayName = Array(Type, [bounds])'
### Response:
def printArray(self, node, printState: PrintState):
""" Prints out the array declaration in a format of Array class
object declaration. 'arrayName = Array(Type, [bounds])'
"""
if (
self.nameMapper[node["name"]] not in printState.definedVars
and self.nameMapper[node["name"]] not in printState.globalVars
):
printState.definedVars += [self.nameMapper[node["name"]]]
assert int(node["count"]) > 0
printState.definedVars += [node["name"]]
varType = ""
if node["type"].upper() == "INTEGER":
varType = "int"
elif node["type"].upper() in ("DOUBLE", "REAL"):
varType = "float"
elif node["type"].upper() == "CHARACTER":
varType = "str"
elif node["isDevTypeVar"]:
varType = node["type"].lower() + "()"
assert varType != ""
self.pyStrings.append(f"{node['name']} = Array({varType}, [")
for i in range(0, int(node["count"])):
loBound = node["low" + str(i + 1)]
upBound = node["up" + str(i + 1)]
dimensions = f"({loBound}, {upBound})"
if i < int(node["count"]) - 1:
self.pyStrings.append(f"{dimensions}, ")
else:
self.pyStrings.append(f"{dimensions}")
self.pyStrings.append("])")
if node["isDevTypeVar"]:
self.pyStrings.append(printState.sep)
# This may require updating later when we have to deal with the
# multi-dimensional derived type arrays
upBound = node["up1"]
self.pyStrings.append(
f"for z in range(1, {upBound}+1):" + printState.sep
)
self.pyStrings.append(
f" obj = {node['type']}()" + printState.sep
)
self.pyStrings.append(
f" {node['name']}.set_(z, obj)" + printState.sep
) |
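For reference, a sketch of the Python this method emits for a two-dimensional integer array; the node dict and the emitted line are inferred from the code above, not taken from the source:
# node = {"name": "a", "type": "INTEGER", "count": "2",
#         "low1": "1", "up1": "5", "low2": "1", "up2": "3",
#         "isDevTypeVar": False}
# emitted: a = Array(int, [(1, 5), (1, 3)])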
def zeros_like(array, dtype=None, keepmeta=True):
"""Create an array of zeros with the same shape and type as the input array.
Args:
array (xarray.DataArray): The shape and data-type of it define
these same attributes of the output array.
dtype (data-type, optional): If specified, this function overrides
the data-type of the output array.
keepmeta (bool, optional): Whether *coords, attrs, and name of the input
array are kept in the output one. Default is True.
Returns:
array (decode.array): Decode array filled with zeros.
"""
if keepmeta:
return xr.zeros_like(array, dtype)
else:
return dc.zeros(array.shape, dtype) | Create an array of zeros with the same shape and type as the input array.
Args:
array (xarray.DataArray): The shape and data-type of it define
these same attributes of the output array.
dtype (data-type, optional): If specified, this function overrides
the data-type of the output array.
keepmeta (bool, optional): Whether *coords, attrs, and name of the input
array are kept in the output one. Default is True.
Returns:
array (decode.array): Decode array filled with zeros. | Below is the instruction that describes the task:
### Input:
Create an array of zeros with the same shape and type as the input array.
Args:
array (xarray.DataArray): The shape and data-type of it define
these same attributes of the output array.
dtype (data-type, optional): If specified, this function overrides
the data-type of the output array.
keepmeta (bool, optional): Whether *coords, attrs, and name of the input
array are kept in the output one. Default is True.
Returns:
array (decode.array): Decode array filled with zeros.
### Response:
def zeros_like(array, dtype=None, keepmeta=True):
"""Create an array of zeros with the same shape and type as the input array.
Args:
array (xarray.DataArray): The shape and data-type of it define
these same attributes of the output array.
dtype (data-type, optional): If specified, this function overrides
the data-type of the output array.
keepmeta (bool, optional): Whether *coords, attrs, and name of the input
array are kept in the output one. Default is True.
Returns:
array (decode.array): Decode array filled with zeros.
"""
if keepmeta:
return xr.zeros_like(array, dtype)
else:
return dc.zeros(array.shape, dtype) |
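A minimal usage sketch, assuming the decode package is imported as dc alongside xarray (data values are illustrative):
import numpy as np
import xarray as xr
arr = xr.DataArray(np.arange(6.0).reshape(2, 3))
z = zeros_like(arr)                                  # keeps coords/attrs/name, zero-filled
z_int = zeros_like(arr, dtype=int, keepmeta=False)   # fresh decode array, no metadata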
def to_representation(self, instance):
"""
Return the updated course data dictionary.
Arguments:
instance (dict): The course data.
Returns:
dict: The updated course data.
"""
updated_course = copy.deepcopy(instance)
enterprise_customer_catalog = self.context['enterprise_customer_catalog']
updated_course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(
updated_course['key']
)
for course_run in updated_course['course_runs']:
course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url(
course_run['key']
)
return updated_course | Return the updated course data dictionary.
Arguments:
instance (dict): The course data.
Returns:
dict: The updated course data. | Below is the instruction that describes the task:
### Input:
Return the updated course data dictionary.
Arguments:
instance (dict): The course data.
Returns:
dict: The updated course data.
### Response:
def to_representation(self, instance):
"""
Return the updated course data dictionary.
Arguments:
instance (dict): The course data.
Returns:
dict: The updated course data.
"""
updated_course = copy.deepcopy(instance)
enterprise_customer_catalog = self.context['enterprise_customer_catalog']
updated_course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(
updated_course['key']
)
for course_run in updated_course['course_runs']:
course_run['enrollment_url'] = enterprise_customer_catalog.get_course_run_enrollment_url(
course_run['key']
)
return updated_course |
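An illustrative before/after; the course keys are hypothetical, and the serializer context must already carry an enterprise_customer_catalog:
# instance = {"key": "edX+DemoX", "course_runs": [{"key": "course-run-1"}]}
# the returned deep copy gains:
#   updated["enrollment_url"]                      # course-level URL
#   updated["course_runs"][0]["enrollment_url"]    # per-run URL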
def change_cloud_password(
self,
current_password: str,
new_password: str,
new_hint: str = ""
) -> bool:
"""Use this method to change your Two-Step Verification password (Cloud Password) with a new one.
Args:
current_password (``str``):
Your current password.
new_password (``str``):
Your new password.
new_hint (``str``, *optional*):
A new password hint.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ValueError`` in case there is no cloud password to change.
"""
r = self.send(functions.account.GetPassword())
if not r.has_password:
raise ValueError("There is no cloud password to change")
r.new_algo.salt1 += os.urandom(32)
new_hash = btoi(compute_hash(r.new_algo, new_password))
new_hash = itob(pow(r.new_algo.g, new_hash, btoi(r.new_algo.p)))
self.send(
functions.account.UpdatePasswordSettings(
password=compute_check(r, current_password),
new_settings=types.account.PasswordInputSettings(
new_algo=r.new_algo,
new_password_hash=new_hash,
hint=new_hint
)
)
)
return True | Use this method to change your Two-Step Verification password (Cloud Password) with a new one.
Args:
current_password (``str``):
Your current password.
new_password (``str``):
Your new password.
new_hint (``str``, *optional*):
A new password hint.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ValueError`` in case there is no cloud password to change. | Below is the instruction that describes the task:
### Input:
Use this method to change your Two-Step Verification password (Cloud Password) with a new one.
Args:
current_password (``str``):
Your current password.
new_password (``str``):
Your new password.
new_hint (``str``, *optional*):
A new password hint.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ValueError`` in case there is no cloud password to change.
### Response:
def change_cloud_password(
self,
current_password: str,
new_password: str,
new_hint: str = ""
) -> bool:
"""Use this method to change your Two-Step Verification password (Cloud Password) with a new one.
Args:
current_password (``str``):
Your current password.
new_password (``str``):
Your new password.
new_hint (``str``, *optional*):
A new password hint.
Returns:
True on success.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
``ValueError`` in case there is no cloud password to change.
"""
r = self.send(functions.account.GetPassword())
if not r.has_password:
raise ValueError("There is no cloud password to change")
r.new_algo.salt1 += os.urandom(32)
new_hash = btoi(compute_hash(r.new_algo, new_password))
new_hash = itob(pow(r.new_algo.g, new_hash, btoi(r.new_algo.p)))
self.send(
functions.account.UpdatePasswordSettings(
password=compute_check(r, current_password),
new_settings=types.account.PasswordInputSettings(
new_algo=r.new_algo,
new_password_hash=new_hash,
hint=new_hint
)
)
)
return True |
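A usage sketch with a Pyrogram client; the session name, passwords, and hint are placeholders:
from pyrogram import Client
app = Client("my_account")  # placeholder session
app.start()
app.change_cloud_password("current secret", "new secret", new_hint="a hint")
app.stop()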
def sludge(self, column=None, value=None, **kwargs):
"""
        Sludge information describes the volume of sludge produced at a
facility, identification information on a sludge handler, and
classification/permitting information on a facility that handles
sludge, such as a pretreatment POTW.
>>> PCS().sludge('county_name', 'San Francisco')
"""
        return self._resolve_call('PCS_SLUDGE', column, value, **kwargs) | Sludge information describes the volume of sludge produced at a
facility, identification information on a sludge handler, and
classification/permitting information on a facility that handles
sludge, such as a pretreatment POTW.
>>> PCS().sludge('county_name', 'San Francisco') | Below is the instruction that describes the task:
### Input:
Sludge information describes the volume of sludge produced at a
facility, identification information on a sludge handler, and
classification/permitting information on a facility that handles
sludge, such as a pretreatment POTW.
>>> PCS().sludge('county_name', 'San Francisco')
### Response:
def sludge(self, column=None, value=None, **kwargs):
"""
        Sludge information describes the volume of sludge produced at a
facility, identification information on a sludge handler, and
classification/permitting information on a facility that handles
sludge, such as a pretreatment POTW.
>>> PCS().sludge('county_name', 'San Francisco')
"""
return self._resolve_call('PCS_SLUDGE', column, value, **kwargs) |
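A usage sketch; extra keyword filters, when given, are forwarded unchanged to the PCS_SLUDGE lookup (the second column/value pair below is illustrative):
pcs = PCS()
records = pcs.sludge("county_name", "San Francisco")
more = pcs.sludge("sludge_id", "12345")  # hypothetical column and value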
def __split_file(self):
'''
        Splits a combined SAR output file (in ASCII format) in order to
        extract the info we need from it, in the format we want.
:return: ``List``-style of SAR file sections separated by
the type of info they contain (SAR file sections) without
parsing what is exactly what at this point
'''
# Filename passed checks through __init__
if (self.__filename and os.access(self.__filename, os.R_OK)):
fhandle = None
try:
fhandle = os.open(self.__filename, os.O_RDONLY)
except OSError:
print(("Couldn't open file %s" % (self.__filename)))
fhandle = None
if (fhandle):
try:
sarmap = mmap.mmap(fhandle, length=0, prot=mmap.PROT_READ)
except (TypeError, IndexError):
os.close(fhandle)
traceback.print_exc()
#sys.exit(-1)
return False
sfpos = sarmap.find(PATTERN_MULTISPLIT, 0)
while (sfpos > -1):
'''Split by day found'''
self.__splitpointers.append(sfpos)
# Iterate for new position
try:
sfpos = sarmap.find(PATTERN_MULTISPLIT, (sfpos + 1))
except ValueError:
print("ValueError on mmap.find()")
return True
if (self.__splitpointers):
# Not sure if this will work - if empty set
# goes back as True here
return True
        return False | Splits a combined SAR output file (in ASCII format) in order to
        extract the info we need from it, in the format we want.
:return: ``List``-style of SAR file sections separated by
the type of info they contain (SAR file sections) without
parsing what is exactly what at this point | Below is the instruction that describes the task:
### Input:
Splits a combined SAR output file (in ASCII format) in order to
extract the info we need from it, in the format we want.
:return: ``List``-style of SAR file sections separated by
the type of info they contain (SAR file sections) without
parsing what is exactly what at this point
### Response:
def __split_file(self):
'''
        Splits a combined SAR output file (in ASCII format) in order to
        extract the info we need from it, in the format we want.
:return: ``List``-style of SAR file sections separated by
the type of info they contain (SAR file sections) without
parsing what is exactly what at this point
'''
# Filename passed checks through __init__
if (self.__filename and os.access(self.__filename, os.R_OK)):
fhandle = None
try:
fhandle = os.open(self.__filename, os.O_RDONLY)
except OSError:
print(("Couldn't open file %s" % (self.__filename)))
fhandle = None
if (fhandle):
try:
sarmap = mmap.mmap(fhandle, length=0, prot=mmap.PROT_READ)
except (TypeError, IndexError):
os.close(fhandle)
traceback.print_exc()
#sys.exit(-1)
return False
sfpos = sarmap.find(PATTERN_MULTISPLIT, 0)
while (sfpos > -1):
'''Split by day found'''
self.__splitpointers.append(sfpos)
# Iterate for new position
try:
sfpos = sarmap.find(PATTERN_MULTISPLIT, (sfpos + 1))
except ValueError:
print("ValueError on mmap.find()")
return True
if (self.__splitpointers):
# Not sure if this will work - if empty set
# goes back as True here
return True
return False |
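For reference, a sketch of how the collected offsets could be consumed later; the slicing below is an assumption, not shown in the source:
# ptrs = self.__splitpointers          # byte offsets of PATTERN_MULTISPLIT hits
# with open(self.__filename, "rb") as f:
#     data = f.read()
# sections = [data[a:b] for a, b in zip(ptrs, ptrs[1:] + [len(data)])]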
def __validate_datetime_string(self):
"""
        This will require validating a version string (such as "3.3.5").
A version string could be converted to a datetime value if this
validation is not executed.
"""
try:
try:
StrictVersion(self._value)
raise TypeConversionError(
"invalid datetime string: version string found {}".format(self._value)
)
except ValueError:
pass
except TypeError:
raise TypeConversionError("invalid datetime string: type={}".format(type(self._value))) | This will require validating version string (such as "3.3.5").
A version string could be converted to a datetime value if this
validation is not executed. | Below is the instruction that describes the task:
### Input:
This will require validating a version string (such as "3.3.5").
A version string could be converted to a datetime value if this
validation is not executed.
### Response:
def __validate_datetime_string(self):
"""
        This will require validating a version string (such as "3.3.5").
A version string could be converted to a datetime value if this
validation is not executed.
"""
try:
try:
StrictVersion(self._value)
raise TypeConversionError(
"invalid datetime string: version string found {}".format(self._value)
)
except ValueError:
pass
except TypeError:
raise TypeConversionError("invalid datetime string: type={}".format(type(self._value))) |
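A sketch of the guard's effect, assuming a converter class wrapping this method (the class name is hypothetical):
# DateTimeConverter("3.3.5")      -> TypeConversionError: parses as a StrictVersion
# DateTimeConverter("2017-01-02") -> passes: StrictVersion() raises ValueError,
#                                    so the value may still be a datetime string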