code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars)
---|---|---|
def output_flair_stats(self):
"""Display statistics (number of users) for each unique flair item."""
css_counter = Counter()
text_counter = Counter()
for flair in self.current_flair():
if flair['flair_css_class']:
css_counter[flair['flair_css_class']] += 1
if flair['flair_text']:
text_counter[flair['flair_text']] += 1
print('Flair CSS Statistics')
for flair, count in sorted(css_counter.items(),
key=lambda x: (x[1], x[0])):
print('{0:3} {1}'.format(count, flair))
print('Flair Text Statistics')
for flair, count in sorted(text_counter.items(),
key=lambda x: (x[1], x[0]), reverse=True):
print('{0:3} {1}'.format(count, flair)) | Display statistics (number of users) for each unique flair item. | Below is the instruction that describes the task:
### Input:
Display statistics (number of users) for each unique flair item.
### Response:
def output_flair_stats(self):
"""Display statistics (number of users) for each unique flair item."""
css_counter = Counter()
text_counter = Counter()
for flair in self.current_flair():
if flair['flair_css_class']:
css_counter[flair['flair_css_class']] += 1
if flair['flair_text']:
text_counter[flair['flair_text']] += 1
print('Flair CSS Statistics')
for flair, count in sorted(css_counter.items(),
key=lambda x: (x[1], x[0])):
print('{0:3} {1}'.format(count, flair))
print('Flair Text Statistics')
for flair, count in sorted(text_counter.items(),
key=lambda x: (x[1], x[0]), reverse=True):
print('{0:3} {1}'.format(count, flair)) |
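The tallying above is plain collections.Counter bookkeeping followed by a sort on (count, label) tuples. A minimal standalone sketch of the same pattern, using a hypothetical list of flair dicts in place of a real current_flair() call:

from collections import Counter

flairs = [  # hypothetical sample rows shaped like current_flair() output
    {'flair_css_class': 'mod', 'flair_text': 'Moderator'},
    {'flair_css_class': 'mod', 'flair_text': 'Moderator'},
    {'flair_css_class': 'user', 'flair_text': None},
]
css_counter = Counter(f['flair_css_class'] for f in flairs if f['flair_css_class'])
for css, count in sorted(css_counter.items(), key=lambda x: (x[1], x[0])):
    print('{0:3} {1}'.format(count, css))  # prints '  1 user' then '  2 mod'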
def include_codemirror(self):
"""Include resources in pages"""
contents = []
# base
js = self._get_tag('codemirror.js', 'script')
css = self._get_tag('codemirror.css', 'stylesheet')
if js and css:
contents.append(js)
contents.append(css)
# languages
for language in self.languages:
url = self.__class__.LANGUAGE_REL_URL.format(language)
js = self._get_tag(url, 'script')
if js:
contents.append(js)
# theme
if self.theme:
url = self.__class__.THEME_REL_URL.format(self.theme)
css = self._get_tag(url, 'stylesheet')
if css:
contents.append(css)
# addons
if self.addons:
# add to list
for addon_type, name in self.addons:
url = self.__class__.ADDON_REL_URL.format(addon_type, name)
js = self._get_tag(url, 'script')
if js:
contents.append(js)
# if there is a css file relative to this addon
url = self.__class__.ADDON_CSS_REL_URL.format(addon_type, name)
css = self._get_tag(url, 'stylesheet', False)
if css:
contents.append(css)
# return html
return Markup('\n'.join(contents)) | Include resources in pages | Below is the instruction that describes the task:
### Input:
Include resources in pages
### Response:
def include_codemirror(self):
"""Include resources in pages"""
contents = []
# base
js = self._get_tag('codemirror.js', 'script')
css = self._get_tag('codemirror.css', 'stylesheet')
if js and css:
contents.append(js)
contents.append(css)
# languages
for language in self.languages:
url = self.__class__.LANGUAGE_REL_URL.format(language)
js = self._get_tag(url, 'script')
if js:
contents.append(js)
# theme
if self.theme:
url = self.__class__.THEME_REL_URL.format(self.theme)
css = self._get_tag(url, 'stylesheet')
if css:
contents.append(css)
# addons
if self.addons:
# add to list
for addon_type, name in self.addons:
url = self.__class__.ADDON_REL_URL.format(addon_type, name)
js = self._get_tag(url, 'script')
if js:
contents.append(js)
# if there is a css file relative to this addon
url = self.__class__.ADDON_CSS_REL_URL.format(addon_type, name)
css = self._get_tag(url, 'stylesheet', False)
if css:
contents.append(css)
# return html
return Markup('\n'.join(contents)) |
def heartbeat_tick(self, rate=2):
"""Send heartbeat packets, if necessary, and fail if none have been
received recently. This should be called frequently, on the order of
once per second.
:keyword rate: Ignored
"""
if not self.heartbeat:
return
# treat actual data exchange in either direction as a heartbeat
sent_now = self.method_writer.bytes_sent
recv_now = self.method_reader.bytes_recv
if self.prev_sent is None or self.prev_sent != sent_now:
self.last_heartbeat_sent = monotonic()
if self.prev_recv is None or self.prev_recv != recv_now:
self.last_heartbeat_received = monotonic()
self.prev_sent, self.prev_recv = sent_now, recv_now
# send a heartbeat if it's time to do so
if monotonic() > self.last_heartbeat_sent + self.heartbeat:
self.send_heartbeat()
self.last_heartbeat_sent = monotonic()
# if we've missed two intervals' heartbeats, fail; this gives the
# server enough time to send heartbeats a little late
if (self.last_heartbeat_received and
self.last_heartbeat_received + 2 *
self.heartbeat < monotonic()):
raise ConnectionForced('Too many heartbeats missed') | Send heartbeat packets, if necessary, and fail if none have been
received recently. This should be called frequently, on the order of
once per second.
:keyword rate: Ignored | Below is the instruction that describes the task:
### Input:
Send heartbeat packets, if necessary, and fail if none have been
received recently. This should be called frequently, on the order of
once per second.
:keyword rate: Ignored
### Response:
def heartbeat_tick(self, rate=2):
"""Send heartbeat packets, if necessary, and fail if none have been
received recently. This should be called frequently, on the order of
once per second.
:keyword rate: Ignored
"""
if not self.heartbeat:
return
# treat actual data exchange in either direction as a heartbeat
sent_now = self.method_writer.bytes_sent
recv_now = self.method_reader.bytes_recv
if self.prev_sent is None or self.prev_sent != sent_now:
self.last_heartbeat_sent = monotonic()
if self.prev_recv is None or self.prev_recv != recv_now:
self.last_heartbeat_received = monotonic()
self.prev_sent, self.prev_recv = sent_now, recv_now
# send a heartbeat if it's time to do so
if monotonic() > self.last_heartbeat_sent + self.heartbeat:
self.send_heartbeat()
self.last_heartbeat_sent = monotonic()
# if we've missed two intervals' heartbeats, fail; this gives the
# server enough time to send heartbeats a little late
if (self.last_heartbeat_received and
self.last_heartbeat_received + 2 *
self.heartbeat < monotonic()):
raise ConnectionForced('Too many heartbeats missed') |
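Two details in heartbeat_tick are worth calling out: any traffic in either direction counts as a heartbeat, and the connection is only declared dead after two full intervals of silence. A self-contained toy version of that bookkeeping (HeartbeatMonitor and its method names are illustrative, not part of the library's API):

from time import monotonic

class HeartbeatMonitor:
    def __init__(self, interval):
        self.interval = interval          # agreed heartbeat interval in seconds
        self.last_received = monotonic()  # pretend the peer was just heard from

    def note_traffic(self):
        # any received data counts as a heartbeat, as in heartbeat_tick()
        self.last_received = monotonic()

    def check(self):
        # fail only after two full intervals of silence, giving the peer slack
        if monotonic() > self.last_received + 2 * self.interval:
            raise RuntimeError('Too many heartbeats missed')

monitor = HeartbeatMonitor(interval=5)
monitor.note_traffic()
monitor.check()  # passes, since traffic was just recorded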
def build_mv_grid_district(self, poly_id, subst_id, grid_district_geo_data,
station_geo_data):
"""Initiates single MV grid_district including station and grid
Parameters
----------
poly_id: int
ID of grid_district according to database table. Also used as ID for created grid #TODO: check type
subst_id: int
ID of station according to database table #TODO: check type
grid_district_geo_data: :shapely:`Shapely Polygon object<polygons>`
Polygon of grid district
station_geo_data: :shapely:`Shapely Point object<points>`
Point of station
Returns
-------
:shapely:`Shapely Polygon object<polygons>`
Description of return #TODO: check
"""
mv_station = MVStationDing0(id_db=subst_id, geo_data=station_geo_data)
mv_grid = MVGridDing0(network=self,
id_db=poly_id,
station=mv_station)
mv_grid_district = MVGridDistrictDing0(id_db=poly_id,
mv_grid=mv_grid,
geo_data=grid_district_geo_data)
mv_grid.grid_district = mv_grid_district
mv_station.grid = mv_grid
self.add_mv_grid_district(mv_grid_district)
return mv_grid_district | Initiates single MV grid_district including station and grid
Parameters
----------
poly_id: int
ID of grid_district according to database table. Also used as ID for created grid #TODO: check type
subst_id: int
ID of station according to database table #TODO: check type
grid_district_geo_data: :shapely:`Shapely Polygon object<polygons>`
Polygon of grid district
station_geo_data: :shapely:`Shapely Point object<points>`
Point of station
Returns
-------
:shapely:`Shapely Polygon object<polygons>`
Description of return #TODO: check | Below is the instruction that describes the task:
### Input:
Initiates single MV grid_district including station and grid
Parameters
----------
poly_id: int
ID of grid_district according to database table. Also used as ID for created grid #TODO: check type
subst_id: int
ID of station according to database table #TODO: check type
grid_district_geo_data: :shapely:`Shapely Polygon object<polygons>`
Polygon of grid district
station_geo_data: :shapely:`Shapely Point object<points>`
Point of station
Returns
-------
:shapely:`Shapely Polygon object<polygons>`
Description of return #TODO: check
### Response:
def build_mv_grid_district(self, poly_id, subst_id, grid_district_geo_data,
station_geo_data):
"""Initiates single MV grid_district including station and grid
Parameters
----------
poly_id: int
ID of grid_district according to database table. Also used as ID for created grid #TODO: check type
subst_id: int
ID of station according to database table #TODO: check type
grid_district_geo_data: :shapely:`Shapely Polygon object<polygons>`
Polygon of grid district
station_geo_data: :shapely:`Shapely Point object<points>`
Point of station
Returns
-------
:shapely:`Shapely Polygon object<polygons>`
Description of return #TODO: check
"""
mv_station = MVStationDing0(id_db=subst_id, geo_data=station_geo_data)
mv_grid = MVGridDing0(network=self,
id_db=poly_id,
station=mv_station)
mv_grid_district = MVGridDistrictDing0(id_db=poly_id,
mv_grid=mv_grid,
geo_data=grid_district_geo_data)
mv_grid.grid_district = mv_grid_district
mv_station.grid = mv_grid
self.add_mv_grid_district(mv_grid_district)
return mv_grid_district |
def jtag_disable(self):
"""
Disables JTAG output on the controller. JTAG operations executed
immediately after this function will return useless data or fail.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
>>> c.jtag_disable()
"""
if not self._jtagon: return
status, _ = self.bulkCommand(_BMSG_DISABLE_JTAG)
if status == 0:
self._jtagon = False
elif status == 3:
raise JTAGControlError("Error Code %s"%status)
self.close_handle() | Disables JTAG output on the controller. JTAG operations executed
immediately after this function will return useless data or fail.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
>>> c.jtag_disable() | Below is the instruction that describes the task:
### Input:
Disables JTAG output on the controller. JTAG operations executed
immediately after this function will return useless data or fail.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
>>> c.jtag_disable()
### Response:
def jtag_disable(self):
"""
Disables JTAG output on the controller. JTAG operations executed
immediately after this function will return useless data or fail.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
>>> c.jtag_disable()
"""
if not self._jtagon: return
status, _ = self.bulkCommand(_BMSG_DISABLE_JTAG)
if status == 0:
self._jtagon = False
elif status == 3:
raise JTAGControlError("Error Code %s"%status)
self.close_handle() |
def readlines(self, hint=-1):
"""Read lines until EOF, and return them as a list.
If *hint* is specified, then stop reading lines as soon as the total
size of all lines exceeds *hint*.
"""
self._check_readable()
lines = []
chunks = []
bytes_read = 0
while True:
chunk = self._buffer.get_chunk(-1, b'\n')
if not chunk:
break
chunks.append(chunk)
if chunk.endswith(b'\n'):
lines.append(b''.join(chunks))
del chunks[:]
bytes_read += len(lines[-1])
if hint >= 0 and bytes_read > hint:
break
if chunks:
lines.append(b''.join(chunks))
if not lines and not self._buffer.eof and self._buffer.error:
raise compat.saved_exc(self._buffer.error)
return lines | Read lines until EOF, and return them as a list.
If *hint* is specified, then stop reading lines as soon as the total
size of all lines exceeds *hint*. | Below is the instruction that describes the task:
### Input:
Read lines until EOF, and return them as a list.
If *hint* is specified, then stop reading lines as soon as the total
size of all lines exceeds *hint*.
### Response:
def readlines(self, hint=-1):
"""Read lines until EOF, and return them as a list.
If *hint* is specified, then stop reading lines as soon as the total
size of all lines exceeds *hint*.
"""
self._check_readable()
lines = []
chunks = []
bytes_read = 0
while True:
chunk = self._buffer.get_chunk(-1, b'\n')
if not chunk:
break
chunks.append(chunk)
if chunk.endswith(b'\n'):
lines.append(b''.join(chunks))
del chunks[:]
bytes_read += len(lines[-1])
if hint >= 0 and bytes_read > hint:
break
if chunks:
lines.append(b''.join(chunks))
if not lines and not self._buffer.eof and self._buffer.error:
raise compat.saved_exc(self._buffer.error)
return lines |
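The *hint* semantics match the standard io streams: lines keep being read until the running total reaches or passes *hint*, so the line that crosses the threshold is still returned. A quick illustration with io.BytesIO, which implements essentially the same contract:

import io

stream = io.BytesIO(b'one\ntwo\nthree\nfour\n')
print(stream.readlines(5))  # [b'one\n', b'two\n'] -- stops once 5 bytes have been reached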
def initialize(module_name=None):
"""
Build the giotto settings object. This function gets called
at the very beginning of every request cycle.
"""
import giotto
from giotto.utils import random_string, switchout_keyvalue
from django.conf import settings
setattr(giotto, '_config', GiottoSettings())
if not module_name:
# For testing. No settings will be set.
return
project_module = importlib.import_module(module_name)
project_path = os.path.dirname(project_module.__file__)
setattr(giotto._config, 'project_path', project_path)
try:
secrets = importlib.import_module("%s.controllers.secrets" % module_name)
except ImportError:
secrets = None
try:
machine = importlib.import_module("%s.controllers.machine" % module_name)
except ImportError:
machine = None
config = importlib.import_module("%s.controllers.config" % module_name)
if config:
for item in dir(config):
setting_value = getattr(config, item)
setattr(giotto._config, item, setting_value)
if secrets:
for item in dir(secrets):
setting_value = getattr(secrets, item)
setattr(giotto._config, item, setting_value)
else:
logging.warning("No secrets.py found")
if machine:
for item in dir(machine):
setting_value = getattr(machine, item)
setattr(giotto._config, item, setting_value)
else:
logging.warning("No machine.py found")
settings.configure(
SECRET_KEY=random_string(32),
DATABASES=get_config('DATABASES'),
INSTALLED_APPS=(module_name, 'giotto')
)
ss = get_config('session_store', None)
if ss:
class_ = switchout_keyvalue(ss)
setattr(giotto._config, "session_store", class_())
cache_engine = get_config("cache", None)
if hasattr(cache_engine, 'lower'):
# session engine was passed in as string, exchange for engine object.
class_ = switchout_keyvalue(cache_engine)
e = class_(host=get_config("cache_host", "localhost"))
setattr(giotto._config, "cache_engine", e) | Build the giotto settings object. This function gets called
at the very beginning of every request cycle. | Below is the instruction that describes the task:
### Input:
Build the giotto settings object. This function gets called
at the very beginning of every request cycle.
### Response:
def initialize(module_name=None):
"""
Build the giotto settings object. This function gets called
at the very beginning of every request cycle.
"""
import giotto
from giotto.utils import random_string, switchout_keyvalue
from django.conf import settings
setattr(giotto, '_config', GiottoSettings())
if not module_name:
# For testing. No settings will be set.
return
project_module = importlib.import_module(module_name)
project_path = os.path.dirname(project_module.__file__)
setattr(giotto._config, 'project_path', project_path)
try:
secrets = importlib.import_module("%s.controllers.secrets" % module_name)
except ImportError:
secrets = None
try:
machine = importlib.import_module("%s.controllers.machine" % module_name)
except ImportError:
machine = None
config = importlib.import_module("%s.controllers.config" % module_name)
if config:
for item in dir(config):
setting_value = getattr(config, item)
setattr(giotto._config, item, setting_value)
if secrets:
for item in dir(secrets):
setting_value = getattr(secrets, item)
setattr(giotto._config, item, setting_value)
else:
logging.warning("No secrets.py found")
if machine:
for item in dir(machine):
setting_value = getattr(machine, item)
setattr(giotto._config, item, setting_value)
else:
logging.warning("No machine.py found")
settings.configure(
SECRET_KEY=random_string(32),
DATABASES=get_config('DATABASES'),
INSTALLED_APPS=(module_name, 'giotto')
)
ss = get_config('session_store', None)
if ss:
class_ = switchout_keyvalue(ss)
setattr(giotto._config, "session_store", class_())
cache_engine = get_config("cache", None)
if hasattr(cache_engine, 'lower'):
# session engine was passed in as string, exchange for engine object.
class_ = switchout_keyvalue(cache_engine)
e = class_(host=get_config("cache_host", "localhost"))
setattr(giotto._config, "cache_engine", e) |
def complete(self):
"""is the game over?"""
if None not in [v for v in self.squares]:
return True
if self.winner() is not None:
return True
return False | is the game over? | Below is the instruction that describes the task:
### Input:
is the game over?
### Response:
def complete(self):
"""is the game over?"""
if None not in [v for v in self.squares]:
return True
if self.winner() is not None:
return True
return False |
def bend_rounded_Crane(Di, angle, rc=None, bend_diameters=None):
r'''Calculates the loss coefficient for any rounded bend in a pipe
according to the Crane TP 410M [1]_ method. This method effectively uses
an interpolation from tabulated values in [1]_ for friction factor
multipliers vs. curvature radius.
.. figure:: fittings/bend_rounded.png
:scale: 30 %
:alt: rounded bend; after [1]_
Parameters
----------
Di : float
Inside diameter of pipe, [m]
angle : float
Angle of bend, [degrees]
rc : float, optional
Radius of curvature of the entrance, optional [m]
bend_diameters : float, optional (used if rc not provided)
Number of diameters of pipe making up the bend radius [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
The Crane method does match the trend of increased pressure drop as
roughness increases.
The points in [1]_ are extrapolated to other angles via a well-fitting
Chebyshev approximation, whose accuracy can be seen in the below plot.
.. plot:: plots/bend_rounded_Crane.py
Examples
--------
>>> bend_rounded_Crane(Di=.4020, rc=.4*5, angle=30)
0.09321910015613409
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
'''
if not rc:
if bend_diameters is None:
bend_diameters = 5.0
rc = Di*bend_diameters
fd = ft_Crane(Di)
radius_ratio = rc/Di
if radius_ratio < 1.0:
radius_ratio = 1.0
elif radius_ratio > 20.0:
radius_ratio = 20.0
factor = horner(bend_rounded_Crane_coeffs, 0.105263157894736836*(radius_ratio - 10.5))
K = fd*factor
K = (angle/90.0 - 1.0)*(0.25*pi*fd*radius_ratio + 0.5*K) + K
return K | r'''Calculates the loss coefficient for any rounded bend in a pipe
according to the Crane TP 410M [1]_ method. This method effectively uses
an interpolation from tabulated values in [1]_ for friction factor
multipliers vs. curvature radius.
.. figure:: fittings/bend_rounded.png
:scale: 30 %
:alt: rounded bend; after [1]_
Parameters
----------
Di : float
Inside diameter of pipe, [m]
angle : float
Angle of bend, [degrees]
rc : float, optional
Radius of curvature of the entrance, optional [m]
bend_diameters : float, optional (used if rc not provided)
Number of diameters of pipe making up the bend radius [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
The Crane method does match the trend of increased pressure drop as
roughness increases.
The points in [1]_ are extrapolated to other angles via a well-fitting
Chebyshev approximation, whose accuracy can be seen in the below plot.
.. plot:: plots/bend_rounded_Crane.py
Examples
--------
>>> bend_rounded_Crane(Di=.4020, rc=.4*5, angle=30)
0.09321910015613409
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009. | Below is the instruction that describes the task:
### Input:
r'''Calculates the loss coefficient for any rounded bend in a pipe
according to the Crane TP 410M [1]_ method. This method effectively uses
an interpolation from tabulated values in [1]_ for friction factor
multipliers vs. curvature radius.
.. figure:: fittings/bend_rounded.png
:scale: 30 %
:alt: rounded bend; after [1]_
Parameters
----------
Di : float
Inside diameter of pipe, [m]
angle : float
Angle of bend, [degrees]
rc : float, optional
Radius of curvature of the entrance, optional [m]
bend_diameters : float, optional (used if rc not provided)
Number of diameters of pipe making up the bend radius [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
The Crane method does match the trend of increased pressure drop as
roughness increases.
The points in [1]_ are extrapolated to other angles via a well-fitting
Chebyshev approximation, whose accuracy can be seen in the below plot.
.. plot:: plots/bend_rounded_Crane.py
Examples
--------
>>> bend_rounded_Crane(Di=.4020, rc=.4*5, angle=30)
0.09321910015613409
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
### Response:
def bend_rounded_Crane(Di, angle, rc=None, bend_diameters=None):
r'''Calculates the loss coefficient for any rounded bend in a pipe
according to the Crane TP 410M [1]_ method. This method effectively uses
an interpolation from tabulated values in [1]_ for friction factor
multipliers vs. curvature radius.
.. figure:: fittings/bend_rounded.png
:scale: 30 %
:alt: rounded bend; after [1]_
Parameters
----------
Di : float
Inside diameter of pipe, [m]
angle : float
Angle of bend, [degrees]
rc : float, optional
Radius of curvature of the entrance, optional [m]
bend_diameters : float, optional (used if rc not provided)
Number of diameters of pipe making up the bend radius [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
The Crane method does match the trend of increased pressure drop as
roughness increases.
The points in [1]_ are extrapolated to other angles via a well-fitting
Chebyshev approximation, whose accuracy can be seen in the below plot.
.. plot:: plots/bend_rounded_Crane.py
Examples
--------
>>> bend_rounded_Crane(Di=.4020, rc=.4*5, angle=30)
0.09321910015613409
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
'''
if not rc:
if bend_diameters is None:
bend_diameters = 5.0
rc = Di*bend_diameters
fd = ft_Crane(Di)
radius_ratio = rc/Di
if radius_ratio < 1.0:
radius_ratio = 1.0
elif radius_ratio > 20.0:
radius_ratio = 20.0
factor = horner(bend_rounded_Crane_coeffs, 0.105263157894736836*(radius_ratio - 10.5))
K = fd*factor
K = (angle/90.0 - 1.0)*(0.25*pi*fd*radius_ratio + 0.5*K) + K
return K |
def post(self):
'''
:ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:status 500: |500|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
try:
if not isinstance(self.request_payload, dict):
self.send_error(400)
return
creds = {'username': self.request_payload['username'],
'password': self.request_payload['password'],
'eauth': self.request_payload['eauth'],
}
# if any of the args are missing, its a bad request
except KeyError:
self.send_error(400)
return
token = self.application.auth.mk_token(creds)
if 'token' not in token:
# TODO: nicer error message
# 'Could not authenticate using provided credentials')
self.send_error(401)
# return since we don't want to execute any more
return
# Grab eauth config for the current backend for the current user
try:
eauth = self.application.opts['external_auth'][token['eauth']]
# Get sum of '*' perms, user-specific perms, and group-specific perms
_perms = eauth.get(token['name'], [])
_perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
_perms.extend(eauth['{0}%'.format(group)])
# dedup. perm can be a complex dict, so we cant use set
perms = []
for perm in _perms:
if perm not in perms:
perms.append(perm)
# If we can't find the creds, then they aren't authorized
except KeyError:
self.send_error(401)
return
except (AttributeError, IndexError):
log.debug(
"Configuration for external_auth malformed for eauth '%s', "
"and user '%s'.", token.get('eauth'), token.get('name'),
exc_info=True
)
# TODO better error -- 'Configuration for external_auth could not be read.'
self.send_error(500)
return
ret = {'return': [{
'token': token['token'],
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms,
}]}
self.write(self.serialize(ret)) | :ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:status 500: |500|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}} | Below is the instruction that describes the task:
### Input:
:ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:status 500: |500|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
### Response:
def post(self):
'''
:ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:status 500: |500|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
try:
if not isinstance(self.request_payload, dict):
self.send_error(400)
return
creds = {'username': self.request_payload['username'],
'password': self.request_payload['password'],
'eauth': self.request_payload['eauth'],
}
# if any of the args are missing, its a bad request
except KeyError:
self.send_error(400)
return
token = self.application.auth.mk_token(creds)
if 'token' not in token:
# TODO: nicer error message
# 'Could not authenticate using provided credentials')
self.send_error(401)
# return since we don't want to execute any more
return
# Grab eauth config for the current backend for the current user
try:
eauth = self.application.opts['external_auth'][token['eauth']]
# Get sum of '*' perms, user-specific perms, and group-specific perms
_perms = eauth.get(token['name'], [])
_perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
_perms.extend(eauth['{0}%'.format(group)])
# dedup. perm can be a complex dict, so we cant use set
perms = []
for perm in _perms:
if perm not in perms:
perms.append(perm)
# If we can't find the creds, then they aren't authorized
except KeyError:
self.send_error(401)
return
except (AttributeError, IndexError):
log.debug(
"Configuration for external_auth malformed for eauth '%s', "
"and user '%s'.", token.get('eauth'), token.get('name'),
exc_info=True
)
# TODO better error -- 'Configuration for external_auth could not be read.'
self.send_error(500)
return
ret = {'return': [{
'token': token['token'],
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms,
}]}
self.write(self.serialize(ret)) |
def poll(self):
"""Poll from the buffer
It is a non-blocking operation, and when the buffer is empty, it raises Queue.Empty exception
"""
try:
# non-blocking
ret = self._buffer.get(block=False)
if self._producer_callback is not None:
self._producer_callback()
return ret
except Queue.Empty:
Log.debug("%s: Empty in poll()" % str(self))
raise Queue.Empty | Poll from the buffer
It is a non-blocking operation, and when the buffer is empty, it raises Queue.Empty exception | Below is the instruction that describes the task:
### Input:
Poll from the buffer
It is a non-blocking operation, and when the buffer is empty, it raises Queue.Empty exception
### Response:
def poll(self):
"""Poll from the buffer
It is a non-blocking operation, and when the buffer is empty, it raises Queue.Empty exception
"""
try:
# non-blocking
ret = self._buffer.get(block=False)
if self._producer_callback is not None:
self._producer_callback()
return ret
except Queue.Empty:
Log.debug("%s: Empty in poll()" % str(self))
raise Queue.Empty |
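Queue.Empty is the expected "nothing available right now" signal, so callers typically drain in a loop and break on it. A standalone sketch of that pattern using the stdlib queue module (the code above uses the Python 2 style Queue spelling):

import queue

buffer = queue.Queue()
buffer.put('tuple-1')

items = []
while True:
    try:
        items.append(buffer.get(block=False))  # the same non-blocking get used by poll()
    except queue.Empty:                        # buffer drained
        break
print(items)  # ['tuple-1']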
def call(self, method, *args, **params):
"""Calls a method on the server."""
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id) | Calls a method on the server. | Below is the instruction that describes the task:
### Input:
Calls a method on the server.
### Response:
def call(self, method, *args, **params):
"""Calls a method on the server."""
transaction_id = params.get("transaction_id")
if not transaction_id:
self.transaction_id += 1
transaction_id = self.transaction_id
obj = params.get("obj")
args = [method, transaction_id, obj] + list(args)
args_encoded = map(lambda x: encode_amf(x), args)
body = b"".join(args_encoded)
format = params.get("format", PACKET_SIZE_MEDIUM)
channel = params.get("channel", 0x03)
packet = RTMPPacket(type=PACKET_TYPE_INVOKE,
format=format, channel=channel,
body=body)
self.send_packet(packet)
return RTMPCall(self, transaction_id) |
def simple_response_str(command, status, status_text, content=""):
""" Creates an OSP response XML string.
Arguments:
command (str): OSP Command to respond to.
status (int): Status of the response.
status_text (str): Status text of the response.
content (str): Text part of the response XML element.
Return:
String of response in xml format.
"""
response = Element('%s_response' % command)
for name, value in [('status', str(status)), ('status_text', status_text)]:
response.set(name, str(value))
if isinstance(content, list):
for elem in content:
response.append(elem)
elif isinstance(content, Element):
response.append(content)
else:
response.text = content
return tostring(response) | Creates an OSP response XML string.
Arguments:
command (str): OSP Command to respond to.
status (int): Status of the response.
status_text (str): Status text of the response.
content (str): Text part of the response XML element.
Return:
String of response in xml format. | Below is the instruction that describes the task:
### Input:
Creates an OSP response XML string.
Arguments:
command (str): OSP Command to respond to.
status (int): Status of the response.
status_text (str): Status text of the response.
content (str): Text part of the response XML element.
Return:
String of response in xml format.
### Response:
def simple_response_str(command, status, status_text, content=""):
""" Creates an OSP response XML string.
Arguments:
command (str): OSP Command to respond to.
status (int): Status of the response.
status_text (str): Status text of the response.
content (str): Text part of the response XML element.
Return:
String of response in xml format.
"""
response = Element('%s_response' % command)
for name, value in [('status', str(status)), ('status_text', status_text)]:
response.set(name, str(value))
if isinstance(content, list):
for elem in content:
response.append(elem)
elif isinstance(content, Element):
response.append(content)
else:
response.text = content
return tostring(response) |
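For example, a successful reply to a hypothetical 'get_version' command would serialize roughly as below, assuming simple_response_str and its ElementTree imports (Element, tostring) are available; note that tostring returns bytes by default on Python 3:

print(simple_response_str('get_version', 200, 'OK', 'OSP 1.2'))
# b'<get_version_response status="200" status_text="OK">OSP 1.2</get_version_response>'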
def data(self):
"""This is the data object serialized to the js layer"""
content = {
'form_data': self.form_data,
'token': self.token,
'viz_name': self.viz_type,
'filter_select_enabled': self.datasource.filter_select_enabled,
}
return content | This is the data object serialized to the js layer | Below is the instruction that describes the task:
### Input:
This is the data object serialized to the js layer
### Response:
def data(self):
"""This is the data object serialized to the js layer"""
content = {
'form_data': self.form_data,
'token': self.token,
'viz_name': self.viz_type,
'filter_select_enabled': self.datasource.filter_select_enabled,
}
return content |
def save(self):
"""
Save profile settings into user profile directory
"""
config = self.profiledir + '/config'
if not isdir(self.profiledir):
makedirs(self.profiledir)
cp = SafeConfigParser()
cp.add_section('ssh')
cp.set('ssh', 'private_key', self.ssh_private_key)
cp.set('ssh', 'public_key', self.ssh_public_key)
with open(config, 'w') as cfile:
cp.write(cfile) | Save profile settings into user profile directory | Below is the instruction that describes the task:
### Input:
Save profile settings into user profile directory
### Response:
def save(self):
"""
Save profile settings into user profile directory
"""
config = self.profiledir + '/config'
if not isdir(self.profiledir):
makedirs(self.profiledir)
cp = SafeConfigParser()
cp.add_section('ssh')
cp.set('ssh', 'private_key', self.ssh_private_key)
cp.set('ssh', 'public_key', self.ssh_public_key)
with open(config, 'w') as cfile:
cp.write(cfile) |
def _find_workflows(mcs, attrs):
"""Finds all occurrences of a workflow in the attributes definitions.
Returns:
dict(str => StateField): maps an attribute name to a StateField
describing the related Workflow.
"""
workflows = {}
for attribute, value in attrs.items():
if isinstance(value, Workflow):
workflows[attribute] = StateField(value)
return workflows | Finds all occurrences of a workflow in the attributes definitions.
Returns:
dict(str => StateField): maps an attribute name to a StateField
describing the related Workflow. | Below is the instruction that describes the task:
### Input:
Finds all occurrences of a workflow in the attributes definitions.
Returns:
dict(str => StateField): maps an attribute name to a StateField
describing the related Workflow.
### Response:
def _find_workflows(mcs, attrs):
"""Finds all occurrences of a workflow in the attributes definitions.
Returns:
dict(str => StateField): maps an attribute name to a StateField
describing the related Workflow.
"""
workflows = {}
for attribute, value in attrs.items():
if isinstance(value, Workflow):
workflows[attribute] = StateField(value)
return workflows |
def update(self, pbar, width):
'Updates the progress bar and its subcomponents'
left, marker, right = (format_updatable(i, pbar) for i in
(self.left, self.marker, self.right))
width -= len(left) + len(right)
# Marker must *always* have length of 1
marker *= int(pbar.currval / pbar.maxval * width)
if self.fill_left:
return '%s%s%s' % (left, marker.ljust(width, self.fill), right)
else:
return '%s%s%s' % (left, marker.rjust(width, self.fill), right) | Updates the progress bar and its subcomponents | Below is the instruction that describes the task:
### Input:
Updates the progress bar and its subcomponents
### Response:
def update(self, pbar, width):
'Updates the progress bar and its subcomponents'
left, marker, right = (format_updatable(i, pbar) for i in
(self.left, self.marker, self.right))
width -= len(left) + len(right)
# Marker must *always* have length of 1
marker *= int(pbar.currval / pbar.maxval * width)
if self.fill_left:
return '%s%s%s' % (left, marker.ljust(width, self.fill), right)
else:
return '%s%s%s' % (left, marker.rjust(width, self.fill), right) |
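The bar body is just the marker character repeated in proportion to progress and then padded out to the remaining width. A standalone illustration of that arithmetic, detached from any particular progress bar class:

currval, maxval, width = 30, 100, 20
marker = '#' * int(currval / maxval * width)  # 6 characters for 30 % of a 20-wide bar
print('|%s|' % marker.ljust(width, ' '))      # |######              |  (fill_left behaviour)
print('|%s|' % marker.rjust(width, ' '))      # |              ######|  (fill on the left instead)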
def circos_radius(n_nodes, node_r):
"""
Automatically computes the origin-to-node centre radius of the Circos plot
using the triangle equality sine rule.
a / sin(A) = b / sin(B) = c / sin(C)
:param n_nodes: the number of nodes in the plot.
:type n_nodes: int
:param node_r: the radius of each node.
:type node_r: float
:returns: Origin-to-node centre radius.
"""
A = 2 * np.pi / n_nodes # noqa
B = (np.pi - A) / 2 # noqa
a = 2 * node_r
return a * np.sin(B) / np.sin(A) | Automatically computes the origin-to-node centre radius of the Circos plot
using the triangle equality sine rule.
a / sin(A) = b / sin(B) = c / sin(C)
:param n_nodes: the number of nodes in the plot.
:type n_nodes: int
:param node_r: the radius of each node.
:type node_r: float
:returns: Origin-to-node centre radius. | Below is the instruction that describes the task:
### Input:
Automatically computes the origin-to-node centre radius of the Circos plot
using the triangle equality sine rule.
a / sin(A) = b / sin(B) = c / sin(C)
:param n_nodes: the number of nodes in the plot.
:type n_nodes: int
:param node_r: the radius of each node.
:type node_r: float
:returns: Origin-to-node centre radius.
### Response:
def circos_radius(n_nodes, node_r):
"""
Automatically computes the origin-to-node centre radius of the Circos plot
using the triangle equality sine rule.
a / sin(A) = b / sin(B) = c / sin(C)
:param n_nodes: the number of nodes in the plot.
:type n_nodes: int
:param node_r: the radius of each node.
:type node_r: float
:returns: Origin-to-node centre radius.
"""
A = 2 * np.pi / n_nodes # noqa
B = (np.pi - A) / 2 # noqa
a = 2 * node_r
return a * np.sin(B) / np.sin(A) |
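A quick check of the sine-rule formula, assuming circos_radius (and numpy as np) from above: six nodes of radius 1 should sit on a circle of radius 2, because adjacent centres are then 2 * 2 * sin(pi/6) = 2 apart, i.e. the nodes just touch.

print(circos_radius(n_nodes=6, node_r=1.0))  # 2.0 (up to float rounding)
print(2 * 2.0 * np.sin(np.pi / 6))           # 2.0 -- chord between adjacent node centres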
def external2internal(xe, bounds):
""" Convert a series of external variables to internal variables"""
xi = np.empty_like(xe)
for i, (v, bound) in enumerate(zip(xe, bounds)):
a = bound[0] # minimum
b = bound[1] # maximum
if a == None and b == None: # No constraints
xi[i] = v
elif b == None: # only min
xi[i] = np.sqrt((v - a + 1.) ** 2. - 1)
elif a == None: # only max
xi[i] = np.sqrt((b - v + 1.) ** 2. - 1)
else: # both min and max
xi[i] = np.arcsin((2. * (v - a) / (b - a)) - 1.)
return xi | Convert a series of external variables to internal variables | Below is the instruction that describes the task:
### Input:
Convert a series of external variables to internal variables
### Response:
def external2internal(xe, bounds):
""" Convert a series of external variables to internal variables"""
xi = np.empty_like(xe)
for i, (v, bound) in enumerate(zip(xe, bounds)):
a = bound[0] # minimum
b = bound[1] # maximum
if a == None and b == None: # No constraints
xi[i] = v
elif b == None: # only min
xi[i] = np.sqrt((v - a + 1.) ** 2. - 1)
elif a == None: # only max
xi[i] = np.sqrt((b - v + 1.) ** 2. - 1)
else: # both min and max
xi[i] = np.arcsin((2. * (v - a) / (b - a)) - 1.)
return xi |
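A small worked example, assuming external2internal (and numpy as np) from above: the midpoint of a two-sided interval maps to arcsin(0) = 0, and a value bounded only from below goes through the square-root branch.

xe = np.array([5.0, 2.0])            # external, constrained values
bounds = [(0.0, 10.0), (1.0, None)]  # both bounds, then a lower bound only
print(external2internal(xe, bounds)) # [0.0, 1.732...] -- arcsin(0) and sqrt((2 - 1 + 1)**2 - 1)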
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index(['a', 'b', 'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = (r[start:end] for start, end in zip(counts, counts[1:]))
result = dict(zip(categories, result))
return result | Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index(['a', 'b', 'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])} | Below is the instruction that describes the task:
### Input:
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index(['a', 'b', 'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
### Response:
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
Example
-------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index(['a', 'b', 'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = (r[start:end] for start, end in zip(counts, counts[1:]))
result = dict(zip(categories, result))
return result |
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
"""Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
"""
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum() | Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score | Below is the instruction that describes the task:
### Input:
Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
### Response:
def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
"""Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
"""
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum() |
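Because the helper mirrors scikit-learn's computation, a quick sanity check is to compare it against sklearn.metrics.mutual_info_score (this assumes _contingency_matrix and the snippet's other internals are importable); two identical two-cluster labelings should give ln(2) ≈ 0.693 nats:

import numpy as np
from sklearn.metrics import mutual_info_score

ref = np.array([0, 0, 1, 1])
est = np.array([0, 0, 1, 1])
print(mutual_info_score(ref, est))  # 0.6931... = ln(2); _mutual_info_score(ref, est) should agree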
def merge_close(events, min_interval, merge_to_longer=False):
"""Merge events that are separated by a less than a minimum interval.
Parameters
----------
events : list of dict
events with 'start' and 'end' times, from one or several channels.
**Events must be sorted by their start time.**
min_interval : float
minimum delay between consecutive events, in seconds
merge_to_longer : bool (default: False)
If True, info (chan, peak, etc.) from the longer of the 2 events is
kept. Otherwise, info from the earlier onset spindle is kept.
Returns
-------
list of dict
original events list with close events merged.
"""
half_iv = min_interval / 2
merged = []
for higher in events:
if not merged:
merged.append(higher)
else:
lower = merged[-1]
if higher['start'] - half_iv <= lower['end'] + half_iv:
if merge_to_longer and (higher['end'] - higher['start'] >
lower['end'] - lower['start']):
start = min(lower['start'], higher['start'])
higher.update({'start': start})
merged[-1] = higher
else:
end = max(lower['end'], higher['end'])
merged[-1].update({'end': end})
else:
merged.append(higher)
return merged | Merge events that are separated by a less than a minimum interval.
Parameters
----------
events : list of dict
events with 'start' and 'end' times, from one or several channels.
**Events must be sorted by their start time.**
min_interval : float
minimum delay between consecutive events, in seconds
merge_to_longer : bool (default: False)
If True, info (chan, peak, etc.) from the longer of the 2 events is
kept. Otherwise, info from the earlier onset spindle is kept.
Returns
-------
list of dict
original events list with close events merged. | Below is the instruction that describes the task:
### Input:
Merge events that are separated by a less than a minimum interval.
Parameters
----------
events : list of dict
events with 'start' and 'end' times, from one or several channels.
**Events must be sorted by their start time.**
min_interval : float
minimum delay between consecutive events, in seconds
merge_to_longer : bool (default: False)
If True, info (chan, peak, etc.) from the longer of the 2 events is
kept. Otherwise, info from the earlier onset spindle is kept.
Returns
-------
list of dict
original events list with close events merged.
### Response:
def merge_close(events, min_interval, merge_to_longer=False):
"""Merge events that are separated by a less than a minimum interval.
Parameters
----------
events : list of dict
events with 'start' and 'end' times, from one or several channels.
**Events must be sorted by their start time.**
min_interval : float
minimum delay between consecutive events, in seconds
merge_to_longer : bool (default: False)
If True, info (chan, peak, etc.) from the longer of the 2 events is
kept. Otherwise, info from the earlier onset spindle is kept.
Returns
-------
list of dict
original events list with close events merged.
"""
half_iv = min_interval / 2
merged = []
for higher in events:
if not merged:
merged.append(higher)
else:
lower = merged[-1]
if higher['start'] - half_iv <= lower['end'] + half_iv:
if merge_to_longer and (higher['end'] - higher['start'] >
lower['end'] - lower['start']):
start = min(lower['start'], higher['start'])
higher.update({'start': start})
merged[-1] = higher
else:
end = max(lower['end'], higher['end'])
merged[-1].update({'end': end})
else:
merged.append(higher)
return merged |
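A small illustration, assuming merge_close as defined above: two events 0.3 s apart are fused when min_interval is 0.5 s, and because merge_to_longer defaults to False the earlier event's metadata is kept.

events = [{'start': 1.0, 'end': 2.0, 'chan': 'Cz'},
          {'start': 2.3, 'end': 2.8, 'chan': 'Fz'}]
print(merge_close(events, min_interval=0.5))
# [{'start': 1.0, 'end': 2.8, 'chan': 'Cz'}] -- the 0.3 s gap is below min_interval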
def evaluate(self, dataset, metric='auto', **kwargs):
"""
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
An SFrame having the same feature columns as provided when creating
the model.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'log_loss' : Log loss
- 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an ROC curve
For more flexibility in calculating evaluation metrics, use the
:class:`~turicreate.evaluation` module.
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict, classify
"""
m = self.__proxy__['classifier']
target = self.__proxy__['target']
f = _BOW_FEATURE_EXTRACTOR
test = f(dataset, target)
return m.evaluate(test, metric, **kwargs) | Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
An SFrame having the same feature columns as provided when creating
the model.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'log_loss' : Log loss
- 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an ROC curve
For more flexibility in calculating evaluation metrics, use the
:class:`~turicreate.evaluation` module.
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict, classify | Below is the instruction that describes the task:
### Input:
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
An SFrame having the same feature columns as provided when creating
the model.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'log_loss' : Log loss
- 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an ROC curve
For more flexibility in calculating evaluation metrics, use the
:class:`~turicreate.evaluation` module.
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict, classify
### Response:
def evaluate(self, dataset, metric='auto', **kwargs):
"""
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
An SFrame having the same feature columns as provided when creating
the model.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'log_loss' : Log loss
- 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an ROC curve
For more flexibility in calculating evaluation metrics, use the
:class:`~turicreate.evaluation` module.
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict, classify
"""
m = self.__proxy__['classifier']
target = self.__proxy__['target']
f = _BOW_FEATURE_EXTRACTOR
test = f(dataset, target)
return m.evaluate(test, metric, **kwargs) |
def plot_weights(self, h, **kwargs):
""" Plot the weights from the aggregating algorithm
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- A plot of the weights for each model constituent over time
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
weights, _, _ = self.run(h=h)
plt.figure(figsize=figsize)
plt.plot(self.index[-h:],weights)
plt.legend(self.model_names)
plt.show() | Plot the weights from the aggregating algorithm
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- A plot of the weights for each model constituent over time | Below is the the instruction that describes the task:
### Input:
Plot the weights from the aggregating algorithm
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- A plot of the weights for each model constituent over time
### Response:
def plot_weights(self, h, **kwargs):
""" Plot the weights from the aggregating algorithm
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- A plot of the weights for each model constituent over time
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
weights, _, _ = self.run(h=h)
plt.figure(figsize=figsize)
plt.plot(self.index[-h:],weights)
plt.legend(self.model_names)
plt.show() |
def polynomial_norm(coeffs):
r"""Computes :math:`L_2` norm of polynomial on :math:`\left[0, 1\right]`.
We have
.. math::
\left\langle f, f \right\rangle = \sum_{i, j}
\int_0^1 c_i c_j x^{i + j} \, dx = \sum_{i, j}
\frac{c_i c_j}{i + j + 1} = \sum_{i} \frac{c_i^2}{2 i + 1}
+ 2 \sum_{j > i} \frac{c_i c_j}{i + j + 1}.
Args:
coeffs (numpy.ndarray): ``d + 1``-array of coefficients in monomial /
power basis.
Returns:
float: The :math:`L_2` norm of the polynomial.
"""
num_coeffs, = coeffs.shape
result = 0.0
for i in six.moves.xrange(num_coeffs):
coeff_i = coeffs[i]
result += coeff_i * coeff_i / (2.0 * i + 1.0)
for j in six.moves.xrange(i + 1, num_coeffs):
coeff_j = coeffs[j]
result += 2.0 * coeff_i * coeff_j / (i + j + 1.0)
return np.sqrt(result) | r"""Computes :math:`L_2` norm of polynomial on :math:`\left[0, 1\right]`.
We have
.. math::
\left\langle f, f \right\rangle = \sum_{i, j}
\int_0^1 c_i c_j x^{i + j} \, dx = \sum_{i, j}
\frac{c_i c_j}{i + j + 1} = \sum_{i} \frac{c_i^2}{2 i + 1}
+ 2 \sum_{j > i} \frac{c_i c_j}{i + j + 1}.
Args:
coeffs (numpy.ndarray): ``d + 1``-array of coefficients in monomial /
power basis.
Returns:
float: The :math:`L_2` norm of the polynomial. | Below is the the instruction that describes the task:
### Input:
r"""Computes :math:`L_2` norm of polynomial on :math:`\left[0, 1\right]`.
We have
.. math::
\left\langle f, f \right\rangle = \sum_{i, j}
\int_0^1 c_i c_j x^{i + j} \, dx = \sum_{i, j}
\frac{c_i c_j}{i + j + 1} = \sum_{i} \frac{c_i^2}{2 i + 1}
+ 2 \sum_{j > i} \frac{c_i c_j}{i + j + 1}.
Args:
coeffs (numpy.ndarray): ``d + 1``-array of coefficients in monomial /
power basis.
Returns:
float: The :math:`L_2` norm of the polynomial.
### Response:
def polynomial_norm(coeffs):
r"""Computes :math:`L_2` norm of polynomial on :math:`\left[0, 1\right]`.
We have
.. math::
\left\langle f, f \right\rangle = \sum_{i, j}
\int_0^1 c_i c_j x^{i + j} \, dx = \sum_{i, j}
\frac{c_i c_j}{i + j + 1} = \sum_{i} \frac{c_i^2}{2 i + 1}
+ 2 \sum_{j > i} \frac{c_i c_j}{i + j + 1}.
Args:
coeffs (numpy.ndarray): ``d + 1``-array of coefficients in monomial /
power basis.
Returns:
float: The :math:`L_2` norm of the polynomial.
"""
num_coeffs, = coeffs.shape
result = 0.0
for i in six.moves.xrange(num_coeffs):
coeff_i = coeffs[i]
result += coeff_i * coeff_i / (2.0 * i + 1.0)
for j in six.moves.xrange(i + 1, num_coeffs):
coeff_j = coeffs[j]
result += 2.0 * coeff_i * coeff_j / (i + j + 1.0)
return np.sqrt(result) |
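A quick sanity check, added as a sketch: for f(x) = 1 + 2x the inner product is 1/1 + 2*(1*2)/2 + 4/3 = 13/3, so the norm should be sqrt(13/3); the same value can be recovered with numpy's polynomial helpers.
import numpy as np
coeffs = np.array([1.0, 2.0])                               # f(x) = 1 + 2x, ascending powers
expected = np.sqrt(13.0 / 3.0)                              # closed form on [0, 1]
square = np.polynomial.polynomial.polymul(coeffs, coeffs)   # coefficients of f(x)^2
antideriv = np.polynomial.polynomial.polyint(square)        # antiderivative, constant term 0
numeric = np.sqrt(np.polynomial.polynomial.polyval(1.0, antideriv))
assert np.isclose(expected, numeric)                        # polynomial_norm(coeffs) agrees with both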
def _validate_output_data(
self, original_res, serialized_res, formatted_res, request):
""" Override to not validate doc output. """
if self._is_doc_request(request):
return
else:
return super(DocumentedResource, self)._validate_output_data(
original_res, serialized_res, formatted_res, request) | Override to not validate doc output. | Below is the the instruction that describes the task:
### Input:
Override to not validate doc output.
### Response:
def _validate_output_data(
self, original_res, serialized_res, formatted_res, request):
""" Override to not validate doc output. """
if self._is_doc_request(request):
return
else:
return super(DocumentedResource, self)._validate_output_data(
original_res, serialized_res, formatted_res, request) |
def format(self, response_data):
"""
Make Flask `Response` object, with data returned as a generator for the CSV content
The CSV is built from JSON-like object (Python `dict` or list of `dicts`)
"""
if "items" in response_data:
list_response_data = response_data["items"]
else:
list_response_data = [response_data]
write_column_names = type(list_response_data[0]) not in (tuple, list)
output = StringIO()
csv_writer = writer(output, quoting=QUOTE_MINIMAL)
if write_column_names:
column_names = self.get_column_names(list_response_data)
csv_writer.writerow(column_names)
for item in list_response_data:
csv_writer.writerow(
[item[column] for column in column_names] if write_column_names else list(item)
)
# Ideally we'd want to `yield` each line to stream the content
# But something downstream seems to break streaming
yield output.getvalue() | Make Flask `Response` object, with data returned as a generator for the CSV content
The CSV is built from JSON-like object (Python `dict` or list of `dicts`) | Below is the the instruction that describes the task:
### Input:
Make Flask `Response` object, with data returned as a generator for the CSV content
The CSV is built from JSON-like object (Python `dict` or list of `dicts`)
### Response:
def format(self, response_data):
"""
Make Flask `Response` object, with data returned as a generator for the CSV content
The CSV is built from JSON-like object (Python `dict` or list of `dicts`)
"""
if "items" in response_data:
list_response_data = response_data["items"]
else:
list_response_data = [response_data]
write_column_names = type(list_response_data[0]) not in (tuple, list)
output = StringIO()
csv_writer = writer(output, quoting=QUOTE_MINIMAL)
if write_column_names:
column_names = self.get_column_names(list_response_data)
csv_writer.writerow(column_names)
for item in list_response_data:
csv_writer.writerow(
[item[column] for column in column_names] if write_column_names else list(item)
)
# Ideally we'd want to `yield` each line to stream the content
# But something downstream seems to break streaming
yield output.getvalue() |
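A standalone sketch of the same flow; it mirrors the method's core logic and uses sorted() as a stand-in for get_column_names(), which is not shown in this entry.
from csv import writer, QUOTE_MINIMAL
from io import StringIO
payload = {"items": [{"id": 1, "name": "alpha"}, {"id": 2, "name": "beta"}]}  # illustrative input
rows = payload["items"]
column_names = sorted(rows[0].keys())               # stand-in for get_column_names()
output = StringIO()
csv_writer = writer(output, quoting=QUOTE_MINIMAL)
csv_writer.writerow(column_names)
for item in rows:
    csv_writer.writerow([item[column] for column in column_names])
print(output.getvalue())                            # id,name / 1,alpha / 2,beta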
def plot_points(points_arcsec, array, units, kpc_per_arcsec, pointsize, zoom_offset_arcsec):
"""Plot a set of points over the array of data on the figure.
Parameters
-----------
positions : [[]]
Lists of (y,x) coordinates on the image which are plotted as colored dots, to highlight specific pixels.
array : data.array.scaled_array.ScaledArray
The 2D array of data which is plotted.
units : str
The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').
kpc_per_arcsec : float or None
The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.
pointsize : int
The size of the points plotted to show the input positions.
"""
if points_arcsec is not None:
points_arcsec = list(map(lambda position_set: np.asarray(position_set), points_arcsec))
point_colors = itertools.cycle(["m", "y", "r", "w", "c", "b", "g", "k"])
for point_set_arcsec in points_arcsec:
if zoom_offset_arcsec is not None:
point_set_arcsec -= zoom_offset_arcsec
point_set_units = convert_grid_units(array=array, grid_arcsec=point_set_arcsec, units=units,
kpc_per_arcsec=kpc_per_arcsec)
plt.scatter(y=point_set_units[:,0], x=point_set_units[:,1], color=next(point_colors), s=pointsize) | Plot a set of points over the array of data on the figure.
Parameters
-----------
positions : [[]]
Lists of (y,x) coordinates on the image which are plotted as colored dots, to highlight specific pixels.
array : data.array.scaled_array.ScaledArray
The 2D array of data which is plotted.
units : str
The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').
kpc_per_arcsec : float or None
The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.
pointsize : int
The size of the points plotted to show the input positions. | Below is the the instruction that describes the task:
### Input:
Plot a set of points over the array of data on the figure.
Parameters
-----------
positions : [[]]
Lists of (y,x) coordinates on the image which are plotted as colored dots, to highlight specific pixels.
array : data.array.scaled_array.ScaledArray
The 2D array of data which is plotted.
units : str
The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').
kpc_per_arcsec : float or None
The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.
pointsize : int
The size of the points plotted to show the input positions.
### Response:
def plot_points(points_arcsec, array, units, kpc_per_arcsec, pointsize, zoom_offset_arcsec):
"""Plot a set of points over the array of data on the figure.
Parameters
-----------
positions : [[]]
Lists of (y,x) coordinates on the image which are plotted as colored dots, to highlight specific pixels.
array : data.array.scaled_array.ScaledArray
The 2D array of data which is plotted.
units : str
The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').
kpc_per_arcsec : float or None
The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.
pointsize : int
The size of the points plotted to show the input positions.
"""
if points_arcsec is not None:
points_arcsec = list(map(lambda position_set: np.asarray(position_set), points_arcsec))
point_colors = itertools.cycle(["m", "y", "r", "w", "c", "b", "g", "k"])
for point_set_arcsec in points_arcsec:
if zoom_offset_arcsec is not None:
point_set_arcsec -= zoom_offset_arcsec
point_set_units = convert_grid_units(array=array, grid_arcsec=point_set_arcsec, units=units,
kpc_per_arcsec=kpc_per_arcsec)
plt.scatter(y=point_set_units[:,0], x=point_set_units[:,1], color=next(point_colors), s=pointsize) |
def add(self, index, var):
"""Add a minibatch of images to the monitor.
Args:
index (int): Index.
var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`):
A minibatch of images with ``(N, ..., C, H, W)`` format.
If C == 2, blue channel is appended with ones. If C > 3,
the array will be sliced to remove C > 3 sub-array.
"""
import nnabla as nn
from nnabla.utils.image_utils import imsave
if index != 0 and (index + 1) % self.interval != 0:
return
if isinstance(var, nn.Variable):
data = var.d.copy()
elif isinstance(var, nn.NdArray):
data = var.data.copy()
else:
assert isinstance(var, np.ndarray)
data = var.copy()
assert data.ndim > 2
channels = data.shape[-3]
data = data.reshape(-1, *data.shape[-3:])
data = data[:min(data.shape[0], self.num_images)]
data = self.normalize_method(data)
if channels > 3:
data = data[:, :3]
elif channels == 2:
data = np.concatenate(
[data, np.ones((data.shape[0], 1) + data.shape[-2:])], axis=1)
path_tmpl = os.path.join(self.save_dir, '{:06d}-{}.png')
for j in range(min(self.num_images, data.shape[0])):
img = data[j].transpose(1, 2, 0)
if img.shape[-1] == 1:
img = img[..., 0]
path = path_tmpl.format(index, '{:03d}'.format(j))
imsave(path, img)
if self.verbose:
logger.info("iter={} {{{}}} are written to {}.".format(
index, self.name, path_tmpl.format(index, '*'))) | Add a minibatch of images to the monitor.
Args:
index (int): Index.
var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`):
A minibatch of images with ``(N, ..., C, H, W)`` format.
If C == 2, blue channel is appended with ones. If C > 3,
the array will be sliced to remove C > 3 sub-array. | Below is the the instruction that describes the task:
### Input:
Add a minibatch of images to the monitor.
Args:
index (int): Index.
var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`):
A minibatch of images with ``(N, ..., C, H, W)`` format.
If C == 2, blue channel is appended with ones. If C > 3,
the array will be sliced to remove C > 3 sub-array.
### Response:
def add(self, index, var):
"""Add a minibatch of images to the monitor.
Args:
index (int): Index.
var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`):
A minibatch of images with ``(N, ..., C, H, W)`` format.
If C == 2, blue channel is appended with ones. If C > 3,
the array will be sliced to remove C > 3 sub-array.
"""
import nnabla as nn
from nnabla.utils.image_utils import imsave
if index != 0 and (index + 1) % self.interval != 0:
return
if isinstance(var, nn.Variable):
data = var.d.copy()
elif isinstance(var, nn.NdArray):
data = var.data.copy()
else:
assert isinstance(var, np.ndarray)
data = var.copy()
assert data.ndim > 2
channels = data.shape[-3]
data = data.reshape(-1, *data.shape[-3:])
data = data[:min(data.shape[0], self.num_images)]
data = self.normalize_method(data)
if channels > 3:
data = data[:, :3]
elif channels == 2:
data = np.concatenate(
[data, np.ones((data.shape[0], 1) + data.shape[-2:])], axis=1)
path_tmpl = os.path.join(self.save_dir, '{:06d}-{}.png')
for j in range(min(self.num_images, data.shape[0])):
img = data[j].transpose(1, 2, 0)
if img.shape[-1] == 1:
img = img[..., 0]
path = path_tmpl.format(index, '{:03d}'.format(j))
imsave(path, img)
if self.verbose:
logger.info("iter={} {{{}}} are written to {}.".format(
index, self.name, path_tmpl.format(index, '*'))) |
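A usage sketch, assuming this method belongs to NNabla's MonitorImage; the constructor arguments and names below are an assumption, not part of this entry.
import numpy as np
from nnabla.monitor import Monitor, MonitorImage
monitor = Monitor('tmp.monitor')                           # output directory for the monitor
monitor_image = MonitorImage('training_images', monitor, interval=10)
batch = np.random.rand(8, 1, 28, 28)                       # (N, C, H, W) minibatch
monitor_image.add(9, batch)                                # (9 + 1) % 10 == 0, so PNGs are written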
def __get_button(self, account_id, button_type, **kwargs):
"""Call documentation: `/subscription_plan/get_button
<https://www.wepay.com/developer/reference/subscription_plan#get_button>`_,
plus extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'account_id': account_id,
'button_type': button_type
}
return self.make_call(self.__get_button, params, kwargs) | Call documentation: `/subscription_plan/get_button
<https://www.wepay.com/developer/reference/subscription_plan#get_button>`_,
plus extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay` | Below is the the instruction that describes the task:
### Input:
Call documentation: `/subscription_plan/get_button
<https://www.wepay.com/developer/reference/subscription_plan#get_button>`_,
plus extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
### Response:
def __get_button(self, account_id, button_type, **kwargs):
"""Call documentation: `/subscription_plan/get_button
<https://www.wepay.com/developer/reference/subscription_plan#get_button>`_,
plus extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'account_id': account_id,
'button_type': button_type
}
return self.make_call(self.__get_button, params, kwargs) |
def update_query_parameters(url, query_parameters):
"""
Return url with updated query parameters.
Arguments:
url (str): Original url whose query parameters need to be updated.
query_parameters (dict): A dictionary containing query parameters to be added to course selection url.
Returns:
        (str): The URL with the updated query parameters.
"""
scheme, netloc, path, query_string, fragment = urlsplit(url)
url_params = parse_qs(query_string)
# Update url query parameters
url_params.update(query_parameters)
return urlunsplit(
(scheme, netloc, path, urlencode(sorted(url_params.items()), doseq=True), fragment),
) | Return url with updated query parameters.
Arguments:
url (str): Original url whose query parameters need to be updated.
query_parameters (dict): A dictionary containing query parameters to be added to course selection url.
Returns:
    (str): The URL with the updated query parameters. | Below is the the instruction that describes the task:
### Input:
Return url with updated query parameters.
Arguments:
url (str): Original url whose query parameters need to be updated.
query_parameters (dict): A dictionary containing query parameters to be added to course selection url.
Returns:
    (str): The URL with the updated query parameters.
### Response:
def update_query_parameters(url, query_parameters):
"""
Return url with updated query parameters.
Arguments:
url (str): Original url whose query parameters need to be updated.
query_parameters (dict): A dictionary containing query parameters to be added to course selection url.
Returns:
        (str): The URL with the updated query parameters.
"""
scheme, netloc, path, query_string, fragment = urlsplit(url)
url_params = parse_qs(query_string)
# Update url query parameters
url_params.update(query_parameters)
return urlunsplit(
(scheme, netloc, path, urlencode(sorted(url_params.items()), doseq=True), fragment),
) |
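For example (derived directly from the implementation above; the parameter ordering comes from the sorted() call):
url = 'https://example.com/courses?course_id=demo'         # illustrative URL
print(update_query_parameters(url, {'audit': 'true'}))
# https://example.com/courses?audit=true&course_id=demo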
def get_tasks(self):
"""
Return tasks as list of (name, function) tuples.
"""
def predicate(item):
return (inspect.isfunction(item) and
item.__name__ not in self._helper_names)
return inspect.getmembers(self._tasks, predicate) | Return tasks as list of (name, function) tuples. | Below is the the instruction that describes the task:
### Input:
Return tasks as list of (name, function) tuples.
### Response:
def get_tasks(self):
"""
Return tasks as list of (name, function) tuples.
"""
def predicate(item):
return (inspect.isfunction(item) and
item.__name__ not in self._helper_names)
return inspect.getmembers(self._tasks, predicate) |
def _delay(self, ms):
"""Implement default delay mechanism.
"""
if ms:
self.Delay(ms)
else:
if self.default_delay:
self.Delay(self.default_delay) | Implement default delay mechanism. | Below is the the instruction that describes the task:
### Input:
Implement default delay mechanism.
### Response:
def _delay(self, ms):
"""Implement default delay mechanism.
"""
if ms:
self.Delay(ms)
else:
if self.default_delay:
self.Delay(self.default_delay) |
def plot(self,bins=10,facecolor='0.5',plot_cols=None,
filename="ensemble.pdf",func_dict = None,
**kwargs):
"""plot ensemble histograms to multipage pdf
Parameters
----------
bins : int
number of bins
facecolor : str
color
plot_cols : list of str
subset of ensemble columns to plot. If None, all are plotted.
Default is None
filename : str
pdf filename. Default is "ensemble.pdf"
func_dict : dict
a dict of functions to apply to specific columns (e.g., np.log10)
**kwargs : dict
keyword args to pass to plot_utils.ensemble_helper()
Returns
-------
None
"""
ensemble_helper(self,bins=bins,facecolor=facecolor,plot_cols=plot_cols,
filename=filename) | plot ensemble histograms to multipage pdf
Parameters
----------
bins : int
number of bins
facecolor : str
color
plot_cols : list of str
subset of ensemble columns to plot. If None, all are plotted.
Default is None
filename : str
pdf filename. Default is "ensemble.pdf"
func_dict : dict
a dict of functions to apply to specific columns (e.g., np.log10)
**kwargs : dict
keyword args to pass to plot_utils.ensemble_helper()
Returns
-------
None | Below is the the instruction that describes the task:
### Input:
plot ensemble histograms to multipage pdf
Parameters
----------
bins : int
number of bins
facecolor : str
color
plot_cols : list of str
subset of ensemble columns to plot. If None, all are plotted.
Default is None
filename : str
pdf filename. Default is "ensemble.pdf"
func_dict : dict
a dict of functions to apply to specific columns (e.g., np.log10)
**kwargs : dict
keyword args to pass to plot_utils.ensemble_helper()
Returns
-------
None
### Response:
def plot(self,bins=10,facecolor='0.5',plot_cols=None,
filename="ensemble.pdf",func_dict = None,
**kwargs):
"""plot ensemble histograms to multipage pdf
Parameters
----------
bins : int
number of bins
facecolor : str
color
plot_cols : list of str
subset of ensemble columns to plot. If None, all are plotted.
Default is None
filename : str
pdf filename. Default is "ensemble.pdf"
func_dict : dict
a dict of functions to apply to specific columns (e.g., np.log10)
**kwargs : dict
keyword args to pass to plot_utils.ensemble_helper()
Returns
-------
None
"""
ensemble_helper(self,bins=bins,facecolor=facecolor,plot_cols=plot_cols,
filename=filename) |
def update_image_location(self, timeline_json):
"""Update the image location."""
if not timeline_json:
return False
# If we get a list of objects back (likely)
# then we just want the first one as it should be the "newest"
if isinstance(timeline_json, (tuple, list)):
timeline_json = timeline_json[0]
# Verify that the event code is of the "CAPTURE IMAGE" event
event_code = timeline_json.get('event_code')
if event_code != TIMELINE.CAPTURE_IMAGE['event_code']:
raise AbodeException((ERROR.CAM_TIMELINE_EVENT_INVALID))
# The timeline response has an entry for "file_path" that acts as the
# location of the image within the Abode servers.
file_path = timeline_json.get('file_path')
if not file_path:
raise AbodeException((ERROR.CAM_IMAGE_REFRESH_NO_FILE))
# Perform a "head" request for the image and look for a
# 302 Found response
url = CONST.BASE_URL + file_path
response = self._abode.send_request("head", url)
if response.status_code != 302:
            _LOGGER.warning("Unexpected response code %s with body: %s",
str(response.status_code), response.text)
raise AbodeException((ERROR.CAM_IMAGE_UNEXPECTED_RESPONSE))
# The response should have a location header that is the actual
# location of the image stored on AWS
location = response.headers.get('location')
if not location:
raise AbodeException((ERROR.CAM_IMAGE_NO_LOCATION_HEADER))
self._image_url = location
return True | Update the image location. | Below is the the instruction that describes the task:
### Input:
Update the image location.
### Response:
def update_image_location(self, timeline_json):
"""Update the image location."""
if not timeline_json:
return False
# If we get a list of objects back (likely)
# then we just want the first one as it should be the "newest"
if isinstance(timeline_json, (tuple, list)):
timeline_json = timeline_json[0]
# Verify that the event code is of the "CAPTURE IMAGE" event
event_code = timeline_json.get('event_code')
if event_code != TIMELINE.CAPTURE_IMAGE['event_code']:
raise AbodeException((ERROR.CAM_TIMELINE_EVENT_INVALID))
# The timeline response has an entry for "file_path" that acts as the
# location of the image within the Abode servers.
file_path = timeline_json.get('file_path')
if not file_path:
raise AbodeException((ERROR.CAM_IMAGE_REFRESH_NO_FILE))
# Perform a "head" request for the image and look for a
# 302 Found response
url = CONST.BASE_URL + file_path
response = self._abode.send_request("head", url)
if response.status_code != 302:
            _LOGGER.warning("Unexpected response code %s with body: %s",
str(response.status_code), response.text)
raise AbodeException((ERROR.CAM_IMAGE_UNEXPECTED_RESPONSE))
# The response should have a location header that is the actual
# location of the image stored on AWS
location = response.headers.get('location')
if not location:
raise AbodeException((ERROR.CAM_IMAGE_NO_LOCATION_HEADER))
self._image_url = location
return True |
def ggpht_s1600_extender(pipeline_index,
finder_image_urls,
extender_image_urls=[],
*args, **kwargs):
"""
Example:
http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s640/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg
to
http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s1600/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg
"""
now_extender_image_urls = []
search_re = re.compile(r'/s\d+/', re.IGNORECASE)
for image_url in finder_image_urls:
if 'ggpht.com/' in image_url.lower():
if search_re.search(image_url):
extender_image_url = search_re.sub('/s1600/', image_url)
now_extender_image_urls.append(extender_image_url)
output = {}
output['extender_image_urls'] = extender_image_urls + now_extender_image_urls
return output | Example:
http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s640/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg
to
http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s1600/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg | Below is the the instruction that describes the task:
### Input:
Example:
http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s640/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg
to
http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s1600/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg
### Response:
def ggpht_s1600_extender(pipeline_index,
finder_image_urls,
extender_image_urls=[],
*args, **kwargs):
"""
Example:
http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s640/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg
to
http://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s1600/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg
"""
now_extender_image_urls = []
search_re = re.compile(r'/s\d+/', re.IGNORECASE)
for image_url in finder_image_urls:
if 'ggpht.com/' in image_url.lower():
if search_re.search(image_url):
extender_image_url = search_re.sub('/s1600/', image_url)
now_extender_image_urls.append(extender_image_url)
output = {}
output['extender_image_urls'] = extender_image_urls + now_extender_image_urls
return output |
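A usage sketch with an illustrative pipeline index and URL:
finder_urls = ['http://lh4.ggpht.com/-abc/XYZ/s640/photo.jpg']   # hypothetical URL
result = ggpht_s1600_extender(0, finder_urls)
print(result['extender_image_urls'])
# ['http://lh4.ggpht.com/-abc/XYZ/s1600/photo.jpg']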
def get_tag(self, tag_name, **kwargs):
"""get a tag by name
Args:
tag_name (string): name of tag to get
Returns:
dictionary of the response
"""
return self._get_object_by_name(self._TAG_ENDPOINT_SUFFIX,
tag_name,
**kwargs) | get a tag by name
Args:
tag_name (string): name of tag to get
Returns:
dictionary of the response | Below is the the instruction that describes the task:
### Input:
get a tag by name
Args:
tag_name (string): name of tag to get
Returns:
dictionary of the response
### Response:
def get_tag(self, tag_name, **kwargs):
"""get a tag by name
Args:
tag_name (string): name of tag to get
Returns:
dictionary of the response
"""
return self._get_object_by_name(self._TAG_ENDPOINT_SUFFIX,
tag_name,
**kwargs) |
def from_email(self, value):
"""The email address of the sender
:param value: The email address of the sender
:type value: From, str, tuple
"""
if isinstance(value, str):
value = From(value, None)
if isinstance(value, tuple):
value = From(value[0], value[1])
self._from_email = value | The email address of the sender
:param value: The email address of the sender
:type value: From, str, tuple | Below is the the instruction that describes the task:
### Input:
The email address of the sender
:param value: The email address of the sender
:type value: From, str, tuple
### Response:
def from_email(self, value):
"""The email address of the sender
:param value: The email address of the sender
:type value: From, str, tuple
"""
if isinstance(value, str):
value = From(value, None)
if isinstance(value, tuple):
value = From(value[0], value[1])
self._from_email = value |
def Add(self, other):
"""Returns a copy of this set with a new element added."""
new_descriptors = []
for desc in self.descriptors + other.descriptors:
if desc not in new_descriptors:
new_descriptors.append(desc)
return TypeDescriptorSet(*new_descriptors) | Returns a copy of this set with a new element added. | Below is the the instruction that describes the task:
### Input:
Returns a copy of this set with a new element added.
### Response:
def Add(self, other):
"""Returns a copy of this set with a new element added."""
new_descriptors = []
for desc in self.descriptors + other.descriptors:
if desc not in new_descriptors:
new_descriptors.append(desc)
return TypeDescriptorSet(*new_descriptors) |
def choose_tasks(self, stream_id, values):
"""Choose tasks for a given stream_id and values and Returns a list of target tasks"""
if stream_id not in self.targets:
return []
ret = []
for target in self.targets[stream_id]:
ret.extend(target.choose_tasks(values))
return ret | Choose tasks for a given stream_id and values and Returns a list of target tasks | Below is the the instruction that describes the task:
### Input:
Choose tasks for a given stream_id and values and Returns a list of target tasks
### Response:
def choose_tasks(self, stream_id, values):
"""Choose tasks for a given stream_id and values and Returns a list of target tasks"""
if stream_id not in self.targets:
return []
ret = []
for target in self.targets[stream_id]:
ret.extend(target.choose_tasks(values))
return ret |
def _average_called_depth(in_file):
"""Retrieve the average depth of called reads in the provided VCF.
"""
import cyvcf2
depths = []
for rec in cyvcf2.VCF(str(in_file)):
d = rec.INFO.get("DP")
if d is not None:
depths.append(int(d))
if len(depths) > 0:
return int(math.ceil(numpy.mean(depths)))
else:
return 0 | Retrieve the average depth of called reads in the provided VCF. | Below is the the instruction that describes the task:
### Input:
Retrieve the average depth of called reads in the provided VCF.
### Response:
def _average_called_depth(in_file):
"""Retrieve the average depth of called reads in the provided VCF.
"""
import cyvcf2
depths = []
for rec in cyvcf2.VCF(str(in_file)):
d = rec.INFO.get("DP")
if d is not None:
depths.append(int(d))
if len(depths) > 0:
return int(math.ceil(numpy.mean(depths)))
else:
return 0 |
def find_files(path, filter="*.md"):
""" Finds files with an (optional) given extension in a given path. """
if os.path.isfile(path):
return [path]
if os.path.isdir(path):
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, filter):
matches.append(os.path.join(root, filename))
return matches | Finds files with an (optional) given extension in a given path. | Below is the the instruction that describes the task:
### Input:
Finds files with an (optional) given extension in a given path.
### Response:
def find_files(path, filter="*.md"):
""" Finds files with an (optional) given extension in a given path. """
if os.path.isfile(path):
return [path]
if os.path.isdir(path):
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, filter):
matches.append(os.path.join(root, filename))
return matches |
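For instance (paths are illustrative):
for md_file in find_files('docs/', filter='*.md'):         # every Markdown file under docs/
    print(md_file)
find_files('README.md')                                    # a plain file comes back as ['README.md']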
def upload_dict(s3_conn, s3_prefix, data_to_sync):
"""Syncs a dictionary to an S3 bucket, serializing each value in the
dictionary as a JSON file with the key as its name.
Args:
s3_conn: (boto.s3.connection) an s3 connection
s3_prefix: (str) the destination prefix
data_to_sync: (dict)
"""
bucket_name, prefix = split_s3_path(s3_prefix)
bucket = s3_conn.get_bucket(bucket_name)
for key, value in data_to_sync.items():
full_name = '{}/{}.json'.format(prefix, key)
s3_key = boto.s3.key.Key(
bucket=bucket,
name=full_name
)
logging.info('uploading key %s', full_name)
s3_key.set_contents_from_string(json.dumps(value)) | Syncs a dictionary to an S3 bucket, serializing each value in the
dictionary as a JSON file with the key as its name.
Args:
s3_conn: (boto.s3.connection) an s3 connection
s3_prefix: (str) the destination prefix
data_to_sync: (dict) | Below is the the instruction that describes the task:
### Input:
Syncs a dictionary to an S3 bucket, serializing each value in the
dictionary as a JSON file with the key as its name.
Args:
s3_conn: (boto.s3.connection) an s3 connection
s3_prefix: (str) the destination prefix
data_to_sync: (dict)
### Response:
def upload_dict(s3_conn, s3_prefix, data_to_sync):
"""Syncs a dictionary to an S3 bucket, serializing each value in the
dictionary as a JSON file with the key as its name.
Args:
s3_conn: (boto.s3.connection) an s3 connection
s3_prefix: (str) the destination prefix
data_to_sync: (dict)
"""
bucket_name, prefix = split_s3_path(s3_prefix)
bucket = s3_conn.get_bucket(bucket_name)
for key, value in data_to_sync.items():
full_name = '{}/{}.json'.format(prefix, key)
s3_key = boto.s3.key.Key(
bucket=bucket,
name=full_name
)
logging.info('uploading key %s', full_name)
s3_key.set_contents_from_string(json.dumps(value)) |
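A call sketch, assuming the legacy boto 2 API this function targets; the bucket and prefix are illustrative.
import boto
s3_conn = boto.connect_s3()
upload_dict(s3_conn,
            's3://my-bucket/lookups',                       # hypothetical destination prefix
            {'soc_codes': {'11-1011': 'Chief Executives'}})
# -> writes s3://my-bucket/lookups/soc_codes.json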
def _create_base_string(method, base, params):
"""
Returns base string for HMAC-SHA1 signature as specified in:
http://oauth.net/core/1.0a/#rfc.section.9.1.3.
"""
normalized_qs = _normalize_params(params)
return _join_by_ampersand(method, base, normalized_qs) | Returns base string for HMAC-SHA1 signature as specified in:
http://oauth.net/core/1.0a/#rfc.section.9.1.3. | Below is the the instruction that describes the task:
### Input:
Returns base string for HMAC-SHA1 signature as specified in:
http://oauth.net/core/1.0a/#rfc.section.9.1.3.
### Response:
def _create_base_string(method, base, params):
"""
Returns base string for HMAC-SHA1 signature as specified in:
http://oauth.net/core/1.0a/#rfc.section.9.1.3.
"""
normalized_qs = _normalize_params(params)
return _join_by_ampersand(method, base, normalized_qs) |
def get_preprocessing_queue(preprocessing_list):
"""Get preprocessing queue from a list of dictionaries
>>> l = [{'RemoveDuplicateTime': None},
{'ScaleAndShift': [{'center': True}]}
]
>>> get_preprocessing_queue(l)
[RemoveDuplicateTime, ScaleAndShift
- center: True
- max_width: 1
- max_height: 1
]
"""
return utils.get_objectlist(preprocessing_list,
config_key='preprocessing',
module=sys.modules[__name__]) | Get preprocessing queue from a list of dictionaries
>>> l = [{'RemoveDuplicateTime': None},
{'ScaleAndShift': [{'center': True}]}
]
>>> get_preprocessing_queue(l)
[RemoveDuplicateTime, ScaleAndShift
- center: True
- max_width: 1
- max_height: 1
] | Below is the the instruction that describes the task:
### Input:
Get preprocessing queue from a list of dictionaries
>>> l = [{'RemoveDuplicateTime': None},
{'ScaleAndShift': [{'center': True}]}
]
>>> get_preprocessing_queue(l)
[RemoveDuplicateTime, ScaleAndShift
- center: True
- max_width: 1
- max_height: 1
]
### Response:
def get_preprocessing_queue(preprocessing_list):
"""Get preprocessing queue from a list of dictionaries
>>> l = [{'RemoveDuplicateTime': None},
{'ScaleAndShift': [{'center': True}]}
]
>>> get_preprocessing_queue(l)
[RemoveDuplicateTime, ScaleAndShift
- center: True
- max_width: 1
- max_height: 1
]
"""
return utils.get_objectlist(preprocessing_list,
config_key='preprocessing',
module=sys.modules[__name__]) |
def add_chain(self, group_name, component_map):
"""
Adds the component chain to ``group_name`` in the fast5.
These are added as attributes to the group.
:param group_name: The group name you wish to add chaining data to,
e.g. ``Test_000``
:param component_map: The set of components and corresponding
group names or group paths that contribute data to the analysis.
If group names are provided, these will be converted into group
paths.
If ``Test_000`` uses data from the results of
``first_component`` stored at ``Analyses/First_000/``
the component_map could be ``{'first_component': 'First_000'}`` or
``{'first_component': 'Analyses/First_000'}``.
"""
self.assert_writeable()
for component, path in component_map.items():
if not path.startswith('Analyses/'):
path = 'Analyses/{}'.format(path)
component_map[component] = path
self.add_analysis_attributes(group_name, component_map) | Adds the component chain to ``group_name`` in the fast5.
These are added as attributes to the group.
:param group_name: The group name you wish to add chaining data to,
e.g. ``Test_000``
:param component_map: The set of components and corresponding
group names or group paths that contribute data to the analysis.
If group names are provided, these will be converted into group
paths.
If ``Test_000`` uses data from the results of
``first_component`` stored at ``Analyses/First_000/``
the component_map could be ``{'first_component': 'First_000'}`` or
``{'first_component': 'Analyses/First_000'}``. | Below is the the instruction that describes the task:
### Input:
Adds the component chain to ``group_name`` in the fast5.
These are added as attributes to the group.
:param group_name: The group name you wish to add chaining data to,
e.g. ``Test_000``
:param component_map: The set of components and corresponding
group names or group paths that contribute data to the analysis.
If group names are provided, these will be converted into group
paths.
If ``Test_000`` uses data from the results of
``first_component`` stored at ``Analyses/First_000/``
the component_map could be ``{'first_component': 'First_000'}`` or
``{'first_component': 'Analyses/First_000'}``.
### Response:
def add_chain(self, group_name, component_map):
"""
Adds the component chain to ``group_name`` in the fast5.
These are added as attributes to the group.
:param group_name: The group name you wish to add chaining data to,
e.g. ``Test_000``
:param component_map: The set of components and corresponding
group names or group paths that contribute data to the analysis.
If group names are provided, these will be converted into group
paths.
If ``Test_000`` uses data from the results of
``first_component`` stored at ``Analyses/First_000/``
the component_map could be ``{'first_component': 'First_000'}`` or
``{'first_component': 'Analyses/First_000'}``.
"""
self.assert_writeable()
for component, path in component_map.items():
if not path.startswith('Analyses/'):
path = 'Analyses/{}'.format(path)
component_map[component] = path
self.add_analysis_attributes(group_name, component_map) |
def alarm_disable(self):
"""
disable the alarm
"""
log.debug("alarm => disable...")
params = {"enabled": False}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) | disable the alarm | Below is the the instruction that describes the task:
### Input:
disable the alarm
### Response:
def alarm_disable(self):
"""
disable the alarm
"""
log.debug("alarm => disable...")
params = {"enabled": False}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) |
def length(self):
"""
The total discretized length of every entity.
Returns
--------
length: float, summed length of every entity
"""
length = float(sum(i.length(self.vertices)
for i in self.entities))
return length | The total discretized length of every entity.
Returns
--------
length: float, summed length of every entity | Below is the the instruction that describes the task:
### Input:
The total discretized length of every entity.
Returns
--------
length: float, summed length of every entity
### Response:
def length(self):
"""
The total discretized length of every entity.
Returns
--------
length: float, summed length of every entity
"""
length = float(sum(i.length(self.vertices)
for i in self.entities))
return length |
def log_metrics(metrics, summ_writer, log_prefix, step, history=None):
"""Log metrics to summary writer and history."""
rjust_len = max([len(name) for name in metrics])
for name, value in six.iteritems(metrics):
step_log(step, "%s %s | % .8f" % (
log_prefix.ljust(5), name.rjust(rjust_len), value))
full_name = "metrics/" + name
if history:
history.append(log_prefix, full_name, step, value)
if summ_writer:
summ_writer.scalar(full_name, value, step) | Log metrics to summary writer and history. | Below is the the instruction that describes the task:
### Input:
Log metrics to summary writer and history.
### Response:
def log_metrics(metrics, summ_writer, log_prefix, step, history=None):
"""Log metrics to summary writer and history."""
rjust_len = max([len(name) for name in metrics])
for name, value in six.iteritems(metrics):
step_log(step, "%s %s | % .8f" % (
log_prefix.ljust(5), name.rjust(rjust_len), value))
full_name = "metrics/" + name
if history:
history.append(log_prefix, full_name, step, value)
if summ_writer:
summ_writer.scalar(full_name, value, step) |
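A minimal call sketch; both the summary writer and the history are optional and may be None:
log_metrics({'loss': 0.2134, 'accuracy': 0.9312},
            summ_writer=None, log_prefix='eval', step=1000)
# each metric is printed on its own right-aligned line via step_log()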
def encrypt(self, value, precision=None, r_value=None):
"""Encode and Paillier encrypt a real number *value*.
Args:
value: an int or float to be encrypted.
If int, it must satisfy abs(*value*) < :attr:`n`/3.
If float, it must satisfy abs(*value* / *precision*) <<
:attr:`n`/3
(i.e. if a float is near the limit then detectable
overflow may still occur)
precision (float): Passed to :meth:`EncodedNumber.encode`.
If *value* is a float then *precision* is the maximum
**absolute** error allowed when encoding *value*. Defaults
to encoding *value* exactly.
r_value (int): obfuscator for the ciphertext; by default (i.e.
if *r_value* is None), a random value is used.
Returns:
EncryptedNumber: An encryption of *value*.
Raises:
ValueError: if *value* is out of range or *precision* is so
high that *value* is rounded to zero.
"""
if isinstance(value, EncodedNumber):
encoding = value
else:
encoding = EncodedNumber.encode(self, value, precision)
return self.encrypt_encoded(encoding, r_value) | Encode and Paillier encrypt a real number *value*.
Args:
value: an int or float to be encrypted.
If int, it must satisfy abs(*value*) < :attr:`n`/3.
If float, it must satisfy abs(*value* / *precision*) <<
:attr:`n`/3
(i.e. if a float is near the limit then detectable
overflow may still occur)
precision (float): Passed to :meth:`EncodedNumber.encode`.
If *value* is a float then *precision* is the maximum
**absolute** error allowed when encoding *value*. Defaults
to encoding *value* exactly.
r_value (int): obfuscator for the ciphertext; by default (i.e.
if *r_value* is None), a random value is used.
Returns:
EncryptedNumber: An encryption of *value*.
Raises:
ValueError: if *value* is out of range or *precision* is so
high that *value* is rounded to zero. | Below is the the instruction that describes the task:
### Input:
Encode and Paillier encrypt a real number *value*.
Args:
value: an int or float to be encrypted.
If int, it must satisfy abs(*value*) < :attr:`n`/3.
If float, it must satisfy abs(*value* / *precision*) <<
:attr:`n`/3
(i.e. if a float is near the limit then detectable
overflow may still occur)
precision (float): Passed to :meth:`EncodedNumber.encode`.
If *value* is a float then *precision* is the maximum
**absolute** error allowed when encoding *value*. Defaults
to encoding *value* exactly.
r_value (int): obfuscator for the ciphertext; by default (i.e.
if *r_value* is None), a random value is used.
Returns:
EncryptedNumber: An encryption of *value*.
Raises:
ValueError: if *value* is out of range or *precision* is so
high that *value* is rounded to zero.
### Response:
def encrypt(self, value, precision=None, r_value=None):
"""Encode and Paillier encrypt a real number *value*.
Args:
value: an int or float to be encrypted.
If int, it must satisfy abs(*value*) < :attr:`n`/3.
If float, it must satisfy abs(*value* / *precision*) <<
:attr:`n`/3
(i.e. if a float is near the limit then detectable
overflow may still occur)
precision (float): Passed to :meth:`EncodedNumber.encode`.
If *value* is a float then *precision* is the maximum
**absolute** error allowed when encoding *value*. Defaults
to encoding *value* exactly.
r_value (int): obfuscator for the ciphertext; by default (i.e.
if *r_value* is None), a random value is used.
Returns:
EncryptedNumber: An encryption of *value*.
Raises:
ValueError: if *value* is out of range or *precision* is so
high that *value* is rounded to zero.
"""
if isinstance(value, EncodedNumber):
encoding = value
else:
encoding = EncodedNumber.encode(self, value, precision)
return self.encrypt_encoded(encoding, r_value) |
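This appears to match the python-paillier (phe) package; a usage sketch under that assumption:
from phe import paillier
public_key, private_key = paillier.generate_paillier_keypair()
enc_int = public_key.encrypt(42)                            # ints are encoded exactly
enc_float = public_key.encrypt(3.141592, precision=1e-5)    # floats to 1e-5 absolute error
assert private_key.decrypt(enc_int) == 42
assert abs(private_key.decrypt(enc_float) - 3.141592) < 1e-5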
def export(self, nidm_version, export_dir):
"""
Create prov graph.
"""
# Contrast Map entity
atts = (
(PROV['type'], NIDM_CONTRAST_MAP),
(NIDM_CONTRAST_NAME, self.name))
if not self.isderfrommap:
atts = atts + (
(NIDM_IN_COORDINATE_SPACE, self.coord_space.id),)
if self.label is not None:
atts = atts + (
(PROV['label'], self.label),)
if self.name is not None:
atts = atts + (
(NIDM_CONTRAST_NAME, self.name),)
# Parameter estimate entity
self.add_attributes(atts) | Create prov graph. | Below is the the instruction that describes the task:
### Input:
Create prov graph.
### Response:
def export(self, nidm_version, export_dir):
"""
Create prov graph.
"""
# Contrast Map entity
atts = (
(PROV['type'], NIDM_CONTRAST_MAP),
(NIDM_CONTRAST_NAME, self.name))
if not self.isderfrommap:
atts = atts + (
(NIDM_IN_COORDINATE_SPACE, self.coord_space.id),)
if self.label is not None:
atts = atts + (
(PROV['label'], self.label),)
if self.name is not None:
atts = atts + (
(NIDM_CONTRAST_NAME, self.name),)
# Parameter estimate entity
self.add_attributes(atts) |
def _insert_file(cursor, file, media_type):
"""Upsert the ``file`` and ``media_type`` into the files table.
Returns the ``fileid`` and ``sha1`` of the upserted file.
"""
resource_hash = _get_file_sha1(file)
cursor.execute("SELECT fileid FROM files WHERE sha1 = %s",
(resource_hash,))
try:
fileid = cursor.fetchone()[0]
except (IndexError, TypeError):
cursor.execute("INSERT INTO files (file, media_type) "
"VALUES (%s, %s)"
"RETURNING fileid",
(psycopg2.Binary(file.read()), media_type,))
fileid = cursor.fetchone()[0]
return fileid, resource_hash | Upsert the ``file`` and ``media_type`` into the files table.
Returns the ``fileid`` and ``sha1`` of the upserted file. | Below is the the instruction that describes the task:
### Input:
Upsert the ``file`` and ``media_type`` into the files table.
Returns the ``fileid`` and ``sha1`` of the upserted file.
### Response:
def _insert_file(cursor, file, media_type):
"""Upsert the ``file`` and ``media_type`` into the files table.
Returns the ``fileid`` and ``sha1`` of the upserted file.
"""
resource_hash = _get_file_sha1(file)
cursor.execute("SELECT fileid FROM files WHERE sha1 = %s",
(resource_hash,))
try:
fileid = cursor.fetchone()[0]
except (IndexError, TypeError):
cursor.execute("INSERT INTO files (file, media_type) "
"VALUES (%s, %s)"
"RETURNING fileid",
(psycopg2.Binary(file.read()), media_type,))
fileid = cursor.fetchone()[0]
return fileid, resource_hash |
def mchirp_compression(m1, m2, fmin, fmax, min_seglen=0.02, df_multiple=None):
"""Return the frequencies needed to compress a waveform with the given
chirp mass. This is based on the estimate in rough_time_estimate.
Parameters
----------
m1: float
mass of first component object in solar masses
m2: float
mass of second component object in solar masses
fmin : float
The starting frequency of the compressed waveform.
fmax : float
The ending frequency of the compressed waveform.
min_seglen : float
The inverse of this gives the maximum frequency step that is used.
df_multiple : {None, float}
Make the compressed sampling frequencies a multiple of the given value.
If None provided, the returned sample points can have any floating
point value.
Returns
-------
array
The frequencies at which to evaluate the compressed waveform.
"""
sample_points = []
f = fmin
while f < fmax:
if df_multiple is not None:
f = int(f/df_multiple)*df_multiple
sample_points.append(f)
f += 1.0 / rough_time_estimate(m1, m2, f, fudge_min=min_seglen)
# add the last point
if sample_points[-1] < fmax:
sample_points.append(fmax)
return numpy.array(sample_points) | Return the frequencies needed to compress a waveform with the given
chirp mass. This is based on the estimate in rough_time_estimate.
Parameters
----------
m1: float
mass of first component object in solar masses
m2: float
mass of second component object in solar masses
fmin : float
The starting frequency of the compressed waveform.
fmax : float
The ending frequency of the compressed waveform.
min_seglen : float
The inverse of this gives the maximum frequency step that is used.
df_multiple : {None, float}
Make the compressed sampling frequencies a multiple of the given value.
If None provided, the returned sample points can have any floating
point value.
Returns
-------
array
The frequencies at which to evaluate the compressed waveform. | Below is the the instruction that describes the task:
### Input:
Return the frequencies needed to compress a waveform with the given
chirp mass. This is based on the estimate in rough_time_estimate.
Parameters
----------
m1: float
mass of first component object in solar masses
m2: float
mass of second component object in solar masses
fmin : float
The starting frequency of the compressed waveform.
fmax : float
The ending frequency of the compressed waveform.
min_seglen : float
The inverse of this gives the maximum frequency step that is used.
df_multiple : {None, float}
Make the compressed sampling frequencies a multiple of the given value.
If None provided, the returned sample points can have any floating
point value.
Returns
-------
array
The frequencies at which to evaluate the compressed waveform.
### Response:
def mchirp_compression(m1, m2, fmin, fmax, min_seglen=0.02, df_multiple=None):
"""Return the frequencies needed to compress a waveform with the given
chirp mass. This is based on the estimate in rough_time_estimate.
Parameters
----------
m1: float
mass of first component object in solar masses
m2: float
mass of second component object in solar masses
fmin : float
The starting frequency of the compressed waveform.
fmax : float
The ending frequency of the compressed waveform.
min_seglen : float
The inverse of this gives the maximum frequency step that is used.
df_multiple : {None, float}
Make the compressed sampling frequencies a multiple of the given value.
If None provided, the returned sample points can have any floating
point value.
Returns
-------
array
The frequencies at which to evaluate the compressed waveform.
"""
sample_points = []
f = fmin
while f < fmax:
if df_multiple is not None:
f = int(f/df_multiple)*df_multiple
sample_points.append(f)
f += 1.0 / rough_time_estimate(m1, m2, f, fudge_min=min_seglen)
# add the last point
if sample_points[-1] < fmax:
sample_points.append(fmax)
return numpy.array(sample_points) |
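A call sketch for an equal-mass binary (all values illustrative):
freqs = mchirp_compression(1.4, 1.4, 20.0, 1024.0,
                           min_seglen=0.02, df_multiple=1.0 / 256)
print(freqs[0], freqs[-1], len(freqs))                      # starts at 20.0, ends at 1024.0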
def separator_width(self, value):
"""
Setter for **self.__separator_width** attribute.
:param value: Attribute value.
:type value: int
"""
if value is not None:
assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format("separator_width", value)
            assert value > 0, "'{0}' attribute: '{1}' must be strictly positive!".format("separator_width", value)
self.__separator_width = value | Setter for **self.__separator_width** attribute.
:param value: Attribute value.
:type value: int | Below is the the instruction that describes the task:
### Input:
Setter for **self.__separator_width** attribute.
:param value: Attribute value.
:type value: int
### Response:
def separator_width(self, value):
"""
Setter for **self.__separator_width** attribute.
:param value: Attribute value.
:type value: int
"""
if value is not None:
assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format("separator_width", value)
            assert value > 0, "'{0}' attribute: '{1}' must be strictly positive!".format("separator_width", value)
self.__separator_width = value |
def get_py_file_if_possible(pyc_name):
"""Try to retrieve a X.py file for a given X.py[c] file."""
if pyc_name.endswith(('.py', '.so', '.pyd')):
return pyc_name
assert pyc_name.endswith('.pyc')
non_compiled_file = pyc_name[:-1]
if os.path.exists(non_compiled_file):
return non_compiled_file
return pyc_name | Try to retrieve a X.py file for a given X.py[c] file. | Below is the the instruction that describes the task:
### Input:
Try to retrieve a X.py file for a given X.py[c] file.
### Response:
def get_py_file_if_possible(pyc_name):
"""Try to retrieve a X.py file for a given X.py[c] file."""
if pyc_name.endswith(('.py', '.so', '.pyd')):
return pyc_name
assert pyc_name.endswith('.pyc')
non_compiled_file = pyc_name[:-1]
if os.path.exists(non_compiled_file):
return non_compiled_file
return pyc_name |
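For example (paths are illustrative):
get_py_file_if_possible('/site-packages/pkg/module.pyc')    # -> .../module.py when it exists on disk
get_py_file_if_possible('/site-packages/pkg/_ext.so')       # -> returned unchanged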
def move_to(self, xpos, ypos):
"""
Move cursor to specified position
"""
self.stream.write(self.move(ypos, xpos)) | Move cursor to specified position | Below is the the instruction that describes the task:
### Input:
Move cursor to specified position
### Response:
def move_to(self, xpos, ypos):
"""
Move cursor to specified position
"""
self.stream.write(self.move(ypos, xpos)) |
def is_permitted(self, permission_s):
"""
:param permission_s: a collection of 1..N permissions
:type permission_s: List of authz_abcs.Permission object(s) or String(s)
:returns: a List of tuple(s), containing the authz_abcs.Permission and a
Boolean indicating whether the permission is granted
"""
if self.authorized:
self.check_security_manager()
return (self.security_manager.is_permitted(
self.identifiers, permission_s))
        msg = 'Cannot check permission when user isn\'t authenticated or remembered'
raise ValueError(msg) | :param permission_s: a collection of 1..N permissions
:type permission_s: List of authz_abcs.Permission object(s) or String(s)
:returns: a List of tuple(s), containing the authz_abcs.Permission and a
Boolean indicating whether the permission is granted | Below is the the instruction that describes the task:
### Input:
:param permission_s: a collection of 1..N permissions
:type permission_s: List of authz_abcs.Permission object(s) or String(s)
:returns: a List of tuple(s), containing the authz_abcs.Permission and a
Boolean indicating whether the permission is granted
### Response:
def is_permitted(self, permission_s):
"""
:param permission_s: a collection of 1..N permissions
:type permission_s: List of authz_abcs.Permission object(s) or String(s)
:returns: a List of tuple(s), containing the authz_abcs.Permission and a
Boolean indicating whether the permission is granted
"""
if self.authorized:
self.check_security_manager()
return (self.security_manager.is_permitted(
self.identifiers, permission_s))
        msg = 'Cannot check permission when user isn\'t authenticated or remembered'
raise ValueError(msg) |
def get_version(self) -> str:
"""
Open the file referenced in this object, and scrape the version.
:return:
The version as a string, an empty string if there is no match
to the magic_line, or any file exception messages encountered.
"""
try:
f = open(self.file_path, 'r')
lines = f.readlines()
f.close()
except Exception as e:
return str(e)
result = ''
for line in lines:
if self.magic_line in line:
start = len(self.magic_line)
end = len(line) - self.strip_end_chars
result = line[start:end]
break
return result | Open the file referenced in this object, and scrape the version.
:return:
The version as a string, an empty string if there is no match
to the magic_line, or any file exception messages encountered. | Below is the the instruction that describes the task:
### Input:
Open the file referenced in this object, and scrape the version.
:return:
The version as a string, an empty string if there is no match
to the magic_line, or any file exception messages encountered.
### Response:
def get_version(self) -> str:
"""
Open the file referenced in this object, and scrape the version.
:return:
The version as a string, an empty string if there is no match
to the magic_line, or any file exception messages encountered.
"""
try:
f = open(self.file_path, 'r')
lines = f.readlines()
f.close()
except Exception as e:
return str(e)
result = ''
for line in lines:
if self.magic_line in line:
start = len(self.magic_line)
end = len(line) - self.strip_end_chars
result = line[start:end]
break
return result |
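Because the method only reads instance attributes, a stand-in object can drive it for illustration; SimpleNamespace here is purely hypothetical, and the real owning class is not shown in this entry.
import types
probe = types.SimpleNamespace(
    file_path='setup_constants.py',         # illustrative file containing __version__ = '1.2.3'
    magic_line="__version__ = '",
    strip_end_chars=2,                      # drop the closing quote and trailing newline
)
print(get_version(probe))                   # -> 1.2.3, or an exception message if the file is missing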
def list_member_groups(self, member_id):
''' a method to retrieve a list of meetup groups member belongs to
:param member_id: integer with meetup member id
:return: dictionary with list of group details in [json]
group_details = self.objects.group_profile.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#get
title = '%s.list_member_groups' % self.__class__.__name__
# validate inputs
input_fields = {
'member_id': member_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct member id
if not member_id:
raise IndexError('%s requires member id argument.' % title)
# compose request fields
url = '%s/members/%s' % (self.endpoint, str(member_id))
params = {
'fields': 'memberships'
}
# send requests
response_details = self._get_request(url, params=params)
# construct method output dictionary
member_groups = {
'json': []
}
for key, value in response_details.items():
if not key == 'json':
member_groups[key] = value
# parse response
if response_details['json']:
if 'memberships' in response_details['json'].keys():
for group in response_details['json']['memberships']['member']:
member_groups['json'].append(self.objects.group_profile.ingest(**group))
return member_groups | a method to retrieve a list of meetup groups a member belongs to
:param member_id: integer with meetup member id
:return: dictionary with list of group details in [json]
group_details = self.objects.group_profile.schema | Below is the instruction that describes the task:
### Input:
a method to retrieve a list of meetup groups a member belongs to
:param member_id: integer with meetup member id
:return: dictionary with list of group details in [json]
group_details = self.objects.group_profile.schema
### Response:
def list_member_groups(self, member_id):
''' a method to retrieve a list of meetup groups a member belongs to
:param member_id: integer with meetup member id
:return: dictionary with list of group details in [json]
group_details = self.objects.group_profile.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#get
title = '%s.list_member_groups' % self.__class__.__name__
# validate inputs
input_fields = {
'member_id': member_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct member id
if not member_id:
raise IndexError('%s requires member id argument.' % title)
# compose request fields
url = '%s/members/%s' % (self.endpoint, str(member_id))
params = {
'fields': 'memberships'
}
# send requests
response_details = self._get_request(url, params=params)
# construct method output dictionary
member_groups = {
'json': []
}
for key, value in response_details.items():
if not key == 'json':
member_groups[key] = value
# parse response
if response_details['json']:
if 'memberships' in response_details['json'].keys():
for group in response_details['json']['memberships']['member']:
member_groups['json'].append(self.objects.group_profile.ingest(**group))
return member_groups |
def collection(self):
"""Return the redis-collection instance."""
if not self.include_collections:
return None
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'redislite_collection'):
ctx.redislite_collection = Collection(redis=self.connection)
return ctx.redislite_collection | Return the redis-collection instance. | Below is the instruction that describes the task:
### Input:
Return the redis-collection instance.
### Response:
def collection(self):
"""Return the redis-collection instance."""
if not self.include_collections:
return None
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'redislite_collection'):
ctx.redislite_collection = Collection(redis=self.connection)
return ctx.redislite_collection |
def get_calls(self, job_name):
'''
Reads file by given name and returns CallEdge array
'''
config = self.file_index.get_by_name(job_name).yaml
calls = self.get_calls_from_dict(config, from_name=job_name)
return calls | Reads file by given name and returns CallEdge array | Below is the instruction that describes the task:
### Input:
Reads file by given name and returns CallEdge array
### Response:
def get_calls(self, job_name):
'''
Reads file by given name and returns CallEdge array
'''
config = self.file_index.get_by_name(job_name).yaml
calls = self.get_calls_from_dict(config, from_name=job_name)
return calls |
def generate_init(self, dst, out_format, vms_to_include, filters=None):
"""
Generate an init file which represents this env and can
be used with the images created by self.export_vms
Args:
dst (str): path and name of the new init file
out_format (plugins.output.OutFormatPlugin):
formatter for the output (the default is yaml)
filters (list): list of paths to keys that should be removed from
the init file
vms_to_include (list of :class:lago.plugins.vm.VMPlugin):
list of vms to include in the init file
Returns:
None
"""
# todo: move this logic to PrefixExportManager
with LogTask('Exporting init file to: {}'.format(dst)):
# Set the default formatter to yaml. The default formatter
# doesn't generate a valid init file, so it's not reasonable
# to use it
if isinstance(out_format, plugins.output.DefaultOutFormatPlugin):
out_format = plugins.output.YAMLOutFormatPlugin()
if not filters:
filters = [
'domains/*/disks/*/metadata',
'domains/*/metadata/deploy-scripts', 'domains/*/snapshots',
'domains/*/name', 'nets/*/mapping', 'nets/*/dns_records'
]
spec = self.get_env_spec(filters)
temp = {}
for vm in vms_to_include:
temp[vm.name()] = spec['domains'][vm.name()]
spec['domains'] = temp
for _, domain in spec['domains'].viewitems():
domain['disks'] = [
d for d in domain['disks'] if not d.get('skip-export')
]
for disk in domain['disks']:
if disk['type'] == 'template':
disk['template_type'] = 'qcow2'
elif disk['type'] == 'empty':
disk['type'] = 'file'
disk['make_a_copy'] = 'True'
# Insert the relative path to the exported images
disk['path'] = os.path.join(
'$LAGO_INITFILE_PATH', os.path.basename(disk['path'])
)
with open(dst, 'wt') as f:
if isinstance(out_format, plugins.output.YAMLOutFormatPlugin):
# Dump the yaml file without type tags
# TODO: Allow passing parameters to output plugins
f.write(yaml.safe_dump(spec))
else:
f.write(out_format.format(spec)) | Generate an init file which represents this env and can
be used with the images created by self.export_vms
Args:
dst (str): path and name of the new init file
out_format (plugins.output.OutFormatPlugin):
formatter for the output (the default is yaml)
filters (list): list of paths to keys that should be removed from
the init file
vms_to_include (list of :class:lago.plugins.vm.VMPlugin):
list of vms to include in the init file
Returns:
None | Below is the instruction that describes the task:
### Input:
Generate an init file which represents this env and can
be used with the images created by self.export_vms
Args:
dst (str): path and name of the new init file
out_format (plugins.output.OutFormatPlugin):
formatter for the output (the default is yaml)
filters (list): list of paths to keys that should be removed from
the init file
vms_to_include (list of :class:lago.plugins.vm.VMPlugin):
list of vms to include in the init file
Returns:
None
### Response:
def generate_init(self, dst, out_format, vms_to_include, filters=None):
"""
Generate an init file which represents this env and can
be used with the images created by self.export_vms
Args:
dst (str): path and name of the new init file
out_format (plugins.output.OutFormatPlugin):
formatter for the output (the default is yaml)
filters (list): list of paths to keys that should be removed from
the init file
vms_to_include (list of :class:lago.plugins.vm.VMPlugin):
list of vms to include in the init file
Returns:
None
"""
# todo: move this logic to PrefixExportManager
with LogTask('Exporting init file to: {}'.format(dst)):
# Set the default formatter to yaml. The default formatter
# doesn't generate a valid init file, so it's not reasonable
# to use it
if isinstance(out_format, plugins.output.DefaultOutFormatPlugin):
out_format = plugins.output.YAMLOutFormatPlugin()
if not filters:
filters = [
'domains/*/disks/*/metadata',
'domains/*/metadata/deploy-scripts', 'domains/*/snapshots',
'domains/*/name', 'nets/*/mapping', 'nets/*/dns_records'
]
spec = self.get_env_spec(filters)
temp = {}
for vm in vms_to_include:
temp[vm.name()] = spec['domains'][vm.name()]
spec['domains'] = temp
for _, domain in spec['domains'].viewitems():
domain['disks'] = [
d for d in domain['disks'] if not d.get('skip-export')
]
for disk in domain['disks']:
if disk['type'] == 'template':
disk['template_type'] = 'qcow2'
elif disk['type'] == 'empty':
disk['type'] = 'file'
disk['make_a_copy'] = 'True'
# Insert the relative path to the exported images
disk['path'] = os.path.join(
'$LAGO_INITFILE_PATH', os.path.basename(disk['path'])
)
with open(dst, 'wt') as f:
if isinstance(out_format, plugins.output.YAMLOutFormatPlugin):
# Dump the yaml file without type tags
# TODO: Allow passing parameters to output plugins
f.write(yaml.safe_dump(spec))
else:
f.write(out_format.format(spec)) |
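A hedged sketch of just the per-disk rewrite performed above: disks flagged skip-export are dropped and the remaining paths are pointed at $LAGO_INITFILE_PATH. The disk entries are made-up examples; the real spec comes from get_env_spec().

import os

disks = [
    {'path': '/var/lib/lago/vm0_root.qcow2', 'type': 'template'},
    {'path': '/var/lib/lago/scratch.img', 'type': 'empty', 'skip-export': True},
]
kept = [d for d in disks if not d.get('skip-export')]
for disk in kept:
    disk['path'] = os.path.join('$LAGO_INITFILE_PATH', os.path.basename(disk['path']))
# kept[0]['path'] == '$LAGO_INITFILE_PATH/vm0_root.qcow2'; the scratch disk is excluded.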
def put(self, pid, record, key):
"""Handle the file rename through the PUT deposit file.
Permission required: `update_permission_factory`.
:param pid: Pid object (from url).
:param record: Record object resolved from the pid.
:param key: Unique identifier for the file in the deposit.
"""
try:
data = json.loads(request.data.decode('utf-8'))
new_key = data['filename']
except KeyError:
raise WrongFile()
new_key_secure = secure_filename(new_key)
if not new_key_secure or new_key != new_key_secure:
raise WrongFile()
try:
obj = record.files.rename(str(key), new_key_secure)
except KeyError:
abort(404)
record.commit()
db.session.commit()
return self.make_response(obj=obj, pid=pid, record=record) | Handle the file rename through the PUT deposit file.
Permission required: `update_permission_factory`.
:param pid: Pid object (from url).
:param record: Record object resolved from the pid.
:param key: Unique identifier for the file in the deposit. | Below is the instruction that describes the task:
### Input:
Handle the file rename through the PUT deposit file.
Permission required: `update_permission_factory`.
:param pid: Pid object (from url).
:param record: Record object resolved from the pid.
:param key: Unique identifier for the file in the deposit.
### Response:
def put(self, pid, record, key):
"""Handle the file rename through the PUT deposit file.
Permission required: `update_permission_factory`.
:param pid: Pid object (from url).
:param record: Record object resolved from the pid.
:param key: Unique identifier for the file in the deposit.
"""
try:
data = json.loads(request.data.decode('utf-8'))
new_key = data['filename']
except KeyError:
raise WrongFile()
new_key_secure = secure_filename(new_key)
if not new_key_secure or new_key != new_key_secure:
raise WrongFile()
try:
obj = record.files.rename(str(key), new_key_secure)
except KeyError:
abort(404)
record.commit()
db.session.commit()
return self.make_response(obj=obj, pid=pid, record=record) |
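The rename above only succeeds when the requested name survives werkzeug's secure_filename unchanged; a short demonstration of what that check rejects (assuming werkzeug is installed, as the handler implies).

from werkzeug.utils import secure_filename

for name in ['report.pdf', '../../etc/passwd', 'notes?.txt']:
    safe = secure_filename(name)
    accepted = bool(safe) and safe == name
    print(name, '->', safe, 'accepted' if accepted else 'rejected')
# 'report.pdf' passes; the other two are sanitised to different names, so the handler raises WrongFile().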
def query_handler(cls, identifier, role=None):
'''
Lookup the handler for the given identifier (descriptor_type) and role.
In case it was not found return the default.
Logic goes as follows:
- First try to find exact match for identifier and role,
- Try to find match for identifier and role=None,
- Return default handler.
'''
key = cls._key_for(identifier, role)
handler = cls._handlers.get(key, None)
if handler is None:
default_for_identifier = cls._key_for(identifier, None)
handler = cls._handlers.get(default_for_identifier,
cls._handlers['_default'])
return handler | Lookup the handler for the given identifier (descriptor_type) and role.
In case it was not found return the default.
Logic goes as follows:
- First try to find exact match for identifier and role,
- Try to find match for identifier and role=None,
- Return default handler. | Below is the instruction that describes the task:
### Input:
Lookup the handler for the given identifier (descriptor_type) and role.
In case it was not found return the default.
Logic goes as follows:
- First try to find exact match for identifier and role,
- Try to find match for identifier and role=None,
- Return default handler.
### Response:
def query_handler(cls, identifier, role=None):
'''
Lookup the handler for the given identifier (descriptor_type) and role.
In case it was not found return the default.
Logic goes as follows:
- First try to find exact match for identifier and role,
- Try to find match for identifier and role=None,
- Return default handler.
'''
key = cls._key_for(identifier, role)
handler = cls._handlers.get(key, None)
if handler is None:
default_for_identifier = cls._key_for(identifier, None)
handler = cls._handlers.get(default_for_identifier,
cls._handlers['_default'])
return handler |
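A toy, self-contained version of the same fallback lookup; the (identifier, role) key scheme and the handler names are assumptions for illustration, since _key_for is not shown above.

handlers = {
    ('pdf', 'viewer'): 'PdfViewerHandler',
    ('pdf', None): 'PdfDefaultHandler',
    '_default': 'GenericHandler',
}

def lookup(identifier, role=None):
    handler = handlers.get((identifier, role))
    if handler is None:
        handler = handlers.get((identifier, None), handlers['_default'])
    return handler

print(lookup('pdf', 'viewer'))  # exact match        -> PdfViewerHandler
print(lookup('pdf', 'editor'))  # identifier default -> PdfDefaultHandler
print(lookup('txt'))            # global default     -> GenericHandler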
def _inherited_panel(panel, base_panels_from_pillar, ret):
'''Return a panel with properties from parents.'''
base_panels = []
for base_panel_from_pillar in base_panels_from_pillar:
base_panel = __salt__['pillar.get'](base_panel_from_pillar)
if base_panel:
base_panels.append(base_panel)
elif base_panel_from_pillar != _DEFAULT_PANEL_PILLAR:
ret.setdefault('warnings', [])
warning_message = 'Cannot find panel pillar "{0}".'.format(
base_panel_from_pillar)
if warning_message not in ret['warnings']:
ret['warnings'].append(warning_message)
base_panels.append(panel)
result_panel = {}
for panel in base_panels:
result_panel.update(panel)
return result_panel | Return a panel with properties from parents. | Below is the instruction that describes the task:
### Input:
Return a panel with properties from parents.
### Response:
def _inherited_panel(panel, base_panels_from_pillar, ret):
'''Return a panel with properties from parents.'''
base_panels = []
for base_panel_from_pillar in base_panels_from_pillar:
base_panel = __salt__['pillar.get'](base_panel_from_pillar)
if base_panel:
base_panels.append(base_panel)
elif base_panel_from_pillar != _DEFAULT_PANEL_PILLAR:
ret.setdefault('warnings', [])
warning_message = 'Cannot find panel pillar "{0}".'.format(
base_panel_from_pillar)
if warning_message not in ret['warnings']:
ret['warnings'].append(warning_message)
base_panels.append(panel)
result_panel = {}
for panel in base_panels:
result_panel.update(panel)
return result_panel |
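The inheritance above is a plain dict merge in pillar order, with the panel itself appended last so its keys win; a minimal illustration with made-up pillar data.

base_panel = {'type': 'graph', 'span': 6, 'datasource': 'default'}
panel = {'title': 'CPU usage', 'span': 12}

result_panel = {}
for p in [base_panel, panel]:   # the panel itself is merged last, as in the function above
    result_panel.update(p)
# result_panel == {'type': 'graph', 'span': 12, 'datasource': 'default', 'title': 'CPU usage'}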
def addOption(classobj, name, default, dtype=str, doc=None):
"""Adds a renderer option named 'name', with the given default value.
'dtype' must be a callable to convert a string to an option.
'doc' is a doc string.
Options will be initialized from config file here.
"""
# make a config object
if not hasattr(classobj, '_config'):
classobj._config = Kittens.config.SectionParser(ConfigFile, "render-" + classobj.renderer_id)
# make a class-specific copy of the current option set
if classobj._options_owner is not classobj:
classobj.options = dict(DefaultRenderer.options)
classobj._options_owner = classobj
# override default value from config file
if dtype is bool:
value = classobj._config.getbool(name, default)
else:
value = dtype(classobj._config.get(name, default))
# insert into dict
classobj.options[name] = (value, default, dtype, doc) | Adds a renderer option named 'name', with the given default value.
'dtype' must be a callable to convert a string to an option.
'doc' is a doc string.
Options will be initialized from config file here. | Below is the instruction that describes the task:
### Input:
Adds a renderer option named 'name', with the given default value.
'dtype' must be a callable to convert a string to an option.
'doc' is a doc string.
Options will be initialized from config file here.
### Response:
def addOption(classobj, name, default, dtype=str, doc=None):
"""Adds a renderer option named 'name', with the given default value.
'dtype' must be a callable to convert a string to an option.
'doc' is a doc string.
Options will be initialized from config file here.
"""
# make a config object
if not hasattr(classobj, '_config'):
classobj._config = Kittens.config.SectionParser(ConfigFile, "render-" + classobj.renderer_id)
# make a class-specific copy of the current option set
if classobj._options_owner is not classobj:
classobj.options = dict(DefaultRenderer.options)
classobj._options_owner = classobj
# override default value from config file
if dtype is bool:
value = classobj._config.getbool(name, default)
else:
value = dtype(classobj._config.get(name, default))
# insert into dict
classobj.options[name] = (value, default, dtype, doc) |
def set_public_transport_route(self, public_transport_route):
"""
Set the public transport route.
:param public_transport_route: TransportRoute
"""
self._query_params += str(QueryParam.ROUTE_ID) + str(public_transport_route) | Set the public transport route.
:param public_transport_route: TransportRoute | Below is the instruction that describes the task:
### Input:
Set the public transport route.
:param public_transport_route: TransportRoute
### Response:
def set_public_transport_route(self, public_transport_route):
"""
Set the public transport route.
:param public_transport_route: TransportRoute
"""
self._query_params += str(QueryParam.ROUTE_ID) + str(public_transport_route) |
def upgrade(refresh=True, dist_upgrade=False, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade``
if ``dist_upgrade`` is ``True``.
Returns a dictionary containing the changes:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
dist_upgrade
Whether to perform the upgrade using dist-upgrade vs upgrade. Default
is to use upgrade.
.. versionadded:: 2014.7.0
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
download_only
Only download the packages, don't unpack or install them
.. versionadded:: 2018.3.0
force_conf_new
Always install the new version of any configuration files.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
'''
cache_valid_time = kwargs.pop('cache_valid_time', 0)
if salt.utils.data.is_true(refresh):
refresh_db(cache_valid_time)
old = list_pkgs()
if 'force_conf_new' in kwargs and kwargs['force_conf_new']:
force_conf = '--force-confnew'
else:
force_conf = '--force-confold'
cmd = ['apt-get', '-q', '-y', '-o', 'DPkg::Options::={0}'.format(force_conf),
'-o', 'DPkg::Options::=--force-confdef']
if kwargs.get('force_yes', False):
cmd.append('--force-yes')
if kwargs.get('skip_verify', False):
cmd.append('--allow-unauthenticated')
if kwargs.get('download_only', False):
cmd.append('--download-only')
cmd.append('dist-upgrade' if dist_upgrade else 'upgrade')
result = _call_apt(cmd, env=DPKG_ENV_VARS.copy())
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if result['retcode'] != 0:
raise CommandExecutionError(
'Problem encountered upgrading packages',
info={'changes': ret, 'result': result}
)
return ret | .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade``
if ``dist_upgrade`` is ``True``.
Returns a dictionary containing the changes:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
dist_upgrade
Whether to perform the upgrade using dist-upgrade vs upgrade. Default
is to use upgrade.
.. versionadded:: 2014.7.0
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
download_only
Only download the packages, don't unpack or install them
.. versionadded:: 2018.3.0
force_conf_new
Always install the new version of any configuration files.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade | Below is the instruction that describes the task:
### Input:
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade``
if ``dist_upgrade`` is ``True``.
Returns a dictionary containing the changes:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
dist_upgrade
Whether to perform the upgrade using dist-upgrade vs upgrade. Default
is to use upgrade.
.. versionadded:: 2014.7.0
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
download_only
Only download the packages, don't unpack or install them
.. versionadded:: 2018.3.0
force_conf_new
Always install the new version of any configuration files.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
### Response:
def upgrade(refresh=True, dist_upgrade=False, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade``
if ``dist_upgrade`` is ``True``.
Returns a dictionary containing the changes:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
dist_upgrade
Whether to perform the upgrade using dist-upgrade vs upgrade. Default
is to use upgrade.
.. versionadded:: 2014.7.0
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
download_only
Only download the packages, don't unpack or install them
.. versionadded:: 2018.3.0
force_conf_new
Always install the new version of any configuration files.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
'''
cache_valid_time = kwargs.pop('cache_valid_time', 0)
if salt.utils.data.is_true(refresh):
refresh_db(cache_valid_time)
old = list_pkgs()
if 'force_conf_new' in kwargs and kwargs['force_conf_new']:
force_conf = '--force-confnew'
else:
force_conf = '--force-confold'
cmd = ['apt-get', '-q', '-y', '-o', 'DPkg::Options::={0}'.format(force_conf),
'-o', 'DPkg::Options::=--force-confdef']
if kwargs.get('force_yes', False):
cmd.append('--force-yes')
if kwargs.get('skip_verify', False):
cmd.append('--allow-unauthenticated')
if kwargs.get('download_only', False):
cmd.append('--download-only')
cmd.append('dist-upgrade' if dist_upgrade else 'upgrade')
result = _call_apt(cmd, env=DPKG_ENV_VARS.copy())
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if result['retcode'] != 0:
raise CommandExecutionError(
'Problem encountered upgrading packages',
info={'changes': ret, 'result': result}
)
return ret |
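The changes dictionary returned above comes from comparing the package lists taken before and after the run; a rough standalone sketch of that comparison (salt.utils.data.compare_dicts itself is not reproduced, so the output shape is approximate).

old = {'vim': '8.0', 'git': '2.20'}
new = {'vim': '8.2', 'git': '2.20', 'curl': '7.68'}

changes = {
    pkg: {'old': old.get(pkg, ''), 'new': version}
    for pkg, version in new.items()
    if old.get(pkg) != version
}
# {'vim': {'old': '8.0', 'new': '8.2'}, 'curl': {'old': '', 'new': '7.68'}}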
def __json(self):
"""
Using the exclude lists, convert fields to a string.
"""
if self.exclude_list is None:
self.exclude_list = []
fields = {}
for key, item in vars(self).items():
if hasattr(self, '_sa_instance_state'):
# load only deferred objects
if len(orm.attributes.instance_state(self).unloaded) > 0:
mapper = inspect(self)
for column in mapper.attrs:
column.key
column.value
if str(key).startswith('_') or key in self.exclude_list:
continue
fields[key] = item
obj = Json.safe_object(fields)
return str(obj) | Using the exclude lists, convert fields to a string. | Below is the instruction that describes the task:
### Input:
Using the exclude lists, convert fields to a string.
### Response:
def __json(self):
"""
Using the exclude lists, convert fields to a string.
"""
if self.exclude_list is None:
self.exclude_list = []
fields = {}
for key, item in vars(self).items():
if hasattr(self, '_sa_instance_state'):
# load only deferred objects
if len(orm.attributes.instance_state(self).unloaded) > 0:
mapper = inspect(self)
for column in mapper.attrs:
column.key
column.value
if str(key).startswith('_') or key in self.exclude_list:
continue
fields[key] = item
obj = Json.safe_object(fields)
return str(obj) |
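A minimal sketch of the attribute filtering done above: names starting with an underscore or listed in exclude_list are skipped. The Point class is an illustrative stand-in for a mapped model.

class Point:
    def __init__(self):
        self.x = 1
        self.y = 2
        self._cache = None   # leading underscore, always skipped

exclude_list = ['y']
p = Point()
fields = {key: value for key, value in vars(p).items()
          if not key.startswith('_') and key not in exclude_list}
# fields == {'x': 1}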
def increment_failed_logins(self):
""" Increment failed logins counter"""
if not self.failed_logins:
self.failed_logins = 1
elif not self.failed_login_limit_reached():
self.failed_logins += 1
else:
self.reset_login_counter()
self.lock_account(30) | Increment failed logins counter | Below is the instruction that describes the task:
### Input:
Increment failed logins counter
### Response:
def increment_failed_logins(self):
""" Increment failed logins counter"""
if not self.failed_logins:
self.failed_logins = 1
elif not self.failed_login_limit_reached():
self.failed_logins += 1
else:
self.reset_login_counter()
self.lock_account(30) |
def set_prompt(self, prompt=None):
"""
Defines a pattern that is waited for when calling the expect_prompt()
method.
If the set_prompt() method is not called, or if it is called with the
prompt argument set to None, a default prompt is used that should
work with many devices running Unix, IOS, IOS-XR, or Junos and others.
:type prompt: RegEx
:param prompt: The pattern that matches the prompt of the remote host.
"""
if prompt is None:
self.manual_prompt_re = prompt
else:
self.manual_prompt_re = to_regexs(prompt) | Defines a pattern that is waited for when calling the expect_prompt()
method.
If the set_prompt() method is not called, or if it is called with the
prompt argument set to None, a default prompt is used that should
work with many devices running Unix, IOS, IOS-XR, or Junos and others.
:type prompt: RegEx
:param prompt: The pattern that matches the prompt of the remote host. | Below is the instruction that describes the task:
### Input:
Defines a pattern that is waited for when calling the expect_prompt()
method.
If the set_prompt() method is not called, or if it is called with the
prompt argument set to None, a default prompt is used that should
work with many devices running Unix, IOS, IOS-XR, or Junos and others.
:type prompt: RegEx
:param prompt: The pattern that matches the prompt of the remote host.
### Response:
def set_prompt(self, prompt=None):
"""
Defines a pattern that is waited for when calling the expect_prompt()
method.
If the set_prompt() method is not called, or if it is called with the
prompt argument set to None, a default prompt is used that should
work with many devices running Unix, IOS, IOS-XR, or Junos and others.
:type prompt: RegEx
:param prompt: The pattern that matches the prompt of the remote host.
"""
if prompt is None:
self.manual_prompt_re = prompt
else:
self.manual_prompt_re = to_regexs(prompt) |
def as_xml(self,parent):
"""Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`"""
n=parent.newChild(None,"CATEGORIES",None)
for k in self.keywords:
n.newTextChild(None,"KEYWORD",to_utf8(k))
return n | Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode` | Below is the instruction that describes the task:
### Input:
Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`
### Response:
def as_xml(self,parent):
"""Create vcard-tmp XML representation of the field.
:Parameters:
- `parent`: parent node for the element
:Types:
- `parent`: `libxml2.xmlNode`
:return: xml node with the field data.
:returntype: `libxml2.xmlNode`"""
n=parent.newChild(None,"CATEGORIES",None)
for k in self.keywords:
n.newTextChild(None,"KEYWORD",to_utf8(k))
return n |
async def invite(self, room_id: str, user_id: str, check_cache: bool = False
) -> Optional[dict]:
"""
Invite a user to participate in a particular room. See also: `API reference`_
Args:
room_id: The room identifier (not alias) to which to invite the user.
user_id: The fully qualified user ID of the invitee.
check_cache: Whether or not to check the state cache before inviting.
If true, the actual invite HTTP request will only be made if the user is not in the
room according to local state caches.
Returns:
.. _API reference:
https://matrix.org/docs/spec/client_server/r0.3.0.html#post-matrix-client-r0-createroom
"""
await self.ensure_joined(room_id)
try:
ok_states = {"invite", "join"}
do_invite = (not check_cache
or self.state_store.get_membership(room_id, user_id) not in ok_states)
if do_invite:
response = await self._invite_direct(room_id, user_id)
self.state_store.invited(room_id, user_id)
return response
except MatrixRequestError as e:
if e.errcode != "M_FORBIDDEN":
raise IntentError(f"Failed to invite {user_id} to {room_id}", e)
if "is already in the room" in e.message:
self.state_store.joined(room_id, user_id) | Invite a user to participate in a particular room. See also: `API reference`_
Args:
room_id: The room identifier (not alias) to which to invite the user.
user_id: The fully qualified user ID of the invitee.
check_cache: Whether or not to check the state cache before inviting.
If true, the actual invite HTTP request will only be made if the user is not in the
room according to local state caches.
Returns:
.. _API reference:
https://matrix.org/docs/spec/client_server/r0.3.0.html#post-matrix-client-r0-createroom | Below is the instruction that describes the task:
### Input:
Invite a user to participate in a particular room. See also: `API reference`_
Args:
room_id: The room identifier (not alias) to which to invite the user.
user_id: The fully qualified user ID of the invitee.
check_cache: Whether or not to check the state cache before inviting.
If true, the actual invite HTTP request will only be made if the user is not in the
room according to local state caches.
Returns:
.. _API reference:
https://matrix.org/docs/spec/client_server/r0.3.0.html#post-matrix-client-r0-createroom
### Response:
async def invite(self, room_id: str, user_id: str, check_cache: bool = False
) -> Optional[dict]:
"""
Invite a user to participate in a particular room. See also: `API reference`_
Args:
room_id: The room identifier (not alias) to which to invite the user.
user_id: The fully qualified user ID of the invitee.
check_cache: Whether or not to check the state cache before inviting.
If true, the actual invite HTTP request will only be made if the user is not in the
room according to local state caches.
Returns:
.. _API reference:
https://matrix.org/docs/spec/client_server/r0.3.0.html#post-matrix-client-r0-createroom
"""
await self.ensure_joined(room_id)
try:
ok_states = {"invite", "join"}
do_invite = (not check_cache
or self.state_store.get_membership(room_id, user_id) not in ok_states)
if do_invite:
response = await self._invite_direct(room_id, user_id)
self.state_store.invited(room_id, user_id)
return response
except MatrixRequestError as e:
if e.errcode != "M_FORBIDDEN":
raise IntentError(f"Failed to invite {user_id} to {room_id}", e)
if "is already in the room" in e.message:
self.state_store.joined(room_id, user_id) |
def aggregate(self, func, *columns):
"""
Execute an aggregate function against the database
:param func: The aggregate function
:type func: str
:param columns: The columns to execute the function for
:type columns: tuple
:return: The aggregate result
:rtype: mixed
"""
if not columns:
columns = ["*"]
self.aggregate_ = {"function": func, "columns": columns}
previous_columns = self.columns
results = self.get(*columns).all()
self.aggregate_ = None
self.columns = previous_columns
if len(results) > 0:
return dict((k.lower(), v) for k, v in results[0].items())["aggregate"] | Execute an aggregate function against the database
:param func: The aggregate function
:type func: str
:param columns: The columns to execute the function for
:type columns: tuple
:return: The aggregate result
:rtype: mixed | Below is the instruction that describes the task:
### Input:
Execute an aggregate function against the database
:param func: The aggregate function
:type func: str
:param columns: The columns to execute the function for
:type columns: tuple
:return: The aggregate result
:rtype: mixed
### Response:
def aggregate(self, func, *columns):
"""
Execute an aggregate function against the database
:param func: The aggregate function
:type func: str
:param columns: The columns to execute the function for
:type columns: tuple
:return: The aggregate result
:rtype: mixed
"""
if not columns:
columns = ["*"]
self.aggregate_ = {"function": func, "columns": columns}
previous_columns = self.columns
results = self.get(*columns).all()
self.aggregate_ = None
self.columns = previous_columns
if len(results) > 0:
return dict((k.lower(), v) for k, v in results[0].items())["aggregate"] |
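The last step above lowercases the keys of the first result row and pulls out its 'aggregate' column; a tiny illustration with a made-up row (the query-builder setup itself is not reproduced).

row = {'AGGREGATE': 42}   # e.g. the row returned for COUNT(*) AS aggregate
value = dict((k.lower(), v) for k, v in row.items())['aggregate']
# value == 42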
def download_SRA(self, email, directory='./', **kwargs):
"""Download RAW data as SRA file.
The files will be downloaded to the sample directory created ad hoc
or the directory specified by the parameter. The sample has to come
from sequencing eg. mRNA-seq, CLIP etc.
An important parameter is a filetype. By default an SRA
is accessed by FTP and such file is downloaded. This does not
require additional libraries. However in order
to produce FASTA or FASTQ files one would need to use SRA-Toolkit.
Thus, it is assumed that this library is already installed or it
will be installed in the near future. One can immediately specify
the download type to fasta or fastq.
To see all possible ``**kwargs`` that could be passed to the function
see the description of :class:`~GEOparse.sra_downloader.SRADownloader`.
Args:
email (:obj:`str`): an email (any) - Required by NCBI for access
directory (:obj:`str`, optional): The directory to which download
the data. Defaults to "./".
**kwargs: Arbitrary keyword arguments, see description
Returns:
:obj:`dict`: A dictionary containing only one key (``SRA``) with
the list of downloaded files.
Raises:
:obj:`TypeError`: Type to download unknown
:obj:`NoSRARelationException`: No SRAToolkit
:obj:`Exception`: Wrong e-mail
:obj:`HTTPError`: Cannot access or connect to DB
"""
downloader = SRADownloader(self, email, directory, **kwargs)
return {"SRA": downloader.download()} | Download RAW data as SRA file.
The files will be downloaded to the sample directory created ad hoc
or the directory specified by the parameter. The sample has to come
from sequencing eg. mRNA-seq, CLIP etc.
An important parameter is a filetype. By default an SRA
is accessed by FTP and such file is downloaded. This does not
require additional libraries. However in order
to produce FASTA or FASTQ files one would need to use SRA-Toolkit.
Thus, it is assumed that this library is already installed or it
will be installed in the near future. One can immediately specify
the download type to fasta or fastq.
To see all possible ``**kwargs`` that could be passed to the function
see the description of :class:`~GEOparse.sra_downloader.SRADownloader`.
Args:
email (:obj:`str`): an email (any) - Required by NCBI for access
directory (:obj:`str`, optional): The directory to which download
the data. Defaults to "./".
**kwargs: Arbitrary keyword arguments, see description
Returns:
:obj:`dict`: A dictionary containing only one key (``SRA``) with
the list of downloaded files.
Raises:
:obj:`TypeError`: Type to download unknown
:obj:`NoSRARelationException`: No SRAToolkit
:obj:`Exception`: Wrong e-mail
:obj:`HTTPError`: Cannot access or connect to DB | Below is the instruction that describes the task:
### Input:
Download RAW data as SRA file.
The files will be downloaded to the sample directory created ad hoc
or the directory specified by the parameter. The sample has to come
from sequencing eg. mRNA-seq, CLIP etc.
An important parameter is a filetype. By default an SRA
is accessed by FTP and such file is downloaded. This does not
require additional libraries. However in order
to produce FASTA or FASTQ files one would need to use SRA-Toolkit.
Thus, it is assumed that this library is already installed or it
will be installed in the near future. One can immediately specify
the download type to fasta or fastq.
To see all possible ``**kwargs`` that could be passed to the function
see the description of :class:`~GEOparse.sra_downloader.SRADownloader`.
Args:
email (:obj:`str`): an email (any) - Required by NCBI for access
directory (:obj:`str`, optional): The directory to which download
the data. Defaults to "./".
**kwargs: Arbitrary keyword arguments, see description
Returns:
:obj:`dict`: A dictionary containing only one key (``SRA``) with
the list of downloaded files.
Raises:
:obj:`TypeError`: Type to download unknown
:obj:`NoSRARelationException`: No SRAToolkit
:obj:`Exception`: Wrong e-mail
:obj:`HTTPError`: Cannot access or connect to DB
### Response:
def download_SRA(self, email, directory='./', **kwargs):
"""Download RAW data as SRA file.
The files will be downloaded to the sample directory created ad hoc
or the directory specified by the parameter. The sample has to come
from sequencing eg. mRNA-seq, CLIP etc.
An important parameter is a filetype. By default an SRA
is accessed by FTP and such file is downloaded. This does not
require additional libraries. However in order
to produce FASTA or FASTQ files one would need to use SRA-Toolkit.
Thus, it is assumed that this library is already installed or it
will be installed in the near future. One can immediately specify
the download type to fasta or fastq.
To see all possible ``**kwargs`` that could be passed to the function
see the description of :class:`~GEOparse.sra_downloader.SRADownloader`.
Args:
email (:obj:`str`): an email (any) - Required by NCBI for access
directory (:obj:`str`, optional): The directory to which download
the data. Defaults to "./".
**kwargs: Arbitrary keyword arguments, see description
Returns:
:obj:`dict`: A dictionary containing only one key (``SRA``) with
the list of downloaded files.
Raises:
:obj:`TypeError`: Type to download unknown
:obj:`NoSRARelationException`: No SRAToolkit
:obj:`Exception`: Wrong e-mail
:obj:`HTTPError`: Cannot access or connect to DB
"""
downloader = SRADownloader(self, email, directory, **kwargs)
return {"SRA": downloader.download()} |
def rm(self, name):
"""
Remove a data analog called 'name'.
The 'name' can contain a path specifier.
Warning: see
http://stackoverflow.com/questions/5844672/delete-an-element-from-a-dictionary
deleting from the snode_current changes dictionary contents for any other
agents that have references to the same instance.
This deletes either directories or files.
"""
b_OK = False
str_here = self.cwd()
l_path = name.split('/')
if len(l_path) > 1:
self.cd('/'.join(l_path[0:-1]))
name = l_path[-1]
if name in self.snode_current.d_data:
del self.snode_current.d_data[name]
b_OK = True
if name in self.snode_current.d_nodes:
del self.snode_current.d_nodes[name]
b_OK = True
self.cd(str_here)
return b_OK | Remove a data analog called 'name'.
The 'name' can contain a path specifier.
Warning: see
http://stackoverflow.com/questions/5844672/delete-an-element-from-a-dictionary
deleting from the snode_current changes dictionary contents for any other
agents that have references to the same instance.
This deletes either directories or files. | Below is the instruction that describes the task:
### Input:
Remove a data analog called 'name'.
The 'name' can contain a path specifier.
Warning: see
http://stackoverflow.com/questions/5844672/delete-an-element-from-a-dictionary
deleting from the snode_current changes dictionary contents for any other
agents that have references to the same instance.
This deletes either directories or files.
### Response:
def rm(self, name):
"""
Remove a data analog called 'name'.
The 'name' can contain a path specifier.
Warning: see
http://stackoverflow.com/questions/5844672/delete-an-element-from-a-dictionary
deleting from the snode_current changes dictionary contents for any other
agents that have references to the same instance.
This deletes either directories or files.
"""
b_OK = False
str_here = self.cwd()
l_path = name.split('/')
if len(l_path) > 1:
self.cd('/'.join(l_path[0:-1]))
name = l_path[-1]
if name in self.snode_current.d_data:
del self.snode_current.d_data[name]
b_OK = True
if name in self.snode_current.d_nodes:
del self.snode_current.d_nodes[name]
b_OK = True
self.cd(str_here)
return b_OK |
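The path handling above splits the name into a parent path to cd into and a leaf entry to delete; a tiny illustration.

name = 'data/raw/sample.dat'
l_path = name.split('/')
parent, leaf = '/'.join(l_path[0:-1]), l_path[-1]
# parent == 'data/raw' (cd target), leaf == 'sample.dat' (the entry removed from that node)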
def save_data(self, trigger_id, **data):
"""
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
# convert the format to be released in Markdown
status = False
data['output_format'] = 'md'
title, content = super(ServiceReddit, self).save_data(trigger_id, **data)
if self.token:
trigger = Reddit.objects.get(trigger_id=trigger_id)
if trigger.share_link:
status = self.reddit.subreddit(trigger.subreddit).submit(title=title, url=content)
else:
status = self.reddit.subreddit(trigger.subreddit).submit(title=title, selftext=content)
sentence = str('reddit submission {} created').format(title)
logger.debug(sentence)
else:
msg = "no token or link provided for trigger ID {} ".format(trigger_id)
logger.critical(msg)
update_result(trigger_id, msg=msg, status=False)
return status | let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean | Below is the instruction that describes the task:
### Input:
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
### Response:
def save_data(self, trigger_id, **data):
"""
let's save the data
:param trigger_id: trigger ID from which to save data
:param data: the data to check to be used and save
:type trigger_id: int
:type data: dict
:return: the status of the save statement
:rtype: boolean
"""
# convert the format to be released in Markdown
status = False
data['output_format'] = 'md'
title, content = super(ServiceReddit, self).save_data(trigger_id, **data)
if self.token:
trigger = Reddit.objects.get(trigger_id=trigger_id)
if trigger.share_link:
status = self.reddit.subreddit(trigger.subreddit).submit(title=title, url=content)
else:
status = self.reddit.subreddit(trigger.subreddit).submit(title=title, selftext=content)
sentence = str('reddit submission {} created').format(title)
logger.debug(sentence)
else:
msg = "no token or link provided for trigger ID {} ".format(trigger_id)
logger.critical(msg)
update_result(trigger_id, msg=msg, status=False)
return status |
def _create_variable(orig_v, step, variables):
"""Create a new output variable, potentially over-writing existing or creating new.
"""
# get current variable, and convert to be the output of our process step
try:
v = _get_variable(orig_v["id"], variables)
except ValueError:
v = copy.deepcopy(orig_v)
if not isinstance(v["id"], six.string_types):
v["id"] = _get_string_vid(v["id"])
for key, val in orig_v.items():
if key not in ["id", "type"]:
v[key] = val
if orig_v.get("type") != "null":
v["type"] = orig_v["type"]
v["id"] = "%s/%s" % (step.name, get_base_id(v["id"]))
return v | Create a new output variable, potentially over-writing existing or creating new. | Below is the instruction that describes the task:
### Input:
Create a new output variable, potentially over-writing existing or creating new.
### Response:
def _create_variable(orig_v, step, variables):
"""Create a new output variable, potentially over-writing existing or creating new.
"""
# get current variable, and convert to be the output of our process step
try:
v = _get_variable(orig_v["id"], variables)
except ValueError:
v = copy.deepcopy(orig_v)
if not isinstance(v["id"], six.string_types):
v["id"] = _get_string_vid(v["id"])
for key, val in orig_v.items():
if key not in ["id", "type"]:
v[key] = val
if orig_v.get("type") != "null":
v["type"] = orig_v["type"]
v["id"] = "%s/%s" % (step.name, get_base_id(v["id"]))
return v |
def _get_refreshed_check_result(self, check_id):
"""
Given the ``check_id``, return the dict of Trusted Advisor check
results. This handles refreshing the Trusted Advisor check, if desired,
according to ``self.refresh_mode`` and ``self.refresh_timeout``.
:param check_id: the Trusted Advisor check ID
:type check_id: str
:returns: dict check result. The return value of
:py:meth:`Support.Client.describe_trusted_advisor_check_result`
:rtype: dict
"""
# handle a refresh_mode of None right off the bat
if self.refresh_mode is None:
logger.info("Not refreshing Trusted Advisor check (refresh mode "
"is None)")
return self._get_check_result(check_id)[0]
logger.debug("Handling refresh of check: %s", check_id)
# if we want to refresh, step 1 is to see if we can yet...
if not self._can_refresh_check(check_id):
return self._get_check_result(check_id)[0]
# either it's not too soon to refresh, or we have no idea...
if isinstance(self.refresh_mode, type(1)):
# mode is an int, check the last refresh time and compare
checks, check_datetime = self._get_check_result(check_id)
logger.debug('ta_refresh_mode older; check last refresh: %s; '
'threshold=%d seconds', check_datetime,
self.refresh_mode)
if check_datetime >= datetime.now(utc) - timedelta(
seconds=self.refresh_mode):
logger.warning('Trusted Advisor check %s last refresh time '
'of %s is newer than refresh threshold of %d '
'seconds.', check_id, check_datetime,
self.refresh_mode)
return self._get_check_result(check_id)[0]
# do the refresh
logger.info("Refreshing Trusted Advisor check: %s", check_id)
self.conn.refresh_trusted_advisor_check(checkId=check_id)
# if mode isn't trigger, wait for refresh up to timeout
if self.refresh_mode == 'trigger':
result = self._get_check_result(check_id)[0]
else:
result = self._poll_for_refresh(check_id)
return result | Given the ``check_id``, return the dict of Trusted Advisor check
results. This handles refreshing the Trusted Advisor check, if desired,
according to ``self.refresh_mode`` and ``self.refresh_timeout``.
:param check_id: the Trusted Advisor check ID
:type check_id: str
:returns: dict check result. The return value of
:py:meth:`Support.Client.describe_trusted_advisor_check_result`
:rtype: dict | Below is the instruction that describes the task:
### Input:
Given the ``check_id``, return the dict of Trusted Advisor check
results. This handles refreshing the Trusted Advisor check, if desired,
according to ``self.refresh_mode`` and ``self.refresh_timeout``.
:param check_id: the Trusted Advisor check ID
:type check_id: str
:returns: dict check result. The return value of
:py:meth:`Support.Client.describe_trusted_advisor_check_result`
:rtype: dict
### Response:
def _get_refreshed_check_result(self, check_id):
"""
Given the ``check_id``, return the dict of Trusted Advisor check
results. This handles refreshing the Trusted Advisor check, if desired,
according to ``self.refresh_mode`` and ``self.refresh_timeout``.
:param check_id: the Trusted Advisor check ID
:type check_id: str
:returns: dict check result. The return value of
:py:meth:`Support.Client.describe_trusted_advisor_check_result`
:rtype: dict
"""
# handle a refresh_mode of None right off the bat
if self.refresh_mode is None:
logger.info("Not refreshing Trusted Advisor check (refresh mode "
"is None)")
return self._get_check_result(check_id)[0]
logger.debug("Handling refresh of check: %s", check_id)
# if we want to refresh, step 1 is to see if we can yet...
if not self._can_refresh_check(check_id):
return self._get_check_result(check_id)[0]
# either it's not too soon to refresh, or we have no idea...
if isinstance(self.refresh_mode, type(1)):
# mode is an int, check the last refresh time and compare
checks, check_datetime = self._get_check_result(check_id)
logger.debug('ta_refresh_mode older; check last refresh: %s; '
'threshold=%d seconds', check_datetime,
self.refresh_mode)
if check_datetime >= datetime.now(utc) - timedelta(
seconds=self.refresh_mode):
logger.warning('Trusted Advisor check %s last refresh time '
'of %s is newer than refresh threshold of %d '
'seconds.', check_id, check_datetime,
self.refresh_mode)
return self._get_check_result(check_id)[0]
# do the refresh
logger.info("Refreshing Trusted Advisor check: %s", check_id)
self.conn.refresh_trusted_advisor_check(checkId=check_id)
# if mode isn't trigger, wait for refresh up to timeout
if self.refresh_mode == 'trigger':
result = self._get_check_result(check_id)[0]
else:
result = self._poll_for_refresh(check_id)
return result |
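When refresh_mode is an integer, the method only refreshes a check whose last refresh is older than that many seconds; a self-contained sketch of just that staleness test, using the standard library's timezone.utc rather than the utc object the module appears to import.

from datetime import datetime, timedelta, timezone

def is_stale(last_refresh, max_age_seconds):
    # True when the previous refresh is older than the allowed age.
    return last_refresh < datetime.now(timezone.utc) - timedelta(seconds=max_age_seconds)

two_hours_ago = datetime.now(timezone.utc) - timedelta(hours=2)
print(is_stale(two_hours_ago, max_age_seconds=3600))   # True  -> refresh the check
print(is_stale(two_hours_ago, max_age_seconds=86400))  # False -> reuse the cached result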
def html_format(data, out, opts=None, **kwargs):
'''
Return the formatted string as HTML.
'''
ansi_escaped_string = string_format(data, out, opts, **kwargs)
return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />') | Return the formatted string as HTML. | Below is the instruction that describes the task:
### Input:
Return the formatted string as HTML.
### Response:
def html_format(data, out, opts=None, **kwargs):
'''
Return the formatted string as HTML.
'''
ansi_escaped_string = string_format(data, out, opts, **kwargs)
return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />')
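A quick demonstration of the escaping chain above on an already colour-stripped string (string_format and the outputter machinery are not reproduced).

text = 'local:\n    True'
html = text.replace(' ', '&nbsp;').replace('\n', '<br />')
# html == 'local:<br />&nbsp;&nbsp;&nbsp;&nbsp;True'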
def tokenize(text, regexps=TOKENIZERRULES):
"""Tokenizes a string and returns a list of tokens
:param text: The text to tokenise
:type text: string
:param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_)
:type regexps: Tuple/list of regular expressions to use in tokenisation
:rtype: Returns a list of tokens
Examples:
>>> for token in tokenize("This is a test."):
... print(token)
This
is
a
test
.
"""
for i,regexp in list(enumerate(regexps)):
if isstring(regexp):
regexps[i] = re.compile(regexp)
tokens = []
begin = 0
for i, c in enumerate(text):
if begin > i:
continue
elif i == begin:
m = False
for regexp in regexps:
m = regexp.findall(text[i:i+300])
if m:
tokens.append(m[0])
begin = i + len(m[0])
break
if m: continue
if c in string.punctuation or c in WHITESPACE:
prev = text[i-1] if i > 0 else ""
next = text[i+1] if i < len(text)-1 else ""
if (c == '.' or c == ',') and prev.isdigit() and next.isdigit():
#punctuation in between numbers, keep as one token
pass
elif (c == "'" or c == "`") and prev.isalpha() and next.isalpha():
#quote in between chars, keep...
pass
elif c not in WHITESPACE and next == c: #group clusters of identical punctuation together
continue
elif c == '\r' and prev == '\n':
#ignore
begin = i+1
continue
else:
token = text[begin:i]
if token: tokens.append(token)
if c not in WHITESPACE:
tokens.append(c) #anything but spaces and newlines (i.e. punctuation) counts as a token too
begin = i + 1 #set the begin cursor
if begin <= len(text) - 1:
token = text[begin:]
tokens.append(token)
return tokens | Tokenizes a string and returns a list of tokens
:param text: The text to tokenise
:type text: string
:param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_)
:type regexps: Tuple/list of regular expressions to use in tokenisation
:rtype: Returns a list of tokens
Examples:
>>> for token in tokenize("This is a test."):
... print(token)
This
is
a
test
. | Below is the instruction that describes the task:
### Input:
Tokenizes a string and returns a list of tokens
:param text: The text to tokenise
:type text: string
:param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_)
:type regexps: Tuple/list of regular expressions to use in tokenisation
:rtype: Returns a list of tokens
Examples:
>>> for token in tokenize("This is a test."):
... print(token)
This
is
a
test
.
### Response:
def tokenize(text, regexps=TOKENIZERRULES):
"""Tokenizes a string and returns a list of tokens
:param text: The text to tokenise
:type text: string
:param regexps: Regular expressions to use as tokeniser rules in tokenisation (default=_pynlpl.textprocessors.TOKENIZERRULES_)
:type regexps: Tuple/list of regular expressions to use in tokenisation
:rtype: Returns a list of tokens
Examples:
>>> for token in tokenize("This is a test."):
... print(token)
This
is
a
test
.
"""
for i,regexp in list(enumerate(regexps)):
if isstring(regexp):
regexps[i] = re.compile(regexp)
tokens = []
begin = 0
for i, c in enumerate(text):
if begin > i:
continue
elif i == begin:
m = False
for regexp in regexps:
m = regexp.findall(text[i:i+300])
if m:
tokens.append(m[0])
begin = i + len(m[0])
break
if m: continue
if c in string.punctuation or c in WHITESPACE:
prev = text[i-1] if i > 0 else ""
next = text[i+1] if i < len(text)-1 else ""
if (c == '.' or c == ',') and prev.isdigit() and next.isdigit():
#punctuation in between numbers, keep as one token
pass
elif (c == "'" or c == "`") and prev.isalpha() and next.isalpha():
#quote in between chars, keep...
pass
elif c not in WHITESPACE and next == c: #group clusters of identical punctuation together
continue
elif c == '\r' and prev == '\n':
#ignore
begin = i+1
continue
else:
token = text[begin:i]
if token: tokens.append(token)
if c not in WHITESPACE:
tokens.append(c) #anything but spaces and newlines (i.e. punctuation) counts as a token too
begin = i + 1 #set the begin cursor
if begin <= len(text) - 1:
token = text[begin:]
tokens.append(token)
return tokens |
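One more usage example, assuming pynlpl is installed and that none of the default regexp rules fire on this input; it shows the digit rule that keeps a decimal number whole while other punctuation is split off.

from pynlpl.textprocessors import tokenize

print(tokenize("It costs 3.50, right?"))
# expected: ['It', 'costs', '3.50', ',', 'right', '?']
# '3.50' stays whole because the '.' sits between two digits; ',' and '?' become their own tokens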
def certify_set(
value, certifier=None, min_len=None, max_len=None, include_collections=False,
required=True,
):
"""
Certifier for a set.
:param set value:
The set to be certified.
:param func certifier:
A function to be called on each value in the list to check that it is valid.
:param int min_len:
The minimum acceptable length for the list. If None, the minimum length is not checked.
:param int max_len:
The maximum acceptable length for the list. If None, the maximum length is not checked.
:param bool include_collections:
Include types from collections.
:param bool required:
Whether the value can be `None`. Defaults to True.
:return:
The certified set.
:rtype:
set
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid
"""
certify_bool(include_collections, required=True)
certify_iterable(
value=value,
types=tuple([set, MutableSet, Set]) if include_collections else tuple([set]),
certifier=certifier,
min_len=min_len,
max_len=max_len,
schema=None,
required=required,
) | Certifier for a set.
:param set value:
The set to be certified.
:param func certifier:
A function to be called on each value in the list to check that it is valid.
:param int min_len:
The minimum acceptable length for the list. If None, the minimum length is not checked.
:param int max_len:
The maximum acceptable length for the list. If None, the maximum length is not checked.
:param bool include_collections:
Include types from collections.
:param bool required:
Whether the value can be `None`. Defaults to True.
:return:
The certified set.
:rtype:
set
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
        The value is invalid | Below is the the instruction that describes the task:
### Input:
Certifier for a set.
:param set value:
The set to be certified.
:param func certifier:
A function to be called on each value in the list to check that it is valid.
:param int min_len:
The minimum acceptable length for the list. If None, the minimum length is not checked.
:param int max_len:
The maximum acceptable length for the list. If None, the maximum length is not checked.
:param bool include_collections:
Include types from collections.
:param bool required:
Whether the value can be `None`. Defaults to True.
:return:
The certified set.
:rtype:
set
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
        The value is invalid
### Response:
def certify_set(
value, certifier=None, min_len=None, max_len=None, include_collections=False,
required=True,
):
"""
Certifier for a set.
:param set value:
The set to be certified.
:param func certifier:
A function to be called on each value in the list to check that it is valid.
:param int min_len:
The minimum acceptable length for the list. If None, the minimum length is not checked.
:param int max_len:
The maximum acceptable length for the list. If None, the maximum length is not checked.
:param bool include_collections:
Include types from collections.
:param bool required:
Whether the value can be `None`. Defaults to True.
:return:
The certified set.
:rtype:
set
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
        The value is invalid
"""
certify_bool(include_collections, required=True)
certify_iterable(
value=value,
types=tuple([set, MutableSet, Set]) if include_collections else tuple([set]),
certifier=certifier,
min_len=min_len,
max_len=max_len,
schema=None,
required=required,
) |
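A hedged usage sketch for certify_set. The per-element checker below is a made-up stand-in (the real library would normally raise its own Certifier* exceptions); only certify_set itself comes from the entry above.

# Illustrative sketch; `is_positive_int` is hypothetical, not part of the library.
def is_positive_int(value):
    if not isinstance(value, int) or value <= 0:
        raise ValueError("expected a positive int, got {!r}".format(value))

certify_set({1, 2, 3}, certifier=is_positive_int, min_len=1, max_len=5)  # passes silently
# certify_set([1, 2, 3])  -> raises: a plain list is not an accepted set type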
def destroy(self, force=False):
"""
Like shutdown(), but also removes all accounts, hosts, etc., and
does not restart the queue. In other words, the queue can no longer
be used after calling this method.
:type force: bool
:param force: Whether to wait until all jobs were processed.
"""
try:
if not force:
self.join()
finally:
self._dbg(2, 'Destroying queue...')
self.workqueue.destroy()
self.account_manager.reset()
self.completed = 0
self.total = 0
self.failed = 0
self.status_bar_length = 0
self._dbg(2, 'Queue destroyed.')
self._del_status_bar() | Like shutdown(), but also removes all accounts, hosts, etc., and
does not restart the queue. In other words, the queue can no longer
be used after calling this method.
:type force: bool
:param force: Whether to wait until all jobs were processed. | Below is the the instruction that describes the task:
### Input:
Like shutdown(), but also removes all accounts, hosts, etc., and
does not restart the queue. In other words, the queue can no longer
be used after calling this method.
:type force: bool
:param force: Whether to wait until all jobs were processed.
### Response:
def destroy(self, force=False):
"""
Like shutdown(), but also removes all accounts, hosts, etc., and
does not restart the queue. In other words, the queue can no longer
be used after calling this method.
:type force: bool
:param force: Whether to wait until all jobs were processed.
"""
try:
if not force:
self.join()
finally:
self._dbg(2, 'Destroying queue...')
self.workqueue.destroy()
self.account_manager.reset()
self.completed = 0
self.total = 0
self.failed = 0
self.status_bar_length = 0
self._dbg(2, 'Queue destroyed.')
self._del_status_bar() |
def _roc(y_true, y_score, ax=None):
"""
Plot ROC curve for binary classification.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples]
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
# check dimensions
fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
ax.plot(fpr, tpr, label=('ROC curve (area = {0:0.2f})'.format(roc_auc)))
_set_ax_settings(ax)
return ax | Plot ROC curve for binary classification.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples]
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot | Below is the the instruction that describes the task:
### Input:
Plot ROC curve for binary classification.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples]
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
### Response:
def _roc(y_true, y_score, ax=None):
"""
Plot ROC curve for binary classification.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples]
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
# check dimensions
fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
ax.plot(fpr, tpr, label=('ROC curve (area = {0:0.2f})'.format(roc_auc)))
_set_ax_settings(ax)
return ax |
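Note that despite the ax=None default, _roc plots onto ax directly, so callers typically create the Axes first. A minimal sketch with made-up scores, assuming matplotlib and scikit-learn are installed:

# Illustrative sketch only.
import matplotlib.pyplot as plt

y_true = [0, 0, 1, 1]
y_score = [0.1, 0.4, 0.35, 0.8]

fig, ax = plt.subplots()
_roc(y_true, y_score, ax=ax)  # draws the ROC curve and AUC label onto ax
plt.show()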
def execute(self):
"""
Stops the cluster if it's running.
"""
cluster_name = self.params.cluster
creator = make_creator(self.params.config,
storage_path=self.params.storage)
try:
cluster = creator.load_cluster(cluster_name)
except (ClusterNotFound, ConfigurationError) as err:
log.error("Cannot stop cluster `%s`: %s", cluster_name, err)
return os.EX_NOINPUT
if not self.params.yes:
confirm_or_abort(
"Do you want really want to stop cluster `{cluster_name}`?"
.format(cluster_name=cluster_name),
msg="Aborting upon user request.")
print("Destroying cluster `%s` ..." % cluster_name)
cluster.stop(force=self.params.force, wait=self.params.wait) | Stops the cluster if it's running. | Below is the the instruction that describes the task:
### Input:
Stops the cluster if it's running.
### Response:
def execute(self):
"""
Stops the cluster if it's running.
"""
cluster_name = self.params.cluster
creator = make_creator(self.params.config,
storage_path=self.params.storage)
try:
cluster = creator.load_cluster(cluster_name)
except (ClusterNotFound, ConfigurationError) as err:
log.error("Cannot stop cluster `%s`: %s", cluster_name, err)
return os.EX_NOINPUT
if not self.params.yes:
confirm_or_abort(
"Do you want really want to stop cluster `{cluster_name}`?"
.format(cluster_name=cluster_name),
msg="Aborting upon user request.")
print("Destroying cluster `%s` ..." % cluster_name)
cluster.stop(force=self.params.force, wait=self.params.wait) |
def get_ini_config(config=os.path.join(os.path.expanduser('~'), '.zdeskcfg'),
default_section=None, section=None):
"""This is a convenience function for getting the zdesk configuration
from an ini file without the need to decorate and call your own function.
Handy when using zdesk and zdeskcfg from the interactive prompt."""
plac_ini.call(__placeholder__, config=config, default_section=default_section)
return __placeholder__.getconfig(section) | This is a convenience function for getting the zdesk configuration
from an ini file without the need to decorate and call your own function.
Handy when using zdesk and zdeskcfg from the interactive prompt. | Below is the the instruction that describes the task:
### Input:
This is a convenience function for getting the zdesk configuration
from an ini file without the need to decorate and call your own function.
Handy when using zdesk and zdeskcfg from the interactive prompt.
### Response:
def get_ini_config(config=os.path.join(os.path.expanduser('~'), '.zdeskcfg'),
default_section=None, section=None):
"""This is a convenience function for getting the zdesk configuration
from an ini file without the need to decorate and call your own function.
Handy when using zdesk and zdeskcfg from the interactive prompt."""
plac_ini.call(__placeholder__, config=config, default_section=default_section)
return __placeholder__.getconfig(section) |
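A quick interactive-prompt sketch; the section name 'mysite' is hypothetical and would correspond to a section in ~/.zdeskcfg:

# Illustrative sketch only.
cfg = get_ini_config(section='mysite')  # reads ~/.zdeskcfg and returns that section's settings
print(cfg)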
def __can_attempt(self, namespace: str, add_attempt=True) -> bool:
"""
Checks if a namespace is rate limited or not with including/excluding the current call
:param namespace: Rate limiting namespace
:type namespace: str
:param add_attempt: Boolean value indicating if the current call should be considered as an attempt or not
:type add_attempt: bool
:return: Returns true if attempt can go ahead under current rate limiting rules, false otherwise
"""
can_attempt = False
if not PyRateLimit.redis_helper:
raise PyRateLimitException("redis connection information not provided")
connection = PyRateLimit.redis_helper.get_atomic_connection()
current_time = int(round(time.time() * 1000000))
old_time_limit = current_time - (self.period * 1000000)
connection.zremrangebyscore(namespace, 0, old_time_limit)
connection.expire(namespace, self.period)
if add_attempt:
current_count = 0
connection.zadd(namespace, current_time, current_time)
else:
current_count = 1 # initialize at 1 to compensate the case that this attempt is not getting counted
connection.zcard(namespace)
redis_result = connection.execute()
current_count += redis_result[-1]
if current_count <= self.limit:
can_attempt = True
return can_attempt | Checks if a namespace is rate limited or not with including/excluding the current call
:param namespace: Rate limiting namespace
:type namespace: str
:param add_attempt: Boolean value indicating if the current call should be considered as an attempt or not
:type add_attempt: bool
:return: Returns true if attempt can go ahead under current rate limiting rules, false otherwise | Below is the the instruction that describes the task:
### Input:
Checks if a namespace is rate limited or not with including/excluding the current call
:param namespace: Rate limiting namespace
:type namespace: str
:param add_attempt: Boolean value indicating if the current call should be considered as an attempt or not
:type add_attempt: bool
:return: Returns true if attempt can go ahead under current rate limiting rules, false otherwise
### Response:
def __can_attempt(self, namespace: str, add_attempt=True) -> bool:
"""
Checks if a namespace is rate limited or not with including/excluding the current call
:param namespace: Rate limiting namespace
:type namespace: str
:param add_attempt: Boolean value indicating if the current call should be considered as an attempt or not
:type add_attempt: bool
:return: Returns true if attempt can go ahead under current rate limiting rules, false otherwise
"""
can_attempt = False
if not PyRateLimit.redis_helper:
raise PyRateLimitException("redis connection information not provided")
connection = PyRateLimit.redis_helper.get_atomic_connection()
current_time = int(round(time.time() * 1000000))
old_time_limit = current_time - (self.period * 1000000)
connection.zremrangebyscore(namespace, 0, old_time_limit)
connection.expire(namespace, self.period)
if add_attempt:
current_count = 0
connection.zadd(namespace, current_time, current_time)
else:
current_count = 1 # initialize at 1 to compensate the case that this attempt is not getting counted
connection.zcard(namespace)
redis_result = connection.execute()
current_count += redis_result[-1]
if current_count <= self.limit:
can_attempt = True
return can_attempt |
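The method implements a sliding-window limiter on a Redis sorted set: trim entries older than the window, optionally record the current attempt, then compare the remaining count to the limit. A self-contained, Redis-free sketch of the same trim-then-count idea (not part of PyRateLimit):

# Illustrative in-memory equivalent of the sorted-set logic above.
import time
from collections import deque

class SlidingWindowLimiter:
    def __init__(self, limit, period_seconds):
        self.limit = limit
        self.period = period_seconds
        self.attempts = deque()  # timestamps of attempts inside the window

    def can_attempt(self, add_attempt=True):
        now = time.monotonic()
        # drop attempts that fell out of the window (zremrangebyscore equivalent)
        while self.attempts and self.attempts[0] <= now - self.period:
            self.attempts.popleft()
        count = 0 if add_attempt else 1  # same compensation as the Redis version
        if add_attempt:
            self.attempts.append(now)    # zadd equivalent
        count += len(self.attempts)      # zcard equivalent
        return count <= self.limit

limiter = SlidingWindowLimiter(limit=3, period_seconds=1)
print([limiter.can_attempt() for _ in range(5)])  # [True, True, True, False, False]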
def load_module(self, name):
"""Load a namespace module as if coming from an empty file.
"""
_verbose_message('namespace module loaded with path {!r}', self.path)
# Adjusting code from LoaderBasics
if name in sys.modules:
mod = sys.modules[name]
self.exec_module(mod)
# In this case we do not want to remove the module in case of error
# Ref : https://docs.python.org/3/reference/import.html#loaders
else:
try:
# Building custom spec and loading as in _LoaderBasics...
spec = ModuleSpec(name, self, origin='namespace', is_package=True)
spec.submodule_search_locations = self.path
# this will call create_module and also initialize the module properly (like for py3)
mod = module_from_spec(spec)
# as per https://docs.python.org/3/reference/import.html#loaders
assert mod.__name__ in sys.modules
self.exec_module(mod)
# We don't ensure that the import-related module attributes get
# set in the sys.modules replacement case. Such modules are on
# their own.
except:
# as per https://docs.python.org/3/reference/import.html#loaders
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name] | Load a namespace module as if coming from an empty file. | Below is the the instruction that describes the task:
### Input:
Load a namespace module as if coming from an empty file.
### Response:
def load_module(self, name):
"""Load a namespace module as if coming from an empty file.
"""
_verbose_message('namespace module loaded with path {!r}', self.path)
# Adjusting code from LoaderBasics
if name in sys.modules:
mod = sys.modules[name]
self.exec_module(mod)
# In this case we do not want to remove the module in case of error
# Ref : https://docs.python.org/3/reference/import.html#loaders
else:
try:
# Building custom spec and loading as in _LoaderBasics...
spec = ModuleSpec(name, self, origin='namespace', is_package=True)
spec.submodule_search_locations = self.path
# this will call create_module and also initialize the module properly (like for py3)
mod = module_from_spec(spec)
# as per https://docs.python.org/3/reference/import.html#loaders
assert mod.__name__ in sys.modules
self.exec_module(mod)
# We don't ensure that the import-related module attributes get
# set in the sys.modules replacement case. Such modules are on
# their own.
except:
# as per https://docs.python.org/3/reference/import.html#loaders
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name] |
def add_arguments(self):
"""
Add specific command line arguments for this command
"""
# Call our parent to add the default arguments
ApiCli.add_arguments(self)
# Command specific arguments
self.parser.add_argument('-f', '--format', dest='format', action='store', required=False,
choices=['csv', 'json', 'raw', 'xml'], help='Output format. Default is raw')
self.parser.add_argument('-n', '--name', dest='metric_name', action='store', required=True,
metavar="metric_name", help='Metric identifier')
self.parser.add_argument('-g', '--aggregate', dest='aggregate', action='store', required=False,
choices=['sum', 'avg', 'max', 'min'], help='Metric default aggregate')
self.parser.add_argument('-r', '--sample', dest='sample', action='store', type=int, metavar="sample",
help='Down sample rate sample in seconds')
self.parser.add_argument('-s', '--source', dest='source', action='store', metavar="source", required=True,
help='Source of measurement')
self.parser.add_argument('-b', '--start', dest='start', action='store', required=True, metavar="start",
help='Start of time range as ISO 8601 string or epoch seconds')
self.parser.add_argument('-d', '--end', dest='end', action='store', metavar="end", required=False,
help='End of time range as ISO 8601 string or epoch seconds')
self.parser.add_argument('-o', '--date-format', dest='date_format', action='store', metavar="format",
required=False,
help='For CSV, JSON, and XML output formats dates (see Python date.strftime). ' +
'Default format is %%s') | Add specific command line arguments for this command | Below is the the instruction that describes the task:
### Input:
Add specific command line arguments for this command
### Response:
def add_arguments(self):
"""
Add specific command line arguments for this command
"""
# Call our parent to add the default arguments
ApiCli.add_arguments(self)
# Command specific arguments
self.parser.add_argument('-f', '--format', dest='format', action='store', required=False,
choices=['csv', 'json', 'raw', 'xml'], help='Output format. Default is raw')
self.parser.add_argument('-n', '--name', dest='metric_name', action='store', required=True,
metavar="metric_name", help='Metric identifier')
self.parser.add_argument('-g', '--aggregate', dest='aggregate', action='store', required=False,
choices=['sum', 'avg', 'max', 'min'], help='Metric default aggregate')
self.parser.add_argument('-r', '--sample', dest='sample', action='store', type=int, metavar="sample",
help='Down sample rate sample in seconds')
self.parser.add_argument('-s', '--source', dest='source', action='store', metavar="source", required=True,
help='Source of measurement')
self.parser.add_argument('-b', '--start', dest='start', action='store', required=True, metavar="start",
help='Start of time range as ISO 8601 string or epoch seconds')
self.parser.add_argument('-d', '--end', dest='end', action='store', metavar="end", required=False,
help='End of time range as ISO 8601 string or epoch seconds')
self.parser.add_argument('-o', '--date-format', dest='date_format', action='store', metavar="format",
required=False,
help='For CSV, JSON, and XML output formats dates (see Python date.strftime). ' +
'Default format is %%s') |
def serv(args):
"""Serve a rueckenwind application"""
if not args.no_debug:
tornado.autoreload.start()
extra = []
if sys.stdout.isatty():
# set terminal title
sys.stdout.write('\x1b]2;rw: {}\x07'.format(' '.join(sys.argv[2:])))
if args.cfg:
extra.append(os.path.abspath(args.cfg))
listen = (int(args.port), args.address)
ioloop = tornado.ioloop.IOLoop.instance()
setup_app(app=args.MODULE, extra_configs=extra,
ioloop=ioloop, listen=listen)
ioloop.start() | Serve a rueckenwind application | Below is the the instruction that describes the task:
### Input:
Serve a rueckenwind application
### Response:
def serv(args):
"""Serve a rueckenwind application"""
if not args.no_debug:
tornado.autoreload.start()
extra = []
if sys.stdout.isatty():
# set terminal title
sys.stdout.write('\x1b]2;rw: {}\x07'.format(' '.join(sys.argv[2:])))
if args.cfg:
extra.append(os.path.abspath(args.cfg))
listen = (int(args.port), args.address)
ioloop = tornado.ioloop.IOLoop.instance()
setup_app(app=args.MODULE, extra_configs=extra,
ioloop=ioloop, listen=listen)
ioloop.start() |
def matchImpObjStrs(fdefs,imp_obj_strs,cdefs):
'''returns imp_funcs, a dictionary with filepath keys that contains
lists of function definition nodes that were imported using
from __ import __ style syntax. also returns imp_classes, which
is the same for class definition nodes.'''
imp_funcs=dict()
imp_classes=dict()
for source in imp_obj_strs:
if not imp_obj_strs[source]:
continue
imp_funcs[source]=[]
imp_classes[source]=[]
for (mod,func) in imp_obj_strs[source]:
if mod not in fdefs:
#print(mod+" is not part of the project.")
continue
if func=='*':
all_fns = [x for x in fdefs[mod] if x.name!='body']
imp_funcs[source] += all_fns
all_cls = [x for x in cdefs[mod]]
imp_classes[source] += all_cls
else:
fn_node = [x for x in fdefs[mod] if x.name==func]
cls_node = [x for x in cdefs[mod] if x.name==func]
#assert len(fn_node) in [1,0]
#assert len(cls_node) in [1,0]
if cls_node:
imp_classes[source] += cls_node
if fn_node:
imp_funcs[source] += fn_node
if not fn_node and not cls_node:
pass
#print(func+' not found in function and class definitions.')
return imp_funcs,imp_classes | returns imp_funcs, a dictionary with filepath keys that contains
lists of function definition nodes that were imported using
from __ import __ style syntax. also returns imp_classes, which
is the same for class definition nodes. | Below is the the instruction that describes the task:
### Input:
returns imp_funcs, a dictionary with filepath keys that contains
lists of function definition nodes that were imported using
from __ import __ style syntax. also returns imp_classes, which
is the same for class definition nodes.
### Response:
def matchImpObjStrs(fdefs,imp_obj_strs,cdefs):
'''returns imp_funcs, a dictionary with filepath keys that contains
lists of function definition nodes that were imported using
from __ import __ style syntax. also returns imp_classes, which
is the same for class definition nodes.'''
imp_funcs=dict()
imp_classes=dict()
for source in imp_obj_strs:
if not imp_obj_strs[source]:
continue
imp_funcs[source]=[]
imp_classes[source]=[]
for (mod,func) in imp_obj_strs[source]:
if mod not in fdefs:
#print(mod+" is not part of the project.")
continue
if func=='*':
all_fns = [x for x in fdefs[mod] if x.name!='body']
imp_funcs[source] += all_fns
all_cls = [x for x in cdefs[mod]]
imp_classes[source] += all_cls
else:
fn_node = [x for x in fdefs[mod] if x.name==func]
cls_node = [x for x in cdefs[mod] if x.name==func]
#assert len(fn_node) in [1,0]
#assert len(cls_node) in [1,0]
if cls_node:
imp_classes[source] += cls_node
if fn_node:
imp_funcs[source] += fn_node
if not fn_node and not cls_node:
pass
#print(func+' not found in function and class definitions.')
return imp_funcs,imp_classes |
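A self-contained sketch of the expected shapes. Real callers pass ast.FunctionDef / ast.ClassDef nodes; the namedtuple below is a minimal stand-in since only the .name attribute is used here, and the module and file names are made up:

# Illustrative sketch with stand-in nodes.
from collections import namedtuple

Node = namedtuple("Node", "name")

fdefs = {"pkg/utils.py": [Node("body"), Node("parse"), Node("dump")]}
cdefs = {"pkg/utils.py": [Node("Config")]}
imp_obj_strs = {"pkg/main.py": [("pkg/utils.py", "parse"), ("pkg/utils.py", "Config")]}

imp_funcs, imp_classes = matchImpObjStrs(fdefs, imp_obj_strs, cdefs)
print([n.name for n in imp_funcs["pkg/main.py"]])    # ['parse']
print([n.name for n in imp_classes["pkg/main.py"]])  # ['Config']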
def _check_subresource(self, subresource: str):
"""Check if specific_resources parameter is valid.
:param str resource: subresource to check.
"""
warnings.warn(
"subresource in URL is deprecated." " Use _include mecanism instead.",
DeprecationWarning,
)
l_subresources = (
"conditions",
"contacts",
"coordinate-system",
"events",
"feature-attributes",
"keywords",
"layers",
"limitations",
"links",
"operations",
"specifications",
)
if isinstance(subresource, str):
if subresource in l_subresources:
subresource = subresource
elif subresource == "tags":
subresource = "keywords"
logging.debug(
"'tags' is an include not a subresource."
" Don't worry, it has be automatically renamed "
"into 'keywords' which is the correct subresource."
)
elif subresource == "serviceLayers":
subresource = "layers"
logging.debug(
"'serviceLayers' is an include not a subresource."
" Don't worry, it has be automatically renamed "
"into 'layers' which is the correct subresource."
)
else:
raise ValueError(
"Invalid subresource. Must be one of: {}".format(
"|".join(l_subresources)
)
)
else:
raise TypeError("'subresource' expects a str")
return subresource | Check if specific_resources parameter is valid.
:param str resource: subresource to check. | Below is the the instruction that describes the task:
### Input:
Check if specific_resources parameter is valid.
:param str resource: subresource to check.
### Response:
def _check_subresource(self, subresource: str):
"""Check if specific_resources parameter is valid.
:param str resource: subresource to check.
"""
warnings.warn(
"subresource in URL is deprecated." " Use _include mecanism instead.",
DeprecationWarning,
)
l_subresources = (
"conditions",
"contacts",
"coordinate-system",
"events",
"feature-attributes",
"keywords",
"layers",
"limitations",
"links",
"operations",
"specifications",
)
if isinstance(subresource, str):
if subresource in l_subresources:
subresource = subresource
elif subresource == "tags":
subresource = "keywords"
logging.debug(
"'tags' is an include not a subresource."
" Don't worry, it has be automatically renamed "
"into 'keywords' which is the correct subresource."
)
elif subresource == "serviceLayers":
subresource = "layers"
logging.debug(
"'serviceLayers' is an include not a subresource."
" Don't worry, it has be automatically renamed "
"into 'layers' which is the correct subresource."
)
else:
raise ValueError(
"Invalid subresource. Must be one of: {}".format(
"|".join(l_subresources)
)
)
else:
raise TypeError("'subresource' expects a str")
return subresource |
def cmd_sync(self, low):
'''
Execute a salt-ssh call synchronously.
.. versionadded:: 2015.5.0
WARNING: Eauth is **NOT** respected
.. code-block:: python
client.cmd_sync({
'tgt': 'silver',
'fun': 'test.ping',
'arg': (),
'tgt_type'='glob',
'kwarg'={}
})
{'silver': {'fun_args': [], 'jid': '20141202152721523072', 'return': True, 'retcode': 0, 'success': True, 'fun': 'test.ping', 'id': 'silver'}}
'''
kwargs = copy.deepcopy(low)
for ignore in ['tgt', 'fun', 'arg', 'timeout', 'tgt_type', 'kwarg']:
if ignore in kwargs:
del kwargs[ignore]
return self.cmd(low['tgt'],
low['fun'],
low.get('arg', []),
low.get('timeout'),
low.get('tgt_type'),
low.get('kwarg'),
**kwargs) | Execute a salt-ssh call synchronously.
.. versionadded:: 2015.5.0
WARNING: Eauth is **NOT** respected
.. code-block:: python
client.cmd_sync({
'tgt': 'silver',
'fun': 'test.ping',
'arg': (),
'tgt_type'='glob',
'kwarg'={}
})
{'silver': {'fun_args': [], 'jid': '20141202152721523072', 'return': True, 'retcode': 0, 'success': True, 'fun': 'test.ping', 'id': 'silver'}} | Below is the the instruction that describes the task:
### Input:
Execute a salt-ssh call synchronously.
.. versionadded:: 2015.5.0
WARNING: Eauth is **NOT** respected
.. code-block:: python
client.cmd_sync({
'tgt': 'silver',
'fun': 'test.ping',
'arg': (),
'tgt_type'='glob',
'kwarg'={}
})
{'silver': {'fun_args': [], 'jid': '20141202152721523072', 'return': True, 'retcode': 0, 'success': True, 'fun': 'test.ping', 'id': 'silver'}}
### Response:
def cmd_sync(self, low):
'''
Execute a salt-ssh call synchronously.
.. versionadded:: 2015.5.0
WARNING: Eauth is **NOT** respected
.. code-block:: python
client.cmd_sync({
'tgt': 'silver',
'fun': 'test.ping',
'arg': (),
'tgt_type'='glob',
'kwarg'={}
})
{'silver': {'fun_args': [], 'jid': '20141202152721523072', 'return': True, 'retcode': 0, 'success': True, 'fun': 'test.ping', 'id': 'silver'}}
'''
kwargs = copy.deepcopy(low)
for ignore in ['tgt', 'fun', 'arg', 'timeout', 'tgt_type', 'kwarg']:
if ignore in kwargs:
del kwargs[ignore]
return self.cmd(low['tgt'],
low['fun'],
low.get('arg', []),
low.get('timeout'),
low.get('tgt_type'),
low.get('kwarg'),
**kwargs) |
def verifications(self):
"""
Access the verifications
:returns: twilio.rest.preview.acc_security.service.verification.VerificationList
:rtype: twilio.rest.preview.acc_security.service.verification.VerificationList
"""
if self._verifications is None:
self._verifications = VerificationList(self._version, service_sid=self._solution['sid'], )
return self._verifications | Access the verifications
:returns: twilio.rest.preview.acc_security.service.verification.VerificationList
:rtype: twilio.rest.preview.acc_security.service.verification.VerificationList | Below is the the instruction that describes the task:
### Input:
Access the verifications
:returns: twilio.rest.preview.acc_security.service.verification.VerificationList
:rtype: twilio.rest.preview.acc_security.service.verification.VerificationList
### Response:
def verifications(self):
"""
Access the verifications
:returns: twilio.rest.preview.acc_security.service.verification.VerificationList
:rtype: twilio.rest.preview.acc_security.service.verification.VerificationList
"""
if self._verifications is None:
self._verifications = VerificationList(self._version, service_sid=self._solution['sid'], )
return self._verifications |
def usergroup_get(name=None, usrgrpids=None, userids=None, **kwargs):
'''
.. versionadded:: 2016.3.0
Retrieve user groups according to the given parameters
.. note::
This function accepts all usergroup_get properties: keyword argument
names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/usergroup/get
:param name: names of the user groups
:param usrgrpids: return only user groups with the given IDs
:param userids: return only user groups that contain the given users
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: Array with convenient user groups details, False if no user group found or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usergroup_get Guests
'''
conn_args = _login(**kwargs)
zabbix_version = apiinfo_version(**kwargs)
ret = {}
try:
if conn_args:
method = 'usergroup.get'
# Versions above 2.4 allow retrieving user group permissions
if _LooseVersion(zabbix_version) > _LooseVersion("2.5"):
params = {"selectRights": "extend", "output": "extend", "filter": {}}
else:
params = {"output": "extend", "filter": {}}
if not name and not usrgrpids and not userids:
return False
if name:
params['filter'].setdefault('name', name)
if usrgrpids:
params.setdefault('usrgrpids', usrgrpids)
if userids:
params.setdefault('userids', userids)
params = _params_extend(params, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return False if not ret['result'] else ret['result']
else:
raise KeyError
except KeyError:
return ret | .. versionadded:: 2016.3.0
Retrieve user groups according to the given parameters
.. note::
This function accepts all usergroup_get properties: keyword argument
names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/usergroup/get
:param name: names of the user groups
:param usrgrpids: return only user groups with the given IDs
:param userids: return only user groups that contain the given users
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: Array with convenient user groups details, False if no user group found or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usergroup_get Guests | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2016.3.0
Retrieve user groups according to the given parameters
.. note::
This function accepts all usergroup_get properties: keyword argument
names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/usergroup/get
:param name: names of the user groups
:param usrgrpids: return only user groups with the given IDs
:param userids: return only user groups that contain the given users
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: Array with convenient user groups details, False if no user group found or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usergroup_get Guests
### Response:
def usergroup_get(name=None, usrgrpids=None, userids=None, **kwargs):
'''
.. versionadded:: 2016.3.0
Retrieve user groups according to the given parameters
.. note::
This function accepts all usergroup_get properties: keyword argument
names differ depending on your zabbix version, see here__.
.. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/usergroup/get
:param name: names of the user groups
:param usrgrpids: return only user groups with the given IDs
:param userids: return only user groups that contain the given users
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: Array with convenient user groups details, False if no user group found or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usergroup_get Guests
'''
conn_args = _login(**kwargs)
zabbix_version = apiinfo_version(**kwargs)
ret = {}
try:
if conn_args:
method = 'usergroup.get'
# Versions above 2.4 allow retrieving user group permissions
if _LooseVersion(zabbix_version) > _LooseVersion("2.5"):
params = {"selectRights": "extend", "output": "extend", "filter": {}}
else:
params = {"output": "extend", "filter": {}}
if not name and not usrgrpids and not userids:
return False
if name:
params['filter'].setdefault('name', name)
if usrgrpids:
params.setdefault('usrgrpids', usrgrpids)
if userids:
params.setdefault('userids', userids)
params = _params_extend(params, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return False if not ret['result'] else ret['result']
else:
raise KeyError
except KeyError:
return ret |
def slice(self, start_time, end_time, strict=False):
'''
Slice every annotation contained in the annotation array using
`Annotation.slice`
and return as a new AnnotationArray
See `Annotation.slice` for details about slicing. This function does
not modify the annotations in the original annotation array.
Parameters
----------
start_time : float
The desired start time for slicing in seconds.
end_time
The desired end time for slicing in seconds. Must be greater than
``start_time``.
strict : bool
When ``False`` (default) observations that lie at the boundaries of
the slicing range (see `Annotation.slice` for details) will have
their time and/or duration adjusted such that only the part of the
observation that lies within the trim range is kept. When ``True``
such observations are discarded and not included in the sliced
annotation.
Returns
-------
sliced_array : AnnotationArray
An annotation array where every annotation has been sliced.
'''
sliced_array = AnnotationArray()
for ann in self:
sliced_array.append(ann.slice(start_time, end_time, strict=strict))
return sliced_array | Slice every annotation contained in the annotation array using
`Annotation.slice`
and return as a new AnnotationArray
See `Annotation.slice` for details about slicing. This function does
not modify the annotations in the original annotation array.
Parameters
----------
start_time : float
The desired start time for slicing in seconds.
end_time
The desired end time for slicing in seconds. Must be greater than
``start_time``.
strict : bool
When ``False`` (default) observations that lie at the boundaries of
the slicing range (see `Annotation.slice` for details) will have
their time and/or duration adjusted such that only the part of the
observation that lies within the trim range is kept. When ``True``
such observations are discarded and not included in the sliced
annotation.
Returns
-------
sliced_array : AnnotationArray
An annotation array where every annotation has been sliced. | Below is the the instruction that describes the task:
### Input:
Slice every annotation contained in the annotation array using
`Annotation.slice`
and return as a new AnnotationArray
See `Annotation.slice` for details about slicing. This function does
not modify the annotations in the original annotation array.
Parameters
----------
start_time : float
The desired start time for slicing in seconds.
end_time
The desired end time for slicing in seconds. Must be greater than
``start_time``.
strict : bool
When ``False`` (default) observations that lie at the boundaries of
the slicing range (see `Annotation.slice` for details) will have
their time and/or duration adjusted such that only the part of the
observation that lies within the trim range is kept. When ``True``
such observations are discarded and not included in the sliced
annotation.
Returns
-------
sliced_array : AnnotationArray
An annotation array where every annotation has been sliced.
### Response:
def slice(self, start_time, end_time, strict=False):
'''
Slice every annotation contained in the annotation array using
`Annotation.slice`
and return as a new AnnotationArray
See `Annotation.slice` for details about slicing. This function does
not modify the annotations in the original annotation array.
Parameters
----------
start_time : float
The desired start time for slicing in seconds.
end_time
The desired end time for slicing in seconds. Must be greater than
``start_time``.
strict : bool
When ``False`` (default) observations that lie at the boundaries of
the slicing range (see `Annotation.slice` for details) will have
their time and/or duration adjusted such that only the part of the
observation that lies within the trim range is kept. When ``True``
such observations are discarded and not included in the sliced
annotation.
Returns
-------
sliced_array : AnnotationArray
An annotation array where every annotation has been sliced.
'''
sliced_array = AnnotationArray()
for ann in self:
sliced_array.append(ann.slice(start_time, end_time, strict=strict))
return sliced_array |
def get_selected_python_frame(cls):
'''Try to obtain the Frame for the python code in the selected frame,
or None'''
frame = cls.get_selected_frame()
while frame:
if frame.is_evalframeex():
return frame
frame = frame.older()
# Not found:
return None | Try to obtain the Frame for the python code in the selected frame,
or None | Below is the the instruction that describes the task:
### Input:
Try to obtain the Frame for the python code in the selected frame,
or None
### Response:
def get_selected_python_frame(cls):
'''Try to obtain the Frame for the python code in the selected frame,
or None'''
frame = cls.get_selected_frame()
while frame:
if frame.is_evalframeex():
return frame
frame = frame.older()
# Not found:
return None |
def _filter_hovered_items(self, items, event):
"""Filters out items that cannot be hovered
:param list items: Sorted list of items beneath the cursor
:param Gtk.Event event: Motion event
:return: filtered items
:rtype: list
"""
items = self._filter_library_state(items)
if not items:
return items
top_most_item = items[0]
second_top_most_item = items[1] if len(items) > 1 else None
# States/Names take precedence over connections if the connections are on the same hierarchy and if there is
# a port beneath the cursor
first_state_v = next(filter(lambda item: isinstance(item, (NameView, StateView)), items))
first_state_v = first_state_v.parent if isinstance(first_state_v, NameView) else first_state_v
if first_state_v:
# There can be several connections above the state/name skip those and find the first non-connection-item
for item in items:
if isinstance(item, ConnectionView):
# connection is on the same hierarchy level as the state/name, thus we dismiss it
if self.view.canvas.get_parent(top_most_item) is not first_state_v:
continue
break
# Connections are only dismissed, if there is a port beneath the cursor. Search for ports here:
port_beneath_cursor = False
state_ports = first_state_v.get_all_ports()
position = self.view.get_matrix_v2i(first_state_v).transform_point(event.x, event.y)
i2v_matrix = self.view.get_matrix_i2v(first_state_v)
for port_v in state_ports:
item_distance = port_v.port.glue(position)[1]
view_distance = i2v_matrix.transform_distance(item_distance, 0)[0]
if view_distance == 0:
port_beneath_cursor = True
break
if port_beneath_cursor:
items = self.dismiss_upper_items(items, item)
top_most_item = items[0]
second_top_most_item = items[1] if len(items) > 1 else None
# NameView can only be hovered if it or its parent state is selected
if isinstance(top_most_item, NameView):
state_v = second_top_most_item # second item in the list must be the parent state of the NameView
if state_v not in self.view.selected_items and top_most_item not in self.view.selected_items:
items = items[1:]
return items | Filters out items that cannot be hovered
:param list items: Sorted list of items beneath the cursor
:param Gtk.Event event: Motion event
:return: filtered items
:rtype: list | Below is the the instruction that describes the task:
### Input:
Filters out items that cannot be hovered
:param list items: Sorted list of items beneath the cursor
:param Gtk.Event event: Motion event
:return: filtered items
:rtype: list
### Response:
def _filter_hovered_items(self, items, event):
"""Filters out items that cannot be hovered
:param list items: Sorted list of items beneath the cursor
:param Gtk.Event event: Motion event
:return: filtered items
:rtype: list
"""
items = self._filter_library_state(items)
if not items:
return items
top_most_item = items[0]
second_top_most_item = items[1] if len(items) > 1 else None
# States/Names take precedence over connections if the connections are on the same hierarchy and if there is
# a port beneath the cursor
first_state_v = next(filter(lambda item: isinstance(item, (NameView, StateView)), items))
first_state_v = first_state_v.parent if isinstance(first_state_v, NameView) else first_state_v
if first_state_v:
# There can be several connections above the state/name skip those and find the first non-connection-item
for item in items:
if isinstance(item, ConnectionView):
# connection is on the same hierarchy level as the state/name, thus we dismiss it
if self.view.canvas.get_parent(top_most_item) is not first_state_v:
continue
break
# Connections are only dismissed, if there is a port beneath the cursor. Search for ports here:
port_beneath_cursor = False
state_ports = first_state_v.get_all_ports()
position = self.view.get_matrix_v2i(first_state_v).transform_point(event.x, event.y)
i2v_matrix = self.view.get_matrix_i2v(first_state_v)
for port_v in state_ports:
item_distance = port_v.port.glue(position)[1]
view_distance = i2v_matrix.transform_distance(item_distance, 0)[0]
if view_distance == 0:
port_beneath_cursor = True
break
if port_beneath_cursor:
items = self.dismiss_upper_items(items, item)
top_most_item = items[0]
second_top_most_item = items[1] if len(items) > 1 else None
# NameView can only be hovered if it or its parent state is selected
if isinstance(top_most_item, NameView):
state_v = second_top_most_item # second item in the list must be the parent state of the NameView
if state_v not in self.view.selected_items and top_most_item not in self.view.selected_items:
items = items[1:]
return items |
def as_cache_key(self, ireq):
"""Given a requirement, return its cache key.
This behavior is a little weird in order to allow backwards
compatibility with cache files. For a requirement without extras, this
will return, for example::
("ipython", "2.1.0")
For a requirement with extras, the extras will be comma-separated and
appended to the version, inside brackets, like so::
("ipython", "2.1.0[nbconvert,notebook]")
"""
extras = tuple(sorted(ireq.extras))
if not extras:
extras_string = ""
else:
extras_string = "[{}]".format(",".join(extras))
name = key_from_req(ireq.req)
version = get_pinned_version(ireq)
return name, "{}{}".format(version, extras_string) | Given a requirement, return its cache key.
This behavior is a little weird in order to allow backwards
compatibility with cache files. For a requirement without extras, this
will return, for example::
("ipython", "2.1.0")
For a requirement with extras, the extras will be comma-separated and
appended to the version, inside brackets, like so::
("ipython", "2.1.0[nbconvert,notebook]") | Below is the the instruction that describes the task:
### Input:
Given a requirement, return its cache key.
This behavior is a little weird in order to allow backwards
compatibility with cache files. For a requirement without extras, this
will return, for example::
("ipython", "2.1.0")
For a requirement with extras, the extras will be comma-separated and
appended to the version, inside brackets, like so::
("ipython", "2.1.0[nbconvert,notebook]")
### Response:
def as_cache_key(self, ireq):
"""Given a requirement, return its cache key.
This behavior is a little weird in order to allow backwards
compatibility with cache files. For a requirement without extras, this
will return, for example::
("ipython", "2.1.0")
For a requirement with extras, the extras will be comma-separated and
appended to the version, inside brackets, like so::
("ipython", "2.1.0[nbconvert,notebook]")
"""
extras = tuple(sorted(ireq.extras))
if not extras:
extras_string = ""
else:
extras_string = "[{}]".format(",".join(extras))
name = key_from_req(ireq.req)
version = get_pinned_version(ireq)
return name, "{}{}".format(version, extras_string) |
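The resulting keys are plain (name, version-plus-extras) tuples. A self-contained sketch of the same formatting rule, without the pip-specific helpers key_from_req and get_pinned_version:

# Illustrative reimplementation of the key format described in the docstring.
def cache_key(name, version, extras=()):
    extras_string = "[{}]".format(",".join(sorted(extras))) if extras else ""
    return name, "{}{}".format(version, extras_string)

print(cache_key("ipython", "2.1.0"))
# ('ipython', '2.1.0')
print(cache_key("ipython", "2.1.0", extras=("notebook", "nbconvert")))
# ('ipython', '2.1.0[nbconvert,notebook]')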
def active_url(context, urls, css=None):
"""
Highlight menu item based on url tag.
Returns a css class if ``request.path`` is in given ``url``.
:param url:
Django url to be reversed.
:param css:
Css class to be returned for highlighting. Return active if none set.
"""
request = context['request']
if request.get_full_path in (reverse(url) for url in urls.split()):
return css if css else 'active'
return '' | Highlight menu item based on url tag.
Returns a css class if ``request.path`` is in given ``url``.
:param url:
Django url to be reversed.
:param css:
Css class to be returned for highlighting. Return active if none set. | Below is the the instruction that describes the task:
### Input:
Highlight menu item based on url tag.
Returns a css class if ``request.path`` is in given ``url``.
:param url:
Django url to be reversed.
:param css:
Css class to be returned for highlighting. Return active if none set.
### Response:
def active_url(context, urls, css=None):
"""
Highlight menu item based on url tag.
Returns a css class if ``request.path`` is in given ``url``.
:param url:
Django url to be reversed.
:param css:
Css class to be returned for highlighting. Return active if none set.
"""
request = context['request']
if request.get_full_path in (reverse(url) for url in urls.split()):
return css if css else 'active'
return '' |
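Since the tag takes the template context as its first argument, it would normally be registered with takes_context=True; the registration and the url names below are assumptions for illustration, not taken from the source:

# Illustrative wiring; only active_url itself comes from the entry above.
from django import template

register = template.Library()
register.simple_tag(takes_context=True)(active_url)

# Template usage (hypothetical url names 'home' and 'about'):
#   <li class="{% active_url 'home about' 'current' %}">...</li>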