def as_dfile(self):
"""
Return the dataset representation in BMDS .(d) file.
"""
rows = ["Dose Response"]
for dose, response in zip(self.individual_doses, self.responses):
dose_idx = self.doses.index(dose)
if dose_idx >= self.num_dose_groups:
continue
rows.append("%f %f" % (dose, response))
return "\n".join(rows) | Return the dataset representation in BMDS .(d) file. | Below is the the instruction that describes the task:
### Input:
Return the dataset representation in BMDS .(d) file.
### Response:
def as_dfile(self):
"""
Return the dataset representation in BMDS .(d) file.
"""
rows = ["Dose Response"]
for dose, response in zip(self.individual_doses, self.responses):
dose_idx = self.doses.index(dose)
if dose_idx >= self.num_dose_groups:
continue
rows.append("%f %f" % (dose, response))
return "\n".join(rows) |
def _process_extensions(exts):
"""
Generate the `Extensions String` from an iterable of data forms.
:param exts: The data forms to generate the extensions string from.
:type exts: :class:`~collections.abc.Iterable` of
:class:`~.forms.xso.Data`
:return: The `Extensions String`
:rtype: :class:`bytes`
Generate the `Extensions String` from the given `exts` as specified
in :xep:`390`.
"""
parts = [
_process_form(form)
for form in exts
]
parts.sort()
return b"".join(parts)+b"\x1c" | Generate the `Extensions String` from an iterable of data forms.
:param exts: The data forms to generate the extensions string from.
:type exts: :class:`~collections.abc.Iterable` of
:class:`~.forms.xso.Data`
:return: The `Extensions String`
:rtype: :class:`bytes`
Generate the `Extensions String` from the given `exts` as specified
in :xep:`390`. | Below is the the instruction that describes the task:
### Input:
Generate the `Extensions String` from an iterable of data forms.
:param exts: The data forms to generate the extensions string from.
:type exts: :class:`~collections.abc.Iterable` of
:class:`~.forms.xso.Data`
:return: The `Extensions String`
:rtype: :class:`bytes`
Generate the `Extensions String` from the given `exts` as specified
in :xep:`390`.
### Response:
def _process_extensions(exts):
"""
Generate the `Extensions String` from an iterable of data forms.
:param exts: The data forms to generate the extensions string from.
:type exts: :class:`~collections.abc.Iterable` of
:class:`~.forms.xso.Data`
:return: The `Extensions String`
:rtype: :class:`bytes`
Generate the `Extensions String` from the given `exts` as specified
in :xep:`390`.
"""
parts = [
_process_form(form)
for form in exts
]
parts.sort()
return b"".join(parts)+b"\x1c" |
def image(self):
"""
Returns a PngImageFile instance of the chart
You must have PIL installed for this to work
"""
try:
try:
import Image
except ImportError:
from PIL import Image
except ImportError:
raise ImportError('You must install PIL to fetch image objects')
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
        return Image.open(StringIO(self.urlopen().read()))
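# Note (not part of the original source): the method above targets Python 2
# (cStringIO/StringIO). urlopen().read() returns bytes, so on Python 3 with
# Pillow the same logic would use io.BytesIO. A sketch, with a hypothetical
# name and `self.urlopen()` assumed to behave as above:
from io import BytesIO

from PIL import Image as PILImage


def image_py3(self):
    """Python 3 sketch of image() above."""
    return PILImage.open(BytesIO(self.urlopen().read()))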
def operator(self, text):
"""Push an operator onto the token queue."""
cls = self.OPERATORS[text]
        self.push_token(cls(text, self.lineno, self.offset))
def exists_locally(cls, path):
"""Returns whether the resource exists locally, at `resource.path`."""
# If INFO file doesn't exist, consider resource does NOT exist, as it would
# prevent guessing the `extract_method`.
return (tf.io.gfile.exists(path) and
            tf.io.gfile.exists(_get_info_path(path)))
def validate_scenario(blocks, scenario_directory):
"""Function to validate input layer stored in scenario file.
Check whether the files that are used in scenario file need to be
updated or not.
:param blocks: dictionary from read_scenarios
:type blocks: dictionary
:param scenario_directory: directory where scenario text file is saved
:type scenario_directory: file directory
:return: pass message to dialog and log detailed status
"""
    # dictionary to temporarily contain status messages
blocks_update = {}
for section, section_item in list(blocks.items()):
ready = True
for item in section_item:
if item in ['hazard', 'exposure', 'aggregation']:
# get relative path
rel_path = section_item[item]
full_path = os.path.join(scenario_directory, rel_path)
filepath = os.path.normpath(full_path)
if not os.path.exists(filepath):
blocks_update[section] = {
'status': 'Please update scenario'}
LOGGER.info(section + ' needs to be updated')
LOGGER.info('Unable to find ' + filepath)
ready = False
if ready:
blocks_update[section] = {'status': 'Scenario ready'}
# LOGGER.info(section + " scenario is ready")
for section, section_item in list(blocks_update.items()):
        blocks[section]['status'] = blocks_update[section]['status']
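# Illustration (not part of the original source): the shape of `blocks`
# implied by the code above -- each section maps layer keywords to paths
# relative to the scenario directory. Section name and paths are hypothetical,
# and the call assumes the original module's `os` and `LOGGER` are in scope.
_example_blocks = {
    'flood_scenario': {
        'hazard': 'hazard/flood.tif',
        'exposure': 'exposure/buildings.shp',
    },
}
validate_scenario(_example_blocks, '/path/to/scenarios')
# _example_blocks['flood_scenario']['status'] is now either 'Scenario ready'
# or 'Please update scenario', depending on whether the files exist.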
def _detect(self):
""" Detect the constant function changing the state
Recursively visit the calls
Returns:
            list: {'vuln', 'filename', 'contract', 'func', '#varsWritten'}
"""
results = []
for c in self.contracts:
for f in c.functions:
if f.contract != c:
continue
if f.view or f.pure:
if f.contains_assembly:
attr = 'view' if f.view else 'pure'
info = '{}.{} ({}) is declared {} but contains assembly code\n'
info = info.format(f.contract.name, f.name, f.source_mapping_str, attr)
json = self.generate_json_result(info)
self.add_function_to_json(f, json)
json['elements'].append({'type': 'info',
'contains_assembly' : True})
results.append(json)
variables_written = f.all_state_variables_written()
if variables_written:
attr = 'view' if f.view else 'pure'
info = '{}.{} ({}) is declared {} but changes state variables:\n'
info = info.format(f.contract.name, f.name, f.source_mapping_str, attr)
for variable_written in variables_written:
info += '\t- {}.{}\n'.format(variable_written.contract.name,
variable_written.name)
json = self.generate_json_result(info)
self.add_function_to_json(f, json)
self.add_variables_to_json(variables_written, json)
json['elements'].append({'type': 'info',
'contains_assembly' : False})
results.append(json)
        return results
def check_params_types(self, method):
'''Types in argument annotations must be instances, not classes.'''
mn = method.__name__
annos = dict(method.__annotations__)
errors = []
# Take a look at the syntax
msg_tuple = 'Parameter {} in method {} is not annotated with a tuple.'
msg_ptype = 'Parameter {} in method {} is not a valid Ptype.'
msg_mod = 'Type for param {} in method {} must descend from Model.'
msg_cls = 'Type for param {} in method {} must be instance (not class)'
bodies = []
for pname, anno in annos.items():
if pname == 'return':
continue
elif len(anno) != 2:
errors.append(msg_tuple.format(pname, mn))
else:
param_type, value_type = anno
if param_type not in Ptypes:
errors.append(msg_ptype.format(pname, mn))
elif param_type == 'body':
bodies.append(pname)
elif param_type == 'path':
default = method.signature.parameters[pname].default
if default is not inspect._empty:
                        msg = ('Path parameter {} in method {} has a default '
                               'value ({}) that would make it optional (which '
                               'is wrong!)')
errors.append(msg.format(pname, mn, default))
if hasattr(value_type, '__bases__'):
errors.append(msg_cls.format(pname, mn))
elif Model not in value_type.__class__.__bases__:
errors.append(msg_mod.format(pname, mn))
# Only one body parameter!
if len(bodies) > 1:
msg = 'Too many "Ptypes.body" params {} for method {} (max=1).'
errors.append(msg.format(bodies, mn))
        return errors
def calc_mass(nu_max, delta_nu, teff):
""" asteroseismic scaling relations """
NU_MAX = 3140.0 # microHz
DELTA_NU = 135.03 # microHz
TEFF = 5777.0
    return (nu_max/NU_MAX)**3 * (delta_nu/DELTA_NU)**(-4) * (teff/TEFF)**1.5
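# Sanity check (not part of the original source): the relation is a ratio to
# the reference values above (TEFF = 5777 K is solar), so plugging those
# references back in returns a mass of exactly 1, presumably in solar units.
assert calc_mass(3140.0, 135.03, 5777.0) == 1.0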
def gen_opt_str(ser_rec: pd.Series)->str:
'''generate rst option string
Parameters
----------
ser_rec : pd.Series
record for specifications
Returns
-------
str
rst string
'''
name = ser_rec.name
indent = r' '
str_opt = f'.. option:: {name}'+'\n\n'
for spec in ser_rec.sort_index().index:
str_opt += indent+f':{spec}:'+'\n'
spec_content = ser_rec[spec]
str_opt += indent+indent+f'{spec_content}'+'\n'
    return str_opt
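# Illustration (not part of the original source): a hypothetical record and
# the kind of reST it produces (exact layout depends on `indent` above).
import pandas as pd

_example_rec = pd.Series({'Description': 'Surface albedo', 'Default': '0.1'},
                         name='alb')
print(gen_opt_str(_example_rec))
# Produces a ".. option:: alb" directive followed by indented
# ":Default:" / ":Description:" field lines with their values,
# sorted alphabetically by spec name.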
def map_column(self, value):
"""
Applied to each column of every row returned by `rows`.
Default behaviour is to escape special characters and identify any self.null_values.
"""
if value in self.null_values:
return r'\\N'
else:
            return default_escape(six.text_type(value))
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=4096, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported ``from matplotlib import pyplot``.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
figure : matplotlib.figure.Figure (optional).
Matplotlib to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
maximum image size in any dimension.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
data = data[(slice(0, maxdim), ) * len(data.shape)]
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if (isrgb and data.shape[-3] in (3, 4)):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif (not isrgb and data.shape[-1] in (3, 4)):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if photometric == 'palette':
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not isrgb or bitspersample is None:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind != 'f':
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
pyplot.title(title, size=11)
if cmap is None:
if photometric == 'miniswhite':
cmap = 'gray_r' if vmin == 0 else 'coolwarm_r'
else:
cmap = 'gray' if vmin == 0 else 'coolwarm'
image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0, ) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
    return figure, subplot, image
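# Usage sketch (not part of the original source): display a random RGB array.
# Assumes the module-level imports of the original file (numpy, math, sys,
# MaxNLocator, ScalarFormatter) and that pyplot has already been imported,
# as the docstring requires.
import numpy
from matplotlib import pyplot

_rgb = (numpy.random.rand(64, 64, 3) * 255).astype('uint8')
imshow(_rgb, title='random RGB', photometric='rgb', bitspersample=8)
pyplot.show()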
def merge_programs(prog_list):
"""
Merges a list of pyQuil programs into a single one by appending them in sequence.
If multiple programs in the list contain the same gate and/or noisy gate definition
with identical name, this definition will only be applied once. If different definitions
with the same name appear multiple times in the program list, each will be applied once
in the order of last occurrence.
:param list prog_list: A list of pyquil programs
:return: a single pyQuil program
:rtype: Program
"""
definitions = [gate for prog in prog_list for gate in Program(prog).defined_gates]
seen = {}
# Collect definitions in reverse order and reapply definitions in reverse
# collected order to ensure that the last occurrence of a definition is applied last.
for definition in reversed(definitions):
name = definition.name
if name in seen.keys():
# Do not add truly identical definitions with the same name
            # If two different definitions share a name, we include each definition so as to provide
            # a warning to the user when the contradictory defgate is called.
if definition not in seen[name]:
seen[name].append(definition)
else:
seen[name] = [definition]
new_definitions = [gate for key in seen.keys() for gate in reversed(seen[key])]
p = sum([Program(prog).instructions for prog in prog_list], Program()) # Combine programs without gate definitions
for definition in new_definitions:
p.defgate(definition.name, definition.matrix, definition.parameters)
    return p
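# Usage sketch (not part of the original source): merging two small pyQuil
# programs; X and CNOT are standard gates from pyquil.gates.
from pyquil import Program
from pyquil.gates import CNOT, X

_merged = merge_programs([Program(X(0)), Program(CNOT(0, 1))])
# _merged now holds X 0 followed by CNOT 0 1, with any shared gate
# definitions applied only once, as described in the docstring.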
def _bracket_complete_sig(self, symbol, fullsymbol):
"""Returns the call signature and docstring for the executable
immediately preceding a bracket '(' that was typed."""
if symbol != fullsymbol:
#We have a sym%sym%... chain and the completion just needs to
#be the signature of the member method.
target, targmod = self._get_chain_parent_symbol(symbol, fullsymbol)
if symbol in target.executables:
child = target.executables[symbol]
return self._compile_signature(child.target, child.name)
elif symbol in target.members:
#We are dealing with a dimension request on an array that
#is a member of the type.
child = target.members[symbol]
return self._bracket_dim_suggest(child)
else:
return {}
else:
#We must be dealing with a regular executable or builtin fxn
#or a regular variable dimension.
iexec = self._bracket_exact_exec(symbol)
if iexec is not None:
#It is indeed a function we are completing for.
return self._compile_signature(iexec, iexec.name)
else:
#We need to look at local and global variables to find the
#variable declaration and dimensionality.
ivar = self._bracket_exact_var(symbol)
                return self._bracket_dim_suggest(ivar)
def event_return(events):
'''
Return event to one of potentially many clustered cassandra nodes
Requires that configuration be enabled via 'event_return'
option in master config.
Cassandra does not support an auto-increment feature due to the
highly inefficient nature of creating a monotonically increasing
number across all nodes in a distributed database. Each event
will be assigned a uuid by the connecting client.
'''
for event in events:
tag = event.get('tag', '')
data = event.get('data', '')
query = '''INSERT INTO {keyspace}.salt_events (
id, alter_time, data, master_id, tag
) VALUES (
?, ?, ?, ?, ?)
'''.format(keyspace=_get_keyspace())
statement_arguments = [six.text_type(uuid.uuid1()),
int(time.time() * 1000),
salt.utils.json.dumps(data).replace("'", "''"),
__opts__['id'],
tag]
# cassandra_cql.cql_query may raise a CommandExecutionError
try:
__salt__['cassandra_cql.cql_query_with_prepare'](query, 'salt_events',
statement_arguments,
asynchronous=True)
except CommandExecutionError:
log.critical('Could not store events with Cassandra returner.')
raise
except Exception as e:
log.critical(
'Unexpected error while inserting into salt_events: %s', e)
            raise
def corner(xs, bins=20, range=None, weights=None, color="k",
smooth=None, smooth1d=None,
labels=None, label_kwargs=None,
show_titles=False, title_fmt=".2f", title_kwargs=None,
truths=None, truth_color="#4682b4",
scale_hist=False, quantiles=None, verbose=False, fig=None,
max_n_ticks=5, top_ticks=False, use_math_text=False,
hist_kwargs=None, **hist2d_kwargs):
"""
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
xs : array_like (nsamples, ndim)
The samples. This should be a 1- or 2-dimensional array. For a 1-D
array this results in a simple histogram. For a 2-D array, the zeroth
axis is the list of samples and the next axis are the dimensions of
the space.
bins : int or array_like (ndim,) (optional)
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
weights : array_like (nsamples,)
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str (optional)
A ``matplotlib`` style color for all histograms.
smooth, smooth1d : float (optional)
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,) (optional)
A list of names for the dimensions. If a ``xs`` is a
``pandas.DataFrame``, labels will default to column names.
label_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods.
show_titles : bool (optional)
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_fmt : string (optional)
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,) (optional)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
truths : iterable (ndim,) (optional)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str (optional)
        A ``matplotlib`` style color for the ``truths`` markers.
scale_hist : bool (optional)
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable (optional)
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool (optional)
If true, print the values of the computed quantiles.
plot_contours : bool (optional)
Draw contours for dense regions of the plot.
use_math_text : bool (optional)
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
max_n_ticks: int (optional)
Maximum number of ticks to try to use
top_ticks : bool (optional)
If true, label the top ticks of each axis
fig : matplotlib.Figure (optional)
Overplot onto the provided figure object.
hist_kwargs : dict (optional)
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs : (optional)
Any remaining keyword arguments are sent to `corner.hist2d` to generate
the 2-D histogram plots.
"""
if quantiles is None:
quantiles = []
if title_kwargs is None:
title_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
# Try filling in labels from pandas.DataFrame columns.
if labels is None:
try:
labels = xs.columns
except AttributeError:
pass
# Deal with 1D sample lists.
xs = np.atleast_1d(xs)
if len(xs.shape) == 1:
xs = np.atleast_2d(xs)
else:
assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
xs = xs.T
assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
"dimensions than samples!"
# Parse the weight array.
if weights is not None:
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("Weights must be 1-D")
if xs.shape[1] != weights.shape[0]:
raise ValueError("Lengths of weights must match number of samples")
# Parse the parameter ranges.
if range is None:
if "extents" in hist2d_kwargs:
logging.warn("Deprecated keyword argument 'extents'. "
"Use 'range' instead.")
range = hist2d_kwargs.pop("extents")
else:
range = [[x.min(), x.max()] for x in xs]
# Check for parameters that never change.
m = np.array([e[0] == e[1] for e in range], dtype=bool)
if np.any(m):
raise ValueError(("It looks like the parameter(s) in "
"column(s) {0} have no dynamic range. "
"Please provide a `range` argument.")
.format(", ".join(map(
"{0}".format, np.arange(len(m))[m]))))
else:
# If any of the extents are percentiles, convert them to ranges.
# Also make sure it's a normal list.
range = list(range)
for i, _ in enumerate(range):
try:
emin, emax = range[i]
except TypeError:
q = [0.5 - 0.5*range[i], 0.5 + 0.5*range[i]]
range[i] = quantile(xs[i], q, weights=weights)
if len(range) != xs.shape[0]:
raise ValueError("Dimension mismatch between samples and range")
# Parse the bin specifications.
try:
bins = [float(bins) for _ in range]
except TypeError:
if len(bins) != len(range):
raise ValueError("Dimension mismatch between bins and range")
# Some magic numbers for pretty axis layout.
K = len(xs)
factor = 2.0 # size of one side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 # w/hspace size
plotdim = factor * K + factor * (K - 1.) * whspace
dim = lbdim + plotdim + trdim
# Create a new figure if one wasn't provided.
if fig is None:
fig, axes = pl.subplots(K, K, figsize=(dim, dim))
else:
try:
axes = np.array(fig.axes).reshape((K, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"dimensions K={1}".format(len(fig.axes), K))
# Format the figure.
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
# Set up the default histogram keywords.
if hist_kwargs is None:
hist_kwargs = dict()
hist_kwargs["color"] = hist_kwargs.get("color", color)
if smooth1d is None:
hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")
for i, x in enumerate(xs):
# Deal with masked arrays.
if hasattr(x, "compressed"):
x = x.compressed()
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, i]
# Plot the histograms.
if smooth1d is None:
n, _, _ = ax.hist(x, bins=bins[i], weights=weights,
range=range[i], **hist_kwargs)
else:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
n, b = np.histogram(x, bins=bins[i], weights=weights,
range=range[i])
n = gaussian_filter(n, smooth1d)
x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
y0 = np.array(list(zip(n, n))).flatten()
ax.plot(x0, y0, **hist_kwargs)
if truths is not None and truths[i] is not None:
ax.axvline(truths[i], color=truth_color)
# Plot quantiles if wanted.
if len(quantiles) > 0:
qvalues = quantile(x, quantiles, weights=weights)
for q in qvalues:
ax.axvline(q, ls="dashed", color=color)
if verbose:
print("Quantiles:")
print([item for item in zip(quantiles, qvalues)])
if show_titles:
title = None
if title_fmt is not None:
# Compute the quantiles for the title. This might redo
# unneeded computation but who cares.
q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
weights=weights)
q_m, q_p = q_50-q_16, q_84-q_50
# Format the quantile display.
fmt = "{{0:{0}}}".format(title_fmt).format
title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))
# Add in the column name if it's given.
if labels is not None:
title = "{0} = {1}".format(labels[i], title)
elif labels is not None:
title = "{0}".format(labels[i])
if title is not None:
ax.set_title(title, **title_kwargs)
# Set up the axes.
ax.set_xlim(range[i])
if scale_hist:
maxn = np.max(n)
ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
else:
ax.set_ylim(0, 1.1 * np.max(n))
ax.set_yticklabels([])
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
if top_ticks:
ax.xaxis.set_ticks_position("top")
[l.set_rotation(45) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[i], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
for j, y in enumerate(xs):
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, j]
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue
elif j == i:
continue
# Deal with masked arrays.
if hasattr(y, "compressed"):
y = y.compressed()
hist2d(y, x, ax=ax, range=[range[j], range[i]], weights=weights,
color=color, smooth=smooth, bins=[bins[j], bins[i]],
**hist2d_kwargs)
if truths is not None:
if truths[i] is not None and truths[j] is not None:
ax.plot(truths[j], truths[i], "s", color=truth_color)
if truths[j] is not None:
ax.axvline(truths[j], color=truth_color)
if truths[i] is not None:
ax.axhline(truths[i], color=truth_color)
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[j], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[i], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
# use MathText for axes ticks
ax.yaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
return fig | Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
xs : array_like (nsamples, ndim)
The samples. This should be a 1- or 2-dimensional array. For a 1-D
array this results in a simple histogram. For a 2-D array, the zeroth
axis is the list of samples and the next axis are the dimensions of
the space.
bins : int or array_like (ndim,) (optional)
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
weights : array_like (nsamples,)
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str (optional)
A ``matplotlib`` style color for all histograms.
smooth, smooth1d : float (optional)
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,) (optional)
A list of names for the dimensions. If a ``xs`` is a
``pandas.DataFrame``, labels will default to column names.
label_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods.
show_titles : bool (optional)
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_fmt : string (optional)
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,) (optional)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
truths : iterable (ndim,) (optional)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str (optional)
A ``matplotlib`` style color for the ``truths`` makers.
scale_hist : bool (optional)
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable (optional)
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool (optional)
If true, print the values of the computed quantiles.
plot_contours : bool (optional)
Draw contours for dense regions of the plot.
use_math_text : bool (optional)
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
max_n_ticks: int (optional)
Maximum number of ticks to try to use
top_ticks : bool (optional)
If true, label the top ticks of each axis
fig : matplotlib.Figure (optional)
Overplot onto the provided figure object.
hist_kwargs : dict (optional)
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs : (optional)
Any remaining keyword arguments are sent to `corner.hist2d` to generate
the 2-D histogram plots. | Below is the the instruction that describes the task:
### Input:
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
xs : array_like (nsamples, ndim)
The samples. This should be a 1- or 2-dimensional array. For a 1-D
array this results in a simple histogram. For a 2-D array, the zeroth
axis is the list of samples and the next axis are the dimensions of
the space.
bins : int or array_like (ndim,) (optional)
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
weights : array_like (nsamples,)
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str (optional)
A ``matplotlib`` style color for all histograms.
smooth, smooth1d : float (optional)
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,) (optional)
A list of names for the dimensions. If a ``xs`` is a
``pandas.DataFrame``, labels will default to column names.
label_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods.
show_titles : bool (optional)
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_fmt : string (optional)
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,) (optional)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
truths : iterable (ndim,) (optional)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str (optional)
A ``matplotlib`` style color for the ``truths`` makers.
scale_hist : bool (optional)
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable (optional)
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool (optional)
If true, print the values of the computed quantiles.
plot_contours : bool (optional)
Draw contours for dense regions of the plot.
use_math_text : bool (optional)
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
max_n_ticks: int (optional)
Maximum number of ticks to try to use
top_ticks : bool (optional)
If true, label the top ticks of each axis
fig : matplotlib.Figure (optional)
Overplot onto the provided figure object.
hist_kwargs : dict (optional)
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs : (optional)
Any remaining keyword arguments are sent to `corner.hist2d` to generate
the 2-D histogram plots.
### Response:
def corner(xs, bins=20, range=None, weights=None, color="k",
smooth=None, smooth1d=None,
labels=None, label_kwargs=None,
show_titles=False, title_fmt=".2f", title_kwargs=None,
truths=None, truth_color="#4682b4",
scale_hist=False, quantiles=None, verbose=False, fig=None,
max_n_ticks=5, top_ticks=False, use_math_text=False,
hist_kwargs=None, **hist2d_kwargs):
"""
Make a *sick* corner plot showing the projections of a data set in a
multi-dimensional space. kwargs are passed to hist2d() or used for
`matplotlib` styling.
Parameters
----------
xs : array_like (nsamples, ndim)
The samples. This should be a 1- or 2-dimensional array. For a 1-D
array this results in a simple histogram. For a 2-D array, the zeroth
axis is the list of samples and the next axis are the dimensions of
the space.
bins : int or array_like (ndim,) (optional)
The number of bins to use in histograms, either as a fixed value for
all dimensions, or as a list of integers for each dimension.
weights : array_like (nsamples,)
The weight of each sample. If `None` (default), samples are given
equal weight.
color : str (optional)
A ``matplotlib`` style color for all histograms.
smooth, smooth1d : float (optional)
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
respectively. If `None` (default), no smoothing is applied.
labels : iterable (ndim,) (optional)
A list of names for the dimensions. If a ``xs`` is a
``pandas.DataFrame``, labels will default to column names.
label_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_xlabel` and
`set_ylabel` methods.
show_titles : bool (optional)
Displays a title above each 1-D histogram showing the 0.5 quantile
with the upper and lower errors supplied by the quantiles argument.
title_fmt : string (optional)
The format string for the quantiles given in titles. If you explicitly
set ``show_titles=True`` and ``title_fmt=None``, the labels will be
shown as the titles. (default: ``.2f``)
title_kwargs : dict (optional)
Any extra keyword arguments to send to the `set_title` command.
range : iterable (ndim,) (optional)
A list where each element is either a length 2 tuple containing
lower and upper bounds or a float in range (0., 1.)
giving the fraction of samples to include in bounds, e.g.,
[(0.,10.), (1.,5), 0.999, etc.].
If a fraction, the bounds are chosen to be equal-tailed.
truths : iterable (ndim,) (optional)
A list of reference values to indicate on the plots. Individual
values can be omitted by using ``None``.
truth_color : str (optional)
A ``matplotlib`` style color for the ``truths`` markers.
scale_hist : bool (optional)
Should the 1-D histograms be scaled in such a way that the zero line
is visible?
quantiles : iterable (optional)
A list of fractional quantiles to show on the 1-D histograms as
vertical dashed lines.
verbose : bool (optional)
If true, print the values of the computed quantiles.
plot_contours : bool (optional)
Draw contours for dense regions of the plot.
use_math_text : bool (optional)
If true, then axis tick labels for very large or small exponents will
be displayed as powers of 10 rather than using `e`.
max_n_ticks: int (optional)
Maximum number of ticks to try to use
top_ticks : bool (optional)
If true, label the top ticks of each axis
fig : matplotlib.Figure (optional)
Overplot onto the provided figure object.
hist_kwargs : dict (optional)
Any extra keyword arguments to send to the 1-D histogram plots.
**hist2d_kwargs : (optional)
Any remaining keyword arguments are sent to `corner.hist2d` to generate
the 2-D histogram plots.
"""
if quantiles is None:
quantiles = []
if title_kwargs is None:
title_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
# Try filling in labels from pandas.DataFrame columns.
if labels is None:
try:
labels = xs.columns
except AttributeError:
pass
# Deal with 1D sample lists.
xs = np.atleast_1d(xs)
if len(xs.shape) == 1:
xs = np.atleast_2d(xs)
else:
assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
xs = xs.T
assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
"dimensions than samples!"
# Parse the weight array.
if weights is not None:
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("Weights must be 1-D")
if xs.shape[1] != weights.shape[0]:
raise ValueError("Lengths of weights must match number of samples")
# Parse the parameter ranges.
if range is None:
if "extents" in hist2d_kwargs:
logging.warn("Deprecated keyword argument 'extents'. "
"Use 'range' instead.")
range = hist2d_kwargs.pop("extents")
else:
range = [[x.min(), x.max()] for x in xs]
# Check for parameters that never change.
m = np.array([e[0] == e[1] for e in range], dtype=bool)
if np.any(m):
raise ValueError(("It looks like the parameter(s) in "
"column(s) {0} have no dynamic range. "
"Please provide a `range` argument.")
.format(", ".join(map(
"{0}".format, np.arange(len(m))[m]))))
else:
# If any of the extents are percentiles, convert them to ranges.
# Also make sure it's a normal list.
range = list(range)
for i, _ in enumerate(range):
try:
emin, emax = range[i]
except TypeError:
q = [0.5 - 0.5*range[i], 0.5 + 0.5*range[i]]
range[i] = quantile(xs[i], q, weights=weights)
if len(range) != xs.shape[0]:
raise ValueError("Dimension mismatch between samples and range")
# Parse the bin specifications.
try:
bins = [float(bins) for _ in range]
except TypeError:
if len(bins) != len(range):
raise ValueError("Dimension mismatch between bins and range")
# Some magic numbers for pretty axis layout.
K = len(xs)
factor = 2.0 # size of one side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 # w/hspace size
plotdim = factor * K + factor * (K - 1.) * whspace
dim = lbdim + plotdim + trdim
# Create a new figure if one wasn't provided.
if fig is None:
fig, axes = pl.subplots(K, K, figsize=(dim, dim))
else:
try:
axes = np.array(fig.axes).reshape((K, K))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"dimensions K={1}".format(len(fig.axes), K))
# Format the figure.
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
# Set up the default histogram keywords.
if hist_kwargs is None:
hist_kwargs = dict()
hist_kwargs["color"] = hist_kwargs.get("color", color)
if smooth1d is None:
hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")
for i, x in enumerate(xs):
# Deal with masked arrays.
if hasattr(x, "compressed"):
x = x.compressed()
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, i]
# Plot the histograms.
if smooth1d is None:
n, _, _ = ax.hist(x, bins=bins[i], weights=weights,
range=range[i], **hist_kwargs)
else:
if gaussian_filter is None:
raise ImportError("Please install scipy for smoothing")
n, b = np.histogram(x, bins=bins[i], weights=weights,
range=range[i])
n = gaussian_filter(n, smooth1d)
x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
y0 = np.array(list(zip(n, n))).flatten()
ax.plot(x0, y0, **hist_kwargs)
if truths is not None and truths[i] is not None:
ax.axvline(truths[i], color=truth_color)
# Plot quantiles if wanted.
if len(quantiles) > 0:
qvalues = quantile(x, quantiles, weights=weights)
for q in qvalues:
ax.axvline(q, ls="dashed", color=color)
if verbose:
print("Quantiles:")
print([item for item in zip(quantiles, qvalues)])
if show_titles:
title = None
if title_fmt is not None:
# Compute the quantiles for the title. This might redo
# unneeded computation but who cares.
q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
weights=weights)
q_m, q_p = q_50-q_16, q_84-q_50
# Format the quantile display.
fmt = "{{0:{0}}}".format(title_fmt).format
title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))
# Add in the column name if it's given.
if labels is not None:
title = "{0} = {1}".format(labels[i], title)
elif labels is not None:
title = "{0}".format(labels[i])
if title is not None:
ax.set_title(title, **title_kwargs)
# Set up the axes.
ax.set_xlim(range[i])
if scale_hist:
maxn = np.max(n)
ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
else:
ax.set_ylim(0, 1.1 * np.max(n))
ax.set_yticklabels([])
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
if top_ticks:
ax.xaxis.set_ticks_position("top")
[l.set_rotation(45) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[i], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
for j, y in enumerate(xs):
if np.shape(xs)[0] == 1:
ax = axes
else:
ax = axes[i, j]
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue
elif j == i:
continue
# Deal with masked arrays.
if hasattr(y, "compressed"):
y = y.compressed()
hist2d(y, x, ax=ax, range=[range[j], range[i]], weights=weights,
color=color, smooth=smooth, bins=[bins[j], bins[i]],
**hist2d_kwargs)
if truths is not None:
if truths[i] is not None and truths[j] is not None:
ax.plot(truths[j], truths[i], "s", color=truth_color)
if truths[j] is not None:
ax.axvline(truths[j], color=truth_color)
if truths[i] is not None:
ax.axhline(truths[i], color=truth_color)
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
if i < K - 1:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
if labels is not None:
ax.set_xlabel(labels[j], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# use MathText for axes ticks
ax.xaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
if labels is not None:
ax.set_ylabel(labels[i], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
# use MathText for axes ticks
ax.yaxis.set_major_formatter(
ScalarFormatter(useMathText=use_math_text))
return fig |
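A minimal usage sketch for the corner() function above, assuming the package is importable as corner (the docstring itself refers to corner.hist2d); the samples and labels are made up for illustration:
import numpy as np
import corner  # assumed import name, matching the corner.hist2d reference above

# Fake 3-dimensional Gaussian samples, purely for illustration.
samples = np.random.randn(10000, 3)

fig = corner.corner(samples,
                    labels=["a", "b", "c"],
                    quantiles=[0.16, 0.5, 0.84],
                    show_titles=True)
fig.savefig("corner_example.png")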
def _add_meta(xs, sample=None, config=None):
"""Add top level information about the sample or flowcell to output.
Sorts outputs into sample names (sample input) and project (config input).
"""
out = []
for x in xs:
if not isinstance(x["path"], six.string_types) or not os.path.exists(x["path"]):
raise ValueError("Unexpected path for upload: %s" % x)
x["mtime"] = shared.get_file_timestamp(x["path"])
if sample:
sample_name = dd.get_sample_name(sample)
if "sample" not in x:
x["sample"] = sample_name
elif x["sample"] != sample_name:
x["run"] = sample_name
if config:
fc_name = config.get("fc_name") or "project"
fc_date = config.get("fc_date") or datetime.datetime.now().strftime("%Y-%m-%d")
x["run"] = "%s_%s" % (fc_date, fc_name)
out.append(x)
return out | Add top level information about the sample or flowcell to output.
Sorts outputs into sample names (sample input) and project (config input). | Below is the the instruction that describes the task:
### Input:
Add top level information about the sample or flowcell to output.
Sorts outputs into sample names (sample input) and project (config input).
### Response:
def _add_meta(xs, sample=None, config=None):
"""Add top level information about the sample or flowcell to output.
Sorts outputs into sample names (sample input) and project (config input).
"""
out = []
for x in xs:
if not isinstance(x["path"], six.string_types) or not os.path.exists(x["path"]):
raise ValueError("Unexpected path for upload: %s" % x)
x["mtime"] = shared.get_file_timestamp(x["path"])
if sample:
sample_name = dd.get_sample_name(sample)
if "sample" not in x:
x["sample"] = sample_name
elif x["sample"] != sample_name:
x["run"] = sample_name
if config:
fc_name = config.get("fc_name") or "project"
fc_date = config.get("fc_date") or datetime.datetime.now().strftime("%Y-%m-%d")
x["run"] = "%s_%s" % (fc_date, fc_name)
out.append(x)
return out |
def _rangeGen(data, std=1):
"""
Return reasonable min/max values to use given the data.
"""
dataStd = np.std(data)
if dataStd == 0:
dataStd = 1
minval = np.min(data) - std * dataStd
maxval = np.max(data) + std * dataStd
return minval, maxval | Return reasonable min/max values to use given the data. | Below is the the instruction that describes the task:
### Input:
Return reasonable min/max values to use given the data.
### Response:
def _rangeGen(data, std=1):
"""
Return reasonable min/max values to use given the data.
"""
dataStd = np.std(data)
if dataStd == 0:
dataStd = 1
minval = np.min(data) - std * dataStd
maxval = np.max(data) + std * dataStd
return minval, maxval |
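A quick illustration of the helper above on made-up data; it pads the data extremes by std standard deviations and falls back to a padding of 1 when the data are constant:
import numpy as np

data = np.array([2.0, 4.0, 6.0])
print(_rangeGen(data))                  # roughly (0.37, 7.63): min/max padded by one std (~1.63)
print(_rangeGen(np.array([5.0, 5.0])))  # (4.0, 6.0): zero std is replaced by 1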
def set_Name(self, Name, SaveName=None,
Include=defInclude,
ForceUpdate=False):
""" Set the Name of the instance, automatically updating the SaveName
The name should be a str without spaces or underscores (removed)
When the name is changed, if SaveName (i.e. the name used for saving)
was not user-defined, it is automatically updated
Parameters
----------
Name : str
Name of the instance, without ' ' or '_' (automatically removed)
SaveName : None / str
If provided, overrides the default name for saving (not recommended)
Include: list
Controls how the default SaveName is generated
Each element of the list is a key str indicating whether an element
should be present in the SaveName
"""
self._check_inputs(Name=Name, SaveName=SaveName, Include=Include)
self._Name = Name
self.set_SaveName(SaveName=SaveName, Include=Include,
ForceUpdate=ForceUpdate) | Set the Name of the instance, automatically updating the SaveName
The name should be a str without spaces or underscores (removed)
When the name is changed, if SaveName (i.e. the name used for saving)
was not user-defined, it is automatically updated
Parameters
----------
Name : str
Name of the instance, without ' ' or '_' (automatically removed)
SaveName : None / str
If provided, overrides the default name for saving (not recommended)
Include: list
Controls how the default SaveName is generated
Each element of the list is a key str indicating whether an element
should be present in the SaveName | Below is the the instruction that describes the task:
### Input:
Set the Name of the instance, automatically updating the SaveName
The name should be a str without spaces or underscores (removed)
When the name is changed, if SaveName (i.e. the name used for saving)
was not user-defined, it is automatically updated
Parameters
----------
Name : str
Name of the instance, without ' ' or '_' (automatically removed)
SaveName : None / str
If provided, overrides the default name for saving (not recommended)
Include: list
Controls how the default SaveName is generated
Each element of the list is a key str indicating whether an element
should be present in the SaveName
### Response:
def set_Name(self, Name, SaveName=None,
Include=defInclude,
ForceUpdate=False):
""" Set the Name of the instance, automatically updating the SaveName
The name should be a str without spaces or underscores (removed)
When the name is changed, if SaveName (i.e. the name used for saving)
was not user-defined, it is automatically updated
Parameters
----------
Name : str
Name of the instance, without ' ' or '_' (automatically removed)
SaveName : None / str
If provided, overrides the default name for saving (not recommended)
Include: list
Controls how the default SaveName is generated
Each element of the list is a key str indicating whether an element
should be present in the SaveName
"""
self._check_inputs(Name=Name, SaveName=SaveName, Include=Include)
self._Name = Name
self.set_SaveName(SaveName=SaveName, Include=Include,
ForceUpdate=ForceUpdate) |
def list_datacenters(kwargs=None, call=None):
'''
List all the data centers for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_datacenters my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_datacenters function must be called with '
'-f or --function.'
)
return {'Datacenters': salt.utils.vmware.list_datacenters(_get_si())} | List all the data centers for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_datacenters my-vmware-config | Below is the the instruction that describes the task:
### Input:
List all the data centers for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_datacenters my-vmware-config
### Response:
def list_datacenters(kwargs=None, call=None):
'''
List all the data centers for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_datacenters my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_datacenters function must be called with '
'-f or --function.'
)
return {'Datacenters': salt.utils.vmware.list_datacenters(_get_si())} |
def readVersion(self):
""" Read the document version.
::
<designspace format="3">
"""
ds = self.root.findall("[@format]")[0]
raw_format = ds.attrib['format']
try:
self.documentFormatVersion = int(raw_format)
except ValueError:
# as of fontTools >= 3.27 'format' is formatted as a float "4.0"
self.documentFormatVersion = float(raw_format) | Read the document version.
::
<designspace format="3"> | Below is the the instruction that describes the task:
### Input:
Read the document version.
::
<designspace format="3">
### Response:
def readVersion(self):
""" Read the document version.
::
<designspace format="3">
"""
ds = self.root.findall("[@format]")[0]
raw_format = ds.attrib['format']
try:
self.documentFormatVersion = int(raw_format)
except ValueError:
# as of fontTools >= 3.27 'format' is formatted as a float "4.0"
self.documentFormatVersion = float(raw_format) |
def send_contributor_email(self, contributor):
"""Send an EmailMessage object for a given contributor."""
ContributorReport(
contributor,
month=self.month,
year=self.year,
deadline=self._deadline,
start=self._start,
end=self._end
).send() | Send an EmailMessage object for a given contributor. | Below is the the instruction that describes the task:
### Input:
Send an EmailMessage object for a given contributor.
### Response:
def send_contributor_email(self, contributor):
"""Send an EmailMessage object for a given contributor."""
ContributorReport(
contributor,
month=self.month,
year=self.year,
deadline=self._deadline,
start=self._start,
end=self._end
).send() |
def make_json_error(error):
"""
Handle errors by logging and
"""
message = extract_error_message(error)
status_code = extract_status_code(error)
context = extract_context(error)
retryable = extract_retryable(error)
headers = extract_headers(error)
# Flask will not log user exception (fortunately), but will log an error
# for exceptions that escape out of the application entirely (e.g. if the
# error handler raises an error)
error_logger.debug("Handling {} error: {}".format(
status_code,
message,
))
# Serialize into JSON response
response_data = {
"code": status_code,
"context": context,
"message": message,
"retryable": retryable,
}
# Don't pass in the error schema because it will suppress any extra fields
return dump_response_data(None, response_data, status_code, headers) | Handle errors by logging and | Below is the the instruction that describes the task:
### Input:
Handle errors by logging and
### Response:
def make_json_error(error):
"""
Handle errors by logging and
"""
message = extract_error_message(error)
status_code = extract_status_code(error)
context = extract_context(error)
retryable = extract_retryable(error)
headers = extract_headers(error)
# Flask will not log user exception (fortunately), but will log an error
# for exceptions that escape out of the application entirely (e.g. if the
# error handler raises an error)
error_logger.debug("Handling {} error: {}".format(
status_code,
message,
))
# Serialize into JSON response
response_data = {
"code": status_code,
"context": context,
"message": message,
"retryable": retryable,
}
# Don't pass in the error schema because it will suppress any extra fields
return dump_response_data(None, response_data, status_code, headers) |
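A hedged sketch of how a handler like make_json_error is typically registered on a Flask app; the extract_* helpers and dump_response_data live in the surrounding module and are not reproduced here:
from flask import Flask

app = Flask(__name__)

# Send every uncaught exception through the JSON error formatter defined above.
app.register_error_handler(Exception, make_json_error)

@app.route("/boom")
def boom():
    raise ValueError("something went wrong")  # the client receives a JSON error body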
def apply_async(self, args=None, kwargs=None, **options):
"""
Checks whether task must be skipped and decreases the counter in that case.
"""
key = self._get_cache_key(args, kwargs)
counter, penalty = cache.get(key, (0, 0))
if not counter:
return super(PenalizedBackgroundTask, self).apply_async(args=args, kwargs=kwargs, **options)
cache.set(key, (counter - 1, penalty), self.CACHE_LIFETIME)
logger.info('The task %s will not be executed due to the penalty.' % self.name)
return self.AsyncResult(options.get('task_id') or str(uuid4())) | Checks whether task must be skipped and decreases the counter in that case. | Below is the the instruction that describes the task:
### Input:
Checks whether task must be skipped and decreases the counter in that case.
### Response:
def apply_async(self, args=None, kwargs=None, **options):
"""
Checks whether task must be skipped and decreases the counter in that case.
"""
key = self._get_cache_key(args, kwargs)
counter, penalty = cache.get(key, (0, 0))
if not counter:
return super(PenalizedBackgroundTask, self).apply_async(args=args, kwargs=kwargs, **options)
cache.set(key, (counter - 1, penalty), self.CACHE_LIFETIME)
logger.info('The task %s will not be executed due to the penalty.' % self.name)
return self.AsyncResult(options.get('task_id') or str(uuid4())) |
def fillPelicanHole(site, username, password, tstat_name, start_time, end_time):
"""Fill a hole in a Pelican thermostat's data stream.
Arguments:
site -- The thermostat's Pelican site name
username -- The Pelican username for the site
password -- The Pelican password for the site
tstat_name -- The name of the thermostat, as identified by Pelican
start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00"
end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00"
Returns:
A Pandas dataframe with historical Pelican data that falls between the
specified start and end times.
Note that this function assumes the Pelican thermostat's local time zone is
US/Pacific. It will properly handle PST vs. PDT.
"""
start = datetime.strptime(start_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)
end = datetime.strptime(end_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)
heat_needs_fan = _lookupHeatNeedsFan(site, username, password, tstat_name)
if heat_needs_fan is None:
return None
# Pelican's API only allows a query covering a time range of up to 1 month
# So we may need to run multiple requests for historical data
history_blocks = []
while start < end:
block_start = start
block_end = min(start + timedelta(days=30), end)
blocks = _lookupHistoricalData(site, username, password, tstat_name, block_start, block_end)
if blocks is None:
return None
history_blocks.extend(blocks)
start += timedelta(days=30, minutes=1)
output_rows = []
for block in history_blocks:
runStatus = block.find("runStatus").text
if runStatus.startswith("Heat"):
fanState = (heat_needs_fan == "Yes")
else:
fanState = (runStatus != "Off")
api_time = datetime.strptime(block.find("timestamp").text, "%Y-%m-%dT%H:%M").replace(tzinfo=_pelican_time)
# Need to convert seconds to nanoseconds
timestamp = int(api_time.timestamp() * 10**9)
output_rows.append({
"temperature": float(block.find("temperature").text),
"relative_humidity": float(block.find("humidity").text),
"heating_setpoint": float(block.find("heatSetting").text),
"cooling_setpoint": float(block.find("coolSetting").text),
# Driver explicitly uses "Schedule" field, but we don't have this in history
"override": block.find("setBy").text != "Schedule",
"fan": fanState,
"mode": _mode_name_mappings[block.find("system").text],
"state": _state_mappings.get(runStatus, 0),
"time": timestamp,
})
df = pd.DataFrame(output_rows)
df.drop_duplicates(subset="time", keep="first", inplace=True)
return df | Fill a hole in a Pelican thermostat's data stream.
Arguments:
site -- The thermostat's Pelican site name
username -- The Pelican username for the site
password -- The Pelican password for the site
tstat_name -- The name of the thermostat, as identified by Pelican
start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00"
end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00"
Returns:
A Pandas dataframe with historical Pelican data that falls between the
specified start and end times.
Note that this function assumes the Pelican thermostat's local time zone is
US/Pacific. It will properly handle PST vs. PDT. | Below is the the instruction that describes the task:
### Input:
Fill a hole in a Pelican thermostat's data stream.
Arguments:
site -- The thermostat's Pelican site name
username -- The Pelican username for the site
password -- The Pelican password for the site
tstat_name -- The name of the thermostat, as identified by Pelican
start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00"
end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00"
Returns:
A Pandas dataframe with historical Pelican data that falls between the
specified start and end times.
Note that this function assumes the Pelican thermostat's local time zone is
US/Pacific. It will properly handle PST vs. PDT.
### Response:
def fillPelicanHole(site, username, password, tstat_name, start_time, end_time):
"""Fill a hole in a Pelican thermostat's data stream.
Arguments:
site -- The thermostat's Pelican site name
username -- The Pelican username for the site
password -- The Pelican password for the site
tstat_name -- The name of the thermostat, as identified by Pelican
start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00"
end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00"
Returns:
A Pandas dataframe with historical Pelican data that falls between the
specified start and end times.
Note that this function assumes the Pelican thermostat's local time zone is
US/Pacific. It will properly handle PST vs. PDT.
"""
start = datetime.strptime(start_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)
end = datetime.strptime(end_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)
heat_needs_fan = _lookupHeatNeedsFan(site, username, password, tstat_name)
if heat_needs_fan is None:
return None
# Pelican's API only allows a query covering a time range of up to 1 month
# So we may need run multiple requests for historical data
history_blocks = []
while start < end:
block_start = start
block_end = min(start + timedelta(days=30), end)
blocks = _lookupHistoricalData(site, username, password, tstat_name, block_start, block_end)
if blocks is None:
return None
history_blocks.extend(blocks)
start += timedelta(days=30, minutes=1)
output_rows = []
for block in history_blocks:
runStatus = block.find("runStatus").text
if runStatus.startswith("Heat"):
fanState = (heat_needs_fan == "Yes")
else:
fanState = (runStatus != "Off")
api_time = datetime.strptime(block.find("timestamp").text, "%Y-%m-%dT%H:%M").replace(tzinfo=_pelican_time)
# Need to convert seconds to nanoseconds
timestamp = int(api_time.timestamp() * 10**9)
output_rows.append({
"temperature": float(block.find("temperature").text),
"relative_humidity": float(block.find("humidity").text),
"heating_setpoint": float(block.find("heatSetting").text),
"cooling_setpoint": float(block.find("coolSetting").text),
# Driver explicitly uses "Schedule" field, but we don't have this in history
"override": block.find("setBy").text != "Schedule",
"fan": fanState,
"mode": _mode_name_mappings[block.find("system").text],
"state": _state_mappings.get(runStatus, 0),
"time": timestamp,
})
df = pd.DataFrame(output_rows)
df.drop_duplicates(subset="time", keep="first", inplace=True)
return df |
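An illustrative call to fillPelicanHole; the site, credentials and thermostat name below are placeholders, not real values:
# Placeholder site/credentials purely for illustration.
df = fillPelicanHole(
    site="example-site",
    username="user@example.com",
    password="not-a-real-password",
    tstat_name="Lobby Thermostat",
    start_time="2018-01-29 15:00:00",  # UTC
    end_time="2018-01-29 16:00:00",    # UTC
)
if df is not None:
    print(df.head())  # temperature, setpoints, mode, state, time (ns), ...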
def store_vector(self, hash_name, bucket_key, v, data):
"""
Stores vector and JSON-serializable data in bucket with specified key.
"""
if not hash_name in self.buckets:
self.buckets[hash_name] = {}
if not bucket_key in self.buckets[hash_name]:
self.buckets[hash_name][bucket_key] = []
self.buckets[hash_name][bucket_key].append((v, data)) | Stores vector and JSON-serializable data in bucket with specified key. | Below is the the instruction that describes the task:
### Input:
Stores vector and JSON-serializable data in bucket with specified key.
### Response:
def store_vector(self, hash_name, bucket_key, v, data):
"""
Stores vector and JSON-serializable data in bucket with specified key.
"""
if not hash_name in self.buckets:
self.buckets[hash_name] = {}
if not bucket_key in self.buckets[hash_name]:
self.buckets[hash_name][bucket_key] = []
self.buckets[hash_name][bucket_key].append((v, data)) |
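The bucket layout built by store_vector is a two-level dict keyed by hash name and bucket key; a standalone sketch of the same idea, independent of the original class:
from collections import defaultdict

# hash_name -> bucket_key -> list of (vector, data) pairs, mirroring self.buckets above.
buckets = defaultdict(lambda: defaultdict(list))

def store_vector(hash_name, bucket_key, v, data):
    buckets[hash_name][bucket_key].append((v, data))

store_vector("rbp_hash", "1010", [0.1, 0.9], {"id": 42})
print(buckets["rbp_hash"]["1010"])  # [([0.1, 0.9], {'id': 42})]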
def _list_available_hosts(self, *args):
''' returns a list of hosts that haven't failed and aren't dark '''
return [ h for h in self.inventory.list_hosts(*args) if (h not in self.stats.failures) and (h not in self.stats.dark)] | returns a list of hosts that haven't failed and aren't dark | Below is the the instruction that describes the task:
### Input:
returns a list of hosts that haven't failed and aren't dark
### Response:
def _list_available_hosts(self, *args):
''' returns a list of hosts that haven't failed and aren't dark '''
return [ h for h in self.inventory.list_hosts(*args) if (h not in self.stats.failures) and (h not in self.stats.dark)] |
def _cursor_position(self, row=0, column=0):
"""
Set the cursor to a specific row and column.
Obnoxiously row/column is 1 based, instead of zero based, so we need
to compensate. I know I've created bugs in here somehow.
Confoundingly, inputs of 0 are still acceptable, and should move to
the beginning of the row/column as if they were 1. *sigh*
"""
if row == 0:
row = 1
if column == 0:
column = 1
self.y = min(row - 1, self.size[0] - 1)
self.x = min(column - 1, self.size[1] - 1) | Set the cursor to a specific row and column.
Obnoxiously row/column is 1 based, instead of zero based, so we need
to compensate. I know I've created bugs in here somehow.
Confoundingly, inputs of 0 are still acceptable, and should move to
the beginning of the row/column as if they were 1. *sigh* | Below is the the instruction that describes the task:
### Input:
Set the cursor to a specific row and column.
Obnoxiously row/column is 1 based, instead of zero based, so we need
to compensate. I know I've created bugs in here somehow.
Confoundingly, inputs of 0 are still acceptable, and should move to
the beginning of the row/column as if they were 1. *sigh*
### Response:
def _cursor_position(self, row=0, column=0):
"""
Set the cursor to a specific row and column.
Obnoxiously row/column is 1 based, instead of zero based, so we need
to compensate. I know I've created bugs in here somehow.
Confoundingly, inputs of 0 are still acceptable, and should move to
the beginning of the row/column as if they were 1. *sigh*
"""
if row == 0:
row = 1
if column == 0:
column = 1
self.y = min(row - 1, self.size[0] - 1)
self.x = min(column - 1, self.size[1] - 1) |
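A self-contained sketch of the same 1-based-to-0-based conversion and clamping, outside the terminal class; size is assumed to be (rows, columns):
def clamp_cursor(row, column, size):
    # Treat 0 as 1, convert to 0-based indices, and clamp to the screen size.
    row = row or 1
    column = column or 1
    y = min(row - 1, size[0] - 1)
    x = min(column - 1, size[1] - 1)
    return y, x

print(clamp_cursor(0, 0, (24, 80)))     # (0, 0)
print(clamp_cursor(25, 100, (24, 80)))  # (23, 79), clamped to the bottom-right cell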
def clean_line(str, delimiter):
"""Split string on given delimiter, remove whitespace from each field."""
return [x.strip() for x in str.strip().split(delimiter) if x != ''] | Split string on given delimiter, remove whitespace from each field. | Below is the the instruction that describes the task:
### Input:
Split string on given delimiter, remove whitespace from each field.
### Response:
def clean_line(str, delimiter):
"""Split string on given delimiter, remove whitespace from each field."""
return [x.strip() for x in str.strip().split(delimiter) if x != ''] |
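For example, assuming clean_line above is in scope:
print(clean_line("  alpha, beta ,gamma  ", ","))  # ['alpha', 'beta', 'gamma']
print(clean_line("a\tb\t\tc", "\t"))              # ['a', 'b', 'c'] (empty fields dropped)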
def output_kernels(gandi, flavor, name_list, justify=14):
""" Helper to output kernel flavor versions."""
output_line(gandi, 'flavor', flavor, justify)
for name in name_list:
output_line(gandi, 'version', name, justify) | Helper to output kernel flavor versions. | Below is the the instruction that describes the task:
### Input:
Helper to output kernel flavor versions.
### Response:
def output_kernels(gandi, flavor, name_list, justify=14):
""" Helper to output kernel flavor versions."""
output_line(gandi, 'flavor', flavor, justify)
for name in name_list:
output_line(gandi, 'version', name, justify) |
def toDict(self, msg: Dict) -> Dict:
"""
Return a dictionary form of the message
:param msg: the message to be sent
:raises: ValueError if msg cannot be converted to an appropriate format
for transmission
"""
if isinstance(msg, Request):
tmsg = msg.as_dict
elif hasattr(msg, "_asdict"):
tmsg = dict(msg._asdict())
elif hasattr(msg, "__dict__"):
tmsg = dict(msg.__dict__)
elif self.allowDictOnly:
raise ValueError("Message cannot be converted to an appropriate "
"format for transmission")
else:
tmsg = msg
return tmsg | Return a dictionary form of the message
:param msg: the message to be sent
:raises: ValueError if msg cannot be converted to an appropriate format
for transmission | Below is the the instruction that describes the task:
### Input:
Return a dictionary form of the message
:param msg: the message to be sent
:raises: ValueError if msg cannot be converted to an appropriate format
for transmission
### Response:
def toDict(self, msg: Dict) -> Dict:
"""
Return a dictionary form of the message
:param msg: the message to be sent
:raises: ValueError if msg cannot be converted to an appropriate format
for transmission
"""
if isinstance(msg, Request):
tmsg = msg.as_dict
elif hasattr(msg, "_asdict"):
tmsg = dict(msg._asdict())
elif hasattr(msg, "__dict__"):
tmsg = dict(msg.__dict__)
elif self.allowDictOnly:
raise ValueError("Message cannot be converted to an appropriate "
"format for transmission")
else:
tmsg = msg
return tmsg |
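The _asdict branch above is the one taken by namedtuple-style messages; a small illustration of that conversion on its own:
from collections import namedtuple

Ping = namedtuple("Ping", ["msgId", "ts"])
msg = Ping(msgId=7, ts=1500000000)

# hasattr(msg, "_asdict") is True, so toDict() would return this dict:
print(dict(msg._asdict()))  # {'msgId': 7, 'ts': 1500000000}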
def sodium_unpad(s, blocksize):
"""
Remove ISO/IEC 7816-4 padding from the input byte array ``s``
:param s: input bytes string
:type s: bytes
:param blocksize:
:type blocksize: int
:return: unpadded string
:rtype: bytes
"""
ensure(isinstance(s, bytes),
raising=exc.TypeError)
ensure(isinstance(blocksize, integer_types),
raising=exc.TypeError)
s_len = len(s)
u_len = ffi.new("size_t []", 1)
rc = lib.sodium_unpad(u_len, s, s_len, blocksize)
if rc != 0:
raise exc.CryptoError("Unpadding failure")
return s[:u_len[0]] | Remove ISO/IEC 7816-4 padding from the input byte array ``s``
:param s: input bytes string
:type s: bytes
:param blocksize:
:type blocksize: int
:return: unpadded string
:rtype: bytes | Below is the the instruction that describes the task:
### Input:
Remove ISO/IEC 7816-4 padding from the input byte array ``s``
:param s: input bytes string
:type s: bytes
:param blocksize:
:type blocksize: int
:return: unpadded string
:rtype: bytes
### Response:
def sodium_unpad(s, blocksize):
"""
Remove ISO/IEC 7816-4 padding from the input byte array ``s``
:param s: input bytes string
:type s: bytes
:param blocksize:
:type blocksize: int
:return: unpadded string
:rtype: bytes
"""
ensure(isinstance(s, bytes),
raising=exc.TypeError)
ensure(isinstance(blocksize, integer_types),
raising=exc.TypeError)
s_len = len(s)
u_len = ffi.new("size_t []", 1)
rc = lib.sodium_unpad(u_len, s, s_len, blocksize)
if rc != 0:
raise exc.CryptoError("Unpadding failure")
return s[:u_len[0]] |
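A round-trip sketch with the matching sodium_pad helper; this assumes both functions are exposed by PyNaCl's nacl.bindings module, as in recent releases:
from nacl.bindings import sodium_pad, sodium_unpad  # assumed available in recent PyNaCl

blocksize = 16
padded = sodium_pad(b"attack at dawn", blocksize)
assert len(padded) % blocksize == 0
assert sodium_unpad(padded, blocksize) == b"attack at dawn"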
def get_changesets(self, project=None, max_comment_length=None, skip=None, top=None, orderby=None, search_criteria=None):
"""GetChangesets.
Retrieve Tfvc Changesets
:param str project: Project ID or project name
:param int max_comment_length: Include details about associated work items in the response. Default: null
:param int skip: Number of results to skip. Default: null
:param int top: The maximum number of results to return. Default: null
:param str orderby: Results are sorted by ID in descending order by default. Use id asc to sort by ID in ascending order.
:param :class:`<TfvcChangesetSearchCriteria> <azure.devops.v5_0.tfvc.models.TfvcChangesetSearchCriteria>` search_criteria: Following criteria available (.itemPath, .version, .versionType, .versionOption, .author, .fromId, .toId, .fromDate, .toDate) Default: null
:rtype: [TfvcChangesetRef]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if max_comment_length is not None:
query_parameters['maxCommentLength'] = self._serialize.query('max_comment_length', max_comment_length, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query('orderby', orderby, 'str')
if search_criteria is not None:
if search_criteria.item_path is not None:
query_parameters['searchCriteria.itemPath'] = search_criteria.item_path
if search_criteria.author is not None:
query_parameters['searchCriteria.author'] = search_criteria.author
if search_criteria.from_date is not None:
query_parameters['searchCriteria.fromDate'] = search_criteria.from_date
if search_criteria.to_date is not None:
query_parameters['searchCriteria.toDate'] = search_criteria.to_date
if search_criteria.from_id is not None:
query_parameters['searchCriteria.fromId'] = search_criteria.from_id
if search_criteria.to_id is not None:
query_parameters['searchCriteria.toId'] = search_criteria.to_id
if search_criteria.follow_renames is not None:
query_parameters['searchCriteria.followRenames'] = search_criteria.follow_renames
if search_criteria.include_links is not None:
query_parameters['searchCriteria.includeLinks'] = search_criteria.include_links
if search_criteria.mappings is not None:
query_parameters['searchCriteria.mappings'] = search_criteria.mappings
response = self._send(http_method='GET',
location_id='0bc8f0a4-6bfb-42a9-ba84-139da7b99c49',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TfvcChangesetRef]', self._unwrap_collection(response)) | GetChangesets.
Retrieve Tfvc Changesets
:param str project: Project ID or project name
:param int max_comment_length: Include details about associated work items in the response. Default: null
:param int skip: Number of results to skip. Default: null
:param int top: The maximum number of results to return. Default: null
:param str orderby: Results are sorted by ID in descending order by default. Use id asc to sort by ID in ascending order.
:param :class:`<TfvcChangesetSearchCriteria> <azure.devops.v5_0.tfvc.models.TfvcChangesetSearchCriteria>` search_criteria: Following criteria available (.itemPath, .version, .versionType, .versionOption, .author, .fromId, .toId, .fromDate, .toDate) Default: null
:rtype: [TfvcChangesetRef] | Below is the the instruction that describes the task:
### Input:
GetChangesets.
Retrieve Tfvc Changesets
:param str project: Project ID or project name
:param int max_comment_length: Include details about associated work items in the response. Default: null
:param int skip: Number of results to skip. Default: null
:param int top: The maximum number of results to return. Default: null
:param str orderby: Results are sorted by ID in descending order by default. Use id asc to sort by ID in ascending order.
:param :class:`<TfvcChangesetSearchCriteria> <azure.devops.v5_0.tfvc.models.TfvcChangesetSearchCriteria>` search_criteria: Following criteria available (.itemPath, .version, .versionType, .versionOption, .author, .fromId, .toId, .fromDate, .toDate) Default: null
:rtype: [TfvcChangesetRef]
### Response:
def get_changesets(self, project=None, max_comment_length=None, skip=None, top=None, orderby=None, search_criteria=None):
"""GetChangesets.
Retrieve Tfvc Changesets
:param str project: Project ID or project name
:param int max_comment_length: Include details about associated work items in the response. Default: null
:param int skip: Number of results to skip. Default: null
:param int top: The maximum number of results to return. Default: null
:param str orderby: Results are sorted by ID in descending order by default. Use id asc to sort by ID in ascending order.
:param :class:`<TfvcChangesetSearchCriteria> <azure.devops.v5_0.tfvc.models.TfvcChangesetSearchCriteria>` search_criteria: Following criteria available (.itemPath, .version, .versionType, .versionOption, .author, .fromId, .toId, .fromDate, .toDate) Default: null
:rtype: [TfvcChangesetRef]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if max_comment_length is not None:
query_parameters['maxCommentLength'] = self._serialize.query('max_comment_length', max_comment_length, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query('orderby', orderby, 'str')
if search_criteria is not None:
if search_criteria.item_path is not None:
query_parameters['searchCriteria.itemPath'] = search_criteria.item_path
if search_criteria.author is not None:
query_parameters['searchCriteria.author'] = search_criteria.author
if search_criteria.from_date is not None:
query_parameters['searchCriteria.fromDate'] = search_criteria.from_date
if search_criteria.to_date is not None:
query_parameters['searchCriteria.toDate'] = search_criteria.to_date
if search_criteria.from_id is not None:
query_parameters['searchCriteria.fromId'] = search_criteria.from_id
if search_criteria.to_id is not None:
query_parameters['searchCriteria.toId'] = search_criteria.to_id
if search_criteria.follow_renames is not None:
query_parameters['searchCriteria.followRenames'] = search_criteria.follow_renames
if search_criteria.include_links is not None:
query_parameters['searchCriteria.includeLinks'] = search_criteria.include_links
if search_criteria.mappings is not None:
query_parameters['searchCriteria.mappings'] = search_criteria.mappings
response = self._send(http_method='GET',
location_id='0bc8f0a4-6bfb-42a9-ba84-139da7b99c49',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TfvcChangesetRef]', self._unwrap_collection(response)) |
def highlight_block(context, nodelist, lexer, **kwargs):
"""
Code is nodelist ``rendered`` in ``context``
Returns highlighted code ``div`` tag from ``HtmlFormatter``
Lexer is guessed by ``lexer`` name
arguments are passed into the formatter
Syntax::
{% highlight_block [lexer name] [formatter options] %}
... source code ..
{% endhighlight_block %}
Example::
{% highlight_block python linenos=true %}
print '{{ request.path }}'
{% endhighlight_block %}
"""
if highlighter is None:
return '<pre>%s</pre>' % str(nodelist.render(context) or '')
return highlighter(nodelist.render(context) or '', get_lexer_by_name(lexer), HtmlFormatter(**kwargs)) | Code is nodelist ``rendered`` in ``context``
Returns highlighted code ``div`` tag from ``HtmlFormatter``
Lexer is guessed by ``lexer`` name
arguments are passed into the formatter
Syntax::
{% highlight_block [lexer name] [formatter options] %}
... source code ..
{% endhighlight_block %}
Example::
{% highlight_block python linenos=true %}
print '{{ request.path }}'
{% endhighlight_block %} | Below is the the instruction that describes the task:
### Input:
Code is nodelist ``rendered`` in ``context``
Returns highlighted code ``div`` tag from ``HtmlFormatter``
Lexer is guessed by ``lexer`` name
arguments are passed into the formatter
Syntax::
{% highlight_block [lexer name] [formatter options] %}
... source code ..
{% endhighlight_block %}
Example::
{% highlight_block python linenos=true %}
print '{{ request.path }}'
{% endhighlight_block %}
### Response:
def highlight_block(context, nodelist, lexer, **kwargs):
"""
Code is nodelist ``rendered`` in ``context``
Returns highlighted code ``div`` tag from ``HtmlFormatter``
Lexer is guessed by ``lexer`` name
arguments are passed into the formatter
Syntax::
{% highlight_block [lexer name] [formatter options] %}
... source code ..
{% endhighlight_block %}
Example::
{% highlight_block python linenos=true %}
print '{{ request.path }}'
{% endhighlight_block %}
"""
if highlighter is None:
return '<pre>%s</pre>' % str(nodelist.render(context) or '')
return highlighter(nodelist.render(context) or '', get_lexer_by_name(lexer), HtmlFormatter(**kwargs)) |
def ssml_w(self, words, role=None, **kwargs):
"""
Create a <W> element
:param words: Words to speak
:param role: Customize the pronunciation of words by specifying the word’s part of speech or alternate meaning
:param kwargs: additional attributes
:returns: <W> element
"""
return self.nest(SsmlW(words, role=role, **kwargs)) | Create a <W> element
:param words: Words to speak
:param role: Customize the pronunciation of words by specifying the word’s part of speech or alternate meaning
:param kwargs: additional attributes
:returns: <W> element | Below is the the instruction that describes the task:
### Input:
Create a <W> element
:param words: Words to speak
:param role: Customize the pronunciation of words by specifying the word’s part of speech or alternate meaning
:param kwargs: additional attributes
:returns: <W> element
### Response:
def ssml_w(self, words, role=None, **kwargs):
"""
Create a <W> element
:param words: Words to speak
:param role: Customize the pronunciation of words by specifying the word’s part of speech or alternate meaning
:param kwargs: additional attributes
:returns: <W> element
"""
return self.nest(SsmlW(words, role=role, **kwargs)) |
def lmom_fit(self, data=[], lmom_ratios=[]):
"""
Fit the distribution function to the given data or given L-moments.
:param data: Data to use in calculating the distribution parameters
:type data: array_like
:param lmom_ratios: L-moments (ratios) l1, l2, t3, t4, .. to use in calculating the distribution parameters
:type lmom_ratios: array_like
:returns: Distribution parameters in `scipy` order, e.g. scale, loc, shape
:rtype: :class:`OrderedDict`
"""
n_min = self.numargs + 2
if len(data) > 0:
if len(data) <= n_min:
raise ValueError("At least {} data points must be provided.".format(n_min))
lmom_ratios = lm.lmom_ratios(data, nmom=n_min)
elif not lmom_ratios:
raise Exception("Either `data` or `lmom_ratios` must be provided.")
elif len(lmom_ratios) < n_min:
raise ValueError("At least {} number of L-moments must be provided.".format(n_min))
return self._lmom_fit(lmom_ratios) | Fit the distribution function to the given data or given L-moments.
:param data: Data to use in calculating the distribution parameters
:type data: array_like
:param lmom_ratios: L-moments (ratios) l1, l2, t3, t4, .. to use in calculating the distribution parameters
:type lmom_ratios: array_like
:returns: Distribution parameters in `scipy` order, e.g. scale, loc, shape
:rtype: :class:`OrderedDict` | Below is the the instruction that describes the task:
### Input:
Fit the distribution function to the given data or given L-moments.
:param data: Data to use in calculating the distribution parameters
:type data: array_like
:param lmom_ratios: L-moments (ratios) l1, l2, t3, t4, .. to use in calculating the distribution parameters
:type lmom_ratios: array_like
:returns: Distribution parameters in `scipy` order, e.g. scale, loc, shape
:rtype: :class:`OrderedDict`
### Response:
def lmom_fit(self, data=[], lmom_ratios=[]):
"""
Fit the distribution function to the given data or given L-moments.
:param data: Data to use in calculating the distribution parameters
:type data: array_like
:param lmom_ratios: L-moments (ratios) l1, l2, t3, t4, .. to use in calculating the distribution parameters
:type lmom_ratios: array_like
:returns: Distribution parameters in `scipy` order, e.g. scale, loc, shape
:rtype: :class:`OrderedDict`
"""
n_min = self.numargs + 2
if len(data) > 0:
if len(data) <= n_min:
raise ValueError("At least {} data points must be provided.".format(n_min))
lmom_ratios = lm.lmom_ratios(data, nmom=n_min)
elif not lmom_ratios:
raise Exception("Either `data` or `lmom_ratios` must be provided.")
elif len(lmom_ratios) < n_min:
raise ValueError("At least {} number of L-moments must be provided.".format(n_min))
return self._lmom_fit(lmom_ratios) |
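A hedged usage sketch, assuming this method belongs to the distribution wrappers exposed by the lmoments3 package (where lmom_fit returns an OrderedDict of scipy-style parameters):
import numpy as np
from lmoments3 import distr  # assumption: the wrappers live under lmoments3.distr

data = np.random.gumbel(loc=10.0, scale=2.0, size=200)
params = distr.gev.lmom_fit(data)  # e.g. OrderedDict([('c', ...), ('loc', ...), ('scale', ...)])
print(params)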
def refresh_stats(self):
"""
only need this when generating terrain (sea = 100 - perc_land at start).
This function forces a recount, otherwise just call the variables
"""
self.tot_pix = 0
self.tot_sea = 0
self.tot_land = 0
self.tot_blocked = 0
for row in range(self.grd.grid_height):
for col in range(self.grd.grid_width):
self.tot_pix += 1
val = self.grd.get_tile(row, col)
if val == TERRAIN_SEA:
self.tot_sea += 1
elif val == TERRAIN_LAND:
self.tot_land += 1
else:
self.tot_blocked += 1 | only need this when generating terrain (sea = 100 - perc_land at start).
This function forces a recount, otherwise just call the variables | Below is the the instruction that describes the task:
### Input:
only need this when generating terrain (sea = 100 - perc_land at start).
This function forces a recount, otherwise just call the variables
### Response:
def refresh_stats(self):
"""
only need this when generating terrain (sea = 100 - perc_land at start).
This function forces a recount, otherwise just call the variables
"""
self.tot_pix = 0
self.tot_sea = 0
self.tot_land = 0
self.tot_blocked = 0
for row in range(self.grd.grid_height):
for col in range(self.grd.grid_width):
self.tot_pix += 1
val = self.grd.get_tile(row, col)
if val == TERRAIN_SEA:
self.tot_sea += 1
elif val == TERRAIN_LAND:
self.tot_land += 1
else:
self.tot_blocked += 1 |
def get_versions(default=DEFAULT, verbose=False):
"""This variation of get_versions() will be used in versioneer.py ."""
# returns dict with two keys: 'version' and 'full'
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
assert VCS is not None, "please set versioneer.VCS"
# I am in versioneer.py, which must live at the top of the source tree,
# which we use to compute the root directory. py2exe/bbfreeze/non-CPython
# don't have __file__, in which case we fall back to sys.argv[0] (which
# ought to be the setup.py script). We prefer __file__ since that's more
# robust in cases where setup.py was invoked in some weird way (e.g. pip)
root = get_root()
versionfile_abs = os.path.join(root, versionfile_source)
# extract version from first of _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
# Try to get the version info from the VCS-specific replacement keywords.
get_keywords_f = vcs_function(VCS, "get_keywords")
versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
if get_keywords_f and versions_from_keywords_f:
vcs_keywords = get_keywords_f(versionfile_abs)
ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
if ver:
if verbose: print("got version from expanded keyword %s" % ver)
return ver
# Try to get the version info from _version.py .
ver = versions_from_file(versionfile_abs)
if ver:
if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
return ver
# Try to get the version info from the VCS, directly.
versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
if versions_from_vcs_f:
ver = versions_from_vcs_f(tag_prefix, root, verbose)
if ver:
if verbose: print("got version from VCS %s" % ver)
return ver
# Try to get the version info from the directory's naming.
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % default)
return default | This variation of get_versions() will be used in versioneer.py . | Below is the the instruction that describes the task:
### Input:
This variation of get_versions() will be used in versioneer.py .
### Response:
def get_versions(default=DEFAULT, verbose=False):
"""This variation of get_versions() will be used in versioneer.py ."""
# returns dict with two keys: 'version' and 'full'
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
assert VCS is not None, "please set versioneer.VCS"
# I am in versioneer.py, which must live at the top of the source tree,
# which we use to compute the root directory. py2exe/bbfreeze/non-CPython
# don't have __file__, in which case we fall back to sys.argv[0] (which
# ought to be the setup.py script). We prefer __file__ since that's more
# robust in cases where setup.py was invoked in some weird way (e.g. pip)
root = get_root()
versionfile_abs = os.path.join(root, versionfile_source)
# extract version from first of _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
# Try to get the version info from the VCS-specific replacement keywords.
get_keywords_f = vcs_function(VCS, "get_keywords")
versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
if get_keywords_f and versions_from_keywords_f:
vcs_keywords = get_keywords_f(versionfile_abs)
ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
if ver:
if verbose: print("got version from expanded keyword %s" % ver)
return ver
# Try to get the version info from _version.py .
ver = versions_from_file(versionfile_abs)
if ver:
if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
return ver
# Try to get the version info from the VCS, directly.
versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
if versions_from_vcs_f:
ver = versions_from_vcs_f(tag_prefix, root, verbose)
if ver:
if verbose: print("got version from VCS %s" % ver)
return ver
# Try to get the version info from the directory's naming.
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % default)
return default |
def _transform(self, node):
"""Call matching transforms for the given node if any and return the
transformed node.
"""
cls = node.__class__
if cls not in self.transforms:
# no transform registered for this class of node
return node
transforms = self.transforms[cls]
for transform_func, predicate in transforms:
if predicate is None or predicate(node):
ret = transform_func(node)
# if the transformation function returns something, it's
# expected to be a replacement for the node
if ret is not None:
node = ret
if ret.__class__ != cls:
# Can no longer apply the rest of the transforms.
break
return node | Call matching transforms for the given node if any and return the
transformed node. | Below is the the instruction that describes the task:
### Input:
Call matching transforms for the given node if any and return the
transformed node.
### Response:
def _transform(self, node):
"""Call matching transforms for the given node if any and return the
transformed node.
"""
cls = node.__class__
if cls not in self.transforms:
# no transform registered for this class of node
return node
transforms = self.transforms[cls]
for transform_func, predicate in transforms:
if predicate is None or predicate(node):
ret = transform_func(node)
# if the transformation function returns something, it's
# expected to be a replacement for the node
if ret is not None:
node = ret
if ret.__class__ != cls:
# Can no longer apply the rest of the transforms.
break
return node |
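A standalone sketch of the registry pattern this method relies on: transforms are stored per node class as (function, predicate) pairs, and a non-None return value replaces the node:
transforms = {}

def register_transform(cls, func, predicate=None):
    transforms.setdefault(cls, []).append((func, predicate))

def apply_transforms(node):
    cls = node.__class__
    for func, predicate in transforms.get(cls, []):
        if predicate is None or predicate(node):
            ret = func(node)
            if ret is not None:
                node = ret
                if ret.__class__ is not cls:
                    break  # remaining transforms expect the original class
    return node

# Example: upper-case non-empty string "nodes".
register_transform(str, lambda s: s.upper(), predicate=lambda s: bool(s))
print(apply_transforms("hello"))  # HELLO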
def call(self, timeout=None):
"""Run command with arguments. Wait for command to complete.
same as:
1. :meth:`start`
2. :meth:`wait`
3. :meth:`stop`
:rtype: self
"""
self.start().wait(timeout=timeout)
if self.is_alive():
self.stop()
return self | Run command with arguments. Wait for command to complete.
same as:
1. :meth:`start`
2. :meth:`wait`
3. :meth:`stop`
:rtype: self | Below is the the instruction that describes the task:
### Input:
Run command with arguments. Wait for command to complete.
same as:
1. :meth:`start`
2. :meth:`wait`
3. :meth:`stop`
:rtype: self
### Response:
def call(self, timeout=None):
"""Run command with arguments. Wait for command to complete.
same as:
1. :meth:`start`
2. :meth:`wait`
3. :meth:`stop`
:rtype: self
"""
self.start().wait(timeout=timeout)
if self.is_alive():
self.stop()
return self |
def cli_ping(context, prefix):
"""
Performs a ping test.
See :py:mod:`swiftly.cli.ping` for context usage information.
See :py:class:`CLIPing` for more information.
:param context: The :py:class:`swiftly.cli.context.CLIContext` to
use.
:param prefix: The container name prefix to use. Default:
swiftly-ping
"""
if not prefix:
prefix = 'swiftly-ping'
ping_ring_object_puts = collections.defaultdict(lambda: [])
ping_ring_object_gets = collections.defaultdict(lambda: [])
ping_ring_object_deletes = collections.defaultdict(lambda: [])
context.ping_begin = context.ping_begin_last = time.time()
container = prefix + '-' + uuid.uuid4().hex
objects = [uuid.uuid4().hex for x in moves.range(context.ping_count)]
conc = Concurrency(context.concurrency)
with context.client_manager.with_client() as client:
client.auth()
_cli_ping_status(context, 'auth', '-', None, None, None, None)
_cli_ping_status(context, 'account head', '-', *client.head_account())
_cli_ping_status(
context, 'container put', '-', *client.put_container(container))
if _cli_ping_objects(
context, 'put', conc, container, objects, _cli_ping_object_put,
ping_ring_object_puts):
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR put objects did not complete successfully due to '
'previous error; but continuing\n')
fp.flush()
if _cli_ping_objects(
context, 'get', conc, container, objects, _cli_ping_object_get,
ping_ring_object_gets):
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR get objects did not complete successfully due to '
'previous error; but continuing\n')
fp.flush()
if _cli_ping_objects(
context, 'delete', conc, container, objects,
_cli_ping_object_delete, ping_ring_object_deletes):
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR delete objects did not complete successfully due to '
'previous error; but continuing\n')
fp.flush()
for attempt in moves.range(5):
if attempt:
sleep(2**attempt)
with context.client_manager.with_client() as client:
try:
_cli_ping_status(
context, 'container delete', '-',
*client.delete_container(container))
break
except ReturnCode as err:
with context.io_manager.with_stderr() as fp:
fp.write(str(err))
fp.write('\n')
fp.flush()
else:
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR could not confirm deletion of container due to '
'previous error; but continuing\n')
fp.flush()
end = time.time()
with context.io_manager.with_stdout() as fp:
if context.graphite:
fp.write(
'%s.ping_overall %.02f %d\n' % (
context.graphite, end - context.ping_begin, time.time()))
if context.ping_verbose:
fp.write('% 6.02fs total\n' % (end - context.ping_begin))
elif not context.graphite:
fp.write('%.02fs\n' % (end - context.ping_begin))
fp.flush()
ping_ring_overall = collections.defaultdict(lambda: [])
_cli_ping_ring_report(context, ping_ring_object_puts, 'PUT')
for ip, timings in six.iteritems(ping_ring_object_puts):
ping_ring_overall[ip].extend(timings)
_cli_ping_ring_report(context, ping_ring_object_gets, 'GET')
for ip, timings in six.iteritems(ping_ring_object_gets):
ping_ring_overall[ip].extend(timings)
_cli_ping_ring_report(context, ping_ring_object_deletes, 'DELETE')
for ip, timings in six.iteritems(ping_ring_object_deletes):
ping_ring_overall[ip].extend(timings)
_cli_ping_ring_report(context, ping_ring_overall, 'overall') | Performs a ping test.
See :py:mod:`swiftly.cli.ping` for context usage information.
See :py:class:`CLIPing` for more information.
:param context: The :py:class:`swiftly.cli.context.CLIContext` to
use.
:param prefix: The container name prefix to use. Default:
swiftly-ping | Below is the the instruction that describes the task:
### Input:
Performs a ping test.
See :py:mod:`swiftly.cli.ping` for context usage information.
See :py:class:`CLIPing` for more information.
:param context: The :py:class:`swiftly.cli.context.CLIContext` to
use.
:param prefix: The container name prefix to use. Default:
swiftly-ping
### Response:
def cli_ping(context, prefix):
"""
Performs a ping test.
See :py:mod:`swiftly.cli.ping` for context usage information.
See :py:class:`CLIPing` for more information.
:param context: The :py:class:`swiftly.cli.context.CLIContext` to
use.
:param prefix: The container name prefix to use. Default:
swiftly-ping
"""
if not prefix:
prefix = 'swiftly-ping'
ping_ring_object_puts = collections.defaultdict(lambda: [])
ping_ring_object_gets = collections.defaultdict(lambda: [])
ping_ring_object_deletes = collections.defaultdict(lambda: [])
context.ping_begin = context.ping_begin_last = time.time()
container = prefix + '-' + uuid.uuid4().hex
objects = [uuid.uuid4().hex for x in moves.range(context.ping_count)]
conc = Concurrency(context.concurrency)
with context.client_manager.with_client() as client:
client.auth()
_cli_ping_status(context, 'auth', '-', None, None, None, None)
_cli_ping_status(context, 'account head', '-', *client.head_account())
_cli_ping_status(
context, 'container put', '-', *client.put_container(container))
if _cli_ping_objects(
context, 'put', conc, container, objects, _cli_ping_object_put,
ping_ring_object_puts):
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR put objects did not complete successfully due to '
'previous error; but continuing\n')
fp.flush()
if _cli_ping_objects(
context, 'get', conc, container, objects, _cli_ping_object_get,
ping_ring_object_gets):
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR get objects did not complete successfully due to '
'previous error; but continuing\n')
fp.flush()
if _cli_ping_objects(
context, 'delete', conc, container, objects,
_cli_ping_object_delete, ping_ring_object_deletes):
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR delete objects did not complete successfully due to '
'previous error; but continuing\n')
fp.flush()
for attempt in moves.range(5):
if attempt:
sleep(2**attempt)
with context.client_manager.with_client() as client:
try:
_cli_ping_status(
context, 'container delete', '-',
*client.delete_container(container))
break
except ReturnCode as err:
with context.io_manager.with_stderr() as fp:
fp.write(str(err))
fp.write('\n')
fp.flush()
else:
with context.io_manager.with_stderr() as fp:
fp.write(
'ERROR could not confirm deletion of container due to '
'previous error; but continuing\n')
fp.flush()
end = time.time()
with context.io_manager.with_stdout() as fp:
if context.graphite:
fp.write(
'%s.ping_overall %.02f %d\n' % (
context.graphite, end - context.ping_begin, time.time()))
if context.ping_verbose:
fp.write('% 6.02fs total\n' % (end - context.ping_begin))
elif not context.graphite:
fp.write('%.02fs\n' % (end - context.ping_begin))
fp.flush()
ping_ring_overall = collections.defaultdict(lambda: [])
_cli_ping_ring_report(context, ping_ring_object_puts, 'PUT')
for ip, timings in six.iteritems(ping_ring_object_puts):
ping_ring_overall[ip].extend(timings)
_cli_ping_ring_report(context, ping_ring_object_gets, 'GET')
for ip, timings in six.iteritems(ping_ring_object_gets):
ping_ring_overall[ip].extend(timings)
_cli_ping_ring_report(context, ping_ring_object_deletes, 'DELETE')
for ip, timings in six.iteritems(ping_ring_object_deletes):
ping_ring_overall[ip].extend(timings)
_cli_ping_ring_report(context, ping_ring_overall, 'overall') |
def httpdo(method, url, data=None):
"""
Do HTTP Request
"""
if isinstance(data, dict):
data = json.dumps(data)
if DEBUG:
print "Shell: curl -X {method} -d '{data}' '{url}'".format(method=method, data=data or '', url=url)
fn = dict(GET=requests.get, POST=requests.post, DELETE=requests.delete)[method]
response = fn(url, data=data)
retjson = response.json()
if DEBUG:
print 'Return:', json.dumps(retjson, indent=4)
r = convert(retjson)
if r.status != 0:
raise WebDriverError(r.status, r.value)
return r | Do HTTP Request | Below is the the instruction that describes the task:
### Input:
Do HTTP Request
### Response:
def httpdo(method, url, data=None):
"""
Do HTTP Request
"""
if isinstance(data, dict):
data = json.dumps(data)
if DEBUG:
print "Shell: curl -X {method} -d '{data}' '{url}'".format(method=method, data=data or '', url=url)
fn = dict(GET=requests.get, POST=requests.post, DELETE=requests.delete)[method]
response = fn(url, data=data)
retjson = response.json()
if DEBUG:
print 'Return:', json.dumps(retjson, indent=4)
r = convert(retjson)
if r.status != 0:
raise WebDriverError(r.status, r.value)
return r |
def fillup_layer(layer_length, arrow_char):
"""
Creates a layer with BreakWire elements.
Args:
layer_length (int): The length of the layer to create
arrow_char (char): The char used to create the BreakWire element.
Returns:
list: The new layer.
"""
breakwire_layer = []
for _ in range(layer_length):
breakwire_layer.append(BreakWire(arrow_char))
return breakwire_layer | Creates a layer with BreakWire elements.
Args:
layer_length (int): The length of the layer to create
arrow_char (char): The char used to create the BreakWire element.
Returns:
list: The new layer. | Below is the the instruction that describes the task:
### Input:
Creates a layer with BreakWire elements.
Args:
layer_length (int): The length of the layer to create
arrow_char (char): The char used to create the BreakWire element.
Returns:
list: The new layer.
### Response:
def fillup_layer(layer_length, arrow_char):
"""
Creates a layer with BreakWire elements.
Args:
layer_length (int): The length of the layer to create
arrow_char (char): The char used to create the BreakWire element.
Returns:
list: The new layer.
"""
breakwire_layer = []
for _ in range(layer_length):
breakwire_layer.append(BreakWire(arrow_char))
return breakwire_layer |
def calibrate_pressure(self):
'''calibrate pressure'''
if self.mavlink10():
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0,
0, 0, 1, 0, 0, 0, 0)
else:
MAV_ACTION_CALIBRATE_PRESSURE = 20
self.mav.action_send(self.target_system, self.target_component, MAV_ACTION_CALIBRATE_PRESSURE) | calibrate pressure | Below is the the instruction that describes the task:
### Input:
calibrate pressure
### Response:
def calibrate_pressure(self):
'''calibrate pressure'''
if self.mavlink10():
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0,
0, 0, 1, 0, 0, 0, 0)
else:
MAV_ACTION_CALIBRATE_PRESSURE = 20
self.mav.action_send(self.target_system, self.target_component, MAV_ACTION_CALIBRATE_PRESSURE) |
def call(self, obj, name, method, args, kwargs):
"""Trigger a method along with its beforebacks and afterbacks.
Parameters
----------
name: str
The name of the method that will be called
args: tuple
The arguments that will be passed to the base method
kwargs: dict
The keyword args that will be passed to the base method
"""
if name in self._callback_registry:
beforebacks, afterbacks = zip(*self._callback_registry.get(name, []))
hold = []
for b in beforebacks:
if b is not None:
call = Data(name=name, kwargs=kwargs.copy(), args=args)
v = b(obj, call)
else:
v = None
hold.append(v)
out = method(*args, **kwargs)
for a, bval in zip(afterbacks, hold):
if a is not None:
a(obj, Data(before=bval, name=name, value=out))
elif callable(bval):
# the beforeback's return value was an
# afterback that expects to be called
bval(out)
return out
else:
return method(*args, **kwargs) | Trigger a method along with its beforebacks and afterbacks.
Parameters
----------
name: str
The name of the method that will be called
args: tuple
The arguments that will be passed to the base method
kwargs: dict
The keyword args that will be passed to the base method | Below is the the instruction that describes the task:
### Input:
Trigger a method along with its beforebacks and afterbacks.
Parameters
----------
name: str
The name of the method that will be called
args: tuple
The arguments that will be passed to the base method
kwargs: dict
The keyword args that will be passed to the base method
### Response:
def call(self, obj, name, method, args, kwargs):
"""Trigger a method along with its beforebacks and afterbacks.
Parameters
----------
name: str
The name of the method that will be called
args: tuple
The arguments that will be passed to the base method
kwargs: dict
The keyword args that will be passed to the base method
"""
if name in self._callback_registry:
beforebacks, afterbacks = zip(*self._callback_registry.get(name, []))
hold = []
for b in beforebacks:
if b is not None:
call = Data(name=name, kwargs=kwargs.copy(), args=args)
v = b(obj, call)
else:
v = None
hold.append(v)
out = method(*args, **kwargs)
for a, bval in zip(afterbacks, hold):
if a is not None:
a(obj, Data(before=bval, name=name, value=out))
elif callable(bval):
# the beforeback's return value was an
# afterback that expects to be called
bval(out)
return out
else:
return method(*args, **kwargs) |
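A stripped-down sketch of the same dispatch logic, using types.SimpleNamespace as a stand-in for the Data helper (an assumption; the real Data class lives elsewhere) and a plain dict as the callback registry; the obj argument is dropped for brevity.
from types import SimpleNamespace as Data

registry = {}  # method name -> list of (beforeback, afterback) pairs

def register(name, before=None, after=None):
    registry.setdefault(name, []).append((before, after))

def call(name, method, *args, **kwargs):
    pairs = registry.get(name, [])
    # run beforebacks first and hold on to their return values
    held = [b(Data(name=name, args=args, kwargs=dict(kwargs))) if b else None
            for b, _ in pairs]
    out = method(*args, **kwargs)
    for (_, after), bval in zip(pairs, held):
        if after is not None:
            after(Data(before=bval, name=name, value=out))
        elif callable(bval):
            bval(out)  # the beforeback returned an afterback expecting the result
    return out

register("append", before=lambda c: print("appending", c.args))
items = []
call("append", items.append, 42)  # prints "appending (42,)" then appends 42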
def ksl(A, y0, tau, verb=1, scheme='symm', space=8, rmax=2000, use_normest=1):
""" Dynamical tensor-train approximation based on projector splitting
This function performs one step of dynamical tensor-train approximation
for the equation
.. math ::
\\frac{dy}{dt} = A y, \\quad y(0) = y_0
and outputs approximation for :math:`y(\\tau)`
:References:
1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken.
Time integration of tensor trains. arXiv preprint 1407.2042, 2014.
http://arxiv.org/abs/1407.2042
2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator
for dynamical low-rank approximation. BIT, 54(1):171-188, 2014.
http://dx.doi.org/10.1007/s10543-013-0454-0
:param A: Matrix in the TT-format
:type A: matrix
:param y0: Initial condition in the TT-format,
:type y0: tensor
:param tau: Timestep
:type tau: float
:param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order
:type scheme: str
:param space: Maximal dimension of the Krylov space for the local EXPOKIT solver.
:type space: int
:param use_normest: Use matrix norm estimation instead of the true 1-norm in KSL procedure. 0 -use true norm, 1 - Higham norm estimator, 2 - fixed norm=1.0 (for testing purposes only)
:type use_normest: int, default: 1
:rtype: tensor
:Example:
>>> import tt
>>> import tt.ksl
>>> import numpy as np
>>> d = 8
>>> a = tt.qlaplace_dd([d, d, d])
>>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 1 eigenvalues with accuracy 1E-06
swp: 1 er = 1.1408 rmax:2
swp: 2 er = 190.01 rmax:2
swp: 3 er = 2.72582E-08 rmax:2
Total number of matvecs: 0
>>> y1 = tt.ksl.ksl(a, y0, 1e-2)
Solving a real-valued dynamical problem with tau=1E-02
>>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change
0.0
"""
y0 = y0.round(1e-14) # This will fix ranks
# to be no more than maximal reasonable.
# Fortran part doesn't handle excessive ranks
ry = y0.r.copy()
    if scheme == 'symm':
tp = 2
else:
tp = 1
usenrm = int(use_normest)
# Check for dtype
y = tt.vector()
if np.iscomplex(A.tt.core).any() or np.iscomplex(y0.core).any():
dyn_tt.dyn_tt.ztt_ksl(
y0.d,
A.n,
A.m,
A.tt.r,
A.tt.core + 0j,
y0.core + 0j,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space,
usenrm
)
y.core = dyn_tt.dyn_tt.zresult_core.copy()
else:
A.tt.core = np.real(A.tt.core)
y0.core = np.real(y0.core)
dyn_tt.dyn_tt.tt_ksl(
y0.d,
A.n,
A.m,
A.tt.r,
A.tt.core,
y0.core,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space,
usenrm
)
y.core = dyn_tt.dyn_tt.dresult_core.copy()
dyn_tt.dyn_tt.deallocate_result()
y.d = y0.d
y.n = A.n.copy()
y.r = ry
y.get_ps()
return y | Dynamical tensor-train approximation based on projector splitting
This function performs one step of dynamical tensor-train approximation
for the equation
.. math ::
\\frac{dy}{dt} = A y, \\quad y(0) = y_0
and outputs approximation for :math:`y(\\tau)`
:References:
1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken.
Time integration of tensor trains. arXiv preprint 1407.2042, 2014.
http://arxiv.org/abs/1407.2042
2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator
for dynamical low-rank approximation. BIT, 54(1):171-188, 2014.
http://dx.doi.org/10.1007/s10543-013-0454-0
:param A: Matrix in the TT-format
:type A: matrix
:param y0: Initial condition in the TT-format,
:type y0: tensor
:param tau: Timestep
:type tau: float
:param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order
:type scheme: str
:param space: Maximal dimension of the Krylov space for the local EXPOKIT solver.
:type space: int
:param use_normest: Use matrix norm estimation instead of the true 1-norm in KSL procedure. 0 -use true norm, 1 - Higham norm estimator, 2 - fixed norm=1.0 (for testing purposes only)
:type use_normest: int, default: 1
:rtype: tensor
:Example:
>>> import tt
>>> import tt.ksl
>>> import numpy as np
>>> d = 8
>>> a = tt.qlaplace_dd([d, d, d])
>>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 1 eigenvalues with accuracy 1E-06
swp: 1 er = 1.1408 rmax:2
swp: 2 er = 190.01 rmax:2
swp: 3 er = 2.72582E-08 rmax:2
Total number of matvecs: 0
>>> y1 = tt.ksl.ksl(a, y0, 1e-2)
Solving a real-valued dynamical problem with tau=1E-02
>>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change
0.0 | Below is the the instruction that describes the task:
### Input:
Dynamical tensor-train approximation based on projector splitting
This function performs one step of dynamical tensor-train approximation
for the equation
.. math ::
\\frac{dy}{dt} = A y, \\quad y(0) = y_0
and outputs approximation for :math:`y(\\tau)`
:References:
1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken.
Time integration of tensor trains. arXiv preprint 1407.2042, 2014.
http://arxiv.org/abs/1407.2042
2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator
for dynamical low-rank approximation. BIT, 54(1):171-188, 2014.
http://dx.doi.org/10.1007/s10543-013-0454-0
:param A: Matrix in the TT-format
:type A: matrix
:param y0: Initial condition in the TT-format,
:type y0: tensor
:param tau: Timestep
:type tau: float
:param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order
:type scheme: str
:param space: Maximal dimension of the Krylov space for the local EXPOKIT solver.
:type space: int
:param use_normest: Use matrix norm estimation instead of the true 1-norm in KSL procedure. 0 -use true norm, 1 - Higham norm estimator, 2 - fixed norm=1.0 (for testing purposes only)
:type use_normest: int, default: 1
:rtype: tensor
:Example:
>>> import tt
>>> import tt.ksl
>>> import numpy as np
>>> d = 8
>>> a = tt.qlaplace_dd([d, d, d])
>>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 1 eigenvalues with accuracy 1E-06
swp: 1 er = 1.1408 rmax:2
swp: 2 er = 190.01 rmax:2
swp: 3 er = 2.72582E-08 rmax:2
Total number of matvecs: 0
>>> y1 = tt.ksl.ksl(a, y0, 1e-2)
Solving a real-valued dynamical problem with tau=1E-02
>>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change
0.0
### Response:
def ksl(A, y0, tau, verb=1, scheme='symm', space=8, rmax=2000, use_normest=1):
""" Dynamical tensor-train approximation based on projector splitting
This function performs one step of dynamical tensor-train approximation
for the equation
.. math ::
\\frac{dy}{dt} = A y, \\quad y(0) = y_0
and outputs approximation for :math:`y(\\tau)`
:References:
1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken.
Time integration of tensor trains. arXiv preprint 1407.2042, 2014.
http://arxiv.org/abs/1407.2042
2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator
for dynamical low-rank approximation. BIT, 54(1):171-188, 2014.
http://dx.doi.org/10.1007/s10543-013-0454-0
:param A: Matrix in the TT-format
:type A: matrix
:param y0: Initial condition in the TT-format,
:type y0: tensor
:param tau: Timestep
:type tau: float
:param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order
:type scheme: str
:param space: Maximal dimension of the Krylov space for the local EXPOKIT solver.
:type space: int
:param use_normest: Use matrix norm estimation instead of the true 1-norm in KSL procedure. 0 -use true norm, 1 - Higham norm estimator, 2 - fixed norm=1.0 (for testing purposes only)
:type use_normest: int, default: 1
:rtype: tensor
:Example:
>>> import tt
>>> import tt.ksl
>>> import numpy as np
>>> d = 8
>>> a = tt.qlaplace_dd([d, d, d])
>>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 1 eigenvalues with accuracy 1E-06
swp: 1 er = 1.1408 rmax:2
swp: 2 er = 190.01 rmax:2
swp: 3 er = 2.72582E-08 rmax:2
Total number of matvecs: 0
>>> y1 = tt.ksl.ksl(a, y0, 1e-2)
Solving a real-valued dynamical problem with tau=1E-02
>>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change
0.0
"""
y0 = y0.round(1e-14) # This will fix ranks
# to be no more than maximal reasonable.
# Fortran part doesn't handle excessive ranks
ry = y0.r.copy()
    if scheme == 'symm':
tp = 2
else:
tp = 1
usenrm = int(use_normest)
# Check for dtype
y = tt.vector()
if np.iscomplex(A.tt.core).any() or np.iscomplex(y0.core).any():
dyn_tt.dyn_tt.ztt_ksl(
y0.d,
A.n,
A.m,
A.tt.r,
A.tt.core + 0j,
y0.core + 0j,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space,
usenrm
)
y.core = dyn_tt.dyn_tt.zresult_core.copy()
else:
A.tt.core = np.real(A.tt.core)
y0.core = np.real(y0.core)
dyn_tt.dyn_tt.tt_ksl(
y0.d,
A.n,
A.m,
A.tt.r,
A.tt.core,
y0.core,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space,
usenrm
)
y.core = dyn_tt.dyn_tt.dresult_core.copy()
dyn_tt.dyn_tt.deallocate_result()
y.d = y0.d
y.n = A.n.copy()
y.r = ry
y.get_ps()
return y |
def get_managed_policy_document(policy_arn, policy_metadata=None, client=None, **kwargs):
"""Retrieve the currently active (i.e. 'default') policy version document for a policy.
:param policy_arn:
    :param policy_metadata: This is a previously fetched managed policy response from boto/cloudaux.
This is used to prevent unnecessary API calls to get the initial policy default version id.
:param client:
:param kwargs:
:return:
"""
if not policy_metadata:
policy_metadata = client.get_policy(PolicyArn=policy_arn)
policy_document = client.get_policy_version(PolicyArn=policy_arn,
VersionId=policy_metadata['Policy']['DefaultVersionId'])
return policy_document['PolicyVersion']['Document'] | Retrieve the currently active (i.e. 'default') policy version document for a policy.
:param policy_arn:
    :param policy_metadata: This is a previously fetched managed policy response from boto/cloudaux.
This is used to prevent unnecessary API calls to get the initial policy default version id.
:param client:
:param kwargs:
:return: | Below is the the instruction that describes the task:
### Input:
Retrieve the currently active (i.e. 'default') policy version document for a policy.
:param policy_arn:
    :param policy_metadata: This is a previously fetched managed policy response from boto/cloudaux.
This is used to prevent unnecessary API calls to get the initial policy default version id.
:param client:
:param kwargs:
:return:
### Response:
def get_managed_policy_document(policy_arn, policy_metadata=None, client=None, **kwargs):
"""Retrieve the currently active (i.e. 'default') policy version document for a policy.
:param policy_arn:
    :param policy_metadata: This is a previously fetched managed policy response from boto/cloudaux.
This is used to prevent unnecessary API calls to get the initial policy default version id.
:param client:
:param kwargs:
:return:
"""
if not policy_metadata:
policy_metadata = client.get_policy(PolicyArn=policy_arn)
policy_document = client.get_policy_version(PolicyArn=policy_arn,
VersionId=policy_metadata['Policy']['DefaultVersionId'])
return policy_document['PolicyVersion']['Document'] |
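A hedged usage sketch with a plain boto3 IAM client; the AWS-managed policy ARN below is only an example, and credentials/region are assumed to be configured in the environment.
import boto3

iam = boto3.client("iam")
document = get_managed_policy_document(
    "arn:aws:iam::aws:policy/ReadOnlyAccess", client=iam)
print(document["Statement"][0]["Effect"])  # e.g. "Allow"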
def default_format(self):
"""
Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string.
"""
user = self.user
if user.first_name is not None:
return self.full_name
elif user.username is not None:
return user.username
else:
return str(user.id) | Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string. | Below is the the instruction that describes the task:
### Input:
Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string.
### Response:
def default_format(self):
"""
Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string.
"""
user = self.user
if user.first_name is not None:
return self.full_name
elif user.username is not None:
return user.username
else:
return str(user.id) |
def standardize_language(language, plugin):
"""Maps a string to the equivalent Pygments language.
Returns the standardized language string.
"""
if not language:
return None
# standardize language for this plugin
if plugin:
plugin = plugin.split(' ')[-1].split('/')[0].split('-')[0]
standardized = get_language_from_json(language, plugin)
if standardized is not None:
return standardized
# standardize language against default languages
return get_language_from_json(language, 'default') | Maps a string to the equivalent Pygments language.
Returns the standardized language string. | Below is the the instruction that describes the task:
### Input:
Maps a string to the equivalent Pygments language.
Returns the standardized language string.
### Response:
def standardize_language(language, plugin):
"""Maps a string to the equivalent Pygments language.
Returns the standardized language string.
"""
if not language:
return None
# standardize language for this plugin
if plugin:
plugin = plugin.split(' ')[-1].split('/')[0].split('-')[0]
standardized = get_language_from_json(language, plugin)
if standardized is not None:
return standardized
# standardize language against default languages
return get_language_from_json(language, 'default') |
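The plugin string is parsed by taking the last space-separated token and truncating it at '/' and '-'; a small standalone sketch of just that step (the get_language_from_json lookup itself lives elsewhere, and the plugin strings below are invented examples).
def editor_from_plugin(plugin):
    # mirrors the split chain used above
    return plugin.split(' ')[-1].split('/')[0].split('-')[0]

print(editor_from_plugin("vim/8.1 vim-wakatime/4.0.0"))  # -> vim
print(editor_from_plugin("emacs-wakatime/1.0.0"))        # -> emacs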
def plot(self, x, y, **kw):
"""plot x, y values (erasing old plot),
for method options see PlotPanel.plot.
"""
return self.frame.plot(x,y,**kw) | plot x, y values (erasing old plot),
for method options see PlotPanel.plot. | Below is the the instruction that describes the task:
### Input:
plot x, y values (erasing old plot),
for method options see PlotPanel.plot.
### Response:
def plot(self, x, y, **kw):
"""plot x, y values (erasing old plot),
for method options see PlotPanel.plot.
"""
return self.frame.plot(x,y,**kw) |
def _closest_centroid(self, x):
"""Returns the index of the closest centroid to the sample
"""
closest_centroid = 0
        distance = float('inf')  # sentinel larger than any real distance (10^9 is bitwise XOR, not 10**9)
for i in range(self.n_clusters):
current_distance = linalg.norm(x - self.centroids[i])
if current_distance < distance:
closest_centroid = i
distance = current_distance
return closest_centroid | Returns the index of the closest centroid to the sample | Below is the the instruction that describes the task:
### Input:
Returns the index of the closest centroid to the sample
### Response:
def _closest_centroid(self, x):
"""Returns the index of the closest centroid to the sample
"""
closest_centroid = 0
        distance = float('inf')  # sentinel larger than any real distance (10^9 is bitwise XOR, not 10**9)
for i in range(self.n_clusters):
current_distance = linalg.norm(x - self.centroids[i])
if current_distance < distance:
closest_centroid = i
distance = current_distance
return closest_centroid |
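For comparison, a standalone NumPy version of the same nearest-centroid search; a sketch, not part of the original class, with made-up sample centroids.
import numpy as np

def closest_centroid(x, centroids):
    distances = np.linalg.norm(centroids - x, axis=1)  # Euclidean distance to each centroid
    return int(np.argmin(distances))

centroids = np.array([[0.0, 0.0], [5.0, 5.0]])
print(closest_centroid(np.array([4.0, 4.5]), centroids))  # -> 1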
def total_reads_from_grabix(in_file):
"""Retrieve total reads in a fastq file from grabix index.
"""
gbi_file = _get_grabix_index(in_file)
if gbi_file:
with open(gbi_file) as in_handle:
next(in_handle) # throw away
num_lines = int(next(in_handle).strip())
assert num_lines % 4 == 0, "Expected lines to be multiple of 4"
return num_lines // 4
else:
return 0 | Retrieve total reads in a fastq file from grabix index. | Below is the the instruction that describes the task:
### Input:
Retrieve total reads in a fastq file from grabix index.
### Response:
def total_reads_from_grabix(in_file):
"""Retrieve total reads in a fastq file from grabix index.
"""
gbi_file = _get_grabix_index(in_file)
if gbi_file:
with open(gbi_file) as in_handle:
next(in_handle) # throw away
num_lines = int(next(in_handle).strip())
assert num_lines % 4 == 0, "Expected lines to be multiple of 4"
return num_lines // 4
else:
return 0 |
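A sketch of the index layout this relies on: the first line of the .gbi file is a header, the second line is the total line count of the FASTQ, and four lines make one read. The in-memory "index" below is synthetic and its header text is an assumption.
import io

def reads_from_gbi_handle(handle):
    next(handle)                       # skip the header line
    num_lines = int(next(handle).strip())
    assert num_lines % 4 == 0, "Expected lines to be multiple of 4"
    return num_lines // 4

fake_gbi = io.StringIO("##grabix\n4000\n")
print(reads_from_gbi_handle(fake_gbi))  # -> 1000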
def enqueue(self, klass, *args):
"""Enqueue a job into a specific queue. Make sure the class you are
passing has **queue** attribute and a **perform** method on it.
"""
queue = getattr(klass,'queue', None)
if queue:
class_name = '%s.%s' % (klass.__module__, klass.__name__)
self.enqueue_from_string(class_name, queue, *args)
else:
logger.warning("unable to enqueue job with class %s" % str(klass)) | Enqueue a job into a specific queue. Make sure the class you are
passing has **queue** attribute and a **perform** method on it. | Below is the the instruction that describes the task:
### Input:
Enqueue a job into a specific queue. Make sure the class you are
passing has **queue** attribute and a **perform** method on it.
### Response:
def enqueue(self, klass, *args):
"""Enqueue a job into a specific queue. Make sure the class you are
passing has **queue** attribute and a **perform** method on it.
"""
queue = getattr(klass,'queue', None)
if queue:
class_name = '%s.%s' % (klass.__module__, klass.__name__)
self.enqueue_from_string(class_name, queue, *args)
else:
logger.warning("unable to enqueue job with class %s" % str(klass)) |
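A hedged sketch of the job class shape this expects: a queue attribute plus a perform method. The SendEmail class and the "emails" queue name are made up for illustration.
class SendEmail(object):
    queue = "emails"

    @staticmethod
    def perform(address, subject):
        print("sending to", address, subject)

# enqueue(SendEmail, "user@example.com", "Welcome") would serialize the class
# as "<module>.SendEmail" and push the job onto the "emails" queue.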
def _load_permissions(campus, calendarid, resp_fragment, permission_list):
"""
:return: a list of sorted trumba.Permission objects
None if error, [] if not exists
"""
for record in resp_fragment:
if not _is_valid_email(record['Email']):
# skip the non UW users
continue
perm = Permission()
perm.calendarid = calendarid
perm.campus = campus
perm.uwnetid = _extract_uwnetid(record['Email'])
perm.level = record['Level']
perm.name = str(record['Name'])
permission_list.append(perm) | :return: a list of sorted trumba.Permission objects
None if error, [] if not exists | Below is the the instruction that describes the task:
### Input:
:return: a list of sorted trumba.Permission objects
None if error, [] if not exists
### Response:
def _load_permissions(campus, calendarid, resp_fragment, permission_list):
"""
:return: a list of sorted trumba.Permission objects
None if error, [] if not exists
"""
for record in resp_fragment:
if not _is_valid_email(record['Email']):
# skip the non UW users
continue
perm = Permission()
perm.calendarid = calendarid
perm.campus = campus
perm.uwnetid = _extract_uwnetid(record['Email'])
perm.level = record['Level']
perm.name = str(record['Name'])
permission_list.append(perm) |
def process_lines( self, input_lines, **kwargs ):
''' Executes the pipeline of subsequent VISL_CG3 commands. The first process
in pipeline gets input_lines as an input, and each subsequent process gets
the output of the previous process as an input.
The idea of how to construct the pipeline borrows from:
https://github.com/estnltk/estnltk/blob/1.4.0/estnltk/syntax/tagger.py
Returns the result of the last process in the pipeline, either as a string
or, alternatively, as a list of strings (if split_result == True);
Parameters
-----------
input_lines : list of str
The input text for the pipeline; Should be in same format as the output
of SyntaxPreprocessing;
split_result : bool
Optional argument specifying whether the result should be split by
newlines, and returned as a list of strings/lines instead;
Default:False
remove_info : bool
Optional argument specifying whether the additional information added
during the preprocessing and syntactic processing should be removed
from the results;
Default:True;
The method cleanup_lines() will be used for removing additional info,
and all the parameters passed to this method will be also forwarded to
the cleanup method;
'''
split_result_lines = False
remove_info = True
for argName, argVal in kwargs.items() :
if argName in ['split_result_lines', 'split_result'] and argVal in [True, False]:
split_result_lines = argVal
if argName in ['remove_info', 'info_remover', 'clean_up'] and argVal in [True, False]:
remove_info = argVal
# 1) Construct the input file for the first process in the pipeline
temp_input_file = \
tempfile.NamedTemporaryFile(prefix='vislcg3_in.', mode='w', delete=False)
temp_input_file.close()
# We have to open separately here for writing, because Py 2.7 does not support
# passing parameter encoding='utf-8' to the NamedTemporaryFile;
out_f = codecs.open(temp_input_file.name, mode='w', encoding='utf-8')
for line in input_lines:
out_f.write( line.rstrip() )
out_f.write( '\n' )
out_f.close()
# TODO: tempfile is currently used to ensure that the input is in 'utf-8',
# but perhaps we can somehow ensure it without using tempfile ??
# 2) Dynamically construct the pipeline and open processes
pipeline = []
for i in range( len(self.rules_pipeline) ):
rule_file = self.rules_pipeline[i]
process_cmd = [self.vislcg_cmd, '-o', '-g', os.path.join(self.rules_dir, rule_file)]
process = None
if i == 0:
# The first process takes input from the file
process_cmd.extend( ['-I', temp_input_file.name] )
process = Popen(process_cmd, stdin=PIPE, stdout=PIPE)
else:
# A subsequent process takes output of the last process as an input
process = Popen(process_cmd, stdin=pipeline[-1]['process'].stdout, stdout=PIPE)
# Record the process
process_dict = {'process':process, 'cmd':process_cmd}
pipeline.append( process_dict )
# 3) Close all stdout streams, except the last one
for i in range( len(pipeline) ):
if i != len(pipeline) - 1:
pipeline[i]['process'].stdout.close()
        # 4) Communicate results from the last item in the pipeline
result = as_unicode( pipeline[-1]['process'].communicate()[0] )
pipeline[-1]['process'].stdout.close() # Close the last process
# Clean-up
# 1) remove temp file
os.remove(temp_input_file.name)
# 2) remove additional info, if required
if remove_info:
result = '\n'.join( cleanup_lines( result.split('\n'), **kwargs ))
return result if not split_result_lines else result.split('\n') | Executes the pipeline of subsequent VISL_CG3 commands. The first process
in pipeline gets input_lines as an input, and each subsequent process gets
the output of the previous process as an input.
The idea of how to construct the pipeline borrows from:
https://github.com/estnltk/estnltk/blob/1.4.0/estnltk/syntax/tagger.py
Returns the result of the last process in the pipeline, either as a string
or, alternatively, as a list of strings (if split_result == True);
Parameters
-----------
input_lines : list of str
The input text for the pipeline; Should be in same format as the output
of SyntaxPreprocessing;
split_result : bool
Optional argument specifying whether the result should be split by
newlines, and returned as a list of strings/lines instead;
Default:False
remove_info : bool
Optional argument specifying whether the additional information added
during the preprocessing and syntactic processing should be removed
from the results;
Default:True;
The method cleanup_lines() will be used for removing additional info,
and all the parameters passed to this method will be also forwarded to
the cleanup method; | Below is the the instruction that describes the task:
### Input:
Executes the pipeline of subsequent VISL_CG3 commands. The first process
in pipeline gets input_lines as an input, and each subsequent process gets
the output of the previous process as an input.
The idea of how to construct the pipeline borrows from:
https://github.com/estnltk/estnltk/blob/1.4.0/estnltk/syntax/tagger.py
Returns the result of the last process in the pipeline, either as a string
or, alternatively, as a list of strings (if split_result == True);
Parameters
-----------
input_lines : list of str
The input text for the pipeline; Should be in same format as the output
of SyntaxPreprocessing;
split_result : bool
Optional argument specifying whether the result should be split by
newlines, and returned as a list of strings/lines instead;
Default:False
remove_info : bool
Optional argument specifying whether the additional information added
during the preprocessing and syntactic processing should be removed
from the results;
Default:True;
The method cleanup_lines() will be used for removing additional info,
and all the parameters passed to this method will be also forwarded to
the cleanup method;
### Response:
def process_lines( self, input_lines, **kwargs ):
''' Executes the pipeline of subsequent VISL_CG3 commands. The first process
in pipeline gets input_lines as an input, and each subsequent process gets
the output of the previous process as an input.
The idea of how to construct the pipeline borrows from:
https://github.com/estnltk/estnltk/blob/1.4.0/estnltk/syntax/tagger.py
Returns the result of the last process in the pipeline, either as a string
or, alternatively, as a list of strings (if split_result == True);
Parameters
-----------
input_lines : list of str
The input text for the pipeline; Should be in same format as the output
of SyntaxPreprocessing;
split_result : bool
Optional argument specifying whether the result should be split by
newlines, and returned as a list of strings/lines instead;
Default:False
remove_info : bool
Optional argument specifying whether the additional information added
during the preprocessing and syntactic processing should be removed
from the results;
Default:True;
The method cleanup_lines() will be used for removing additional info,
and all the parameters passed to this method will be also forwarded to
the cleanup method;
'''
split_result_lines = False
remove_info = True
for argName, argVal in kwargs.items() :
if argName in ['split_result_lines', 'split_result'] and argVal in [True, False]:
split_result_lines = argVal
if argName in ['remove_info', 'info_remover', 'clean_up'] and argVal in [True, False]:
remove_info = argVal
# 1) Construct the input file for the first process in the pipeline
temp_input_file = \
tempfile.NamedTemporaryFile(prefix='vislcg3_in.', mode='w', delete=False)
temp_input_file.close()
# We have to open separately here for writing, because Py 2.7 does not support
# passing parameter encoding='utf-8' to the NamedTemporaryFile;
out_f = codecs.open(temp_input_file.name, mode='w', encoding='utf-8')
for line in input_lines:
out_f.write( line.rstrip() )
out_f.write( '\n' )
out_f.close()
# TODO: tempfile is currently used to ensure that the input is in 'utf-8',
# but perhaps we can somehow ensure it without using tempfile ??
# 2) Dynamically construct the pipeline and open processes
pipeline = []
for i in range( len(self.rules_pipeline) ):
rule_file = self.rules_pipeline[i]
process_cmd = [self.vislcg_cmd, '-o', '-g', os.path.join(self.rules_dir, rule_file)]
process = None
if i == 0:
# The first process takes input from the file
process_cmd.extend( ['-I', temp_input_file.name] )
process = Popen(process_cmd, stdin=PIPE, stdout=PIPE)
else:
# A subsequent process takes output of the last process as an input
process = Popen(process_cmd, stdin=pipeline[-1]['process'].stdout, stdout=PIPE)
# Record the process
process_dict = {'process':process, 'cmd':process_cmd}
pipeline.append( process_dict )
# 3) Close all stdout streams, except the last one
for i in range( len(pipeline) ):
if i != len(pipeline) - 1:
pipeline[i]['process'].stdout.close()
        # 4) Communicate results from the last item in the pipeline
result = as_unicode( pipeline[-1]['process'].communicate()[0] )
pipeline[-1]['process'].stdout.close() # Close the last process
# Clean-up
# 1) remove temp file
os.remove(temp_input_file.name)
# 2) remove additional info, if required
if remove_info:
result = '\n'.join( cleanup_lines( result.split('\n'), **kwargs ))
return result if not split_result_lines else result.split('\n') |
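A minimal sketch of the same Popen-chaining pattern with ordinary Unix tools, assuming the input is small enough to write to stdin directly instead of going through a temp file; the tr/rev commands are only illustrative.
from subprocess import Popen, PIPE

def run_pipeline(commands, input_text):
    procs = []
    for i, cmd in enumerate(commands):
        stdin = PIPE if i == 0 else procs[-1].stdout
        procs.append(Popen(cmd, stdin=stdin, stdout=PIPE))
    for p in procs[:-1]:
        p.stdout.close()              # let SIGPIPE propagate between stages
    procs[0].stdin.write(input_text.encode("utf-8"))
    procs[0].stdin.close()
    return procs[-1].communicate()[0].decode("utf-8")

print(run_pipeline([["tr", "a-z", "A-Z"], ["rev"]], "tere\n"))  # -> ERET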
def _save_datasets_as_mitiff(self, datasets, image_description,
                                 gen_filename, **kwargs):
        """Put all together and save as a tiff file with the special tag
making it a mitiff file.
"""
from libtiff import TIFF
tif = TIFF.open(gen_filename, mode='w')
tif.SetField(IMAGEDESCRIPTION, (image_description).encode('utf-8'))
cns = self.translate_channel_name.get(kwargs['sensor'], {})
if isinstance(datasets, list):
LOG.debug("Saving datasets as list")
for _cn in self.channel_order[kwargs['sensor']]:
for dataset in datasets:
if dataset.attrs['name'] == _cn:
reverse_offset = 0.
reverse_scale = 1.
if dataset.attrs['calibration'] == 'brightness_temperature':
reverse_offset = 255.
reverse_scale = -1.
dataset.data += KELVIN_TO_CELSIUS
# Need to possible translate channels names from satpy to mitiff
cn = cns.get(dataset.attrs['name'], dataset.attrs['name'])
_data = reverse_offset + reverse_scale * (((dataset.data - float(self.mitiff_config[
kwargs['sensor']][cn]['min-val'])) /
(float(self.mitiff_config[kwargs['sensor']][cn]['max-val']) -
float(self.mitiff_config[kwargs['sensor']][cn]['min-val']))) * 255.)
data = _data.clip(0, 255)
tif.write_image(data.astype(np.uint8), compression='deflate')
break
elif 'dataset' in datasets.attrs['name']:
LOG.debug("Saving %s as a dataset.", datasets.attrs['name'])
for _cn in self.channel_order[kwargs['sensor']]:
for i, band in enumerate(datasets['bands']):
if band == _cn:
chn = datasets.sel(bands=band)
reverse_offset = 0.
reverse_scale = 1.
if chn.attrs['prerequisites'][i][4] == 'brightness_temperature':
reverse_offset = 255.
reverse_scale = -1.
chn.data += KELVIN_TO_CELSIUS
# Need to possible translate channels names from satpy to mitiff
cn = cns.get(chn.attrs['prerequisites'][i][0],
chn.attrs['prerequisites'][i][0])
_data = reverse_offset + reverse_scale * (((chn.data - float(self.mitiff_config[
kwargs['sensor']][cn]['min-val'])) /
(float(self.mitiff_config[kwargs['sensor']][cn]['max-val']) -
float(self.mitiff_config[kwargs['sensor']][cn]['min-val']))) * 255.)
data = _data.clip(0, 255)
tif.write_image(data.astype(np.uint8), compression='deflate')
break
else:
LOG.debug("Saving datasets as enhanced image")
img = get_enhanced_image(datasets.squeeze(), enhance=self.enhancer)
for i, band in enumerate(img.data['bands']):
chn = img.data.sel(bands=band)
data = chn.values.clip(0, 1) * 254. + 1
data = data.clip(0, 255)
tif.write_image(data.astype(np.uint8), compression='deflate')
        del tif | Put all together and save as a tiff file with the special tag
making it a mitiff file. | Below is the the instruction that describes the task:
### Input:
Put all together and save as a tiff file with the special tag
making it a mitiff file.
### Response:
def _save_datasets_as_mitiff(self, datasets, image_description,
                                 gen_filename, **kwargs):
        """Put all together and save as a tiff file with the special tag
making it a mitiff file.
"""
from libtiff import TIFF
tif = TIFF.open(gen_filename, mode='w')
tif.SetField(IMAGEDESCRIPTION, (image_description).encode('utf-8'))
cns = self.translate_channel_name.get(kwargs['sensor'], {})
if isinstance(datasets, list):
LOG.debug("Saving datasets as list")
for _cn in self.channel_order[kwargs['sensor']]:
for dataset in datasets:
if dataset.attrs['name'] == _cn:
reverse_offset = 0.
reverse_scale = 1.
if dataset.attrs['calibration'] == 'brightness_temperature':
reverse_offset = 255.
reverse_scale = -1.
dataset.data += KELVIN_TO_CELSIUS
# Need to possible translate channels names from satpy to mitiff
cn = cns.get(dataset.attrs['name'], dataset.attrs['name'])
_data = reverse_offset + reverse_scale * (((dataset.data - float(self.mitiff_config[
kwargs['sensor']][cn]['min-val'])) /
(float(self.mitiff_config[kwargs['sensor']][cn]['max-val']) -
float(self.mitiff_config[kwargs['sensor']][cn]['min-val']))) * 255.)
data = _data.clip(0, 255)
tif.write_image(data.astype(np.uint8), compression='deflate')
break
elif 'dataset' in datasets.attrs['name']:
LOG.debug("Saving %s as a dataset.", datasets.attrs['name'])
for _cn in self.channel_order[kwargs['sensor']]:
for i, band in enumerate(datasets['bands']):
if band == _cn:
chn = datasets.sel(bands=band)
reverse_offset = 0.
reverse_scale = 1.
if chn.attrs['prerequisites'][i][4] == 'brightness_temperature':
reverse_offset = 255.
reverse_scale = -1.
chn.data += KELVIN_TO_CELSIUS
# Need to possible translate channels names from satpy to mitiff
cn = cns.get(chn.attrs['prerequisites'][i][0],
chn.attrs['prerequisites'][i][0])
_data = reverse_offset + reverse_scale * (((chn.data - float(self.mitiff_config[
kwargs['sensor']][cn]['min-val'])) /
(float(self.mitiff_config[kwargs['sensor']][cn]['max-val']) -
float(self.mitiff_config[kwargs['sensor']][cn]['min-val']))) * 255.)
data = _data.clip(0, 255)
tif.write_image(data.astype(np.uint8), compression='deflate')
break
else:
LOG.debug("Saving datasets as enhanced image")
img = get_enhanced_image(datasets.squeeze(), enhance=self.enhancer)
for i, band in enumerate(img.data['bands']):
chn = img.data.sel(bands=band)
data = chn.values.clip(0, 1) * 254. + 1
data = data.clip(0, 255)
tif.write_image(data.astype(np.uint8), compression='deflate')
del tif |
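The per-channel rescale above is a plain linear map from [min-val, max-val] onto 0-255, with brightness temperatures flipped via reverse_offset=255 and reverse_scale=-1; a small standalone sketch of that arithmetic with invented calibration bounds.
import numpy as np

def rescale(data, min_val, max_val, flip=False):
    scaled = (data - min_val) / (max_val - min_val) * 255.0
    if flip:
        scaled = 255.0 - scaled        # colder pixels end up brighter
    return np.clip(scaled, 0, 255).astype(np.uint8)

print(rescale(np.array([-80.0, 0.0, 50.0]), -80.0, 50.0, flip=True))  # -> [255  98   0]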
def unparse_flags(self):
"""Unparses all flags to the point before any FLAGS(argv) was called."""
for f in self._flags().values():
f.unparse()
# We log this message before marking flags as unparsed to avoid a
# problem when the logging library causes flags access.
logging.info('unparse_flags() called; flags access will now raise errors.')
self.__dict__['__flags_parsed'] = False
self.__dict__['__unparse_flags_called'] = True | Unparses all flags to the point before any FLAGS(argv) was called. | Below is the the instruction that describes the task:
### Input:
Unparses all flags to the point before any FLAGS(argv) was called.
### Response:
def unparse_flags(self):
"""Unparses all flags to the point before any FLAGS(argv) was called."""
for f in self._flags().values():
f.unparse()
# We log this message before marking flags as unparsed to avoid a
# problem when the logging library causes flags access.
logging.info('unparse_flags() called; flags access will now raise errors.')
self.__dict__['__flags_parsed'] = False
self.__dict__['__unparse_flags_called'] = True |
def delete_client(self, identifier):
"""Delete client."""
params = {'id': identifier}
response = yield from self._transact(SERVER_DELETECLIENT, params)
self.synchronize(response) | Delete client. | Below is the the instruction that describes the task:
### Input:
Delete client.
### Response:
def delete_client(self, identifier):
"""Delete client."""
params = {'id': identifier}
response = yield from self._transact(SERVER_DELETECLIENT, params)
self.synchronize(response) |
def export_items(elastic_url, in_index, out_index, elastic_url_out=None,
search_after=False, search_after_value=None, limit=None,
copy=False):
""" Export items from in_index to out_index using the correct mapping """
if not limit:
limit = DEFAULT_LIMIT
if search_after_value:
search_after_value_timestamp = int(search_after_value[0])
search_after_value_uuid = search_after_value[1]
search_after_value = [search_after_value_timestamp, search_after_value_uuid]
logging.info("Exporting items from %s/%s to %s", elastic_url, in_index, out_index)
count_res = requests.get('%s/%s/_count' % (elastic_url, in_index))
try:
count_res.raise_for_status()
except requests.exceptions.HTTPError:
if count_res.status_code == 404:
            logging.error("The index does not exist: %s", in_index)
else:
logging.error(count_res.text)
sys.exit(1)
logging.info("Total items to copy: %i", count_res.json()['count'])
# Time to upload the items with the correct mapping
elastic_in = ElasticSearch(elastic_url, in_index)
if not copy:
# Create the correct mapping for the data sources detected from in_index
ds_mapping = find_mapping(elastic_url, in_index)
else:
logging.debug('Using the input index mapping')
ds_mapping = extract_mapping(elastic_url, in_index)
if not elastic_url_out:
elastic_out = ElasticSearch(elastic_url, out_index, mappings=ds_mapping)
else:
elastic_out = ElasticSearch(elastic_url_out, out_index, mappings=ds_mapping)
    # Time to just copy from in_index to out_index
uid_field = find_uuid(elastic_url, in_index)
backend = find_perceval_backend(elastic_url, in_index)
if search_after:
total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit,
search_after_value, scroll=False), uid_field)
else:
total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit), uid_field)
logging.info("Total items copied: %i", total) | Export items from in_index to out_index using the correct mapping | Below is the the instruction that describes the task:
### Input:
Export items from in_index to out_index using the correct mapping
### Response:
def export_items(elastic_url, in_index, out_index, elastic_url_out=None,
search_after=False, search_after_value=None, limit=None,
copy=False):
""" Export items from in_index to out_index using the correct mapping """
if not limit:
limit = DEFAULT_LIMIT
if search_after_value:
search_after_value_timestamp = int(search_after_value[0])
search_after_value_uuid = search_after_value[1]
search_after_value = [search_after_value_timestamp, search_after_value_uuid]
logging.info("Exporting items from %s/%s to %s", elastic_url, in_index, out_index)
count_res = requests.get('%s/%s/_count' % (elastic_url, in_index))
try:
count_res.raise_for_status()
except requests.exceptions.HTTPError:
if count_res.status_code == 404:
            logging.error("The index does not exist: %s", in_index)
else:
logging.error(count_res.text)
sys.exit(1)
logging.info("Total items to copy: %i", count_res.json()['count'])
# Time to upload the items with the correct mapping
elastic_in = ElasticSearch(elastic_url, in_index)
if not copy:
# Create the correct mapping for the data sources detected from in_index
ds_mapping = find_mapping(elastic_url, in_index)
else:
logging.debug('Using the input index mapping')
ds_mapping = extract_mapping(elastic_url, in_index)
if not elastic_url_out:
elastic_out = ElasticSearch(elastic_url, out_index, mappings=ds_mapping)
else:
elastic_out = ElasticSearch(elastic_url_out, out_index, mappings=ds_mapping)
    # Time to just copy from in_index to out_index
uid_field = find_uuid(elastic_url, in_index)
backend = find_perceval_backend(elastic_url, in_index)
if search_after:
total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit,
search_after_value, scroll=False), uid_field)
else:
total = elastic_out.bulk_upload(fetch(elastic_in, backend, limit), uid_field)
logging.info("Total items copied: %i", total) |
def top_view(self):
"""Print packages status bar
"""
self.msg.template(78)
print("{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}{10}".format(
"| Package", " " * 17,
"New Version", " " * 8,
"Arch", " " * 4,
"Build", " " * 2,
"Repos", " " * 10,
"Size"))
self.msg.template(78) | Print packages status bar | Below is the the instruction that describes the task:
### Input:
Print packages status bar
### Response:
def top_view(self):
"""Print packages status bar
"""
self.msg.template(78)
print("{0}{1}{2}{3}{4}{5}{6}{7}{8}{9}{10}".format(
"| Package", " " * 17,
"New Version", " " * 8,
"Arch", " " * 4,
"Build", " " * 2,
"Repos", " " * 10,
"Size"))
self.msg.template(78) |
def _map_or_starmap_async(function, iterable, args, kwargs, map_or_starmap):
"""
Shared function between parmap.map_async and parmap.starmap_async.
Refer to those functions for details.
"""
arg_newarg = (("parallel", "pm_parallel"), ("chunksize", "pm_chunksize"),
("pool", "pm_pool"), ("processes", "pm_processes"),
("callback", "pm_callback"),
("error_callback", "pm_error_callback"))
kwargs = _deprecated_kwargs(kwargs, arg_newarg)
chunksize = kwargs.pop("pm_chunksize", None)
callback = kwargs.pop("pm_callback", None)
error_callback = kwargs.pop("pm_error_callback", None)
parallel, pool, close_pool = _create_pool(kwargs)
# Map:
if parallel:
func_star = _get_helper_func(map_or_starmap)
try:
if sys.version_info[0] == 2: # does not support error_callback
result = pool.map_async(func_star,
izip(repeat(function),
iterable,
repeat(list(args)),
repeat(kwargs)),
chunksize, callback)
else:
result = pool.map_async(func_star,
izip(repeat(function),
iterable,
repeat(list(args)),
repeat(kwargs)),
chunksize, callback, error_callback)
finally:
if close_pool:
pool.close()
result = _ParallelAsyncResult(result, pool)
else:
result = _ParallelAsyncResult(result)
else:
values = _serial_map_or_starmap(function, iterable, args, kwargs,
False, map_or_starmap)
result = _DummyAsyncResult(values)
return result | Shared function between parmap.map_async and parmap.starmap_async.
Refer to those functions for details. | Below is the the instruction that describes the task:
### Input:
Shared function between parmap.map_async and parmap.starmap_async.
Refer to those functions for details.
### Response:
def _map_or_starmap_async(function, iterable, args, kwargs, map_or_starmap):
"""
Shared function between parmap.map_async and parmap.starmap_async.
Refer to those functions for details.
"""
arg_newarg = (("parallel", "pm_parallel"), ("chunksize", "pm_chunksize"),
("pool", "pm_pool"), ("processes", "pm_processes"),
("callback", "pm_callback"),
("error_callback", "pm_error_callback"))
kwargs = _deprecated_kwargs(kwargs, arg_newarg)
chunksize = kwargs.pop("pm_chunksize", None)
callback = kwargs.pop("pm_callback", None)
error_callback = kwargs.pop("pm_error_callback", None)
parallel, pool, close_pool = _create_pool(kwargs)
# Map:
if parallel:
func_star = _get_helper_func(map_or_starmap)
try:
if sys.version_info[0] == 2: # does not support error_callback
result = pool.map_async(func_star,
izip(repeat(function),
iterable,
repeat(list(args)),
repeat(kwargs)),
chunksize, callback)
else:
result = pool.map_async(func_star,
izip(repeat(function),
iterable,
repeat(list(args)),
repeat(kwargs)),
chunksize, callback, error_callback)
finally:
if close_pool:
pool.close()
result = _ParallelAsyncResult(result, pool)
else:
result = _ParallelAsyncResult(result)
else:
values = _serial_map_or_starmap(function, iterable, args, kwargs,
False, map_or_starmap)
result = _DummyAsyncResult(values)
return result |
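A hedged usage sketch of the public wrappers built on this helper: parmap passes the extra positional argument (2 below) to every call, and .get() blocks until the pool finishes. The power function and its inputs are invented.
import parmap

def power(x, k):
    return x ** k

if __name__ == "__main__":
    result = parmap.map_async(power, [1, 2, 3, 4], 2, pm_processes=2)
    print(result.get())  # -> [1, 4, 9, 16]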
def find_geo_coords(s):
"""Returns a list of lat/lons found by scanning the given text"""
coords = []
LOG.debug("Matching in text size %s", len(s))
for c in INFO_BOX_LAT_LON.findall(s):
try:
coord = (float(c[1]), float(c[2])) #, c[0])
coords.append(coord)
LOG.debug("Found info box lat/lon: %s", coord)
except Exception as ex:
LOG.warn("Bad parse of info box %s: %s", c, ex)
for c in COORDS_GEN.findall(s):
# Special cases
if skip_coords(c):
            LOG.debug("Ignoring coords %s", c)
continue
m = COORDS_GROUPS.search(c)
if not m:
LOG.warn("Unrecognized coord format: %s", c)
continue
try:
# Remove empty optional groups and remove pipes from matches
g = [(s[0:-1] if s[-1] == '|' else s) for s in list(m.groups()) if s is not None and len(s)]
#LOG.info("Found groups: %s", g)
if len(g) == 1: # Single lat|lon
lat, lon = g[0].split('|')
coord = (float(lat), float(lon)) #, c)
coords.append(coord)
LOG.debug("Found lat|lon: %s", coord)
elif g[3] == 'E' or g[3] == 'W':
lat = depipe(g[0]) * (1 if g[1].upper() == 'N' else -1)
lon = depipe(g[2]) * (1 if g[3].upper() == 'E' else -1)
coord = (lat, lon) #, c)
coords.append(coord)
LOG.debug("Found lat|NS|lon|EW: %s", coord)
else:
LOG.warn("Unrecognized coord format: %s (parsed %s)", c, g)
except Exception as ex:
LOG.warn("Bad parse of %s: %s", c, ex)
l = []
for c in set(coords): # Dedupe; the reality is non-trivial though...we want to keep only the most precise
if c[0] > 90 or c[0] < -90 or c[1] > 180 or c[1] < -180 or (c[0] == 0 and c[1] == 0):
LOG.warn("Invalid lat or lon: %s", c)
else:
l.append({"type": "Point", "coordinates": (c[1], c[0])}) # GeoJSON, lon goes first
return l | Returns a list of lat/lons found by scanning the given text | Below is the the instruction that describes the task:
### Input:
Returns a list of lat/lons found by scanning the given text
### Response:
def find_geo_coords(s):
"""Returns a list of lat/lons found by scanning the given text"""
coords = []
LOG.debug("Matching in text size %s", len(s))
for c in INFO_BOX_LAT_LON.findall(s):
try:
coord = (float(c[1]), float(c[2])) #, c[0])
coords.append(coord)
LOG.debug("Found info box lat/lon: %s", coord)
except Exception as ex:
LOG.warn("Bad parse of info box %s: %s", c, ex)
for c in COORDS_GEN.findall(s):
# Special cases
if skip_coords(c):
            LOG.debug("Ignoring coords %s", c)
continue
m = COORDS_GROUPS.search(c)
if not m:
LOG.warn("Unrecognized coord format: %s", c)
continue
try:
# Remove empty optional groups and remove pipes from matches
g = [(s[0:-1] if s[-1] == '|' else s) for s in list(m.groups()) if s is not None and len(s)]
#LOG.info("Found groups: %s", g)
if len(g) == 1: # Single lat|lon
lat, lon = g[0].split('|')
coord = (float(lat), float(lon)) #, c)
coords.append(coord)
LOG.debug("Found lat|lon: %s", coord)
elif g[3] == 'E' or g[3] == 'W':
lat = depipe(g[0]) * (1 if g[1].upper() == 'N' else -1)
lon = depipe(g[2]) * (1 if g[3].upper() == 'E' else -1)
coord = (lat, lon) #, c)
coords.append(coord)
LOG.debug("Found lat|NS|lon|EW: %s", coord)
else:
LOG.warn("Unrecognized coord format: %s (parsed %s)", c, g)
except Exception as ex:
LOG.warn("Bad parse of %s: %s", c, ex)
l = []
for c in set(coords): # Dedupe; the reality is non-trivial though...we want to keep only the most precise
if c[0] > 90 or c[0] < -90 or c[1] > 180 or c[1] < -180 or (c[0] == 0 and c[1] == 0):
LOG.warn("Invalid lat or lon: %s", c)
else:
l.append({"type": "Point", "coordinates": (c[1], c[0])}) # GeoJSON, lon goes first
return l |
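A standalone sketch of just the final validation step above, the part that does not depend on the module-level regexes: lat/lon pairs are deduplicated, bounds-checked, (0, 0) is dropped, and the survivors become GeoJSON points with longitude first. The sample coordinates are invented.
def to_geojson(coords):
    points = []
    for lat, lon in set(coords):
        if -90 <= lat <= 90 and -180 <= lon <= 180 and (lat, lon) != (0.0, 0.0):
            points.append({"type": "Point", "coordinates": (lon, lat)})
    return points

print(to_geojson([(59.437, 24.754), (59.437, 24.754), (0.0, 0.0), (95.0, 10.0)]))
# -> [{'type': 'Point', 'coordinates': (24.754, 59.437)}]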
def write_byte_data(self, address, register, value):
"""Write a byte value to a device's register. """
LOGGER.debug("Writing byte data %s to register %s on device %s",
bin(value), hex(register), hex(address))
return self.driver.write_byte_data(address, register, value) | Write a byte value to a device's register. | Below is the the instruction that describes the task:
### Input:
Write a byte value to a device's register.
### Response:
def write_byte_data(self, address, register, value):
"""Write a byte value to a device's register. """
LOGGER.debug("Writing byte data %s to register %s on device %s",
bin(value), hex(register), hex(address))
return self.driver.write_byte_data(address, register, value) |
def return_selected_form_items(form_info):
"""
It returns chosen keys list from a given form.
Args:
form_info: serialized list of dict form data
Returns:
selected_keys(list): Chosen keys list
selected_names(list): Chosen channels' or subscribers' names.
"""
selected_keys = []
selected_names = []
for chosen in form_info:
if chosen['choice']:
selected_keys.append(chosen['key'])
selected_names.append(chosen['name'])
return selected_keys, selected_names | It returns chosen keys list from a given form.
Args:
form_info: serialized list of dict form data
Returns:
selected_keys(list): Chosen keys list
selected_names(list): Chosen channels' or subscribers' names. | Below is the the instruction that describes the task:
### Input:
It returns chosen keys list from a given form.
Args:
form_info: serialized list of dict form data
Returns:
selected_keys(list): Chosen keys list
selected_names(list): Chosen channels' or subscribers' names.
### Response:
def return_selected_form_items(form_info):
"""
It returns chosen keys list from a given form.
Args:
form_info: serialized list of dict form data
Returns:
selected_keys(list): Chosen keys list
selected_names(list): Chosen channels' or subscribers' names.
"""
selected_keys = []
selected_names = []
for chosen in form_info:
if chosen['choice']:
selected_keys.append(chosen['key'])
selected_names.append(chosen['name'])
return selected_keys, selected_names |
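Because this helper is a pure function over serialized form rows, a short usage sketch makes the expected input shape concrete; the sample dicts are invented and the function above is assumed to be in scope.

form_info = [
    {'key': 'ch_1', 'name': 'Announcements', 'choice': True},
    {'key': 'ch_2', 'name': 'Random', 'choice': False},
    {'key': 'ch_3', 'name': 'Dev Talk', 'choice': True},
]
keys, names = return_selected_form_items(form_info)
# keys  -> ['ch_1', 'ch_3']
# names -> ['Announcements', 'Dev Talk']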
def list(self, **kwargs):
"""Retrieve a list of objects.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
list: The list of objects, or a generator if `as_list` is False
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the server cannot perform the request
"""
path = '/users/%s/projects' % self._parent.id
return ListMixin.list(self, path=path, **kwargs) | Retrieve a list of objects.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
list: The list of objects, or a generator if `as_list` is False
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the server cannot perform the request | Below is the the instruction that describes the task:
### Input:
Retrieve a list of objects.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
list: The list of objects, or a generator if `as_list` is False
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the server cannot perform the request
### Response:
def list(self, **kwargs):
"""Retrieve a list of objects.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
list: The list of objects, or a generator if `as_list` is False
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the server cannot perform the request
"""
path = '/users/%s/projects' % self._parent.id
return ListMixin.list(self, path=path, **kwargs) |
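In python-gitlab this manager is normally reached through a User object, so a rough usage sketch might look like the following; the server URL, token and user id are placeholders.

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')  # placeholders
user = gl.users.get(1234)

# One page of the user's projects via the list() method shown above.
projects = user.projects.list(per_page=20, page=1)
for project in projects:
    print(project.id, project.name)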
def ParseContainersTable(
self, parser_mediator, database=None, table=None, **unused_kwargs):
"""Parses the Containers table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
"""
if database is None:
raise ValueError('Missing database value.')
if table is None:
raise ValueError('Missing table value.')
for esedb_record in table.records:
if parser_mediator.abort:
break
record_values = self._GetRecordValues(
parser_mediator, table.name, esedb_record)
event_data = MsieWebCacheContainersEventData()
event_data.container_identifier = record_values.get('ContainerId', None)
event_data.directory = record_values.get('Directory', None)
event_data.name = record_values.get('Name', None)
event_data.set_identifier = record_values.get('SetId', None)
timestamp = record_values.get('LastScavengeTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, 'Last Scavenge Time')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = record_values.get('LastAccessTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
container_identifier = record_values.get('ContainerId', None)
container_name = record_values.get('Name', None)
if not container_identifier or not container_name:
continue
table_name = 'Container_{0:d}'.format(container_identifier)
esedb_table = database.get_table_by_name(table_name)
if not esedb_table:
parser_mediator.ProduceExtractionWarning(
'Missing table: {0:s}'.format(table_name))
continue
self._ParseContainerTable(parser_mediator, esedb_table, container_name) | Parses the Containers table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing. | Below is the the instruction that describes the task:
### Input:
Parses the Containers table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
### Response:
def ParseContainersTable(
self, parser_mediator, database=None, table=None, **unused_kwargs):
"""Parses the Containers table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
"""
if database is None:
raise ValueError('Missing database value.')
if table is None:
raise ValueError('Missing table value.')
for esedb_record in table.records:
if parser_mediator.abort:
break
record_values = self._GetRecordValues(
parser_mediator, table.name, esedb_record)
event_data = MsieWebCacheContainersEventData()
event_data.container_identifier = record_values.get('ContainerId', None)
event_data.directory = record_values.get('Directory', None)
event_data.name = record_values.get('Name', None)
event_data.set_identifier = record_values.get('SetId', None)
timestamp = record_values.get('LastScavengeTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, 'Last Scavenge Time')
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = record_values.get('LastAccessTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
container_identifier = record_values.get('ContainerId', None)
container_name = record_values.get('Name', None)
if not container_identifier or not container_name:
continue
table_name = 'Container_{0:d}'.format(container_identifier)
esedb_table = database.get_table_by_name(table_name)
if not esedb_table:
parser_mediator.ProduceExtractionWarning(
'Missing table: {0:s}'.format(table_name))
continue
self._ParseContainerTable(parser_mediator, esedb_table, container_name) |
def match_index_versions(host=None, core_name=None):
'''
SLAVE CALL
Verifies that the master and the slave versions are in sync by
comparing the index version. If you are constantly pushing updates
the index the master and slave versions will seldom match. A solution
to this is pause indexing every so often to allow the slave to replicate
and then call this method before allowing indexing to resume.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.match_index_versions music
'''
# since only slaves can call this let's check the config:
ret = _get_return_dict()
success = True
if _is_master() and _get_none_or_value(host) is None:
return ret.update({
'success': False,
'errors': [
'solr.match_index_versions can only be called by '
'"slave" minions'
]
})
# get the default return dict
def _match(ret, success, resp, core):
        if resp['success']:
slave = resp['data']['details']['slave']
master_url = resp['data']['details']['slave']['masterUrl']
if 'ERROR' in slave:
error = slave['ERROR']
success = False
err = "{0}: {1} - {2}".format(core, error, master_url)
resp['errors'].append(err)
# if there was an error return the entire response so the
# alterer can get what it wants
data = slave if core is None else {core: {'data': slave}}
else:
versions = {
'master': slave['masterDetails']['master'][
'replicatableIndexVersion'],
'slave': resp['data']['details']['indexVersion'],
'next_replication': slave['nextExecutionAt'],
'failed_list': []
}
if 'replicationFailedAtList' in slave:
versions.update({'failed_list': slave[
'replicationFailedAtList']})
# check the index versions
if versions['master'] != versions['slave']:
success = False
resp['errors'].append(
'Master and Slave index versions do not match.'
)
data = versions if core is None else {core: {'data': versions}}
ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
else:
success = False
err = resp['errors']
data = resp['data']
ret = _update_return_dict(ret, success, data, errors=err)
return (ret, success)
# check all cores?
if _get_none_or_value(core_name) is None and _check_for_cores():
success = True
for name in __opts__['solr.cores']:
response = _replication_request('details', host=host,
core_name=name)
ret, success = _match(ret, success, response, name)
else:
response = _replication_request('details', host=host,
core_name=core_name)
ret, success = _match(ret, success, response, core_name)
return ret | SLAVE CALL
Verifies that the master and the slave versions are in sync by
comparing the index version. If you are constantly pushing updates
the index the master and slave versions will seldom match. A solution
to this is pause indexing every so often to allow the slave to replicate
and then call this method before allowing indexing to resume.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.match_index_versions music | Below is the the instruction that describes the task:
### Input:
SLAVE CALL
Verifies that the master and the slave versions are in sync by
comparing the index version. If you are constantly pushing updates
the index the master and slave versions will seldom match. A solution
to this is pause indexing every so often to allow the slave to replicate
and then call this method before allowing indexing to resume.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.match_index_versions music
### Response:
def match_index_versions(host=None, core_name=None):
'''
SLAVE CALL
Verifies that the master and the slave versions are in sync by
comparing the index version. If you are constantly pushing updates
the index the master and slave versions will seldom match. A solution
to this is pause indexing every so often to allow the slave to replicate
and then call this method before allowing indexing to resume.
host : str (None)
The solr host to query. __opts__['host'] is default.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.match_index_versions music
'''
# since only slaves can call this let's check the config:
ret = _get_return_dict()
success = True
if _is_master() and _get_none_or_value(host) is None:
return ret.update({
'success': False,
'errors': [
'solr.match_index_versions can only be called by '
'"slave" minions'
]
})
# get the default return dict
def _match(ret, success, resp, core):
        if resp['success']:
slave = resp['data']['details']['slave']
master_url = resp['data']['details']['slave']['masterUrl']
if 'ERROR' in slave:
error = slave['ERROR']
success = False
err = "{0}: {1} - {2}".format(core, error, master_url)
resp['errors'].append(err)
# if there was an error return the entire response so the
# alterer can get what it wants
data = slave if core is None else {core: {'data': slave}}
else:
versions = {
'master': slave['masterDetails']['master'][
'replicatableIndexVersion'],
'slave': resp['data']['details']['indexVersion'],
'next_replication': slave['nextExecutionAt'],
'failed_list': []
}
if 'replicationFailedAtList' in slave:
versions.update({'failed_list': slave[
'replicationFailedAtList']})
# check the index versions
if versions['master'] != versions['slave']:
success = False
resp['errors'].append(
'Master and Slave index versions do not match.'
)
data = versions if core is None else {core: {'data': versions}}
ret = _update_return_dict(ret, success, data,
resp['errors'], resp['warnings'])
else:
success = False
err = resp['errors']
data = resp['data']
ret = _update_return_dict(ret, success, data, errors=err)
return (ret, success)
# check all cores?
if _get_none_or_value(core_name) is None and _check_for_cores():
success = True
for name in __opts__['solr.cores']:
response = _replication_request('details', host=host,
core_name=name)
ret, success = _match(ret, success, response, name)
else:
response = _replication_request('details', host=host,
core_name=core_name)
ret, success = _match(ret, success, response, core_name)
return ret |
def reset(self):
'''
Reset list of terms and y-variable.
'''
self.terms = OrderedDict()
self.y = None
self.backend = None
self.added_terms = []
self._added_priors = {}
self.completes = []
self.clean_data = None | Reset list of terms and y-variable. | Below is the the instruction that describes the task:
### Input:
Reset list of terms and y-variable.
### Response:
def reset(self):
'''
Reset list of terms and y-variable.
'''
self.terms = OrderedDict()
self.y = None
self.backend = None
self.added_terms = []
self._added_priors = {}
self.completes = []
self.clean_data = None |
def add_directory_digests_for_jars(self, targets_and_jars):
"""For each target, get DirectoryDigests for its jars and return them zipped with the jars.
:param targets_and_jars: List of tuples of the form (Target, [pants.java.jar.jar_dependency_utils.ResolveJar])
:return: list[tuple[(Target, list[pants.java.jar.jar_dependency_utils.ResolveJar])]
"""
targets_and_jars=list(targets_and_jars)
if not targets_and_jars or not self.get_options().capture_snapshots:
return targets_and_jars
jar_paths = []
for target, jars_to_snapshot in targets_and_jars:
for jar in jars_to_snapshot:
jar_paths.append(fast_relpath(jar.pants_path, get_buildroot()))
snapshots = self.context._scheduler.capture_snapshots(
tuple(
PathGlobsAndRoot(PathGlobs([jar]), get_buildroot()) for jar in jar_paths
))
# We want to map back the list[Snapshot] to targets_and_jars
# We assume that (1) jars_to_snapshot has the same number of ResolveJars as snapshots does Snapshots,
# and that (2) capture_snapshots preserves ordering.
digests = [snapshot.directory_digest for snapshot in snapshots]
digest_iterator = iter(digests)
snapshotted_targets_and_jars = []
for target, jars_to_snapshot in targets_and_jars:
snapshotted_jars = [ResolvedJar(coordinate=jar.coordinate,
cache_path=jar.cache_path,
pants_path=jar.pants_path,
directory_digest=next(digest_iterator)) for jar in jars_to_snapshot]
snapshotted_targets_and_jars.append((target, snapshotted_jars))
return snapshotted_targets_and_jars | For each target, get DirectoryDigests for its jars and return them zipped with the jars.
:param targets_and_jars: List of tuples of the form (Target, [pants.java.jar.jar_dependency_utils.ResolveJar])
:return: list[tuple[(Target, list[pants.java.jar.jar_dependency_utils.ResolveJar])] | Below is the the instruction that describes the task:
### Input:
For each target, get DirectoryDigests for its jars and return them zipped with the jars.
:param targets_and_jars: List of tuples of the form (Target, [pants.java.jar.jar_dependency_utils.ResolveJar])
:return: list[tuple[(Target, list[pants.java.jar.jar_dependency_utils.ResolveJar])]
### Response:
def add_directory_digests_for_jars(self, targets_and_jars):
"""For each target, get DirectoryDigests for its jars and return them zipped with the jars.
:param targets_and_jars: List of tuples of the form (Target, [pants.java.jar.jar_dependency_utils.ResolveJar])
:return: list[tuple[(Target, list[pants.java.jar.jar_dependency_utils.ResolveJar])]
"""
targets_and_jars=list(targets_and_jars)
if not targets_and_jars or not self.get_options().capture_snapshots:
return targets_and_jars
jar_paths = []
for target, jars_to_snapshot in targets_and_jars:
for jar in jars_to_snapshot:
jar_paths.append(fast_relpath(jar.pants_path, get_buildroot()))
snapshots = self.context._scheduler.capture_snapshots(
tuple(
PathGlobsAndRoot(PathGlobs([jar]), get_buildroot()) for jar in jar_paths
))
# We want to map back the list[Snapshot] to targets_and_jars
# We assume that (1) jars_to_snapshot has the same number of ResolveJars as snapshots does Snapshots,
# and that (2) capture_snapshots preserves ordering.
digests = [snapshot.directory_digest for snapshot in snapshots]
digest_iterator = iter(digests)
snapshotted_targets_and_jars = []
for target, jars_to_snapshot in targets_and_jars:
snapshotted_jars = [ResolvedJar(coordinate=jar.coordinate,
cache_path=jar.cache_path,
pants_path=jar.pants_path,
directory_digest=next(digest_iterator)) for jar in jars_to_snapshot]
snapshotted_targets_and_jars.append((target, snapshotted_jars))
return snapshotted_targets_and_jars |
def simplified_solis(apparent_elevation, aod700=0.1, precipitable_water=1.,
pressure=101325., dni_extra=1364.):
"""
Calculate the clear sky GHI, DNI, and DHI according to the
simplified Solis model [1]_.
Reference [1]_ describes the accuracy of the model as being 15, 20,
and 18 W/m^2 for the beam, global, and diffuse components. Reference
[2]_ provides comparisons with other clear sky models.
Parameters
----------
apparent_elevation : numeric
The apparent elevation of the sun above the horizon (deg).
aod700 : numeric, default 0.1
The aerosol optical depth at 700 nm (unitless).
Algorithm derived for values between 0 and 0.45.
precipitable_water : numeric, default 1.0
The precipitable water of the atmosphere (cm).
Algorithm derived for values between 0.2 and 10 cm.
Values less than 0.2 will be assumed to be equal to 0.2.
pressure : numeric, default 101325.0
The atmospheric pressure (Pascals).
Algorithm derived for altitudes between sea level and 7000 m,
or 101325 and 41000 Pascals.
dni_extra : numeric, default 1364.0
Extraterrestrial irradiance. The units of ``dni_extra``
determine the units of the output.
Returns
-------
clearsky : DataFrame (if Series input) or OrderedDict of arrays
DataFrame/OrderedDict contains the columns/keys
``'dhi', 'dni', 'ghi'``.
References
----------
.. [1] P. Ineichen, "A broadband simplified version of the
Solis clear sky model," Solar Energy, 82, 758-762 (2008).
.. [2] P. Ineichen, "Validation of models that estimate the clear
sky global and beam solar irradiance," Solar Energy, 132,
332-344 (2016).
"""
p = pressure
w = precipitable_water
# algorithm fails for pw < 0.2
w = np.maximum(w, 0.2)
# this algorithm is reasonably fast already, but it could be made
# faster by precalculating the powers of aod700, the log(p/p0), and
# the log(w) instead of repeating the calculations as needed in each
# function
i0p = _calc_i0p(dni_extra, w, aod700, p)
taub = _calc_taub(w, aod700, p)
b = _calc_b(w, aod700)
taug = _calc_taug(w, aod700, p)
g = _calc_g(w, aod700)
taud = _calc_taud(w, aod700, p)
d = _calc_d(aod700, p)
# this prevents the creation of nans at night instead of 0s
# it's also friendly to scalar and series inputs
sin_elev = np.maximum(1.e-30, np.sin(np.radians(apparent_elevation)))
dni = i0p * np.exp(-taub/sin_elev**b)
ghi = i0p * np.exp(-taug/sin_elev**g) * sin_elev
dhi = i0p * np.exp(-taud/sin_elev**d)
irrads = OrderedDict()
irrads['ghi'] = ghi
irrads['dni'] = dni
irrads['dhi'] = dhi
if isinstance(dni, pd.Series):
irrads = pd.DataFrame.from_dict(irrads)
return irrads | Calculate the clear sky GHI, DNI, and DHI according to the
simplified Solis model [1]_.
Reference [1]_ describes the accuracy of the model as being 15, 20,
and 18 W/m^2 for the beam, global, and diffuse components. Reference
[2]_ provides comparisons with other clear sky models.
Parameters
----------
apparent_elevation : numeric
The apparent elevation of the sun above the horizon (deg).
aod700 : numeric, default 0.1
The aerosol optical depth at 700 nm (unitless).
Algorithm derived for values between 0 and 0.45.
precipitable_water : numeric, default 1.0
The precipitable water of the atmosphere (cm).
Algorithm derived for values between 0.2 and 10 cm.
Values less than 0.2 will be assumed to be equal to 0.2.
pressure : numeric, default 101325.0
The atmospheric pressure (Pascals).
Algorithm derived for altitudes between sea level and 7000 m,
or 101325 and 41000 Pascals.
dni_extra : numeric, default 1364.0
Extraterrestrial irradiance. The units of ``dni_extra``
determine the units of the output.
Returns
-------
clearsky : DataFrame (if Series input) or OrderedDict of arrays
DataFrame/OrderedDict contains the columns/keys
``'dhi', 'dni', 'ghi'``.
References
----------
.. [1] P. Ineichen, "A broadband simplified version of the
Solis clear sky model," Solar Energy, 82, 758-762 (2008).
.. [2] P. Ineichen, "Validation of models that estimate the clear
sky global and beam solar irradiance," Solar Energy, 132,
332-344 (2016). | Below is the the instruction that describes the task:
### Input:
Calculate the clear sky GHI, DNI, and DHI according to the
simplified Solis model [1]_.
Reference [1]_ describes the accuracy of the model as being 15, 20,
and 18 W/m^2 for the beam, global, and diffuse components. Reference
[2]_ provides comparisons with other clear sky models.
Parameters
----------
apparent_elevation : numeric
The apparent elevation of the sun above the horizon (deg).
aod700 : numeric, default 0.1
The aerosol optical depth at 700 nm (unitless).
Algorithm derived for values between 0 and 0.45.
precipitable_water : numeric, default 1.0
The precipitable water of the atmosphere (cm).
Algorithm derived for values between 0.2 and 10 cm.
Values less than 0.2 will be assumed to be equal to 0.2.
pressure : numeric, default 101325.0
The atmospheric pressure (Pascals).
Algorithm derived for altitudes between sea level and 7000 m,
or 101325 and 41000 Pascals.
dni_extra : numeric, default 1364.0
Extraterrestrial irradiance. The units of ``dni_extra``
determine the units of the output.
Returns
-------
clearsky : DataFrame (if Series input) or OrderedDict of arrays
DataFrame/OrderedDict contains the columns/keys
``'dhi', 'dni', 'ghi'``.
References
----------
.. [1] P. Ineichen, "A broadband simplified version of the
Solis clear sky model," Solar Energy, 82, 758-762 (2008).
.. [2] P. Ineichen, "Validation of models that estimate the clear
sky global and beam solar irradiance," Solar Energy, 132,
332-344 (2016).
### Response:
def simplified_solis(apparent_elevation, aod700=0.1, precipitable_water=1.,
pressure=101325., dni_extra=1364.):
"""
Calculate the clear sky GHI, DNI, and DHI according to the
simplified Solis model [1]_.
Reference [1]_ describes the accuracy of the model as being 15, 20,
and 18 W/m^2 for the beam, global, and diffuse components. Reference
[2]_ provides comparisons with other clear sky models.
Parameters
----------
apparent_elevation : numeric
The apparent elevation of the sun above the horizon (deg).
aod700 : numeric, default 0.1
The aerosol optical depth at 700 nm (unitless).
Algorithm derived for values between 0 and 0.45.
precipitable_water : numeric, default 1.0
The precipitable water of the atmosphere (cm).
Algorithm derived for values between 0.2 and 10 cm.
Values less than 0.2 will be assumed to be equal to 0.2.
pressure : numeric, default 101325.0
The atmospheric pressure (Pascals).
Algorithm derived for altitudes between sea level and 7000 m,
or 101325 and 41000 Pascals.
dni_extra : numeric, default 1364.0
Extraterrestrial irradiance. The units of ``dni_extra``
determine the units of the output.
Returns
-------
clearsky : DataFrame (if Series input) or OrderedDict of arrays
DataFrame/OrderedDict contains the columns/keys
``'dhi', 'dni', 'ghi'``.
References
----------
.. [1] P. Ineichen, "A broadband simplified version of the
Solis clear sky model," Solar Energy, 82, 758-762 (2008).
.. [2] P. Ineichen, "Validation of models that estimate the clear
sky global and beam solar irradiance," Solar Energy, 132,
332-344 (2016).
"""
p = pressure
w = precipitable_water
# algorithm fails for pw < 0.2
w = np.maximum(w, 0.2)
# this algorithm is reasonably fast already, but it could be made
# faster by precalculating the powers of aod700, the log(p/p0), and
# the log(w) instead of repeating the calculations as needed in each
# function
i0p = _calc_i0p(dni_extra, w, aod700, p)
taub = _calc_taub(w, aod700, p)
b = _calc_b(w, aod700)
taug = _calc_taug(w, aod700, p)
g = _calc_g(w, aod700)
taud = _calc_taud(w, aod700, p)
d = _calc_d(aod700, p)
# this prevents the creation of nans at night instead of 0s
# it's also friendly to scalar and series inputs
sin_elev = np.maximum(1.e-30, np.sin(np.radians(apparent_elevation)))
dni = i0p * np.exp(-taub/sin_elev**b)
ghi = i0p * np.exp(-taug/sin_elev**g) * sin_elev
dhi = i0p * np.exp(-taud/sin_elev**d)
irrads = OrderedDict()
irrads['ghi'] = ghi
irrads['dni'] = dni
irrads['dhi'] = dhi
if isinstance(dni, pd.Series):
irrads = pd.DataFrame.from_dict(irrads)
return irrads |
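The references point at pvlib, so assuming this function is available as pvlib.clearsky.simplified_solis, a small usage sketch with Series input (the values are illustrative) would be:

import pandas as pd
from pvlib import clearsky  # assumption: this is the pvlib implementation

times = pd.date_range('2019-06-21 06:00', periods=4, freq='3H', tz='UTC')
apparent_elevation = pd.Series([10., 45., 70., 30.], index=times)

cs = clearsky.simplified_solis(apparent_elevation,
                               aod700=0.1,
                               precipitable_water=1.0,
                               pressure=101325.)
print(cs[['ghi', 'dni', 'dhi']])  # a DataFrame, because the input was a Series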
def get_num_columns_and_rows(widths, gap_width, term_width):
'''Given a list of string widths, a width of the minimum gap to place
between them, and the maximum width of the output (such as a terminal
width), calculate the number of columns and rows, and the width of each
column, for the optimal layout.
'''
def calc_longest_width(widths, gap_width, ncols):
longest = 0
rows = [widths[s:s + ncols] for s in range(0, len(widths), ncols)]
col_widths = rows[0] # Column widths start at the first row widths
for r in rows:
for ii, c in enumerate(r):
if c > col_widths[ii]:
col_widths[ii] = c
length = sum(col_widths) + gap_width * (ncols - 1)
if length > longest:
longest = length
return longest, col_widths
def calc_num_rows(num_items, cols):
div, mod = divmod(num_items, cols)
return div + (mod != 0)
# Start with one row
ncols = len(widths)
# Calculate the width of the longest row as the longest set of item widths
# ncols long and gap widths (gap_width * ncols - 1) that fits within the
# terminal width.
while ncols > 0:
longest_width, col_widths = calc_longest_width(widths, gap_width, ncols)
if longest_width < term_width:
# This number of columns fits
return calc_num_rows(len(widths), ncols), ncols, col_widths
else:
# This number of columns doesn't fit, so try one less
ncols -= 1
# If got here, it all has to go in one column
return len(widths), 1, 0 | Given a list of string widths, a width of the minimum gap to place
between them, and the maximum width of the output (such as a terminal
width), calculate the number of columns and rows, and the width of each
column, for the optimal layout. | Below is the the instruction that describes the task:
### Input:
Given a list of string widths, a width of the minimum gap to place
between them, and the maximum width of the output (such as a terminal
width), calculate the number of columns and rows, and the width of each
column, for the optimal layout.
### Response:
def get_num_columns_and_rows(widths, gap_width, term_width):
'''Given a list of string widths, a width of the minimum gap to place
between them, and the maximum width of the output (such as a terminal
width), calculate the number of columns and rows, and the width of each
column, for the optimal layout.
'''
def calc_longest_width(widths, gap_width, ncols):
longest = 0
rows = [widths[s:s + ncols] for s in range(0, len(widths), ncols)]
col_widths = rows[0] # Column widths start at the first row widths
for r in rows:
for ii, c in enumerate(r):
if c > col_widths[ii]:
col_widths[ii] = c
length = sum(col_widths) + gap_width * (ncols - 1)
if length > longest:
longest = length
return longest, col_widths
def calc_num_rows(num_items, cols):
div, mod = divmod(num_items, cols)
return div + (mod != 0)
# Start with one row
ncols = len(widths)
# Calculate the width of the longest row as the longest set of item widths
# ncols long and gap widths (gap_width * ncols - 1) that fits within the
# terminal width.
while ncols > 0:
longest_width, col_widths = calc_longest_width(widths, gap_width, ncols)
if longest_width < term_width:
# This number of columns fits
return calc_num_rows(len(widths), ncols), ncols, col_widths
else:
# This number of columns doesn't fit, so try one less
ncols -= 1
# If got here, it all has to go in one column
return len(widths), 1, 0 |
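The layout routine is self-contained, so a quick worked example (numbers chosen arbitrarily, function above assumed in scope) shows how the column count shrinks until the longest row fits:

widths = [5, 3, 8, 2, 6]   # widths of five strings
nrows, ncols, col_widths = get_num_columns_and_rows(widths, gap_width=2, term_width=25)
# 5 columns need 24 + 8 = 32 chars and 4 columns need 25, so neither is < 25;
# 3 columns need 5 + 6 + 8 + 2*2 = 23, which fits.
# nrows -> 2, ncols -> 3, col_widths -> [5, 6, 8]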
def read_metadata(self, symbol, as_of=None, allow_secondary=None):
"""
Return the metadata saved for a symbol. This method is fast as it doesn't
actually load the data.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
allow_secondary : `bool` or `None`
Override the default behavior for allowing reads from secondary members of a cluster:
`None` : use the settings from the top-level `Arctic` object used to query this version store.
`True` : allow reads from secondary members
`False` : only allow reads from primary members
"""
_version = self._read_metadata(symbol, as_of=as_of, read_preference=self._read_preference(allow_secondary))
return VersionedItem(symbol=symbol, library=self._arctic_lib.get_name(), version=_version['version'],
metadata=_version.pop('metadata', None), data=None,
host=self._arctic_lib.arctic.mongo_host) | Return the metadata saved for a symbol. This method is fast as it doesn't
actually load the data.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
allow_secondary : `bool` or `None`
Override the default behavior for allowing reads from secondary members of a cluster:
`None` : use the settings from the top-level `Arctic` object used to query this version store.
`True` : allow reads from secondary members
`False` : only allow reads from primary members | Below is the the instruction that describes the task:
### Input:
Return the metadata saved for a symbol. This method is fast as it doesn't
actually load the data.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
allow_secondary : `bool` or `None`
Override the default behavior for allowing reads from secondary members of a cluster:
`None` : use the settings from the top-level `Arctic` object used to query this version store.
`True` : allow reads from secondary members
`False` : only allow reads from primary members
### Response:
def read_metadata(self, symbol, as_of=None, allow_secondary=None):
"""
Return the metadata saved for a symbol. This method is fast as it doesn't
actually load the data.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
allow_secondary : `bool` or `None`
Override the default behavior for allowing reads from secondary members of a cluster:
`None` : use the settings from the top-level `Arctic` object used to query this version store.
`True` : allow reads from secondary members
`False` : only allow reads from primary members
"""
_version = self._read_metadata(symbol, as_of=as_of, read_preference=self._read_preference(allow_secondary))
return VersionedItem(symbol=symbol, library=self._arctic_lib.get_name(), version=_version['version'],
metadata=_version.pop('metadata', None), data=None,
host=self._arctic_lib.arctic.mongo_host) |
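A sketch of how this is typically reached through the Arctic version store; the Mongo host, library name and symbol are placeholders.

from arctic import Arctic

store = Arctic('localhost')                      # placeholder MongoDB host
store.initialize_library('user.metadata_demo')   # create the library once
lib = store['user.metadata_demo']

lib.write('SYMBOL', data=[1, 2, 3], metadata={'source': 'demo'})

item = lib.read_metadata('SYMBOL')   # fast: the data payload is not loaded
print(item.metadata)                 # {'source': 'demo'}
print(item.data)                     # None, by design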
def deps_status(self):
"""Returns a list with the status of the dependencies."""
if not self.deps:
return [self.S_OK]
return [d.status for d in self.deps] | Returns a list with the status of the dependencies. | Below is the the instruction that describes the task:
### Input:
Returns a list with the status of the dependencies.
### Response:
def deps_status(self):
"""Returns a list with the status of the dependencies."""
if not self.deps:
return [self.S_OK]
return [d.status for d in self.deps] |
def all_continuous_indexes_slices(ol,value):
'''
from elist.elist import *
ol = [1,"a","a",2,3,"a",4,"a","a","a",5]
all_continuous_indexes_slices(ol,"a")
'''
rslt = []
length = ol.__len__()
cursor = 0
begin = None
slice = []
while(cursor < length):
cond1 = (ol[cursor] == value)
cond2 = (begin == None)
if(cond1 & cond2):
begin = cursor
slice.append(cursor)
elif(cond1 & (not(cond2))):
slice.append(cursor)
elif((not(cond1)) & (not(cond2))):
rslt.append(slice)
begin = None
slice = []
else:
pass
cursor = cursor + 1
if(slice):
rslt.append(slice)
else:
pass
return(rslt) | from elist.elist import *
ol = [1,"a","a",2,3,"a",4,"a","a","a",5]
all_continuous_indexes_slices(ol,"a") | Below is the the instruction that describes the task:
### Input:
from elist.elist import *
ol = [1,"a","a",2,3,"a",4,"a","a","a",5]
all_continuous_indexes_slices(ol,"a")
### Response:
def all_continuous_indexes_slices(ol,value):
'''
from elist.elist import *
ol = [1,"a","a",2,3,"a",4,"a","a","a",5]
all_continuous_indexes_slices(ol,"a")
'''
rslt = []
length = ol.__len__()
cursor = 0
begin = None
slice = []
while(cursor < length):
cond1 = (ol[cursor] == value)
cond2 = (begin == None)
if(cond1 & cond2):
begin = cursor
slice.append(cursor)
elif(cond1 & (not(cond2))):
slice.append(cursor)
elif((not(cond1)) & (not(cond2))):
rslt.append(slice)
begin = None
slice = []
else:
pass
cursor = cursor + 1
if(slice):
rslt.append(slice)
else:
pass
return(rslt) |
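Running the example from the docstring, the function groups consecutive positions of the value; the expected result is worth spelling out:

ol = [1, "a", "a", 2, 3, "a", 4, "a", "a", "a", 5]
result = all_continuous_indexes_slices(ol, "a")
# "a" sits at indexes 1-2, 5 and 7-9, so:
# result -> [[1, 2], [5], [7, 8, 9]]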
def shift(self, periods, axis=0, fill_value=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = maybe_upcast(self.values, fill_value)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, ensure_platform_int(periods),
axis=axis)
axis_indexer = [slice(None)] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [self.make_block(new_values)] | shift the block by periods, possibly upcast | Below is the the instruction that describes the task:
### Input:
shift the block by periods, possibly upcast
### Response:
def shift(self, periods, axis=0, fill_value=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = maybe_upcast(self.values, fill_value)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, ensure_platform_int(periods),
axis=axis)
axis_indexer = [slice(None)] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [self.make_block(new_values)] |
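This is internal pandas block machinery; the user-facing behaviour it backs is easier to see at the Series level (exact internals vary between pandas versions).

import pandas as pd

s = pd.Series([1, 2, 3, 4])
s.shift(1)                 # -> NaN, 1.0, 2.0, 3.0 (ints upcast to float to hold the gap)
s.shift(-2, fill_value=0)  # -> 3, 4, 0, 0 (vacated slots take fill_value, dtype preserved)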
def get_csv_col_headers(rows, row_headers_count_value=0):
"""
Retrieve csv column headers
"""
count = 0
if rows:
for row in rows:
if exclude_empty_values(row[:row_headers_count_value]):
break
count += 1
if len(rows) == count:
count = 1 # by default
return [r[row_headers_count_value:] for r in rows[:count]] | Retrieve csv column headers | Below is the the instruction that describes the task:
### Input:
Retrieve csv column headers
### Response:
def get_csv_col_headers(rows, row_headers_count_value=0):
"""
Retrieve csv column headers
"""
count = 0
if rows:
for row in rows:
if exclude_empty_values(row[:row_headers_count_value]):
break
count += 1
if len(rows) == count:
count = 1 # by default
return [r[row_headers_count_value:] for r in rows[:count]] |
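A sketch of the intended behaviour; exclude_empty_values is not shown above, so a minimal stand-in that drops empty cells is assumed here.

def exclude_empty_values(values):
    # assumed helper: keep only non-empty cells
    return [v for v in values if v not in (None, '')]

rows = [
    ['',   'Jan', 'Feb'],   # header row 1
    ['',   'ppm', 'ppm'],   # header row 2
    ['As', '0.1', '0.2'],   # first data row, starts with a row header
]
print(get_csv_col_headers(rows, row_headers_count_value=1))
# -> [['Jan', 'Feb'], ['ppm', 'ppm']]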
def _dirdiffandupdate(self, dir1, dir2):
"""
Private function which does directory diff & update
"""
self._dowork(dir1, dir2, None, self._update) | Private function which does directory diff & update | Below is the the instruction that describes the task:
### Input:
Private function which does directory diff & update
### Response:
def _dirdiffandupdate(self, dir1, dir2):
"""
Private function which does directory diff & update
"""
self._dowork(dir1, dir2, None, self._update) |
def build_hardware_simulator(
cls,
attached_instruments: Dict[top_types.Mount, Dict[str, Optional[str]]] = None, # noqa E501
attached_modules: List[str] = None,
config: robot_configs.robot_config = None,
loop: asyncio.AbstractEventLoop = None,
strict_attached_instruments: bool = True) -> 'API':
""" Build a simulating hardware controller.
This method may be used both on a real robot and on dev machines.
Multiple simulating hardware controllers may be active at one time.
"""
if None is attached_instruments:
attached_instruments = {}
if None is attached_modules:
attached_modules = []
return cls(Simulator(attached_instruments,
attached_modules,
config, loop,
strict_attached_instruments),
config=config, loop=loop) | Build a simulating hardware controller.
This method may be used both on a real robot and on dev machines.
Multiple simulating hardware controllers may be active at one time. | Below is the the instruction that describes the task:
### Input:
Build a simulating hardware controller.
This method may be used both on a real robot and on dev machines.
Multiple simulating hardware controllers may be active at one time.
### Response:
def build_hardware_simulator(
cls,
attached_instruments: Dict[top_types.Mount, Dict[str, Optional[str]]] = None, # noqa E501
attached_modules: List[str] = None,
config: robot_configs.robot_config = None,
loop: asyncio.AbstractEventLoop = None,
strict_attached_instruments: bool = True) -> 'API':
""" Build a simulating hardware controller.
This method may be used both on a real robot and on dev machines.
Multiple simulating hardware controllers may be active at one time.
"""
if None is attached_instruments:
attached_instruments = {}
if None is attached_modules:
attached_modules = []
return cls(Simulator(attached_instruments,
attached_modules,
config, loop,
strict_attached_instruments),
config=config, loop=loop) |
def Y(self, value):
""" set phenotype """
self._N = value.shape[0]
self._P = value.shape[1]
self._Y = value
# missing data
self._Iok = ~sp.isnan(value)
self._veIok = vec(self._Iok)[:, 0]
self._miss = (~self._Iok).any()
# notify and clear_cached
self.clear_cache('pheno')
self._notify()
self._notify('pheno') | set phenotype | Below is the the instruction that describes the task:
### Input:
set phenotype
### Response:
def Y(self, value):
""" set phenotype """
self._N = value.shape[0]
self._P = value.shape[1]
self._Y = value
# missing data
self._Iok = ~sp.isnan(value)
self._veIok = vec(self._Iok)[:, 0]
self._miss = (~self._Iok).any()
# notify and clear_cached
self.clear_cache('pheno')
self._notify()
self._notify('pheno') |
def get_feature(name):
"""Get an instance of a ``Features`` class by ``name`` (str)."""
if name == 'css':
return CSSFeatures()
elif name == 'kohlschuetter':
return KohlschuetterFeatures()
elif name == 'readability':
return ReadabilityFeatures()
elif name == 'weninger':
return WeningerFeatures()
elif name == 'clustered_weninger':
return ClusteredWeningerFeatures()
else:
raise ValueError('invalid feature name: "{}"'.format(name)) | Get an instance of a ``Features`` class by ``name`` (str). | Below is the the instruction that describes the task:
### Input:
Get an instance of a ``Features`` class by ``name`` (str).
### Response:
def get_feature(name):
"""Get an instance of a ``Features`` class by ``name`` (str)."""
if name == 'css':
return CSSFeatures()
elif name == 'kohlschuetter':
return KohlschuetterFeatures()
elif name == 'readability':
return ReadabilityFeatures()
elif name == 'weninger':
return WeningerFeatures()
elif name == 'clustered_weninger':
return ClusteredWeningerFeatures()
else:
raise ValueError('invalid feature name: "{}"'.format(name)) |
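A short usage note, assuming the factory and the Features classes above are importable together:

features = get_feature('kohlschuetter')                            # -> KohlschuetterFeatures()
combined = [get_feature(name) for name in ('css', 'readability')]  # mix and match extractors

try:
    get_feature('unknown')
except ValueError as err:
    print(err)   # invalid feature name: "unknown"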
def normalize(rendered):
"""Return the input string without non-functional spaces or newlines."""
out = ''.join([line.strip()
for line in rendered.splitlines()
if line.strip()])
out = out.replace(', ', ',')
return out | Return the input string without non-functional spaces or newlines. | Below is the the instruction that describes the task:
### Input:
Return the input string without non-functional spaces or newlines.
### Response:
def normalize(rendered):
"""Return the input string without non-functional spaces or newlines."""
out = ''.join([line.strip()
for line in rendered.splitlines()
if line.strip()])
out = out.replace(', ', ',')
return out |
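A quick check of what the whitespace stripping does (the input string is made up):

rendered = """
foo(
    1, 2,
    3
)
"""
print(normalize(rendered))   # -> foo(1,2,3)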
def set_category(self, category):
"""Set package category
Args:
category: String of an existing category's name, or a
Category object.
"""
# For some reason, packages only have the category name, not the
# ID.
if isinstance(category, Category):
name = category.name
else:
name = category
self.find("category").text = name | Set package category
Args:
category: String of an existing category's name, or a
Category object. | Below is the the instruction that describes the task:
### Input:
Set package category
Args:
category: String of an existing category's name, or a
Category object.
### Response:
def set_category(self, category):
"""Set package category
Args:
category: String of an existing category's name, or a
Category object.
"""
# For some reason, packages only have the category name, not the
# ID.
if isinstance(category, Category):
name = category.name
else:
name = category
self.find("category").text = name |
def select(self, adjacentEdges=None, edgeList=None, extendEdges=None, firstNeighbors=None, \
invert=None, network=None, nodeList=None, verbose=False):
"""
Select nodes and/or edges in a network. This command provides options to
invert the selection, add first neighbors, add adjacent edges of selected
nodes, and add adjacent nodes of selected edges
:param adjacentEdges (string, optional): If 'true', then select any edges
adjacent to any selected nodes. This happens before any inversion
:param edgeList (string, optional): Specifies a list of edges. The keywords
all, selected, or unselected can be used to specify edges by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param extendEdges (string, optional): If 'true', then select any nodes
adjacent to any selected edges. This happens before any inversion
:param firstNeighbors (string, optional): If this option is anything other
than 'none', add nodes to the selection based on the value of the
argument. If 'incoming', add nodes to the selection that have edges
pointing to one of the selected nodes. If 'output', add nodes to the
selection that have edges that point to them from one of the selected
nodes. If 'undirected' add any neighbors that have undirected edges
connecting to any of the selected nodes. Finally, if 'any', then add
all first neighbors to the selection list. = ['none', 'incoming',
'outgoing', 'undirected', 'any'],
:param invert (string, optional): If this option is not 'none', then the
selected nodes or edges (or both) will be deselected and all other
nodes or edges will be selected = ['none', 'nodes', 'edges', 'both']
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value
can also be used to specify the current network.
:param nodeList (string, optional): Specifies a list of nodes. The keywords
all, selected, or unselected can be used to specify nodes by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param verbose: print more
:returns: [ list of selected edges and nodes ]
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["adjacentEdges","edgeList","extendEdges","firstNeighbors",\
"invert","network","nodeList"], \
[adjacentEdges,edgeList,extendEdges,firstNeighbors,\
invert,network,nodeList])
response=api(url=self.__url+"/select", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | Select nodes and/or edges in a network. This command provides options to
invert the selection, add first neighbors, add adjacent edges of selected
nodes, and add adjacent nodes of selected edges
:param adjacentEdges (string, optional): If 'true', then select any edges
adjacent to any selected nodes. This happens before any inversion
:param edgeList (string, optional): Specifies a list of edges. The keywords
all, selected, or unselected can be used to specify edges by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param extendEdges (string, optional): If 'true', then select any nodes
adjacent to any selected edges. This happens before any inversion
:param firstNeighbors (string, optional): If this option is anything other
than 'none', add nodes to the selection based on the value of the
argument. If 'incoming', add nodes to the selection that have edges
pointing to one of the selected nodes. If 'output', add nodes to the
selection that have edges that point to them from one of the selected
nodes. If 'undirected' add any neighbors that have undirected edges
connecting to any of the selected nodes. Finally, if 'any', then add
all first neighbors to the selection list. = ['none', 'incoming',
'outgoing', 'undirected', 'any'],
:param invert (string, optional): If this option is not 'none', then the
selected nodes or edges (or both) will be deselected and all other
nodes or edges will be selected = ['none', 'nodes', 'edges', 'both']
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value
can also be used to specify the current network.
:param nodeList (string, optional): Specifies a list of nodes. The keywords
all, selected, or unselected can be used to specify nodes by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param verbose: print more
:returns: [ list of selected edges and nodes ] | Below is the the instruction that describes the task:
### Input:
Select nodes and/or edges in a network. This command provides options to
invert the selection, add first neighbors, add adjacent edges of selected
nodes, and add adjacent nodes of selected edges
:param adjacentEdges (string, optional): If 'true', then select any edges
adjacent to any selected nodes. This happens before any inversion
:param edgeList (string, optional): Specifies a list of edges. The keywords
all, selected, or unselected can be used to specify edges by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param extendEdges (string, optional): If 'true', then select any nodes
adjacent to any selected edges. This happens before any inversion
:param firstNeighbors (string, optional): If this option is anything other
than 'none', add nodes to the selection based on the value of the
argument. If 'incoming', add nodes to the selection that have edges
pointing to one of the selected nodes. If 'output', add nodes to the
selection that have edges that point to them from one of the selected
nodes. If 'undirected' add any neighbors that have undirected edges
connecting to any of the selected nodes. Finally, if 'any', then add
all first neighbors to the selection list. = ['none', 'incoming',
'outgoing', 'undirected', 'any'],
:param invert (string, optional): If this option is not 'none', then the
selected nodes or edges (or both) will be deselected and all other
nodes or edges will be selected = ['none', 'nodes', 'edges', 'both']
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value
can also be used to specify the current network.
:param nodeList (string, optional): Specifies a list of nodes. The keywords
all, selected, or unselected can be used to specify nodes by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param verbose: print more
:returns: [ list of selected edges and nodes ]
### Response:
def select(self, adjacentEdges=None, edgeList=None, extendEdges=None, firstNeighbors=None, \
invert=None, network=None, nodeList=None, verbose=False):
"""
Select nodes and/or edges in a network. This command provides options to
invert the selection, add first neighbors, add adjacent edges of selected
nodes, and add adjacent nodes of selected edges
:param adjacentEdges (string, optional): If 'true', then select any edges
adjacent to any selected nodes. This happens before any inversion
:param edgeList (string, optional): Specifies a list of edges. The keywords
all, selected, or unselected can be used to specify edges by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param extendEdges (string, optional): If 'true', then select any nodes
adjacent to any selected edges. This happens before any inversion
:param firstNeighbors (string, optional): If this option is anything other
than 'none', add nodes to the selection based on the value of the
argument. If 'incoming', add nodes to the selection that have edges
pointing to one of the selected nodes. If 'output', add nodes to the
selection that have edges that point to them from one of the selected
nodes. If 'undirected' add any neighbors that have undirected edges
connecting to any of the selected nodes. Finally, if 'any', then add
all first neighbors to the selection list. = ['none', 'incoming',
'outgoing', 'undirected', 'any'],
:param invert (string, optional): If this option is not 'none', then the
selected nodes or edges (or both) will be deselected and all other
nodes or edges will be selected = ['none', 'nodes', 'edges', 'both']
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value
can also be used to specify the current network.
:param nodeList (string, optional): Specifies a list of nodes. The keywords
all, selected, or unselected can be used to specify nodes by their
selection state. The pattern COLUMN:VALUE sets this parameter to any
rows that contain the specified column value; if the COLUMN prefix is
not used, the NAME column is matched by default. A list of COLUMN:VALUE
pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to
match multiple values.
:param verbose: print more
:returns: [ list of selected edges and nodes ]
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["adjacentEdges","edgeList","extendEdges","firstNeighbors",\
"invert","network","nodeList"], \
[adjacentEdges,edgeList,extendEdges,firstNeighbors,\
invert,network,nodeList])
response=api(url=self.__url+"/select", PARAMS=PARAMS, method="POST", verbose=verbose)
return response |
def weld_merge_join(arrays_self, weld_types_self, arrays_other, weld_types_other,
how, is_on_sorted, is_on_unique, readable_text):
"""Applies merge-join on the arrays returning indices from each to keep in the resulting
Parameters
----------
arrays_self : list of (numpy.ndarray or WeldObject)
Columns from the self DataFrame on which to join.
weld_types_self : list of WeldType
Corresponding Weld types.
arrays_other : list of (numpy.ndarray or WeldObject)
Columns from the other DataFrame on which to join.
weld_types_other : list of WeldType
Corresponding Weld types.
how : {'inner', 'left', 'right'}
Which kind of join to do.
is_on_sorted : bool
If we know that the on columns are already sorted, can employ faster algorithm.
is_on_unique : bool
If we know that the values are unique, can employ faster algorithm.
readable_text : str
Explanatory string to add in the Weld placeholder.
Returns
-------
tuple of WeldObject
Two columns of indices from the input arrays, indices of the rows from self and other that should be
available in the resulting joined DataFrame.
"""
assert is_on_unique
weld_obj_vec_of_struct_self = weld_arrays_to_vec_of_struct(arrays_self, weld_types_self)
weld_obj_vec_of_struct_other = weld_arrays_to_vec_of_struct(arrays_other, weld_types_other)
weld_obj_join = _weld_merge_join(weld_obj_vec_of_struct_self,
weld_obj_vec_of_struct_other,
len(arrays_self),
how,
is_on_unique)
intermediate_result = LazyStructOfVecResult(weld_obj_join, [WeldLong(), WeldLong()])
dependency_name = Cache.cache_intermediate_result(intermediate_result, readable_text)
weld_objects = extract_placeholder_weld_objects(dependency_name, 2, readable_text)
return weld_objects | Applies merge-join on the arrays returning indices from each to keep in the resulting
Parameters
----------
arrays_self : list of (numpy.ndarray or WeldObject)
Columns from the self DataFrame on which to join.
weld_types_self : list of WeldType
Corresponding Weld types.
arrays_other : list of (numpy.ndarray or WeldObject)
Columns from the other DataFrame on which to join.
weld_types_other : list of WeldType
Corresponding Weld types.
how : {'inner', 'left', 'right'}
Which kind of join to do.
is_on_sorted : bool
If we know that the on columns are already sorted, can employ faster algorithm.
is_on_unique : bool
If we know that the values are unique, can employ faster algorithm.
readable_text : str
Explanatory string to add in the Weld placeholder.
Returns
-------
tuple of WeldObject
Two columns of indices from the input arrays, indices of the rows from self and other that should be
available in the resulting joined DataFrame. | Below is the instruction that describes the task:
### Input:
Applies merge-join on the arrays returning indices from each to keep in the resulting joined DataFrame.
Parameters
----------
arrays_self : list of (numpy.ndarray or WeldObject)
Columns from the self DataFrame on which to join.
weld_types_self : list of WeldType
Corresponding Weld types.
arrays_other : list of (numpy.ndarray or WeldObject)
Columns from the other DataFrame on which to join.
weld_types_other : list of WeldType
Corresponding Weld types.
how : {'inner', 'left', 'right'}
Which kind of join to do.
is_on_sorted : bool
If we know that the on columns are already sorted, can employ faster algorithm.
is_on_unique : bool
If we know that the values are unique, can employ faster algorithm.
readable_text : str
Explanatory string to add in the Weld placeholder.
Returns
-------
tuple of WeldObject
Two columns of indices from the input arrays, indices of the rows from self and other that should be
available in the resulting joined DataFrame.
### Response:
def weld_merge_join(arrays_self, weld_types_self, arrays_other, weld_types_other,
how, is_on_sorted, is_on_unique, readable_text):
"""Applies merge-join on the arrays returning indices from each to keep in the resulting
Parameters
----------
arrays_self : list of (numpy.ndarray or WeldObject)
Columns from the self DataFrame on which to join.
weld_types_self : list of WeldType
Corresponding Weld types.
arrays_other : list of (numpy.ndarray or WeldObject)
Columns from the other DataFrame on which to join.
weld_types_other : list of WeldType
Corresponding Weld types.
how : {'inner', 'left', 'right'}
Which kind of join to do.
is_on_sorted : bool
If we know that the on columns are already sorted, can employ faster algorithm.
is_on_unique : bool
If we know that the values are unique, can employ faster algorithm.
readable_text : str
Explanatory string to add in the Weld placeholder.
Returns
-------
tuple of WeldObject
Two columns of indices from the input arrays, indices of the rows from self and other that should be
available in the resulting joined DataFrame.
"""
assert is_on_unique
weld_obj_vec_of_struct_self = weld_arrays_to_vec_of_struct(arrays_self, weld_types_self)
weld_obj_vec_of_struct_other = weld_arrays_to_vec_of_struct(arrays_other, weld_types_other)
weld_obj_join = _weld_merge_join(weld_obj_vec_of_struct_self,
weld_obj_vec_of_struct_other,
len(arrays_self),
how,
is_on_unique)
intermediate_result = LazyStructOfVecResult(weld_obj_join, [WeldLong(), WeldLong()])
dependency_name = Cache.cache_intermediate_result(intermediate_result, readable_text)
weld_objects = extract_placeholder_weld_objects(dependency_name, 2, readable_text)
return weld_objects |
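The Weld code generation above is hard to run outside its host library, so the following plain-Python sketch (an illustration of the idea only, not the Weld implementation) shows how a sorted, unique-key merge join produces the two index columns described under Returns:
def merge_join_indices(left_keys, right_keys):
    # Both inputs are assumed sorted and unique, mirroring the
    # is_on_sorted / is_on_unique fast path described above (inner join).
    left_idx, right_idx = [], []
    i = j = 0
    while i < len(left_keys) and j < len(right_keys):
        if left_keys[i] == right_keys[j]:
            left_idx.append(i)
            right_idx.append(j)
            i += 1
            j += 1
        elif left_keys[i] < right_keys[j]:
            i += 1
        else:
            j += 1
    return left_idx, right_idx

print(merge_join_indices([1, 3, 5, 7], [3, 4, 5]))  # ([1, 2], [0, 2])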
def get_regulate_activities(self):
"""Get Activation/Inhibition INDRA Statements from the BioPAX model.
This method extracts Activation/Inhibition Statements and reuses the
structure of BioPAX Pattern's
org.biopax.paxtools.pattern.PatternBox.controlsStateChange pattern
with additional constraints to specify the gain or loss of
activity state while ensuring that the activity change is not due to
a modification state change (which are extracted by get_modifications
and get_activity_modification).
"""
mcc = _bpp('constraint.ModificationChangeConstraint')
mcct = _bpp('constraint.ModificationChangeConstraint$Type')
mod_filter = 'residue modification, active'
# Start with a generic modification pattern
p = BiopaxProcessor._construct_modification_pattern()
stmts = []
for act_class, gain_loss in zip([Activation, Inhibition],
[mcct.GAIN, mcct.LOSS]):
p.add(mcc(gain_loss, mod_filter),
"input simple PE", "output simple PE")
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
if not _is_catalysis(control):
continue
cat_dir = control.getCatalysisDirection()
if cat_dir is not None and cat_dir.name() != 'LEFT_TO_RIGHT':
logger.debug('Unexpected catalysis direction: %s.' % \
control.getCatalysisDirection())
continue
subjs = BiopaxProcessor._get_primary_controller(controller_pe)
if not subjs:
continue
'''
if _is_complex(input_pe):
# TODO: It is possible to find which member of the complex
# is actually activated. That member will be the substrate
# and all other members of the complex will be bound to it.
logger.info('Cannot handle complex subjects.')
continue
'''
objs = BiopaxProcessor._get_agents_from_entity(input_spe,
expand_pe=False)
ev = self._get_evidence(control)
for subj, obj in itertools.product(_listify(subjs),
_listify(objs)):
# Get the modifications
mod_in = \
BiopaxProcessor._get_entity_mods(input_spe)
mod_out = \
BiopaxProcessor._get_entity_mods(output_spe)
# We assume if modifications change then this is not really
# a pure activation event
gained_mods = _get_mod_difference(mod_out, mod_in)
lost_mods = _get_mod_difference(mod_in, mod_out)
if gained_mods or lost_mods:
continue
stmt = act_class(subj, obj, 'activity', evidence=ev)
self.statements.append(decode_obj(stmt, encoding='utf-8')) | Get Activation/Inhibition INDRA Statements from the BioPAX model.
This method extracts Activation/Inhibition Statements and reuses the
structure of BioPAX Pattern's
org.biopax.paxtools.pattern.PatternBox.controlsStateChange pattern
with additional constraints to specify the gain or loss of
activity state while ensuring that the activity change is not due to
a modification state change (which are extracted by get_modifications
and get_activity_modification). | Below is the instruction that describes the task:
### Input:
Get Activation/Inhibition INDRA Statements from the BioPAX model.
This method extracts Activation/Inhibition Statements and reuses the
structure of BioPAX Pattern's
org.biopax.paxtools.pattern.PatternBox.controlsStateChange pattern
with additional constraints to specify the gain or loss of
activity state while ensuring that the activity change is not due to
a modification state change (which are extracted by get_modifications
and get_activity_modification).
### Response:
def get_regulate_activities(self):
"""Get Activation/Inhibition INDRA Statements from the BioPAX model.
This method extracts Activation/Inhibition Statements and reuses the
structure of BioPAX Pattern's
org.biopax.paxtools.pattern.PatternBox.controlsStateChange pattern
with additional constraints to specify the gain or loss of
activity state while ensuring that the activity change is not due to
a modification state change (which are extracted by get_modifications
and get_activity_modification).
"""
mcc = _bpp('constraint.ModificationChangeConstraint')
mcct = _bpp('constraint.ModificationChangeConstraint$Type')
mod_filter = 'residue modification, active'
# Start with a generic modification pattern
p = BiopaxProcessor._construct_modification_pattern()
stmts = []
for act_class, gain_loss in zip([Activation, Inhibition],
[mcct.GAIN, mcct.LOSS]):
p.add(mcc(gain_loss, mod_filter),
"input simple PE", "output simple PE")
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
controller_pe = r[p.indexOf('controller PE')]
input_pe = r[p.indexOf('input PE')]
input_spe = r[p.indexOf('input simple PE')]
output_spe = r[p.indexOf('output simple PE')]
reaction = r[p.indexOf('Conversion')]
control = r[p.indexOf('Control')]
if not _is_catalysis(control):
continue
cat_dir = control.getCatalysisDirection()
if cat_dir is not None and cat_dir.name() != 'LEFT_TO_RIGHT':
logger.debug('Unexpected catalysis direction: %s.' % \
control.getCatalysisDirection())
continue
subjs = BiopaxProcessor._get_primary_controller(controller_pe)
if not subjs:
continue
'''
if _is_complex(input_pe):
# TODO: It is possible to find which member of the complex
# is actually activated. That member will be the substrate
# and all other members of the complex will be bound to it.
logger.info('Cannot handle complex subjects.')
continue
'''
objs = BiopaxProcessor._get_agents_from_entity(input_spe,
expand_pe=False)
ev = self._get_evidence(control)
for subj, obj in itertools.product(_listify(subjs),
_listify(objs)):
# Get the modifications
mod_in = \
BiopaxProcessor._get_entity_mods(input_spe)
mod_out = \
BiopaxProcessor._get_entity_mods(output_spe)
# We assume if modifications change then this is not really
# a pure activation event
gained_mods = _get_mod_difference(mod_out, mod_in)
lost_mods = _get_mod_difference(mod_in, mod_out)
if gained_mods or lost_mods:
continue
stmt = act_class(subj, obj, 'activity', evidence=ev)
self.statements.append(decode_obj(stmt, encoding='utf-8')) |
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError("Cannot uninstall requirement %s, not installed" % (self.name,))
dist = self.satisfied_by or self.conflicts_with
paths_to_remove = UninstallPathSet(dist)
pip_egg_info_path = os.path.join(dist.location,
dist.egg_name()) + '.egg-info'
# workaround for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367
debian_egg_info_path = pip_egg_info_path.replace(
'-py%s' % pkg_resources.PY_MAJOR, '')
easy_install_egg = dist.egg_name() + '.egg'
develop_egg_link = egg_link_path(dist)
pip_egg_info_exists = os.path.exists(pip_egg_info_path)
debian_egg_info_exists = os.path.exists(debian_egg_info_path)
if pip_egg_info_exists or debian_egg_info_exists:
# package installed by pip
if pip_egg_info_exists:
egg_info_path = pip_egg_info_path
else:
egg_info_path = debian_egg_info_path
paths_to_remove.add(egg_info_path)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata('installed-files.txt').splitlines():
path = os.path.normpath(os.path.join(egg_info_path, installed_file))
paths_to_remove.add(path)
#FIXME: need a test for this elif block
#occurs with --single-version-externally-managed/--record outside of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
elif dist.location.endswith(easy_install_egg):
# package installed by easy_install
paths_to_remove.add(dist.location)
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif develop_egg_link:
# develop egg
fh = open(develop_egg_link, 'r')
link_pointer = os.path.normcase(fh.readline().strip())
fh.close()
assert (link_pointer == dist.location), 'Egg-link %s does not match installed location of %s (at %s)' % (link_pointer, self.name, dist.location)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
paths_to_remove.add(os.path.join(bin_py, script))
if sys.platform == 'win32':
paths_to_remove.add(os.path.join(bin_py, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
config = ConfigParser.SafeConfigParser()
config.readfp(FakeFile(dist.get_metadata_lines('entry_points.txt')))
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
paths_to_remove.add(os.path.join(bin_py, name))
if sys.platform == 'win32':
paths_to_remove.add(os.path.join(bin_py, name) + '.exe')
paths_to_remove.add(os.path.join(bin_py, name) + '.exe.manifest')
paths_to_remove.add(os.path.join(bin_py, name) + '-script.py')
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove | Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages. | Below is the instruction that describes the task:
### Input:
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
### Response:
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError("Cannot uninstall requirement %s, not installed" % (self.name,))
dist = self.satisfied_by or self.conflicts_with
paths_to_remove = UninstallPathSet(dist)
pip_egg_info_path = os.path.join(dist.location,
dist.egg_name()) + '.egg-info'
# workaround for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367
debian_egg_info_path = pip_egg_info_path.replace(
'-py%s' % pkg_resources.PY_MAJOR, '')
easy_install_egg = dist.egg_name() + '.egg'
develop_egg_link = egg_link_path(dist)
pip_egg_info_exists = os.path.exists(pip_egg_info_path)
debian_egg_info_exists = os.path.exists(debian_egg_info_path)
if pip_egg_info_exists or debian_egg_info_exists:
# package installed by pip
if pip_egg_info_exists:
egg_info_path = pip_egg_info_path
else:
egg_info_path = debian_egg_info_path
paths_to_remove.add(egg_info_path)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata('installed-files.txt').splitlines():
path = os.path.normpath(os.path.join(egg_info_path, installed_file))
paths_to_remove.add(path)
#FIXME: need a test for this elif block
#occurs with --single-version-externally-managed/--record outside of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
elif dist.location.endswith(easy_install_egg):
# package installed by easy_install
paths_to_remove.add(dist.location)
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif develop_egg_link:
# develop egg
fh = open(develop_egg_link, 'r')
link_pointer = os.path.normcase(fh.readline().strip())
fh.close()
assert (link_pointer == dist.location), 'Egg-link %s does not match installed location of %s (at %s)' % (link_pointer, self.name, dist.location)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
paths_to_remove.add(os.path.join(bin_py, script))
if sys.platform == 'win32':
paths_to_remove.add(os.path.join(bin_py, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
config = ConfigParser.SafeConfigParser()
config.readfp(FakeFile(dist.get_metadata_lines('entry_points.txt')))
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
paths_to_remove.add(os.path.join(bin_py, name))
if sys.platform == 'win32':
paths_to_remove.add(os.path.join(bin_py, name) + '.exe')
paths_to_remove.add(os.path.join(bin_py, name) + '.exe.manifest')
paths_to_remove.add(os.path.join(bin_py, name) + '-script.py')
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove |
def register_hooks(self, on_terminate=None, on_reload=None,
on_new_worker=None, on_dead_worker=None):
"""Register hook methods
This can be called multiple times to add more hooks; hooks are
executed in the order they were added. If a hook raises an exception,
the remaining hooks will not be executed.
:param on_terminate: method called on SIGTERM
:type on_terminate: callable()
:param on_reload: method called on SIGHUP
:type on_reload: callable()
:param on_new_worker: method called in the child process when this one
is ready
:type on_new_worker: callable(service_id, worker_id, service_obj)
:param on_dead_worker: method called when a child dies
:type on_dead_worker: callable(service_id, worker_id, exit_code)
If Windows support is planned, hook callables must be picklable
(see the CPython multiprocessing module documentation for more
detail).
"""
if on_terminate is not None:
_utils.check_callable(on_terminate, 'on_terminate')
self._hooks['terminate'].append(on_terminate)
if on_reload is not None:
_utils.check_callable(on_reload, 'on_reload')
self._hooks['reload'].append(on_reload)
if on_new_worker is not None:
_utils.check_callable(on_new_worker, 'on_new_worker')
self._hooks['new_worker'].append(on_new_worker)
if on_dead_worker is not None:
_utils.check_callable(on_dead_worker, 'on_dead_worker')
self._hooks['dead_worker'].append(on_dead_worker) | Register hook methods
This can be called multiple times to add more hooks; hooks are
executed in the order they were added. If a hook raises an exception,
the remaining hooks will not be executed.
:param on_terminate: method called on SIGTERM
:type on_terminate: callable()
:param on_reload: method called on SIGHUP
:type on_reload: callable()
:param on_new_worker: method called in the child process when this one
is ready
:type on_new_worker: callable(service_id, worker_id, service_obj)
:param on_dead_worker: method called when a child dies
:type on_dead_worker: callable(service_id, worker_id, exit_code)
If Windows support is planned, hook callables must be picklable
(see the CPython multiprocessing module documentation for more
detail). | Below is the instruction that describes the task:
### Input:
Register hook methods
This can be called multiple times to add more hooks; hooks are
executed in the order they were added. If a hook raises an exception,
the remaining hooks will not be executed.
:param on_terminate: method called on SIGTERM
:type on_terminate: callable()
:param on_reload: method called on SIGHUP
:type on_reload: callable()
:param on_new_worker: method called in the child process when this one
is ready
:type on_new_worker: callable(service_id, worker_id, service_obj)
:param on_dead_worker: method called when a child dies
:type on_dead_worker: callable(service_id, worker_id, exit_code)
If Windows support is planned, hook callables must be picklable
(see the CPython multiprocessing module documentation for more
detail).
### Response:
def register_hooks(self, on_terminate=None, on_reload=None,
on_new_worker=None, on_dead_worker=None):
"""Register hook methods
This can be called multiple times to add more hooks; hooks are
executed in the order they were added. If a hook raises an exception,
the remaining hooks will not be executed.
:param on_terminate: method called on SIGTERM
:type on_terminate: callable()
:param on_reload: method called on SIGHUP
:type on_reload: callable()
:param on_new_worker: method called in the child process when this one
is ready
:type on_new_worker: callable(service_id, worker_id, service_obj)
:param on_dead_worker: method called when a child dies
:type on_dead_worker: callable(service_id, worker_id, exit_code)
If Windows support is planned, hook callables must be picklable
(see the CPython multiprocessing module documentation for more
detail).
"""
if on_terminate is not None:
_utils.check_callable(on_terminate, 'on_terminate')
self._hooks['terminate'].append(on_terminate)
if on_reload is not None:
_utils.check_callable(on_reload, 'on_reload')
self._hooks['reload'].append(on_reload)
if on_new_worker is not None:
_utils.check_callable(on_new_worker, 'on_new_worker')
self._hooks['new_worker'].append(on_new_worker)
if on_dead_worker is not None:
_utils.check_callable(on_dead_worker, 'on_dead_worker')
self._hooks['dead_worker'].append(on_dead_worker) |
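A hedged usage sketch (the surrounding class is not shown here; this assumes the cotyledon-style ServiceManager API that the hook names suggest):
import cotyledon  # assumption: this method comes from a cotyledon-like service manager

def on_terminate():
    print('manager is shutting down')

def on_dead_worker(service_id, worker_id, exit_code):
    print('worker died:', service_id, worker_id, exit_code)

manager = cotyledon.ServiceManager()
manager.register_hooks(on_terminate=on_terminate, on_dead_worker=on_dead_worker)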
def print_stats(self, clsname=None, limit=1.0):
"""
Write tracked objects to stdout. The output can be filtered and
pruned. Only objects are printed whose classname contains the substring
supplied by the `clsname` argument. The output can be pruned by
passing a `limit` value.
:param clsname: Only print objects whose classname contains the given
substring.
:param limit: If `limit` is a float smaller than one, only the supplied
percentage of the total tracked data is printed. If `limit` is
bigger than one, this number of tracked objects are printed.
Tracked objects are first filtered, and then pruned (if specified).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if not self.sorted:
self.sort_stats()
_sorted = self.sorted
if clsname:
_sorted = [to for to in _sorted if clsname in to.classname]
if limit < 1.0:
limit = max(1, int(len(self.sorted) * limit))
_sorted = _sorted[:int(limit)]
# Emit per-instance data
for tobj in _sorted:
self.print_object(tobj) | Write tracked objects to stdout. The output can be filtered and
pruned. Only objects are printed whose classname contains the substring
supplied by the `clsname` argument. The output can be pruned by
passing a `limit` value.
:param clsname: Only print objects whose classname contains the given
substring.
:param limit: If `limit` is a float smaller than one, only the supplied
percentage of the total tracked data is printed. If `limit` is
bigger than one, this number of tracked objects are printed.
Tracked objects are first filtered, and then pruned (if specified). | Below is the instruction that describes the task:
### Input:
Write tracked objects to stdout. The output can be filtered and
pruned. Only objects are printed whose classname contains the substring
supplied by the `clsname` argument. The output can be pruned by
passing a `limit` value.
:param clsname: Only print objects whose classname contains the given
substring.
:param limit: If `limit` is a float smaller than one, only the supplied
percentage of the total tracked data is printed. If `limit` is
bigger than one, this number of tracked objects are printed.
Tracked objects are first filtered, and then pruned (if specified).
### Response:
def print_stats(self, clsname=None, limit=1.0):
"""
Write tracked objects to stdout. The output can be filtered and
pruned. Only objects are printed whose classname contains the substring
supplied by the `clsname` argument. The output can be pruned by
passing a `limit` value.
:param clsname: Only print objects whose classname contains the given
substring.
:param limit: If `limit` is a float smaller than one, only the supplied
percentage of the total tracked data is printed. If `limit` is
bigger than one, this number of tracked objects are printed.
Tracked objects are first filtered, and then pruned (if specified).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if not self.sorted:
self.sort_stats()
_sorted = self.sorted
if clsname:
_sorted = [to for to in _sorted if clsname in to.classname]
if limit < 1.0:
limit = max(1, int(len(self.sorted) * limit))
_sorted = _sorted[:int(limit)]
# Emit per-instance data
for tobj in _sorted:
self.print_object(tobj) |
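A hedged usage sketch (assuming this is the Pympler ClassTracker statistics API, which the tracker/snapshot references suggest; the tracked class is illustrative):
from pympler.classtracker import ClassTracker

class Document:
    def __init__(self, text):
        self.text = text

tracker = ClassTracker()
tracker.track_class(Document)
docs = [Document('x' * n) for n in range(100)]
tracker.create_snapshot()
# Print only tracked Document instances, pruned to ten tracked objects.
tracker.stats.print_stats(clsname='Document', limit=10)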
def uploadStickerFile(self, user_id, png_sticker):
"""
See: https://core.telegram.org/bots/api#uploadstickerfile
"""
p = _strip(locals(), more=['png_sticker'])
return self._api_request_with_file('uploadStickerFile', _rectify(p), 'png_sticker', png_sticker) | See: https://core.telegram.org/bots/api#uploadstickerfile | Below is the the instruction that describes the task:
### Input:
See: https://core.telegram.org/bots/api#uploadstickerfile
### Response:
def uploadStickerFile(self, user_id, png_sticker):
"""
See: https://core.telegram.org/bots/api#uploadstickerfile
"""
p = _strip(locals(), more=['png_sticker'])
return self._api_request_with_file('uploadStickerFile', _rectify(p), 'png_sticker', png_sticker) |
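A hedged usage sketch (assuming the telepot Bot wrapper that this camelCase method style suggests; the token, file name and user id are placeholders):
import telepot

bot = telepot.Bot('123456:ABC-DEF-placeholder-token')  # placeholder token
with open('sticker.png', 'rb') as png:
    uploaded = bot.uploadStickerFile(user_id=987654321, png_sticker=png)
print(uploaded.get('file_id'))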
def parent_chain(self):
"""
Return the list of parents starting from this node. The chain ends
at the first node with no parents.
"""
chain = [self]
while True:
try:
parent = chain[-1].parent
except Exception:
break
if parent is None:
break
chain.append(parent)
return chain | Return the list of parents starting from this node. The chain ends
at the first node with no parents. | Below is the instruction that describes the task:
### Input:
Return the list of parents starting from this node. The chain ends
at the first node with no parents.
### Response:
def parent_chain(self):
"""
Return the list of parents starting from this node. The chain ends
at the first node with no parents.
"""
chain = [self]
while True:
try:
parent = chain[-1].parent
except Exception:
break
if parent is None:
break
chain.append(parent)
return chain |
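A minimal self-contained sketch of the same chain walk (the Node class here is illustrative, not from the original library):
class Node:
    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent

    def parent_chain(self):
        # Walk upwards until a node with no parent is reached.
        chain = [self]
        while chain[-1].parent is not None:
            chain.append(chain[-1].parent)
        return chain

root = Node('root')
child = Node('child', parent=root)
leaf = Node('leaf', parent=child)
print([n.name for n in leaf.parent_chain()])  # ['leaf', 'child', 'root']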
def _augment_book(self, uuid, event):
"""
Checks if the newly created object is a book and only has an ISBN.
If so, tries to fetch the book data off the internet.
:param uuid: uuid of book to augment
:param client: requesting client
"""
try:
if not isbnmeta:
self.log(
"No isbntools found! Install it to get full "
"functionality!",
lvl=warn)
return
new_book = objectmodels['book'].find_one({'uuid': uuid})
try:
if len(new_book.isbn) != 0:
self.log('Got a lookup candidate: ', new_book._fields)
try:
meta = isbnmeta(
new_book.isbn,
service=self.config.isbnservice
)
mapping = libraryfieldmapping[
self.config.isbnservice
]
new_meta = {}
for key in meta.keys():
if key in mapping:
if isinstance(mapping[key], tuple):
name, conv = mapping[key]
try:
new_meta[name] = conv(meta[key])
except ValueError:
self.log(
'Bad value from lookup:',
name, conv, key
)
else:
new_meta[mapping[key]] = meta[key]
new_book.update(new_meta)
new_book.save()
self._notify_result(event, new_book)
self.log("Book successfully augmented from ",
self.config.isbnservice)
except Exception as e:
self.log("Error during meta lookup: ", e, type(e),
new_book.isbn, lvl=error, exc=True)
error_response = {
'component': 'hfos.alert.manager',
'action': 'notify',
'data': {
'type': 'error',
'message': 'Could not look up metadata, sorry:' + str(e)
}
}
self.log(event, event.client, pretty=True)
self.fireEvent(send(event.client.uuid, error_response))
except Exception as e:
self.log("Error during book update.", e, type(e),
exc=True, lvl=error)
except Exception as e:
self.log("Book creation notification error: ", uuid, e, type(e),
lvl=error, exc=True) | Checks if the newly created object is a book and only has an ISBN.
If so, tries to fetch the book data off the internet.
:param uuid: uuid of book to augment
:param client: requesting client | Below is the instruction that describes the task:
### Input:
Checks if the newly created object is a book and only has an ISBN.
If so, tries to fetch the book data off the internet.
:param uuid: uuid of book to augment
:param client: requesting client
### Response:
def _augment_book(self, uuid, event):
"""
Checks if the newly created object is a book and only has an ISBN.
If so, tries to fetch the book data off the internet.
:param uuid: uuid of book to augment
:param client: requesting client
"""
try:
if not isbnmeta:
self.log(
"No isbntools found! Install it to get full "
"functionality!",
lvl=warn)
return
new_book = objectmodels['book'].find_one({'uuid': uuid})
try:
if len(new_book.isbn) != 0:
self.log('Got a lookup candidate: ', new_book._fields)
try:
meta = isbnmeta(
new_book.isbn,
service=self.config.isbnservice
)
mapping = libraryfieldmapping[
self.config.isbnservice
]
new_meta = {}
for key in meta.keys():
if key in mapping:
if isinstance(mapping[key], tuple):
name, conv = mapping[key]
try:
new_meta[name] = conv(meta[key])
except ValueError:
self.log(
'Bad value from lookup:',
name, conv, key
)
else:
new_meta[mapping[key]] = meta[key]
new_book.update(new_meta)
new_book.save()
self._notify_result(event, new_book)
self.log("Book successfully augmented from ",
self.config.isbnservice)
except Exception as e:
self.log("Error during meta lookup: ", e, type(e),
new_book.isbn, lvl=error, exc=True)
error_response = {
'component': 'hfos.alert.manager',
'action': 'notify',
'data': {
'type': 'error',
'message': 'Could not look up metadata, sorry:' + str(e)
}
}
self.log(event, event.client, pretty=True)
self.fireEvent(send(event.client.uuid, error_response))
except Exception as e:
self.log("Error during book update.", e, type(e),
exc=True, lvl=error)
except Exception as e:
self.log("Book creation notification error: ", uuid, e, type(e),
lvl=error, exc=True) |
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
extra_values=None):
"""
This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a function, defaults to excluding 'self'
:param exclude_values: obj,list,func if not a function it will be converted
into one, defaults to excluding None
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
"""
args = function_args(function)
locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
if extra_values:
locals_values.update(extra_values)
return {k: v for k, v in locals_values.items() if k in args} | This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a function, defaults to excluding 'self'
:param exclude_values: obj,list,func if not a function it will be converted
into one, defaults to excluding None
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function | Below is the instruction that describes the task:
### Input:
This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a function, defaults to excluding 'self'
:param exclude_values: obj,list,func if not a function it will be converted
into one, defaults to excluding None
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
### Response:
def relevant_kwargs(function, exclude_keys='self', exclude_values=None,
extra_values=None):
"""
This will return a dictionary of local variables that are parameters to the
function provided in the arg.
Example:
function(**relevant_kwargs(function))
:param function: function to select parameters for
:param exclude_keys: str,list,func if not a function it will be converted
into a function, defaults to excluding 'self'
:param exclude_values: obj,list,func if not a function it will be converted
into one, defaults to excluding None
:param extra_values: dict of other values to include with local
:return: dict of local variables for the function
"""
args = function_args(function)
locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys)
if extra_values:
locals_values.update(extra_values)
return {k: v for k, v in locals_values.items() if k in args} |
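A hedged sketch of the call pattern from the docstring's Example line (the import path is unknown here and shown as a placeholder; relevant_kwargs inspects the caller's locals via the function_kwargs helper it uses, so the exact behavior depends on that helper):
# from some_utils import relevant_kwargs  # placeholder import path

def connect(host, port=5432, timeout=30):
    return '%s:%s (timeout=%s)' % (host, port, timeout)

def handler(host, port, user, password):
    # Locals include user/password, which connect() does not accept;
    # relevant_kwargs(connect) keeps only the matching names (host, port).
    return connect(**relevant_kwargs(connect))

print(handler('db.local', 5432, 'alice', 'secret'))  # db.local:5432 (timeout=30)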
def expand(self, new_size):
""" expand the LUN to a new size
:param new_size: new size in bytes.
:return: the old size
"""
ret = self.size_total
resp = self.modify(size=new_size)
resp.raise_if_err()
return ret | expand the LUN to a new size
:param new_size: new size in bytes.
:return: the old size | Below is the instruction that describes the task:
### Input:
expand the LUN to a new size
:param new_size: new size in bytes.
:return: the old size
### Response:
def expand(self, new_size):
""" expand the LUN to a new size
:param new_size: new size in bytes.
:return: the old size
"""
ret = self.size_total
resp = self.modify(size=new_size)
resp.raise_if_err()
return ret |
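A hedged usage sketch (assuming the storops Unity API that the size_total/modify pattern suggests; the address, credentials and LUN name are placeholders):
from storops import UnitySystem  # assumption: storops library

unity = UnitySystem('10.0.0.1', 'admin', 'password')  # placeholder credentials
lun = unity.get_lun(name='vol01')
old_size = lun.expand(500 * 1024 ** 3)  # grow to 500 GiB; returns the previous size in bytes
print('expanded from', old_size, 'bytes')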
def new_workunit_under_parent(self, name, parent, labels=None, cmd='', log_config=None):
"""Creates a (hierarchical) subunit of work for the purpose of timing and reporting.
- name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
- parent: The new workunit is created under this parent.
- labels: An optional iterable of labels. The reporters can use this to decide how to
display information about this work.
- cmd: An optional longer string representing this work.
E.g., the cmd line of a compiler invocation.
Task code should not typically call this directly.
:API: public
"""
workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=parent, name=name, labels=labels,
cmd=cmd, log_config=log_config)
workunit.start()
outcome = WorkUnit.FAILURE # Default to failure we will override if we get success/abort.
try:
self.report.start_workunit(workunit)
yield workunit
except KeyboardInterrupt:
outcome = WorkUnit.ABORTED
self._aborted = True
raise
else:
outcome = WorkUnit.SUCCESS
finally:
workunit.set_outcome(outcome)
self.end_workunit(workunit) | Creates a (hierarchical) subunit of work for the purpose of timing and reporting.
- name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
- parent: The new workunit is created under this parent.
- labels: An optional iterable of labels. The reporters can use this to decide how to
display information about this work.
- cmd: An optional longer string representing this work.
E.g., the cmd line of a compiler invocation.
Task code should not typically call this directly.
:API: public | Below is the instruction that describes the task:
### Input:
Creates a (hierarchical) subunit of work for the purpose of timing and reporting.
- name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
- parent: The new workunit is created under this parent.
- labels: An optional iterable of labels. The reporters can use this to decide how to
display information about this work.
- cmd: An optional longer string representing this work.
E.g., the cmd line of a compiler invocation.
Task code should not typically call this directly.
:API: public
### Response:
def new_workunit_under_parent(self, name, parent, labels=None, cmd='', log_config=None):
"""Creates a (hierarchical) subunit of work for the purpose of timing and reporting.
- name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
- parent: The new workunit is created under this parent.
- labels: An optional iterable of labels. The reporters can use this to decide how to
display information about this work.
- cmd: An optional longer string representing this work.
E.g., the cmd line of a compiler invocation.
Task code should not typically call this directly.
:API: public
"""
workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=parent, name=name, labels=labels,
cmd=cmd, log_config=log_config)
workunit.start()
outcome = WorkUnit.FAILURE # Default to failure we will override if we get success/abort.
try:
self.report.start_workunit(workunit)
yield workunit
except KeyboardInterrupt:
outcome = WorkUnit.ABORTED
self._aborted = True
raise
else:
outcome = WorkUnit.SUCCESS
finally:
workunit.set_outcome(outcome)
self.end_workunit(workunit) |
def _slice_weights(self, arr, li, lh):
"""slice fused rnn weights"""
args = {}
gate_names = self._gate_names
directions = self._directions
b = len(directions)
p = 0
for layer in range(self._num_layers):
for direction in directions:
for gate in gate_names:
name = '%s%s%d_i2h%s_weight'%(self._prefix, direction, layer, gate)
if layer > 0:
size = b*lh*lh
args[name] = arr[p:p+size].reshape((lh, b*lh))
else:
size = li*lh
args[name] = arr[p:p+size].reshape((lh, li))
p += size
for gate in gate_names:
name = '%s%s%d_h2h%s_weight'%(self._prefix, direction, layer, gate)
size = lh**2
args[name] = arr[p:p+size].reshape((lh, lh))
p += size
for layer in range(self._num_layers):
for direction in directions:
for gate in gate_names:
name = '%s%s%d_i2h%s_bias'%(self._prefix, direction, layer, gate)
args[name] = arr[p:p+lh]
p += lh
for gate in gate_names:
name = '%s%s%d_h2h%s_bias'%(self._prefix, direction, layer, gate)
args[name] = arr[p:p+lh]
p += lh
assert p == arr.size, "Invalid parameters size for FusedRNNCell"
return args | slice fused rnn weights | Below is the the instruction that describes the task:
### Input:
slice fused rnn weights
### Response:
def _slice_weights(self, arr, li, lh):
"""slice fused rnn weights"""
args = {}
gate_names = self._gate_names
directions = self._directions
b = len(directions)
p = 0
for layer in range(self._num_layers):
for direction in directions:
for gate in gate_names:
name = '%s%s%d_i2h%s_weight'%(self._prefix, direction, layer, gate)
if layer > 0:
size = b*lh*lh
args[name] = arr[p:p+size].reshape((lh, b*lh))
else:
size = li*lh
args[name] = arr[p:p+size].reshape((lh, li))
p += size
for gate in gate_names:
name = '%s%s%d_h2h%s_weight'%(self._prefix, direction, layer, gate)
size = lh**2
args[name] = arr[p:p+size].reshape((lh, lh))
p += size
for layer in range(self._num_layers):
for direction in directions:
for gate in gate_names:
name = '%s%s%d_i2h%s_bias'%(self._prefix, direction, layer, gate)
args[name] = arr[p:p+lh]
p += lh
for gate in gate_names:
name = '%s%s%d_h2h%s_bias'%(self._prefix, direction, layer, gate)
args[name] = arr[p:p+lh]
p += lh
assert p == arr.size, "Invalid parameters size for FusedRNNCell"
return args |
def hsts_header(self):
"""Returns the proper HSTS policy."""
hsts_policy = 'max-age={0}'.format(self.hsts_age)
if self.hsts_include_subdomains:
hsts_policy += '; includeSubDomains'
return hsts_policy | Returns the proper HSTS policy. | Below is the the instruction that describes the task:
### Input:
Returns the proper HSTS policy.
### Response:
def hsts_header(self):
"""Returns the proper HSTS policy."""
hsts_policy = 'max-age={0}'.format(self.hsts_age)
if self.hsts_include_subdomains:
hsts_policy += '; includeSubDomains'
return hsts_policy |
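A minimal self-contained sketch of the policy string this property builds (the small config class is illustrative; the original appears to belong to a Flask HSTS/SSL extension):
class HSTSConfig:
    def __init__(self, hsts_age=31536000, hsts_include_subdomains=True):
        self.hsts_age = hsts_age
        self.hsts_include_subdomains = hsts_include_subdomains

    @property
    def hsts_header(self):
        # Mirrors the property above: max-age plus optional includeSubDomains.
        policy = 'max-age={0}'.format(self.hsts_age)
        if self.hsts_include_subdomains:
            policy += '; includeSubDomains'
        return policy

print(HSTSConfig().hsts_header)  # max-age=31536000; includeSubDomains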
def _n2deriv(self,l,n):
"""
NAME:
_n2deriv
PURPOSE:
evaluate the second derivative w.r.t. nu for this potential
INPUT:
l - prolate spheroidal coordinate lambda
n - prolate spheroidal coordinate nu
OUTPUT:
second derivative w.r.t. nu
HISTORY:
2015-02-15 - Written - Trick (MPIA)
"""
numer = -nu.sqrt(l) - 3.*nu.sqrt(n)
denom = 4. * n**1.5 * (nu.sqrt(l)+nu.sqrt(n))**3
return numer / denom | NAME:
_n2deriv
PURPOSE:
evaluate the second derivative w.r.t. nu for this potential
INPUT:
l - prolate spheroidal coordinate lambda
n - prolate spheroidal coordinate nu
OUTPUT:
second derivative w.r.t. nu
HISTORY:
2015-02-15 - Written - Trick (MPIA) | Below is the instruction that describes the task:
### Input:
NAME:
_n2deriv
PURPOSE:
evaluate the second derivative w.r.t. nu for this potential
INPUT:
l - prolate spheroidal coordinate lambda
n - prolate spheroidal coordinate nu
OUTPUT:
second derivative w.r.t. nu
HISTORY:
2015-02-15 - Written - Trick (MPIA)
### Response:
def _n2deriv(self,l,n):
"""
NAME:
_n2deriv
PURPOSE:
evaluate the second derivative w.r.t. nu for this potential
INPUT:
l - prolate spheroidal coordinate lambda
n - prolate spheroidal coordinate nu
OUTPUT:
second derivative w.r.t. nu
HISTORY:
2015-02-15 - Written - Trick (MPIA)
"""
numer = -nu.sqrt(l) - 3.*nu.sqrt(n)
denom = 4. * n**1.5 * (nu.sqrt(l)+nu.sqrt(n))**3
return numer / denom |
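The body above evaluates -(sqrt(lambda) + 3*sqrt(nu)) / (4 * nu**1.5 * (sqrt(lambda) + sqrt(nu))**3); a small self-contained NumPy check with arbitrary test values:
import numpy as np

def n2deriv(l, n):
    # Same expression as the method body, written against plain NumPy.
    numer = -np.sqrt(l) - 3.0 * np.sqrt(n)
    denom = 4.0 * n ** 1.5 * (np.sqrt(l) + np.sqrt(n)) ** 3
    return numer / denom

print(n2deriv(2.0, 0.5))  # value at lambda=2.0, nu=0.5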
def mutagen_call(action, path, func, *args, **kwargs):
"""Call a Mutagen function with appropriate error handling.
`action` is a string describing what the function is trying to do,
and `path` is the relevant filename. The rest of the arguments
describe the callable to invoke.
We require at least Mutagen 1.33, where `IOError` is *never* used,
neither for internal parsing errors *nor* for ordinary IO error
conditions such as a bad filename. Mutagen-specific parsing errors and IO
errors are reraised as `UnreadableFileError`. Other exceptions
raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`.
"""
try:
return func(*args, **kwargs)
except mutagen.MutagenError as exc:
log.debug(u'%s failed: %s', action, six.text_type(exc))
raise UnreadableFileError(path, six.text_type(exc))
except Exception as exc:
# Isolate bugs in Mutagen.
log.debug(u'%s', traceback.format_exc())
log.error(u'uncaught Mutagen exception in %s: %s', action, exc)
raise MutagenError(path, exc) | Call a Mutagen function with appropriate error handling.
`action` is a string describing what the function is trying to do,
and `path` is the relevant filename. The rest of the arguments
describe the callable to invoke.
We require at least Mutagen 1.33, where `IOError` is *never* used,
neither for internal parsing errors *nor* for ordinary IO error
conditions such as a bad filename. Mutagen-specific parsing errors and IO
errors are reraised as `UnreadableFileError`. Other exceptions
raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`. | Below is the instruction that describes the task:
### Input:
Call a Mutagen function with appropriate error handling.
`action` is a string describing what the function is trying to do,
and `path` is the relevant filename. The rest of the arguments
describe the callable to invoke.
We require at least Mutagen 1.33, where `IOError` is *never* used,
neither for internal parsing errors *nor* for ordinary IO error
conditions such as a bad filename. Mutagen-specific parsing errors and IO
errors are reraised as `UnreadableFileError`. Other exceptions
raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`.
### Response:
def mutagen_call(action, path, func, *args, **kwargs):
"""Call a Mutagen function with appropriate error handling.
`action` is a string describing what the function is trying to do,
and `path` is the relevant filename. The rest of the arguments
describe the callable to invoke.
We require at least Mutagen 1.33, where `IOError` is *never* used,
neither for internal parsing errors *nor* for ordinary IO error
conditions such as a bad filename. Mutagen-specific parsing errors and IO
errors are reraised as `UnreadableFileError`. Other exceptions
raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`.
"""
try:
return func(*args, **kwargs)
except mutagen.MutagenError as exc:
log.debug(u'%s failed: %s', action, six.text_type(exc))
raise UnreadableFileError(path, six.text_type(exc))
except Exception as exc:
# Isolate bugs in Mutagen.
log.debug(u'%s', traceback.format_exc())
log.error(u'uncaught Mutagen exception in %s: %s', action, exc)
raise MutagenError(path, exc) |
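A hedged usage sketch showing the wrapper guarding an ordinary Mutagen open call (the file path is a placeholder; UnreadableFileError is the helper exception referenced above):
import mutagen

path = 'song.mp3'  # placeholder path
try:
    tags = mutagen_call('open', path, mutagen.File, path, easy=True)
except UnreadableFileError as exc:
    print('could not read tags:', exc)
else:
    print(tags)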
def get_oauth_request(self):
"""Return an OAuth Request object for the current request."""
try:
method = os.environ['REQUEST_METHOD']
except:
method = 'GET'
postdata = None
if method in ('POST', 'PUT'):
postdata = self.request.body
return oauth.Request.from_request(method, self.request.uri,
headers=self.request.headers, query_string=postdata) | Below is the instruction that describes the task:
### Input:
Return an OAuth Request object for the current request.
### Response:
def get_oauth_request(self):
"""Return an OAuth Request object for the current request."""
try:
method = os.environ['REQUEST_METHOD']
except:
method = 'GET'
postdata = None
if method in ('POST', 'PUT'):
postdata = self.request.body
return oauth.Request.from_request(method, self.request.uri,
headers=self.request.headers, query_string=postdata) |
def main():
"""The simplest usage of watershed delineation based on TauDEM."""
dem = '../tests/data/Jamaica_dem.tif'
num_proc = 2
wp = '../tests/data/tmp_results/wtsd_delineation'
TauDEMWorkflow.watershed_delineation(num_proc, dem, workingdir=wp) | Below is the instruction that describes the task:
### Input:
The simplest usage of watershed delineation based on TauDEM.
### Response:
def main():
"""The simplest usage of watershed delineation based on TauDEM."""
dem = '../tests/data/Jamaica_dem.tif'
num_proc = 2
wp = '../tests/data/tmp_results/wtsd_delineation'
TauDEMWorkflow.watershed_delineation(num_proc, dem, workingdir=wp) |