code (stringlengths 75 to 104k) | docstring (stringlengths 1 to 46.9k) | text (stringlengths 164 to 112k) |
---|---|---|
def _fbp_filter(norm_freq, filter_type, frequency_scaling):
"""Create a smoothing filter for FBP.
Parameters
----------
norm_freq : `array-like`
Frequencies normalized to lie in the interval [0, 1].
filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann',
callable}
The type of filter to be used.
If a string is given, use one of the standard filters with that name.
A callable should take an array of values in [0, 1] and return the
filter for these frequencies.
frequency_scaling : float
Scaling of the frequencies for the filter. All frequencies are scaled
by this number, any relative frequency above ``frequency_scaling`` is
set to 0.
Returns
-------
smoothing_filter : `numpy.ndarray`
Examples
--------
Create an FBP filter
>>> norm_freq = np.linspace(0, 1, 10)
>>> filt = _fbp_filter(norm_freq,
... filter_type='Hann',
... frequency_scaling=0.8)
"""
filter_type, filter_type_in = str(filter_type).lower(), filter_type
if callable(filter_type_in):
filt = filter_type_in(norm_freq)
elif filter_type == 'ram-lak':
filt = np.copy(norm_freq)
elif filter_type == 'shepp-logan':
filt = norm_freq * np.sinc(norm_freq / (2 * frequency_scaling))
elif filter_type == 'cosine':
filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling))
elif filter_type == 'hamming':
filt = norm_freq * (
0.54 + 0.46 * np.cos(norm_freq * np.pi / (frequency_scaling)))
elif filter_type == 'hann':
filt = norm_freq * (
np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2)
else:
raise ValueError('unknown `filter_type` ({})'
''.format(filter_type_in))
indicator = (norm_freq <= frequency_scaling)
filt *= indicator
return filt | Create a smoothing filter for FBP.
Parameters
----------
norm_freq : `array-like`
Frequencies normalized to lie in the interval [0, 1].
filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann',
callable}
The type of filter to be used.
If a string is given, use one of the standard filters with that name.
A callable should take an array of values in [0, 1] and return the
filter for these frequencies.
frequency_scaling : float
Scaling of the frequencies for the filter. All frequencies are scaled
by this number, any relative frequency above ``frequency_scaling`` is
set to 0.
Returns
-------
smoothing_filter : `numpy.ndarray`
Examples
--------
Create an FBP filter
>>> norm_freq = np.linspace(0, 1, 10)
>>> filt = _fbp_filter(norm_freq,
... filter_type='Hann',
... frequency_scaling=0.8) | Below is the instruction that describes the task:
### Input:
Create a smoothing filter for FBP.
Parameters
----------
norm_freq : `array-like`
Frequencies normalized to lie in the interval [0, 1].
filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann',
callable}
The type of filter to be used.
If a string is given, use one of the standard filters with that name.
A callable should take an array of values in [0, 1] and return the
filter for these frequencies.
frequency_scaling : float
Scaling of the frequencies for the filter. All frequencies are scaled
by this number, any relative frequency above ``frequency_scaling`` is
set to 0.
Returns
-------
smoothing_filter : `numpy.ndarray`
Examples
--------
Create an FBP filter
>>> norm_freq = np.linspace(0, 1, 10)
>>> filt = _fbp_filter(norm_freq,
... filter_type='Hann',
... frequency_scaling=0.8)
### Response:
def _fbp_filter(norm_freq, filter_type, frequency_scaling):
"""Create a smoothing filter for FBP.
Parameters
----------
norm_freq : `array-like`
Frequencies normalized to lie in the interval [0, 1].
filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann',
callable}
The type of filter to be used.
If a string is given, use one of the standard filters with that name.
A callable should take an array of values in [0, 1] and return the
filter for these frequencies.
frequency_scaling : float
Scaling of the frequencies for the filter. All frequencies are scaled
by this number, any relative frequency above ``frequency_scaling`` is
set to 0.
Returns
-------
smoothing_filter : `numpy.ndarray`
Examples
--------
Create an FBP filter
>>> norm_freq = np.linspace(0, 1, 10)
>>> filt = _fbp_filter(norm_freq,
... filter_type='Hann',
... frequency_scaling=0.8)
"""
filter_type, filter_type_in = str(filter_type).lower(), filter_type
if callable(filter_type_in):
filt = filter_type_in(norm_freq)
elif filter_type == 'ram-lak':
filt = np.copy(norm_freq)
elif filter_type == 'shepp-logan':
filt = norm_freq * np.sinc(norm_freq / (2 * frequency_scaling))
elif filter_type == 'cosine':
filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling))
elif filter_type == 'hamming':
filt = norm_freq * (
0.54 + 0.46 * np.cos(norm_freq * np.pi / (frequency_scaling)))
elif filter_type == 'hann':
filt = norm_freq * (
np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2)
else:
raise ValueError('unknown `filter_type` ({})'
''.format(filter_type_in))
indicator = (norm_freq <= frequency_scaling)
filt *= indicator
return filt |
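A minimal, self-contained sketch of the 'Hann' branch above, using only NumPy; the array values and ``frequency_scaling`` are taken from the docstring example for illustration:

```python
import numpy as np

norm_freq = np.linspace(0, 1, 10)
frequency_scaling = 0.8

# Hann apodization of the ramp filter, as in the 'hann' branch above.
filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2
# Zero out everything above the relative cutoff frequency.
filt *= (norm_freq <= frequency_scaling)
```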
def _chunk_with_padding(self, iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# _chunk_with_padding('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue) | Collect data into fixed-length chunks or blocks | Below is the instruction that describes the task:
### Input:
Collect data into fixed-length chunks or blocks
### Response:
def _chunk_with_padding(self, iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
# _chunk_with_padding('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue) |
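A standalone sketch of the same ``zip_longest`` chunking recipe, using the example data from the comment above:

```python
from itertools import zip_longest

def chunk_with_padding(iterable, n, fillvalue=None):
    # Reusing the same iterator n times makes zip_longest emit n items per chunk.
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

print(list(chunk_with_padding('ABCDEFG', 3, 'x')))
# [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
```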
def irfftn(a, s, axes=None):
"""
Compute the inverse of the multi-dimensional discrete Fourier transform
for real input. This function is a wrapper for
:func:`pyfftw.interfaces.numpy_fft.irfftn`, with an interface similar to
that of :func:`numpy.fft.irfftn`.
Parameters
----------
a : array_like
Input array
s : sequence of ints
Shape of the output along each transformed axis (input is cropped or
zero-padded to match). This parameter is not optional because, unlike
:func:`ifftn`, the output shape cannot be uniquely determined from
the input shape.
axes : sequence of ints, optional (default None)
Axes over which to compute the inverse DFT.
Returns
-------
af : ndarray
Inverse DFT of input array
"""
return pyfftw.interfaces.numpy_fft.irfftn(
a, s=s, axes=axes, overwrite_input=False,
planner_effort='FFTW_MEASURE', threads=pyfftw_threads) | Compute the inverse of the multi-dimensional discrete Fourier transform
for real input. This function is a wrapper for
:func:`pyfftw.interfaces.numpy_fft.irfftn`, with an interface similar to
that of :func:`numpy.fft.irfftn`.
Parameters
----------
a : array_like
Input array
s : sequence of ints
Shape of the output along each transformed axis (input is cropped or
zero-padded to match). This parameter is not optional because, unlike
:func:`ifftn`, the output shape cannot be uniquely determined from
the input shape.
axes : sequence of ints, optional (default None)
Axes over which to compute the inverse DFT.
Returns
-------
af : ndarray
Inverse DFT of input array | Below is the instruction that describes the task:
### Input:
Compute the inverse of the multi-dimensional discrete Fourier transform
for real input. This function is a wrapper for
:func:`pyfftw.interfaces.numpy_fft.irfftn`, with an interface similar to
that of :func:`numpy.fft.irfftn`.
Parameters
----------
a : array_like
Input array
s : sequence of ints
Shape of the output along each transformed axis (input is cropped or
zero-padded to match). This parameter is not optional because, unlike
:func:`ifftn`, the output shape cannot be uniquely determined from
the input shape.
axes : sequence of ints, optional (default None)
Axes over which to compute the inverse DFT.
Returns
-------
af : ndarray
Inverse DFT of input array
### Response:
def irfftn(a, s, axes=None):
"""
Compute the inverse of the multi-dimensional discrete Fourier transform
for real input. This function is a wrapper for
:func:`pyfftw.interfaces.numpy_fft.irfftn`, with an interface similar to
that of :func:`numpy.fft.irfftn`.
Parameters
----------
a : array_like
Input array
s : sequence of ints
Shape of the output along each transformed axis (input is cropped or
zero-padded to match). This parameter is not optional because, unlike
:func:`ifftn`, the output shape cannot be uniquely determined from
the input shape.
axes : sequence of ints, optional (default None)
Axes over which to compute the inverse DFT.
Returns
-------
af : ndarray
Inverse DFT of input array
"""
return pyfftw.interfaces.numpy_fft.irfftn(
a, s=s, axes=axes, overwrite_input=False,
planner_effort='FFTW_MEASURE', threads=pyfftw_threads) |
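If pyfftw is not available, the same call semantics can be sketched with :func:`numpy.fft.irfftn`; the shape argument ``s`` is required for exactly the reason given in the docstring (the input array here is illustrative):

```python
import numpy as np

a = np.random.rand(4, 6)
spectrum = np.fft.rfftn(a)
# The output shape cannot be inferred from the half-spectrum, so pass it explicitly.
a_rec = np.fft.irfftn(spectrum, s=a.shape)
assert np.allclose(a, a_rec)
```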
def _columns_to_kwargs(conversion_table, columns, row):
"""
Given a list of column names, and a list of values (a row), return a dict
of kwargs that may be used to instantiate a MarketHistoryEntry
or MarketOrder object.
:param dict conversion_table: The conversion table to use for mapping
spec names to kwargs.
:param list columns: A list of column names.
:param list row: A list of values.
"""
kwdict = {}
counter = 0
for column in columns:
# Map the column name to the correct MarketHistoryEntry kwarg.
kwarg_name = conversion_table[column]
# Set the kwarg to the correct value from the row.
kwdict[kwarg_name] = row[counter]
counter += 1
return kwdict | Given a list of column names, and a list of values (a row), return a dict
of kwargs that may be used to instantiate a MarketHistoryEntry
or MarketOrder object.
:param dict conversion_table: The conversion table to use for mapping
spec names to kwargs.
:param list columns: A list of column names.
:param list row: A list of values. | Below is the instruction that describes the task:
### Input:
Given a list of column names, and a list of values (a row), return a dict
of kwargs that may be used to instantiate a MarketHistoryEntry
or MarketOrder object.
:param dict conversion_table: The conversion table to use for mapping
spec names to kwargs.
:param list columns: A list of column names.
:param list row: A list of values.
### Response:
def _columns_to_kwargs(conversion_table, columns, row):
"""
Given a list of column names, and a list of values (a row), return a dict
of kwargs that may be used to instantiate a MarketHistoryEntry
or MarketOrder object.
:param dict conversion_table: The conversion table to use for mapping
spec names to kwargs.
:param list columns: A list of column names.
:param list row: A list of values.
"""
kwdict = {}
counter = 0
for column in columns:
# Map the column name to the correct MarketHistoryEntry kwarg.
kwarg_name = conversion_table[column]
# Set the kwarg to the correct value from the row.
kwdict[kwarg_name] = row[counter]
counter += 1
return kwdict |
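A small illustration of the mapping; the conversion table, column names, and row values are hypothetical:

```python
# Hypothetical conversion table and CSV-style row.
conversion_table = {'orderID': 'order_id', 'price': 'price', 'volRemaining': 'volume_remaining'}
columns = ['orderID', 'price', 'volRemaining']
row = [12345, 9.99, 40]

kwdict = {conversion_table[column]: value for column, value in zip(columns, row)}
print(kwdict)  # {'order_id': 12345, 'price': 9.99, 'volume_remaining': 40}
```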
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors) | Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires. | Below is the instruction that describes the task:
### Input:
Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
### Response:
def evaluate(self, dataset):
'''Evaluate the current model parameters on a dataset.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A set of data to use for evaluating the model.
Returns
-------
monitors : OrderedDict
A dictionary mapping monitor names to values. Monitors are
quantities of interest during optimization---for example, loss
function, accuracy, or whatever the optimization task requires.
'''
if dataset is None:
values = [self.f_eval()]
else:
values = [self.f_eval(*x) for x in dataset]
monitors = zip(self._monitor_names, np.mean(values, axis=0))
return collections.OrderedDict(monitors) |
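The last two lines average the per-batch monitor values column-wise and pair them with the monitor names; a tiny sketch with made-up numbers:

```python
import collections
import numpy as np

# Hypothetical per-batch monitor values: one row per batch, columns are (loss, acc).
values = [(4.0, 1.0), (2.0, 3.0)]
names = ['loss', 'acc']
monitors = collections.OrderedDict(zip(names, np.mean(values, axis=0)))
print(monitors)  # loss -> 3.0, acc -> 2.0
```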
def form_valid(self, form):
if self.__pk:
obj = PurchasesAlbaran.objects.get(pk=self.__pk)
self.request.albaran = obj
form.instance.albaran = obj
form.instance.validator_user = self.request.user
raise Exception("revisar StorageBatch")
"""
batch = StorageBatch.objects.filter(pk=form.data['batch']).first()
if not batch:
errors = form._errors.setdefault("batch", ErrorList())
errors.append(_("Batch invalid"))
return super(LineAlbaranCreate, self).form_invalid(form)
"""
# check whether the purchased product requires a special attribute value
product_final = ProductFinal.objects.filter(pk=form.data['product']).first()
feature_special_value = None
if not product_final:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Product not selected"))
return super(LineAlbaranCreate, self).form_invalid(form)
elif product_final.product.feature_special:
# information about the special feature is mandatory
if 'feature_special_value' not in form.data or not form.data['feature_special_value']:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Product needs information of feature special"))
return super(LineAlbaranCreate, self).form_invalid(form)
else:
feature_special_value = list(set(filter(None, form.data['feature_special_value'].split('\n'))))
try:
quantity = int(float(form.data['quantity']))
except ValueError:
errors = form._errors.setdefault("quantity", ErrorList())
errors.append(_("Quantity is not valid"))
return super(LineAlbaranCreate, self).form_invalid(form)
if product_final.product.feature_special.unique:
# the number of special-feature values must match the quantity
# when the special feature is marked as 'unique'
if len(feature_special_value) != quantity:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Quantity and values of feature special not equals"))
return super(LineAlbaranCreate, self).form_invalid(form)
# the given special-feature values must not already be registered in the system
elif ProductUnique.objects.filter(product_final=product_final, value__in=feature_special_value).exists():
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Some value of feature special exists"))
return super(LineAlbaranCreate, self).form_invalid(form)
elif len(feature_special_value) != 1:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("The special feature must be unique for all products"))
return super(LineAlbaranCreate, self).form_invalid(form)
try:
with transaction.atomic():
# save line albaran
result = super(LineAlbaranCreate, self).form_valid(form)
raise Exception("Cambiar ProductStock por ProductUnique")
"""
if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
# prepare stock
ps = ProductStock()
ps.product_final = product_final
ps.line_albaran = self.object
ps.batch = batch
# save stock
ps.quantity = self.object.quantity
ps.save()
if feature_special_value:
# prepare product feature special
if product_final.product.feature_special.unique:
pfs = ProductUnique()
pfs.product_final = product_final
# save product featureSpecial and stock
for fs in feature_special_value:
pfs.pk = None
pfs.value = fs
pfs.save()
else:
pfs = ProductUnique.objects.filter(
value=feature_special_value[0],
product_final=product_final
).first()
if pfs:
pfs.stock_real += self.object.quantity
else:
pfs = ProductUnique()
pfs.product_final = product_final
pfs.value = feature_special_value[0]
pfs.stock_real = self.object.quantity
pfs.save()
else:
# product unique by default
pfs = ProductUnique.objects.filter(product_final=product_final).first()
if not pfs:
pfs = ProductUnique()
pfs.product_final = product_final
pfs.stock_real = self.object.quantity
else:
pfs.stock_real += self.object.quantity
pfs.save()
"""
return result
except IntegrityError as e:
errors = form._errors.setdefault("product", ErrorList())
errors.append(_("Integrity Error: {}".format(e)))
return super(LineAlbaranCreate, self).form_invalid(form) | batch = StorageBatch.objects.filter(pk=form.data['batch']).first()
if not batch:
errors = form._errors.setdefault("batch", ErrorList())
errors.append(_("Batch invalid"))
return super(LineAlbaranCreate, self).form_invalid(form) | Below is the instruction that describes the task:
### Input:
batch = StorageBatch.objects.filter(pk=form.data['batch']).first()
if not batch:
errors = form._errors.setdefault("batch", ErrorList())
errors.append(_("Batch invalid"))
return super(LineAlbaranCreate, self).form_invalid(form)
### Response:
def form_valid(self, form):
if self.__pk:
obj = PurchasesAlbaran.objects.get(pk=self.__pk)
self.request.albaran = obj
form.instance.albaran = obj
form.instance.validator_user = self.request.user
raise Exception("revisar StorageBatch")
"""
batch = StorageBatch.objects.filter(pk=form.data['batch']).first()
if not batch:
errors = form._errors.setdefault("batch", ErrorList())
errors.append(_("Batch invalid"))
return super(LineAlbaranCreate, self).form_invalid(form)
"""
# check whether the purchased product requires a special attribute value
product_final = ProductFinal.objects.filter(pk=form.data['product']).first()
feature_special_value = None
if not product_final:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Product not selected"))
return super(LineAlbaranCreate, self).form_invalid(form)
elif product_final.product.feature_special:
# information about the special feature is mandatory
if 'feature_special_value' not in form.data or not form.data['feature_special_value']:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Product needs information of feature special"))
return super(LineAlbaranCreate, self).form_invalid(form)
else:
feature_special_value = list(set(filter(None, form.data['feature_special_value'].split('\n'))))
try:
quantity = int(float(form.data['quantity']))
except ValueError:
errors = form._errors.setdefault("quantity", ErrorList())
errors.append(_("Quantity is not valid"))
return super(LineAlbaranCreate, self).form_invalid(form)
if product_final.product.feature_special.unique:
# the number of special-feature values must match the quantity
# when the special feature is marked as 'unique'
if len(feature_special_value) != quantity:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Quantity and values of feature special not equals"))
return super(LineAlbaranCreate, self).form_invalid(form)
# the given special-feature values must not already be registered in the system
elif ProductUnique.objects.filter(product_final=product_final, value__in=feature_special_value).exists():
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Some value of feature special exists"))
return super(LineAlbaranCreate, self).form_invalid(form)
elif len(feature_special_value) != 1:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("The special feature must be unique for all products"))
return super(LineAlbaranCreate, self).form_invalid(form)
try:
with transaction.atomic():
# save line albaran
result = super(LineAlbaranCreate, self).form_valid(form)
raise Exception("Cambiar ProductStock por ProductUnique")
"""
if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
# prepare stock
ps = ProductStock()
ps.product_final = product_final
ps.line_albaran = self.object
ps.batch = batch
# save stock
ps.quantity = self.object.quantity
ps.save()
if feature_special_value:
# prepare product feature special
if product_final.product.feature_special.unique:
pfs = ProductUnique()
pfs.product_final = product_final
# save product featureSpecial and stock
for fs in feature_special_value:
pfs.pk = None
pfs.value = fs
pfs.save()
else:
pfs = ProductUnique.objects.filter(
value=feature_special_value[0],
product_final=product_final
).first()
if pfs:
pfs.stock_real += self.object.quantity
else:
pfs = ProductUnique()
pfs.product_final = product_final
pfs.value = feature_special_value[0]
pfs.stock_real = self.object.quantity
pfs.save()
else:
# product unique by default
pfs = ProductUnique.objects.filter(product_final=product_final).first()
if not pfs:
pfs = ProductUnique()
pfs.product_final = product_final
pfs.stock_real = self.object.quantity
else:
pfs.stock_real += self.object.quantity
pfs.save()
"""
return result
except IntegrityError as e:
errors = form._errors.setdefault("product", ErrorList())
errors.append(_("Integrity Error: {}".format(e)))
return super(LineAlbaranCreate, self).form_invalid(form) |
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter) | Return a JVM Seq of Columns from a list of Column or names | Below is the instruction that describes the task:
### Input:
Return a JVM Seq of Columns from a list of Column or names
### Response:
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter) |
def broadcast(*sinks_):
"""The |broadcast| decorator creates a |push| object that receives a
message by ``yield`` and then sends this message on to all the given sinks.
.. |broadcast| replace:: :py:func:`broadcast`
"""
@push
def bc():
sinks = [s() for s in sinks_]
while True:
msg = yield
for s in sinks:
s.send(msg)
return bc | The |broadcast| decorator creates a |push| object that receives a
message by ``yield`` and then sends this message on to all the given sinks.
.. |broadcast| replace:: :py:func:`broadcast` | Below is the instruction that describes the task:
### Input:
The |broadcast| decorator creates a |push| object that receives a
message by ``yield`` and then sends this message on to all the given sinks.
.. |broadcast| replace:: :py:func:`broadcast`
### Response:
def broadcast(*sinks_):
"""The |broadcast| decorator creates a |push| object that receives a
message by ``yield`` and then sends this message on to all the given sinks.
.. |broadcast| replace:: :py:func:`broadcast`
"""
@push
def bc():
sinks = [s() for s in sinks_]
while True:
msg = yield
for s in sinks:
s.send(msg)
return bc |
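A runnable sketch of the idea, with a minimal stand-in for the ``push`` decorator (assumed here to prime the coroutine to its first ``yield``); the sink function and names are invented for illustration:

```python
import functools

def push(gen_func):
    # Assumed stand-in for the library's `push` decorator: wrap a generator
    # function in a factory that primes the coroutine to its first `yield`.
    @functools.wraps(gen_func)
    def factory(*args, **kwargs):
        gen = gen_func(*args, **kwargs)
        next(gen)
        return gen
    return factory

@push
def printer(prefix):
    while True:
        msg = yield
        print(prefix, msg)

def broadcast(*sinks_):
    @push
    def bc():
        sinks = [s() for s in sinks_]
        while True:
            msg = yield
            for s in sinks:
                s.send(msg)
    return bc

fan_out = broadcast(lambda: printer('A:'), lambda: printer('B:'))()
fan_out.send('hello')  # prints "A: hello" and "B: hello"
```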
def main():
'''
Sets up command line parser for Toil/ADAM based k-mer counter, and launches
k-mer counter with optional Spark cluster.
'''
parser = argparse.ArgumentParser()
# add parser arguments
parser.add_argument('--input_path',
help='The full path to the input SAM/BAM/ADAM/FASTQ file.')
parser.add_argument('--output-path',
help='full path where final results will be output.')
parser.add_argument('--kmer-length',
help='Length to use for k-mer counting. Defaults to 20.',
default=20,
type=int)
parser.add_argument('--spark-conf',
help='Optional configuration to pass to Spark commands. Either this or --workers must be specified.',
default=None)
parser.add_argument('--memory',
help='Optional memory configuration for Spark workers/driver. This must be specified if --workers is specified.',
default=None,
type=int)
parser.add_argument('--cores',
help='Optional core configuration for Spark workers/driver. This must be specified if --workers is specified.',
default=None,
type=int)
parser.add_argument('--workers',
help='Number of workers to spin up in Toil. Either this or --spark-conf must be specified. If this is specified, --memory and --cores must be specified.',
default=None,
type=int)
parser.add_argument('--sudo',
help='Run docker containers with sudo. Defaults to False.',
default=False,
action='store_true')
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
Job.Runner.startToil(Job.wrapJobFn(kmer_dag,
args.kmer_length,
args.input_path,
args.output_path,
args.spark_conf,
args.workers,
args.cores,
args.memory,
args.sudo,
checkpoint=True), args) | Sets up command line parser for Toil/ADAM based k-mer counter, and launches
k-mer counter with optional Spark cluster. | Below is the instruction that describes the task:
### Input:
Sets up command line parser for Toil/ADAM based k-mer counter, and launches
k-mer counter with optional Spark cluster.
### Response:
def main():
'''
Sets up command line parser for Toil/ADAM based k-mer counter, and launches
k-mer counter with optional Spark cluster.
'''
parser = argparse.ArgumentParser()
# add parser arguments
parser.add_argument('--input_path',
help='The full path to the input SAM/BAM/ADAM/FASTQ file.')
parser.add_argument('--output-path',
help='full path where final results will be output.')
parser.add_argument('--kmer-length',
help='Length to use for k-mer counting. Defaults to 20.',
default=20,
type=int)
parser.add_argument('--spark-conf',
help='Optional configuration to pass to Spark commands. Either this or --workers must be specified.',
default=None)
parser.add_argument('--memory',
help='Optional memory configuration for Spark workers/driver. This must be specified if --workers is specified.',
default=None,
type=int)
parser.add_argument('--cores',
help='Optional core configuration for Spark workers/driver. This must be specified if --workers is specified.',
default=None,
type=int)
parser.add_argument('--workers',
help='Number of workers to spin up in Toil. Either this or --spark-conf must be specified. If this is specified, --memory and --cores must be specified.',
default=None,
type=int)
parser.add_argument('--sudo',
help='Run docker containers with sudo. Defaults to False.',
default=False,
action='store_true')
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
Job.Runner.startToil(Job.wrapJobFn(kmer_dag,
args.kmer_length,
args.input_path,
args.output_path,
args.spark_conf,
args.workers,
args.cores,
args.memory,
args.sudo,
checkpoint=True), args) |
def _get_wheel_metadata_from_wheel(
backend, metadata_directory, config_settings):
"""Build a wheel and extract the metadata from it.
Fallback for when the build backend does not
define the 'get_wheel_metadata' hook.
"""
from zipfile import ZipFile
whl_basename = backend.build_wheel(metadata_directory, config_settings)
with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'):
pass # Touch marker file
whl_file = os.path.join(metadata_directory, whl_basename)
with ZipFile(whl_file) as zipf:
dist_info = _dist_info_files(zipf)
zipf.extractall(path=metadata_directory, members=dist_info)
return dist_info[0].split('/')[0] | Build a wheel and extract the metadata from it.
Fallback for when the build backend does not
define the 'get_wheel_metadata' hook. | Below is the instruction that describes the task:
### Input:
Build a wheel and extract the metadata from it.
Fallback for when the build backend does not
define the 'get_wheel_metadata' hook.
### Response:
def _get_wheel_metadata_from_wheel(
backend, metadata_directory, config_settings):
"""Build a wheel and extract the metadata from it.
Fallback for when the build backend does not
define the 'get_wheel_metadata' hook.
"""
from zipfile import ZipFile
whl_basename = backend.build_wheel(metadata_directory, config_settings)
with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'):
pass # Touch marker file
whl_file = os.path.join(metadata_directory, whl_basename)
with ZipFile(whl_file) as zipf:
dist_info = _dist_info_files(zipf)
zipf.extractall(path=metadata_directory, members=dist_info)
return dist_info[0].split('/')[0] |
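The ``_dist_info_files`` helper is not shown above; assuming it selects the members under the top-level ``*.dist-info`` directory, the extraction step looks roughly like this (the wheel filename is invented):

```python
from zipfile import ZipFile

whl_file = 'example_pkg-1.0-py3-none-any.whl'  # hypothetical wheel on disk
with ZipFile(whl_file) as zipf:
    # Collect every member that lives under the top-level *.dist-info directory.
    dist_info = [name for name in zipf.namelist()
                 if name.split('/')[0].endswith('.dist-info')]
print(dist_info[0].split('/')[0])  # e.g. 'example_pkg-1.0.dist-info'
```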
def update(self):
'''
Use primitive parameters to set basic objects. This is an extremely stripped-down version
of update for CobbDouglasEconomy.
Parameters
----------
none
Returns
-------
none
'''
self.kSS = 1.0
self.MSS = 1.0
self.KtoLnow_init = self.kSS
self.Rfunc = ConstantFunction(self.Rfree)
self.wFunc = ConstantFunction(self.wRte)
self.RfreeNow_init = self.Rfunc(self.kSS)
self.wRteNow_init = self.wFunc(self.kSS)
self.MaggNow_init = self.kSS
self.AaggNow_init = self.kSS
self.PermShkAggNow_init = 1.0
self.TranShkAggNow_init = 1.0
self.makeAggShkDstn()
self.AFunc = ConstantFunction(1.0) | Use primitive parameters to set basic objects. This is an extremely stripped-down version
of update for CobbDouglasEconomy.
Parameters
----------
none
Returns
-------
none | Below is the instruction that describes the task:
### Input:
Use primitive parameters to set basic objects. This is an extremely stripped-down version
of update for CobbDouglasEconomy.
Parameters
----------
none
Returns
-------
none
### Response:
def update(self):
'''
Use primitive parameters to set basic objects. This is an extremely stripped-down version
of update for CobbDouglasEconomy.
Parameters
----------
none
Returns
-------
none
'''
self.kSS = 1.0
self.MSS = 1.0
self.KtoLnow_init = self.kSS
self.Rfunc = ConstantFunction(self.Rfree)
self.wFunc = ConstantFunction(self.wRte)
self.RfreeNow_init = self.Rfunc(self.kSS)
self.wRteNow_init = self.wFunc(self.kSS)
self.MaggNow_init = self.kSS
self.AaggNow_init = self.kSS
self.PermShkAggNow_init = 1.0
self.TranShkAggNow_init = 1.0
self.makeAggShkDstn()
self.AFunc = ConstantFunction(1.0) |
def add_reference_context_args(parser):
"""
Extends an ArgumentParser instance with the following commandline arguments:
--context-size
"""
reference_context_group = parser.add_argument_group("Reference Transcripts")
parser.add_argument(
"--context-size",
default=CDNA_CONTEXT_SIZE,
type=int)
return reference_context_group | Extends an ArgumentParser instance with the following commandline arguments:
--context-size | Below is the instruction that describes the task:
### Input:
Extends an ArgumentParser instance with the following commandline arguments:
--context-size
### Response:
def add_reference_context_args(parser):
"""
Extends an ArgumentParser instance with the following commandline arguments:
--context-size
"""
reference_context_group = parser.add_argument_group("Reference Transcripts")
parser.add_argument(
"--context-size",
default=CDNA_CONTEXT_SIZE,
type=int)
return reference_context_group |
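A self-contained sketch of the same pattern, with a literal standing in for the ``CDNA_CONTEXT_SIZE`` constant and the option attached to the group so it is listed under the "Reference Transcripts" heading in ``--help``:

```python
import argparse

CDNA_CONTEXT_SIZE = 45  # placeholder for the real default

parser = argparse.ArgumentParser()
group = parser.add_argument_group("Reference Transcripts")
group.add_argument("--context-size", default=CDNA_CONTEXT_SIZE, type=int)

args = parser.parse_args(["--context-size", "30"])
print(args.context_size)  # 30
```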
def instance(self, counter=None):
"""Returns all the information regarding a specific pipeline run
See the `Go pipeline instance documentation`__ for examples.
.. __: http://api.go.cd/current/#get-pipeline-instance
Args:
counter (int): The pipeline instance to fetch.
If falsy, returns the latest pipeline instance from :meth:`history`.
Returns:
Response: :class:`gocd.api.response.Response` object
"""
if not counter:
history = self.history()
if not history:
return history
else:
return Response._from_json(history['pipelines'][0])
return self._get('/instance/{counter:d}'.format(counter=counter)) | Returns all the information regarding a specific pipeline run
See the `Go pipeline instance documentation`__ for examples.
.. __: http://api.go.cd/current/#get-pipeline-instance
Args:
counter (int): The pipeline instance to fetch.
If falsy, returns the latest pipeline instance from :meth:`history`.
Returns:
Response: :class:`gocd.api.response.Response` object | Below is the instruction that describes the task:
### Input:
Returns all the information regarding a specific pipeline run
See the `Go pipeline instance documentation`__ for examples.
.. __: http://api.go.cd/current/#get-pipeline-instance
Args:
counter (int): The pipeline instance to fetch.
If falsy, returns the latest pipeline instance from :meth:`history`.
Returns:
Response: :class:`gocd.api.response.Response` object
### Response:
def instance(self, counter=None):
"""Returns all the information regarding a specific pipeline run
See the `Go pipeline instance documentation`__ for examples.
.. __: http://api.go.cd/current/#get-pipeline-instance
Args:
counter (int): The pipeline instance to fetch.
If falsy, returns the latest pipeline instance from :meth:`history`.
Returns:
Response: :class:`gocd.api.response.Response` object
"""
if not counter:
history = self.history()
if not history:
return history
else:
return Response._from_json(history['pipelines'][0])
return self._get('/instance/{counter:d}'.format(counter=counter)) |
def sc_to_fc(spvec, nmax, mmax, nrows, ncols):
"""assume Ncols is even"""
fdata = np.zeros([int(nrows), ncols], dtype=np.complex128)
for k in xrange(0, int(ncols / 2)):
if k < mmax:
kk = k
ind = mindx(kk, nmax, mmax)
vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
kk = -(k + 1)
ind = mindx(kk, nmax, mmax)
vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
if k == mmax:
kk = k
ind = mindx(kk, nmax, mmax)
vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
return fdata | assume Ncols is even | Below is the instruction that describes the task:
### Input:
assume Ncols is even
### Response:
def sc_to_fc(spvec, nmax, mmax, nrows, ncols):
"""assume Ncols is even"""
fdata = np.zeros([int(nrows), ncols], dtype=np.complex128)
for k in xrange(0, int(ncols / 2)):
if k < mmax:
kk = k
ind = mindx(kk, nmax, mmax)
vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
kk = -(k + 1)
ind = mindx(kk, nmax, mmax)
vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
if k == mmax:
kk = k
ind = mindx(kk, nmax, mmax)
vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
return fdata |
def format_header_cell(val):
"""
Formats given header column. This involves changing '_Px_' to '(', '_xP_' to ')' and
all other '_' to spaces.
"""
return re.sub('_', ' ', re.sub(r'(_Px_)', '(', re.sub(r'(_xP_)', ')', str(val) ))) | Formats given header column. This involves changing '_Px_' to '(', '_xP_' to ')' and
all other '_' to spaces. | Below is the instruction that describes the task:
### Input:
Formats given header column. This involves changing '_Px_' to '(', '_xP_' to ')' and
all other '_' to spaces.
### Response:
def format_header_cell(val):
"""
Formats given header column. This involves changing '_Px_' to '(', '_xP_' to ')' and
all other '_' to spaces.
"""
return re.sub('_', ' ', re.sub(r'(_Px_)', '(', re.sub(r'(_xP_)', ')', str(val) ))) |
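A quick trace of the three nested ``re.sub`` calls on an invented value:

```python
import re

def format_header_cell(val):
    # '_xP_' -> ')', then '_Px_' -> '(', then any remaining '_' -> ' '
    return re.sub('_', ' ', re.sub(r'(_Px_)', '(', re.sub(r'(_xP_)', ')', str(val))))

print(format_header_cell('total_cost_Px_in_USD_xP_'))  # 'total cost(in USD)'
```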
def run(*args, **kwargs):
"""
Please don't use. Instead use `luigi` binary.
Run from cmdline using argparse.
:param use_dynamic_argparse: Deprecated and ignored
"""
luigi_run_result = _run(*args, **kwargs)
return luigi_run_result if kwargs.get('detailed_summary') else luigi_run_result.scheduling_succeeded | Please dont use. Instead use `luigi` binary.
Run from cmdline using argparse.
:param use_dynamic_argparse: Deprecated and ignored | Below is the instruction that describes the task:
### Input:
Please don't use. Instead use `luigi` binary.
Run from cmdline using argparse.
:param use_dynamic_argparse: Deprecated and ignored
### Response:
def run(*args, **kwargs):
"""
Please don't use. Instead use `luigi` binary.
Run from cmdline using argparse.
:param use_dynamic_argparse: Deprecated and ignored
"""
luigi_run_result = _run(*args, **kwargs)
return luigi_run_result if kwargs.get('detailed_summary') else luigi_run_result.scheduling_succeeded |
def from_center(self, x=None, y=None, z=None, r=None,
theta=None, h=None, reference=None):
"""
Accepts a set of (:x:, :y:, :z:) ratios for Cartesian or
(:r:, :theta:, :h:) ratios/angle for Polar and returns
:Vector: using :reference: as origin
"""
coords_to_endpoint = None
if all([isinstance(i, numbers.Number) for i in (x, y, z)]):
coords_to_endpoint = self.from_cartesian(x, y, z)
if all([isinstance(i, numbers.Number) for i in (r, theta, h)]):
coords_to_endpoint = self.from_polar(r, theta, h)
coords_to_reference = Vector(0, 0, 0)
if reference:
coords_to_reference = self.coordinates(reference)
return coords_to_reference + coords_to_endpoint | Accepts a set of (:x:, :y:, :z:) ratios for Cartesian or
(:r:, :theta:, :h:) ratios/angle for Polar and returns
:Vector: using :reference: as origin | Below is the instruction that describes the task:
### Input:
Accepts a set of (:x:, :y:, :z:) ratios for Cartesian or
(:r:, :theta:, :h:) ratios/angle for Polar and returns
:Vector: using :reference: as origin
### Response:
def from_center(self, x=None, y=None, z=None, r=None,
theta=None, h=None, reference=None):
"""
Accepts a set of (:x:, :y:, :z:) ratios for Cartesian or
(:r:, :theta:, :h:) ratios/angle for Polar and returns
:Vector: using :reference: as origin
"""
coords_to_endpoint = None
if all([isinstance(i, numbers.Number) for i in (x, y, z)]):
coords_to_endpoint = self.from_cartesian(x, y, z)
if all([isinstance(i, numbers.Number) for i in (r, theta, h)]):
coords_to_endpoint = self.from_polar(r, theta, h)
coords_to_reference = Vector(0, 0, 0)
if reference:
coords_to_reference = self.coordinates(reference)
return coords_to_reference + coords_to_endpoint |
def _equivalent(self, char, prev, next, implicitA):
""" Transliterate a Latin character equivalent to Devanagari.
Add VIRAMA for ligatures.
Convert standalone to dependent vowels.
"""
result = []
if char.isVowel == False:
result.append(char.chr)
if char.isConsonant \
and ((next is not None and next.isConsonant) \
or next is None):
result.append(DevanagariCharacter._VIRAMA)
else:
if prev is None or prev.isConsonant == False:
result.append(char.chr)
else:
if char._dependentVowel is not None:
result.append(char._dependentVowel)
return result | Transliterate a Latin character equivalent to Devanagari.
Add VIRAMA for ligatures.
Convert standalone to dependent vowels. | Below is the instruction that describes the task:
### Input:
Transliterate a Latin character equivalent to Devanagari.
Add VIRAMA for ligatures.
Convert standalone to dependent vowels.
### Response:
def _equivalent(self, char, prev, next, implicitA):
""" Transliterate a Latin character equivalent to Devanagari.
Add VIRAMA for ligatures.
Convert standalone to dependent vowels.
"""
result = []
if char.isVowel == False:
result.append(char.chr)
if char.isConsonant \
and ((next is not None and next.isConsonant) \
or next is None):
result.append(DevanagariCharacter._VIRAMA)
else:
if prev is None or prev.isConsonant == False:
result.append(char.chr)
else:
if char._dependentVowel is not None:
result.append(char._dependentVowel)
return result |
def add_annotation_layer(self, annotation_file, layer_name):
"""
adds all markables from the given annotation layer to the discourse
graph.
"""
assert os.path.isfile(annotation_file), \
"Annotation file doesn't exist: {}".format(annotation_file)
tree = etree.parse(annotation_file)
root = tree.getroot()
default_layers = {self.ns, self.ns+':markable', self.ns+':'+layer_name}
# avoids eml.org namespace handling
for markable in root.iterchildren():
markable_node_id = markable.attrib['id']
markable_attribs = add_prefix(markable.attrib, self.ns+':')
self.add_node(markable_node_id,
layers=default_layers,
attr_dict=markable_attribs,
label=markable_node_id+':'+layer_name)
for target_node_id in spanstring2tokens(self, markable.attrib['span']):
# manually add to_node if it's not in the graph, yet
# cf. issue #39
if target_node_id not in self:
self.add_node(target_node_id,
# adding 'mmax:layer_name' here could be
# misleading (e.g. each token would be part
# of the 'mmax:sentence' layer
layers={self.ns, self.ns+':markable'},
label=target_node_id)
self.add_edge(markable_node_id, target_node_id,
layers=default_layers,
edge_type=EdgeTypes.spanning_relation,
label=self.ns+':'+layer_name)
# this is a workaround for Chiarcos-style MMAX files
if has_antecedent(markable):
antecedent_pointer = markable.attrib['anaphor_antecedent']
# mmax2 supports weird double antecedents,
# e.g. "markable_1000131;markable_1000132", cf. Issue #40
#
# handling these double antecedents increases the number of
# chains, cf. commit edc28abdc4fd36065e8bbf5900eeb4d1326db153
for antecedent in antecedent_pointer.split(';'):
ante_split = antecedent.split(":")
if len(ante_split) == 2:
# mark group:markable_n or secmark:markable_n as such
edge_label = '{}:antecedent'.format(ante_split[0])
else:
edge_label = ':antecedent'
# handles both 'markable_n' and 'layer:markable_n'
antecedent_node_id = ante_split[-1]
if len(ante_split) == 2:
antecedent_layer = ante_split[0]
default_layers.add('{0}:{1}'.format(self.ns, antecedent_layer))
# manually add antecedent node if it's not yet in the graph
# cf. issue #39
if antecedent_node_id not in self:
self.add_node(antecedent_node_id,
layers=default_layers)
self.add_edge(markable_node_id, antecedent_node_id,
layers=default_layers,
edge_type=EdgeTypes.pointing_relation,
label=self.ns+edge_label) | adds all markables from the given annotation layer to the discourse
graph. | Below is the instruction that describes the task:
### Input:
adds all markables from the given annotation layer to the discourse
graph.
### Response:
def add_annotation_layer(self, annotation_file, layer_name):
"""
adds all markables from the given annotation layer to the discourse
graph.
"""
assert os.path.isfile(annotation_file), \
"Annotation file doesn't exist: {}".format(annotation_file)
tree = etree.parse(annotation_file)
root = tree.getroot()
default_layers = {self.ns, self.ns+':markable', self.ns+':'+layer_name}
# avoids eml.org namespace handling
for markable in root.iterchildren():
markable_node_id = markable.attrib['id']
markable_attribs = add_prefix(markable.attrib, self.ns+':')
self.add_node(markable_node_id,
layers=default_layers,
attr_dict=markable_attribs,
label=markable_node_id+':'+layer_name)
for target_node_id in spanstring2tokens(self, markable.attrib['span']):
# manually add to_node if it's not in the graph, yet
# cf. issue #39
if target_node_id not in self:
self.add_node(target_node_id,
# adding 'mmax:layer_name' here could be
# misleading (e.g. each token would be part
# of the 'mmax:sentence' layer
layers={self.ns, self.ns+':markable'},
label=target_node_id)
self.add_edge(markable_node_id, target_node_id,
layers=default_layers,
edge_type=EdgeTypes.spanning_relation,
label=self.ns+':'+layer_name)
# this is a workaround for Chiarcos-style MMAX files
if has_antecedent(markable):
antecedent_pointer = markable.attrib['anaphor_antecedent']
# mmax2 supports weird double antecedents,
# e.g. "markable_1000131;markable_1000132", cf. Issue #40
#
# handling these double antecedents increases the number of
# chains, cf. commit edc28abdc4fd36065e8bbf5900eeb4d1326db153
for antecedent in antecedent_pointer.split(';'):
ante_split = antecedent.split(":")
if len(ante_split) == 2:
# mark group:markable_n or secmark:markable_n as such
edge_label = '{}:antecedent'.format(ante_split[0])
else:
edge_label = ':antecedent'
# handles both 'markable_n' and 'layer:markable_n'
antecedent_node_id = ante_split[-1]
if len(ante_split) == 2:
antecedent_layer = ante_split[0]
default_layers.add('{0}:{1}'.format(self.ns, antecedent_layer))
# manually add antecedent node if it's not yet in the graph
# cf. issue #39
if antecedent_node_id not in self:
self.add_node(antecedent_node_id,
layers=default_layers)
self.add_edge(markable_node_id, antecedent_node_id,
layers=default_layers,
edge_type=EdgeTypes.pointing_relation,
label=self.ns+edge_label) |
def display_buffer(self, buffer, redraw=True):
"""
display provided buffer
:param buffer: Buffer
:return:
"""
logger.debug("display buffer %r", buffer)
self.buffer_movement_history.append(buffer)
self.current_buffer = buffer
self._set_main_widget(buffer.widget, redraw=redraw) | display provided buffer
:param buffer: Buffer
:return: | Below is the instruction that describes the task:
### Input:
display provided buffer
:param buffer: Buffer
:return:
### Response:
def display_buffer(self, buffer, redraw=True):
"""
display provided buffer
:param buffer: Buffer
:return:
"""
logger.debug("display buffer %r", buffer)
self.buffer_movement_history.append(buffer)
self.current_buffer = buffer
self._set_main_widget(buffer.widget, redraw=redraw) |
def visit_and_update_expressions(self, visitor_fn):
"""Create an updated version (if needed) of the Filter via the visitor pattern."""
new_predicate = self.predicate.visit_and_update(visitor_fn)
if new_predicate is not self.predicate:
return Filter(new_predicate)
else:
return self | Create an updated version (if needed) of the Filter via the visitor pattern. | Below is the instruction that describes the task:
### Input:
Create an updated version (if needed) of the Filter via the visitor pattern.
### Response:
def visit_and_update_expressions(self, visitor_fn):
"""Create an updated version (if needed) of the Filter via the visitor pattern."""
new_predicate = self.predicate.visit_and_update(visitor_fn)
if new_predicate is not self.predicate:
return Filter(new_predicate)
else:
return self |
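A generic sketch of the visitor pattern used here: the visitor may return a replacement node, and a new ``Filter`` is only built when the predicate actually changed. The expression classes below are invented for illustration:

```python
class Literal:
    def __init__(self, value):
        self.value = value

    def visit_and_update(self, visitor_fn):
        # Leaf node: the visitor decides whether to replace it.
        return visitor_fn(self)


class Filter:
    def __init__(self, predicate):
        self.predicate = predicate

    def visit_and_update_expressions(self, visitor_fn):
        new_predicate = self.predicate.visit_and_update(visitor_fn)
        # Only allocate a new Filter if the visitor produced a different node.
        if new_predicate is not self.predicate:
            return Filter(new_predicate)
        return self


flt = Filter(Literal(3))
unchanged = flt.visit_and_update_expressions(lambda node: node)
rewritten = flt.visit_and_update_expressions(lambda node: Literal(node.value + 1))
print(unchanged is flt, rewritten is flt)  # True False
```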
def enterprise_login_required(view):
"""
View decorator for allowing authenticated user with valid enterprise UUID.
This decorator requires enterprise identifier as a parameter
`enterprise_uuid`.
This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to
the decorated view.
If there is no enterprise in database against the kwarg `enterprise_uuid`
or if the user is not authenticated then it will redirect the user to the
enterprise-linked SSO login page.
Usage::
@enterprise_login_required()
def my_view(request, enterprise_uuid):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_login_required)
def get(self, request, enterprise_uuid):
# Some functionality ...
"""
@wraps(view)
def wrapper(request, *args, **kwargs):
"""
Wrap the decorator.
"""
if 'enterprise_uuid' not in kwargs:
raise Http404
enterprise_uuid = kwargs['enterprise_uuid']
enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)
# Now verify if the user is logged in. If user is not logged in then
# send the user to the login screen to sign in with an
# Enterprise-linked IdP and the pipeline will get them back here.
if not request.user.is_authenticated:
parsed_current_url = urlparse(request.get_full_path())
parsed_query_string = parse_qs(parsed_current_url.query)
parsed_query_string.update({
'tpa_hint': enterprise_customer.identity_provider,
FRESH_LOGIN_PARAMETER: 'yes'
})
next_url = '{current_path}?{query_string}'.format(
current_path=quote(parsed_current_url.path),
query_string=urlencode(parsed_query_string, doseq=True)
)
return redirect(
'{login_url}?{params}'.format(
login_url='/login',
params=urlencode(
{'next': next_url}
)
)
)
# Otherwise, they can proceed to the original view.
return view(request, *args, **kwargs)
return wrapper | View decorator for allowing authenticated user with valid enterprise UUID.
This decorator requires enterprise identifier as a parameter
`enterprise_uuid`.
This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to
the decorated view .
If there is no enterprise in database against the kwarg `enterprise_uuid`
or if the user is not authenticated then it will redirect the user to the
enterprise-linked SSO login page.
Usage::
@enterprise_login_required()
def my_view(request, enterprise_uuid):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_login_required)
def get(self, request, enterprise_uuid):
# Some functionality ... | Below is the instruction that describes the task:
### Input:
View decorator for allowing authenticated user with valid enterprise UUID.
This decorator requires enterprise identifier as a parameter
`enterprise_uuid`.
This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to
the decorated view.
If there is no enterprise in database against the kwarg `enterprise_uuid`
or if the user is not authenticated then it will redirect the user to the
enterprise-linked SSO login page.
Usage::
@enterprise_login_required()
def my_view(request, enterprise_uuid):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_login_required)
def get(self, request, enterprise_uuid):
# Some functionality ...
### Response:
def enterprise_login_required(view):
"""
View decorator for allowing authenticated user with valid enterprise UUID.
This decorator requires enterprise identifier as a parameter
`enterprise_uuid`.
This decorator will throw 404 if no kwarg `enterprise_uuid` is provided to
the decorated view.
If there is no enterprise in database against the kwarg `enterprise_uuid`
or if the user is not authenticated then it will redirect the user to the
enterprise-linked SSO login page.
Usage::
@enterprise_login_required()
def my_view(request, enterprise_uuid):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_login_required)
def get(self, request, enterprise_uuid):
# Some functionality ...
"""
@wraps(view)
def wrapper(request, *args, **kwargs):
"""
Wrap the decorator.
"""
if 'enterprise_uuid' not in kwargs:
raise Http404
enterprise_uuid = kwargs['enterprise_uuid']
enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)
# Now verify if the user is logged in. If user is not logged in then
# send the user to the login screen to sign in with an
# Enterprise-linked IdP and the pipeline will get them back here.
if not request.user.is_authenticated:
parsed_current_url = urlparse(request.get_full_path())
parsed_query_string = parse_qs(parsed_current_url.query)
parsed_query_string.update({
'tpa_hint': enterprise_customer.identity_provider,
FRESH_LOGIN_PARAMETER: 'yes'
})
next_url = '{current_path}?{query_string}'.format(
current_path=quote(parsed_current_url.path),
query_string=urlencode(parsed_query_string, doseq=True)
)
return redirect(
'{login_url}?{params}'.format(
login_url='/login',
params=urlencode(
{'next': next_url}
)
)
)
# Otherwise, they can proceed to the original view.
return view(request, *args, **kwargs)
return wrapper |
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString):
"""Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the ignoreExpr argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an Or or MatchFirst.
The default is quotedString, but if no expressions are to be ignored,
then pass None for this argument.
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
return ret | Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the ignoreExpr argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an Or or MatchFirst.
The default is quotedString, but if no expressions are to be ignored,
then pass None for this argument. | Below is the instruction that describes the task:
### Input:
Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the ignoreExpr argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an Or or MatchFirst.
The default is quotedString, but if no expressions are to be ignored,
then pass None for this argument.
### Response:
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString):
"""Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the ignoreExpr argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an Or or MatchFirst.
The default is quotedString, but if no expressions are to be ignored,
then pass None for this argument.
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
return ret |
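Typical usage of ``nestedExpr``: with the defaults, a parenthesized string of whitespace-delimited tokens is parsed into nested Python lists.

```python
from pyparsing import nestedExpr

parser = nestedExpr('(', ')')
result = parser.parseString('(a (b c) d)')
print(result.asList())  # [['a', ['b', 'c'], 'd']]
```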
def get_extension(filepath, check_if_exists=False, allowed_exts=ALLOWED_EXTS):
"""Return the extension of fpath.
Parameters
----------
fpath: string
File name or path
check_if_exists: bool
allowed_exts: dict
Dictionary of strings, where the key if the last part of a complex ('.' separated) extension
and the value is the previous part.
For example: for the '.nii.gz' extension I would have a dict as {'.gz': ['.nii',]}
Returns
-------
str
The extension of the file name or path
"""
if check_if_exists:
if not op.exists(filepath):
raise IOError('File not found: ' + filepath)
rest, ext = op.splitext(filepath)
if ext in allowed_exts:
alloweds = allowed_exts[ext]
_, ext2 = op.splitext(rest)
if ext2 in alloweds:
ext = ext2 + ext
return ext | Return the extension of fpath.
Parameters
----------
fpath: string
File name or path
check_if_exists: bool
allowed_exts: dict
Dictionary of strings, where the key is the last part of a complex ('.' separated) extension
and the value is the previous part.
For example: for the '.nii.gz' extension I would have a dict as {'.gz': ['.nii',]}
Returns
-------
str
The extension of the file name or path | Below is the instruction that describes the task:
### Input:
Return the extension of fpath.
Parameters
----------
fpath: string
File name or path
check_if_exists: bool
allowed_exts: dict
Dictionary of strings, where the key is the last part of a complex ('.' separated) extension
and the value is the previous part.
For example: for the '.nii.gz' extension I would have a dict as {'.gz': ['.nii',]}
Returns
-------
str
The extension of the file name or path
### Response:
def get_extension(filepath, check_if_exists=False, allowed_exts=ALLOWED_EXTS):
"""Return the extension of fpath.
Parameters
----------
fpath: string
File name or path
check_if_exists: bool
allowed_exts: dict
Dictionary of strings, where the key is the last part of a complex ('.' separated) extension
and the value is the previous part.
For example: for the '.nii.gz' extension I would have a dict as {'.gz': ['.nii',]}
Returns
-------
str
The extension of the file name or path
"""
if check_if_exists:
if not op.exists(filepath):
raise IOError('File not found: ' + filepath)
rest, ext = op.splitext(filepath)
if ext in allowed_exts:
alloweds = allowed_exts[ext]
_, ext2 = op.splitext(rest)
if ext2 in alloweds:
ext = ext2 + ext
return ext |
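A short usage sketch, assuming `op` is `os.path` and using the '.nii.gz' mapping from the docstring example:
get_extension('subject01.nii.gz', allowed_exts={'.gz': ['.nii']})   # -> '.nii.gz'
get_extension('notes.txt')                                          # -> '.txt'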
def process_results(self, paragraph):
"""Routes Zeppelin output types to corresponding handlers."""
if 'editorMode' in paragraph['config']:
mode = paragraph['config']['editorMode'].split('/')[-1]
if 'results' in paragraph and paragraph['results']['msg']:
msg = paragraph['results']['msg'][0]
if mode not in ('text', 'markdown'):
self.output_options[msg['type']](msg['data']) | Routes Zeppelin output types to corresponding handlers. | Below is the instruction that describes the task:
### Input:
Routes Zeppelin output types to corresponding handlers.
### Response:
def process_results(self, paragraph):
"""Routes Zeppelin output types to corresponding handlers."""
if 'editorMode' in paragraph['config']:
mode = paragraph['config']['editorMode'].split('/')[-1]
if 'results' in paragraph and paragraph['results']['msg']:
msg = paragraph['results']['msg'][0]
if mode not in ('text', 'markdown'):
self.output_options[msg['type']](msg['data']) |
def container_running(self, container_name):
"""
Finds out if a container with name ``container_name`` is running.
:return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise.
:rtype: Optional[docker.models.container.Container]
"""
filters = {
"name": container_name,
"status": "running",
}
for container in self.client.containers.list(filters=filters):
if container_name == container.name:
return container
return None | Finds out if a container with name ``container_name`` is running.
:return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise.
:rtype: Optional[docker.models.container.Container] | Below is the instruction that describes the task:
### Input:
Finds out if a container with name ``container_name`` is running.
:return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise.
:rtype: Optional[docker.models.container.Container]
### Response:
def container_running(self, container_name):
"""
Finds out if a container with name ``container_name`` is running.
:return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise.
:rtype: Optional[docker.models.container.Container]
"""
filters = {
"name": container_name,
"status": "running",
}
for container in self.client.containers.list(filters=filters):
if container_name == container.name:
return container
return None |
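For illustration, the underlying docker SDK call looks roughly like this (assumes the docker package is installed, a daemon is reachable, and the container name "web" is hypothetical):
import docker
client = docker.from_env()
candidates = client.containers.list(filters={"name": "web", "status": "running"})
match = next((c for c in candidates if c.name == "web"), None)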
def giflogo(self,id,title=None,scale=0.8,info_str=''):
"""
m.giflogo(id,title=None,scale=0.8) -- (Requires seqlogo package) Make a gif sequence logo
"""
return giflogo(self,id,title,scale) | m.giflogo(id,title=None,scale=0.8) -- (Requires seqlogo package) Make a gif sequence logo | Below is the instruction that describes the task:
### Input:
m.giflogo(id,title=None,scale=0.8) -- (Requires seqlogo package) Make a gif sequence logo
### Response:
def giflogo(self,id,title=None,scale=0.8,info_str=''):
"""
m.giflogo(id,title=None,scale=0.8) -- (Requires seqlogo package) Make a gif sequence logo
"""
return giflogo(self,id,title,scale) |
def get_disease(self, disease_name=None, disease_id=None, definition=None, parent_ids=None, tree_numbers=None,
parent_tree_numbers=None, slim_mapping=None, synonym=None, alt_disease_id=None, limit=None,
as_df=False):
"""
Get diseases
:param bool as_df: if set to True result returns as `pandas.DataFrame`
:param int limit: maximum number of results
:param str disease_name: disease name
:param str disease_id: disease identifier
:param str definition: definition of disease
:param str parent_ids: parent identifiers, delimiter |
:param str tree_numbers: tree numbers, delimiter |
:param str parent_tree_numbers: parent tree numbers, delimiter
:param str slim_mapping: term derived from the MeSH tree structure for the “Diseases” [C] branch, \
that classifies MEDIC diseases into high-level categories
:param str synonym: disease synonyms
:param str alt_disease_id: alternative disease identifiers
:return: list of :class:`pyctd.manager.models.Disease` object
.. seealso::
:class:`pyctd.manager.models.Disease`
.. todo::
normalize parent_ids, tree_numbers and parent_tree_numbers in :class:`pyctd.manager.models.Disease`
"""
q = self.session.query(models.Disease)
if disease_name:
q = q.filter(models.Disease.disease_name.like(disease_name))
if disease_id:
q = q.filter(models.Disease.disease_id == disease_id)
if definition:
q = q.filter(models.Disease.definition.like(definition))
if parent_ids:
q = q.filter(models.Disease.parent_ids.like(parent_ids))
if tree_numbers:
q = q.filter(models.Disease.tree_numbers.like(tree_numbers))
if parent_tree_numbers:
q = q.filter(models.Disease.parent_tree_numbers.like(parent_tree_numbers))
if slim_mapping:
q = q.join(models.DiseaseSlimmapping).filter(models.DiseaseSlimmapping.slim_mapping.like(slim_mapping))
if synonym:
q = q.join(models.DiseaseSynonym).filter(models.DiseaseSynonym.synonym.like(synonym))
if alt_disease_id:
q = q.join(models.DiseaseAltdiseaseid).filter(models.DiseaseAltdiseaseid.alt_disease_id == alt_disease_id)
return self._limit_and_df(q, limit, as_df) | Get diseases
:param bool as_df: if set to True result returns as `pandas.DataFrame`
:param int limit: maximum number of results
:param str disease_name: disease name
:param str disease_id: disease identifier
:param str definition: definition of disease
:param str parent_ids: parent identifiers, delimiter |
:param str tree_numbers: tree numbers, delimiter |
:param str parent_tree_numbers: parent tree numbers, delimiter
:param str slim_mapping: term derived from the MeSH tree structure for the “Diseases” [C] branch, \
that classifies MEDIC diseases into high-level categories
:param str synonym: disease synonyms
:param str alt_disease_id: alternative disease identifiers
:return: list of :class:`pyctd.manager.models.Disease` object
.. seealso::
:class:`pyctd.manager.models.Disease`
.. todo::
normalize parent_ids, tree_numbers and parent_tree_numbers in :class:`pyctd.manager.models.Disease` | Below is the instruction that describes the task:
### Input:
Get diseases
:param bool as_df: if set to True result returns as `pandas.DataFrame`
:param int limit: maximum number of results
:param str disease_name: disease name
:param str disease_id: disease identifier
:param str definition: definition of disease
:param str parent_ids: parent identifiers, delimiter |
:param str tree_numbers: tree numbers, delimiter |
:param str parent_tree_numbers: parent tree numbers, delimiter
:param str slim_mapping: term derived from the MeSH tree structure for the “Diseases” [C] branch, \
that classifies MEDIC diseases into high-level categories
:param str synonym: disease synonyms
:param str alt_disease_id: alternative disease identifiers
:return: list of :class:`pyctd.manager.models.Disease` object
.. seealso::
:class:`pyctd.manager.models.Disease`
.. todo::
normalize parent_ids, tree_numbers and parent_tree_numbers in :class:`pyctd.manager.models.Disease`
### Response:
def get_disease(self, disease_name=None, disease_id=None, definition=None, parent_ids=None, tree_numbers=None,
parent_tree_numbers=None, slim_mapping=None, synonym=None, alt_disease_id=None, limit=None,
as_df=False):
"""
Get diseases
:param bool as_df: if set to True result returns as `pandas.DataFrame`
:param int limit: maximum number of results
:param str disease_name: disease name
:param str disease_id: disease identifier
:param str definition: definition of disease
:param str parent_ids: parent identifiers, delimiter |
:param str tree_numbers: tree numbers, delimiter |
:param str parent_tree_numbers: parent tree numbers, delimiter
:param str slim_mapping: term derived from the MeSH tree structure for the “Diseases” [C] branch, \
that classifies MEDIC diseases into high-level categories
:param str synonym: disease synonyms
:param str alt_disease_id: alternative disease identifiers
:return: list of :class:`pyctd.manager.models.Disease` object
.. seealso::
:class:`pyctd.manager.models.Disease`
.. todo::
normalize parent_ids, tree_numbers and parent_tree_numbers in :class:`pyctd.manager.models.Disease`
"""
q = self.session.query(models.Disease)
if disease_name:
q = q.filter(models.Disease.disease_name.like(disease_name))
if disease_id:
q = q.filter(models.Disease.disease_id == disease_id)
if definition:
q = q.filter(models.Disease.definition.like(definition))
if parent_ids:
q = q.filter(models.Disease.parent_ids.like(parent_ids))
if tree_numbers:
q = q.filter(models.Disease.tree_numbers.like(tree_numbers))
if parent_tree_numbers:
q = q.filter(models.Disease.parent_tree_numbers.like(parent_tree_numbers))
if slim_mapping:
q = q.join(models.DiseaseSlimmapping).filter(models.DiseaseSlimmapping.slim_mapping.like(slim_mapping))
if synonym:
q = q.join(models.DiseaseSynonym).filter(models.DiseaseSynonym.synonym.like(synonym))
if alt_disease_id:
q = q.join(models.DiseaseAltdiseaseid).filter(models.DiseaseAltdiseaseid.alt_disease_id == alt_disease_id)
return self._limit_and_df(q, limit, as_df) |
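A hedged usage sketch; `query` stands for an instance of the manager class that defines this method, and the '%' wildcards rely on the SQL LIKE filters used above:
hits = query.get_disease(disease_name="Asthma%", limit=5)
df = query.get_disease(slim_mapping="Respiratory%", as_df=True)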
def _shadow_model_variables(shadow_vars):
"""
Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``.
Returns:
list of (shadow_model_var, local_model_var) used for syncing.
"""
G = tf.get_default_graph()
curr_shadow_vars = set([v.name for v in shadow_vars])
model_vars = tf.model_variables()
shadow_model_vars = []
for v in model_vars:
assert v.name.startswith('tower'), "Found some MODEL_VARIABLES created outside of the tower function!"
stripped_op_name, stripped_var_name = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name))
if stripped_op_name in curr_shadow_vars:
continue
try:
G.get_tensor_by_name(stripped_var_name)
logger.warn("Model Variable {} also appears in other collections.".format(stripped_var_name))
continue
except KeyError:
pass
new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype,
initializer=v.initial_value,
trainable=False)
curr_shadow_vars.add(stripped_op_name) # avoid duplicated shadow_model_vars
shadow_vars.append(new_v)
shadow_model_vars.append((new_v, v)) # only need to sync model_var from one tower
return shadow_model_vars | Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``.
Returns:
list of (shadow_model_var, local_model_var) used for syncing. | Below is the instruction that describes the task:
### Input:
Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``.
Returns:
list of (shadow_model_var, local_model_var) used for syncing.
### Response:
def _shadow_model_variables(shadow_vars):
"""
Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``.
Returns:
list of (shadow_model_var, local_model_var) used for syncing.
"""
G = tf.get_default_graph()
curr_shadow_vars = set([v.name for v in shadow_vars])
model_vars = tf.model_variables()
shadow_model_vars = []
for v in model_vars:
assert v.name.startswith('tower'), "Found some MODEL_VARIABLES created outside of the tower function!"
stripped_op_name, stripped_var_name = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name))
if stripped_op_name in curr_shadow_vars:
continue
try:
G.get_tensor_by_name(stripped_var_name)
logger.warn("Model Variable {} also appears in other collections.".format(stripped_var_name))
continue
except KeyError:
pass
new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype,
initializer=v.initial_value,
trainable=False)
curr_shadow_vars.add(stripped_op_name) # avoid duplicated shadow_model_vars
shadow_vars.append(new_v)
shadow_model_vars.append((new_v, v)) # only need to sync model_var from one tower
return shadow_model_vars |
def com_google_fonts_check_family_tnum_horizontal_metrics(fonts):
"""All tabular figures must have the same width across the RIBBI-family."""
from fontbakery.constants import RIBBI_STYLE_NAMES
from fontTools.ttLib import TTFont
RIBBI_ttFonts = [TTFont(f)
for f in fonts
if style(f) in RIBBI_STYLE_NAMES]
tnum_widths = {}
for ttFont in RIBBI_ttFonts:
glyphs = ttFont.getGlyphSet()
tnum_glyphs = [(glyph_id, glyphs[glyph_id])
for glyph_id in glyphs.keys()
if glyph_id.endswith(".tnum")]
for glyph_id, glyph in tnum_glyphs:
if glyph.width not in tnum_widths:
tnum_widths[glyph.width] = [glyph_id]
else:
tnum_widths[glyph.width].append(glyph_id)
if len(tnum_widths.keys()) > 1:
max_num = 0
most_common_width = None
for width, glyphs in tnum_widths.items():
if len(glyphs) > max_num:
max_num = len(glyphs)
most_common_width = width
del tnum_widths[most_common_width]
yield FAIL, (f"The most common tabular glyph width is {most_common_width}."
" But there are other tabular glyphs with different widths"
f" such as the following ones:\n\t{tnum_widths}.")
else:
yield PASS, "OK" | All tabular figures must have the same width across the RIBBI-family. | Below is the the instruction that describes the task:
### Input:
All tabular figures must have the same width across the RIBBI-family.
### Response:
def com_google_fonts_check_family_tnum_horizontal_metrics(fonts):
"""All tabular figures must have the same width across the RIBBI-family."""
from fontbakery.constants import RIBBI_STYLE_NAMES
from fontTools.ttLib import TTFont
RIBBI_ttFonts = [TTFont(f)
for f in fonts
if style(f) in RIBBI_STYLE_NAMES]
tnum_widths = {}
for ttFont in RIBBI_ttFonts:
glyphs = ttFont.getGlyphSet()
tnum_glyphs = [(glyph_id, glyphs[glyph_id])
for glyph_id in glyphs.keys()
if glyph_id.endswith(".tnum")]
for glyph_id, glyph in tnum_glyphs:
if glyph.width not in tnum_widths:
tnum_widths[glyph.width] = [glyph_id]
else:
tnum_widths[glyph.width].append(glyph_id)
if len(tnum_widths.keys()) > 1:
max_num = 0
most_common_width = None
for width, glyphs in tnum_widths.items():
if len(glyphs) > max_num:
max_num = len(glyphs)
most_common_width = width
del tnum_widths[most_common_width]
yield FAIL, (f"The most common tabular glyph width is {most_common_width}."
" But there are other tabular glyphs with different widths"
f" such as the following ones:\n\t{tnum_widths}.")
else:
yield PASS, "OK" |
def flux_up(self, fluxUpBottom, emission=None):
'''Compute downwelling radiative flux at interfaces between layers.
Inputs:
* fluxDownTop: flux down at top
* emission: emission from atmospheric levels (N)
defaults to zero if not given
Returns:
* vector of downwelling radiative flux between levels (N+1)
element 0 is the flux down to the surface.
'''
if emission is None:
emission = np.zeros_like(self.absorptivity)
E = np.concatenate((emission, np.atleast_1d(fluxUpBottom)), axis=-1)
# dot product (matrix multiplication) along last axes
return np.squeeze(matrix_multiply(self.Tup, E[..., np.newaxis])) | Compute downwelling radiative flux at interfaces between layers.
Inputs:
* fluxDownTop: flux down at top
* emission: emission from atmospheric levels (N)
defaults to zero if not given
Returns:
* vector of downwelling radiative flux between levels (N+1)
element 0 is the flux down to the surface. | Below is the instruction that describes the task:
### Input:
Compute downwelling radiative flux at interfaces between layers.
Inputs:
* fluxDownTop: flux down at top
* emission: emission from atmospheric levels (N)
defaults to zero if not given
Returns:
* vector of downwelling radiative flux between levels (N+1)
element 0 is the flux down to the surface.
### Response:
def flux_up(self, fluxUpBottom, emission=None):
'''Compute downwelling radiative flux at interfaces between layers.
Inputs:
* fluxDownTop: flux down at top
* emission: emission from atmospheric levels (N)
defaults to zero if not given
Returns:
* vector of downwelling radiative flux between levels (N+1)
element 0 is the flux down to the surface.
'''
if emission is None:
emission = np.zeros_like(self.absorptivity)
E = np.concatenate((emission, np.atleast_1d(fluxUpBottom)), axis=-1)
# dot product (matrix multiplication) along last axes
return np.squeeze(matrix_multiply(self.Tup, E[..., np.newaxis])) |
def children(self, primary=None):
"""
:param primary: if None, then all parents are returned. If True, then only foreign keys composed of
primary key attributes are considered. If False, the only foreign keys including at least one non-primary
attribute are considered.
:return: dict of tables with foreign keys referencing self
"""
return self.connection.dependencies.children(self.full_table_name, primary) | :param primary: if None, then all parents are returned. If True, then only foreign keys composed of
primary key attributes are considered. If False, then only foreign keys including at least one non-primary
attribute are considered.
:return: dict of tables with foreign keys referencing self | Below is the instruction that describes the task:
### Input:
:param primary: if None, then all parents are returned. If True, then only foreign keys composed of
primary key attributes are considered. If False, then only foreign keys including at least one non-primary
attribute are considered.
:return: dict of tables with foreign keys referencing self
### Response:
def children(self, primary=None):
"""
:param primary: if None, then all parents are returned. If True, then only foreign keys composed of
primary key attributes are considered. If False, then only foreign keys including at least one non-primary
attribute are considered.
:return: dict of tables with foreign keys referencing self
"""
return self.connection.dependencies.children(self.full_table_name, primary) |
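A brief sketch on a hypothetical DataJoint table class (the table name is illustrative):
all_children = Session().children()              # every table with a foreign key into Session
primary_only = Session().children(primary=True)  # only foreign keys made of primary key attributes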
async def _request(self, method, url, loop=None, timeout=None, **kwargs):
"""Make a request through AIOHTTP."""
session = self.session or aiohttp.ClientSession(
loop=loop, conn_timeout=timeout, read_timeout=timeout)
try:
async with session.request(method, url, **kwargs) as response:
if response.status // 100 > 2:
raise web.HTTPBadRequest(
reason='HTTP status code: %s' % response.status)
if 'json' in response.headers.get('CONTENT-TYPE'):
data = await response.json()
else:
data = await response.text()
data = dict(parse_qsl(data))
return data
except asyncio.TimeoutError:
raise web.HTTPBadRequest(reason='HTTP Timeout')
finally:
if not self.session and not session.closed:
await session.close() | Make a request through AIOHTTP. | Below is the instruction that describes the task:
### Input:
Make a request through AIOHTTP.
### Response:
async def _request(self, method, url, loop=None, timeout=None, **kwargs):
"""Make a request through AIOHTTP."""
session = self.session or aiohttp.ClientSession(
loop=loop, conn_timeout=timeout, read_timeout=timeout)
try:
async with session.request(method, url, **kwargs) as response:
if response.status // 100 > 2:
raise web.HTTPBadRequest(
reason='HTTP status code: %s' % response.status)
if 'json' in response.headers.get('CONTENT-TYPE'):
data = await response.json()
else:
data = await response.text()
data = dict(parse_qsl(data))
return data
except asyncio.TimeoutError:
raise web.HTTPBadRequest(reason='HTTP Timeout')
finally:
if not self.session and not session.closed:
await session.close() |
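The non-JSON branch above relies on parse_qsl to turn form-encoded OAuth responses into a dict; a standalone illustration:
from urllib.parse import parse_qsl
dict(parse_qsl("access_token=abc123&token_type=bearer&expires_in=3600"))
# {'access_token': 'abc123', 'token_type': 'bearer', 'expires_in': '3600'}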
def project_geometry(geometry, crs=None, to_crs=None, to_latlong=False):
"""
Project a shapely Polygon or MultiPolygon from lat-long to UTM, or
vice-versa
Parameters
----------
geometry : shapely Polygon or MultiPolygon
the geometry to project
crs : dict
the starting coordinate reference system of the passed-in geometry,
default value (None) will set settings.default_crs as the CRS
to_crs : dict
if not None, just project to this CRS instead of to UTM
to_latlong : bool
if True, project from crs to lat-long, if False, project from crs to
local UTM zone
Returns
-------
tuple
(geometry_proj, crs), the projected shapely geometry and the crs of the
projected geometry
"""
if crs is None:
crs = settings.default_crs
gdf = gpd.GeoDataFrame()
gdf.crs = crs
gdf.gdf_name = 'geometry to project'
gdf['geometry'] = None
gdf.loc[0, 'geometry'] = geometry
gdf_proj = project_gdf(gdf, to_crs=to_crs, to_latlong=to_latlong)
geometry_proj = gdf_proj['geometry'].iloc[0]
return geometry_proj, gdf_proj.crs | Project a shapely Polygon or MultiPolygon from lat-long to UTM, or
vice-versa
Parameters
----------
geometry : shapely Polygon or MultiPolygon
the geometry to project
crs : dict
the starting coordinate reference system of the passed-in geometry,
default value (None) will set settings.default_crs as the CRS
to_crs : dict
if not None, just project to this CRS instead of to UTM
to_latlong : bool
if True, project from crs to lat-long, if False, project from crs to
local UTM zone
Returns
-------
tuple
(geometry_proj, crs), the projected shapely geometry and the crs of the
projected geometry | Below is the instruction that describes the task:
### Input:
Project a shapely Polygon or MultiPolygon from lat-long to UTM, or
vice-versa
Parameters
----------
geometry : shapely Polygon or MultiPolygon
the geometry to project
crs : dict
the starting coordinate reference system of the passed-in geometry,
default value (None) will set settings.default_crs as the CRS
to_crs : dict
if not None, just project to this CRS instead of to UTM
to_latlong : bool
if True, project from crs to lat-long, if False, project from crs to
local UTM zone
Returns
-------
tuple
(geometry_proj, crs), the projected shapely geometry and the crs of the
projected geometry
### Response:
def project_geometry(geometry, crs=None, to_crs=None, to_latlong=False):
"""
Project a shapely Polygon or MultiPolygon from lat-long to UTM, or
vice-versa
Parameters
----------
geometry : shapely Polygon or MultiPolygon
the geometry to project
crs : dict
the starting coordinate reference system of the passed-in geometry,
default value (None) will set settings.default_crs as the CRS
to_crs : dict
if not None, just project to this CRS instead of to UTM
to_latlong : bool
if True, project from crs to lat-long, if False, project from crs to
local UTM zone
Returns
-------
tuple
(geometry_proj, crs), the projected shapely geometry and the crs of the
projected geometry
"""
if crs is None:
crs = settings.default_crs
gdf = gpd.GeoDataFrame()
gdf.crs = crs
gdf.gdf_name = 'geometry to project'
gdf['geometry'] = None
gdf.loc[0, 'geometry'] = geometry
gdf_proj = project_gdf(gdf, to_crs=to_crs, to_latlong=to_latlong)
geometry_proj = gdf_proj['geometry'].iloc[0]
return geometry_proj, gdf_proj.crs |
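A minimal sketch, assuming shapely is installed; the coordinates are illustrative lat-long values:
from shapely.geometry import Polygon
poly = Polygon([(-122.3, 47.6), (-122.2, 47.6), (-122.2, 47.7), (-122.3, 47.7)])
poly_utm, crs_utm = project_geometry(poly)                                  # lat-long -> local UTM
poly_ll, crs_ll = project_geometry(poly_utm, crs=crs_utm, to_latlong=True)  # back to lat-long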
def apply_transform(self, transform):
"""
Apply a transformation matrix to the current path in- place
Parameters
-----------
transform : (d+1, d+1) float
Homogeneous transformation for vertices
"""
dimension = self.vertices.shape[1]
transform = np.asanyarray(transform, dtype=np.float64)
if transform.shape != (dimension + 1, dimension + 1):
raise ValueError('transform is incorrect shape!')
elif np.abs(transform - np.eye(dimension + 1)).max() < 1e-8:
# if we've been passed an identity matrix do nothing
return
# make sure cache is up to date
self._cache.verify()
# new cache to transfer items
cache = {}
# apply transform to discretized paths
if 'discrete' in self._cache.cache:
cache['discrete'] = np.array([
transformations.transform_points(
d, matrix=transform)
for d in self.discrete])
# things we can just straight up copy
# as they are topological not geometric
for key in ['root',
'paths',
'path_valid',
'dangling',
'vertex_graph',
'enclosure',
'enclosure_shell',
'enclosure_directed']:
# if they're in cache save them from the purge
if key in self._cache.cache:
cache[key] = self._cache.cache[key]
# transform vertices in place
self.vertices = transformations.transform_points(
self.vertices,
matrix=transform)
# explicitly clear the cache
self._cache.clear()
self._cache.id_set()
# populate the things we wangled
self._cache.cache.update(cache) | Apply a transformation matrix to the current path in- place
Parameters
-----------
transform : (d+1, d+1) float
Homogeneous transformation for vertices | Below is the instruction that describes the task:
### Input:
Apply a transformation matrix to the current path in- place
Parameters
-----------
transform : (d+1, d+1) float
Homogeneous transformation for vertices
### Response:
def apply_transform(self, transform):
"""
Apply a transformation matrix to the current path in- place
Parameters
-----------
transform : (d+1, d+1) float
Homogeneous transformation for vertices
"""
dimension = self.vertices.shape[1]
transform = np.asanyarray(transform, dtype=np.float64)
if transform.shape != (dimension + 1, dimension + 1):
raise ValueError('transform is incorrect shape!')
elif np.abs(transform - np.eye(dimension + 1)).max() < 1e-8:
# if we've been passed an identity matrix do nothing
return
# make sure cache is up to date
self._cache.verify()
# new cache to transfer items
cache = {}
# apply transform to discretized paths
if 'discrete' in self._cache.cache:
cache['discrete'] = np.array([
transformations.transform_points(
d, matrix=transform)
for d in self.discrete])
# things we can just straight up copy
# as they are topological not geometric
for key in ['root',
'paths',
'path_valid',
'dangling',
'vertex_graph',
'enclosure',
'enclosure_shell',
'enclosure_directed']:
# if they're in cache save them from the purge
if key in self._cache.cache:
cache[key] = self._cache.cache[key]
# transform vertices in place
self.vertices = transformations.transform_points(
self.vertices,
matrix=transform)
# explicitly clear the cache
self._cache.clear()
self._cache.id_set()
# populate the things we wangled
self._cache.cache.update(cache) |
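A hypothetical 2D example: translating a planar path by (2, 3) with a 3x3 homogeneous matrix (`path` is assumed to be a 2D instance of this class; a 3D path would need a 4x4 matrix):
import numpy as np
T = np.array([[1.0, 0.0, 2.0],
              [0.0, 1.0, 3.0],
              [0.0, 0.0, 1.0]])
path.apply_transform(T)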
def methods(self) -> 'PrettyDir':
"""Returns all methods of the inspected object.
Note that "methods" can mean "functions" when inspecting a module.
"""
return PrettyDir(
self.obj,
[
pattr
for pattr in self.pattrs
if category_match(pattr.category, AttrCategory.FUNCTION)
],
) | Returns all methods of the inspected object.
Note that "methods" can mean "functions" when inspecting a module. | Below is the the instruction that describes the task:
### Input:
Returns all methods of the inspected object.
Note that "methods" can mean "functions" when inspecting a module.
### Response:
def methods(self) -> 'PrettyDir':
"""Returns all methods of the inspected object.
Note that "methods" can mean "functions" when inspecting a module.
"""
return PrettyDir(
self.obj,
[
pattr
for pattr in self.pattrs
if category_match(pattr.category, AttrCategory.FUNCTION)
],
) |
def rest_get_stream(self, url, auth=None, verify=True, cert=None):
"""
Perform a chunked GET request to url with optional authentication
This is specifically to download files.
"""
res = requests.get(url, auth=auth, stream=True, verify=verify, cert=cert)
return res.raw, res.status_code | Perform a chunked GET request to url with optional authentication
This is specifically to download files. | Below is the instruction that describes the task:
### Input:
Perform a chunked GET request to url with optional authentication
This is specifically to download files.
### Response:
def rest_get_stream(self, url, auth=None, verify=True, cert=None):
"""
Perform a chunked GET request to url with optional authentication
This is specifically to download files.
"""
res = requests.get(url, auth=auth, stream=True, verify=verify, cert=cert)
return res.raw, res.status_code |
def to_ut1unix(time: Union[str, datetime, float, np.ndarray]) -> np.ndarray:
"""
converts time inputs to UT1 seconds since Unix epoch
"""
# keep this order
time = totime(time)
if isinstance(time, (float, int)):
return time
if isinstance(time, (tuple, list, np.ndarray)):
assert isinstance(time[0], datetime), f'expected datetime, not {type(time[0])}'
return np.array(list(map(dt2ut1, time)))
else:
assert isinstance(time, datetime)
return dt2ut1(time) | converts time inputs to UT1 seconds since Unix epoch | Below is the instruction that describes the task:
### Input:
converts time inputs to UT1 seconds since Unix epoch
### Response:
def to_ut1unix(time: Union[str, datetime, float, np.ndarray]) -> np.ndarray:
"""
converts time inputs to UT1 seconds since Unix epoch
"""
# keep this order
time = totime(time)
if isinstance(time, (float, int)):
return time
if isinstance(time, (tuple, list, np.ndarray)):
assert isinstance(time[0], datetime), f'expected datetime, not {type(time[0])}'
return np.array(list(map(dt2ut1, time)))
else:
assert isinstance(time, datetime)
return dt2ut1(time) |
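A hedged usage sketch; `totime` and `dt2ut1` are module helpers assumed to be defined alongside this function:
from datetime import datetime
t0 = to_ut1unix(datetime(2017, 1, 1))                          # single datetime -> float seconds
ts = to_ut1unix([datetime(2017, 1, 1), datetime(2017, 1, 2)])  # sequence -> numpy array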
def replicasResource(self):
"""returns a list of replices"""
if self._replicasResource is None:
self._replicasResource = {}
for replica in self.replicas:
self._replicasResource["replicaName"] = replica.name
self._replicasResource["replicaID"] = replica.guid
return self._replicasResource | returns a list of replicas | Below is the instruction that describes the task:
### Input:
returns a list of replicas
### Response:
def replicasResource(self):
"""returns a list of replices"""
if self._replicasResource is None:
self._replicasResource = {}
for replica in self.replicas:
self._replicasResource["replicaName"] = replica.name
self._replicasResource["replicaID"] = replica.guid
return self._replicasResource |
def make_network_graph(compact, expression_names, lookup_names):
"""
Make a network graph, represented as a set of nodes and a set of edges.
The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string)
# The edges are represented as dict of children to sets of parents: (child: string) -> [(parent: string, features: string)]
"""
nodes = set()
# edges = defaultdict(set) # parent -> (child, extra)
var_name_dict = dict()
if expression_names:
for e in graphviz_items: # e: Expression
if e in expression_names:
var_name_dict[e.vindex] = expression_names[e]
rnn_bldr_name = defaultdict(lambda: chr(len(rnn_bldr_name)+ord('A')))
def vidx2str(vidx): return '%s%s' % ('N', vidx)
for e in graphviz_items: # e: Expression
vidx = e.vindex
f_name = e.name
args = e.args
output_dim = e.dim
input_dim = None # basically just RNNStates use this since everything else has input_dim==output_dim
children = set()
node_type = '2_regular'
if f_name == 'vecInput':
[_dim] = args
arg_strs = []
elif f_name == 'inputVector':
[_v] = args
arg_strs = []
elif f_name == 'matInput':
[_d1, _d2] = args
arg_strs = []
elif f_name == 'inputMatrix':
[_v, _d] = args
arg_strs = []
elif f_name == 'parameters':
[_dim] = args
arg_strs = []
if compact:
if vidx in var_name_dict:
f_name = var_name_dict[vidx]
node_type = '1_param'
elif f_name == 'lookup_parameters':
[_dim] = args
arg_strs = []
if compact:
if vidx in var_name_dict:
f_name = var_name_dict[vidx]
node_type = '1_param'
elif f_name == 'lookup':
[p, idx, update] = args
[_dim] = p.args
if vidx in var_name_dict:
name = var_name_dict[vidx]
else:
name = None
item_name = None
if lookup_names and p in expression_names:
param_name = expression_names[p]
if param_name in lookup_names:
item_name = '\\"%s\\"' % (lookup_names[param_name][idx],)
if compact:
if item_name is not None:
f_name = item_name
elif name is not None:
f_name = '%s[%s]' % (name, idx)
else:
f_name = 'lookup(%s)' % (idx)
arg_strs = []
else:
arg_strs = [var_name_dict.get(p.vindex, 'v%d' % (p.vindex))]
if item_name is not None:
arg_strs.append(item_name)
vocab_size = _dim[0]
arg_strs.extend(['%s' % (idx), '%s' % (vocab_size), 'update' if update else 'fixed'])
#children.add(vidx2str(p.vindex))
#node_type = '1_param'
elif f_name == 'RNNState':
[arg, input_dim, bldr_type, bldr_num, state_idx] = args # arg==input_e
rnn_name = rnn_bldr_name[bldr_num]
if bldr_type.endswith('Builder'):
bldr_type = bldr_type[:-len('Builder')]
f_name = '%s-%s-%s' % (bldr_type, rnn_name, state_idx)
if not compact:
i = arg.vindex
s = var_name_dict.get(i, 'v%d' % (i))
arg_strs = [s]
else:
arg_strs = []
children.add(vidx2str(arg.vindex))
node_type = '3_rnn_state'
else:
arg_strs = []
for arg in args:
if isinstance(arg, Expression):
if not compact:
i = arg.vindex
s = var_name_dict.get(i, 'v%d' % (i))
arg_strs.append(s)
children.add(vidx2str(arg.vindex))
elif isinstance(arg, float) and compact:
s = re.sub('0+$', '', '%.3f' % (arg))
if s == '0.':
s = str(arg)
arg_strs.append(s)
else:
arg_strs.append(str(arg))
# f_name = { ,
# }.get(f_name, f_name)
if compact:
f_name = { 'add': '+',
'sub': '-',
'mul': '*',
'div': '/',
'cadd': '+',
'cmul': '*',
'cdiv': '/',
'scalarsub': '-',
'concatenate': 'cat',
'esum': 'sum',
'emax': 'max',
'emin': 'min',
}.get(f_name, f_name)
if arg_strs:
str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs))
else:
str_repr = f_name
elif f_name == 'add':
[a,b] = arg_strs
str_repr = '%s + %s' % (a,b)
elif f_name == 'sub':
[a,b] = arg_strs
str_repr = '%s - %s' % (a,b)
elif f_name == 'mul':
[a,b] = arg_strs
str_repr = '%s * %s' % (a,b)
elif f_name == 'div':
[a,b] = arg_strs
str_repr = '%s / %s' % (a,b)
elif f_name == 'neg':
[a,] = arg_strs
str_repr = '-%s' % (a)
elif f_name == 'affine_transform':
str_repr = arg_strs[0]
for i in xrange(1, len(arg_strs), 2):
str_repr += ' + %s*%s' % tuple(arg_strs[i:i+2])
else:
if arg_strs is not None:
str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs))
else:
str_repr = f_name
name = vidx2str(vidx)
var_name = '%s' % (var_name_dict.get(vidx, 'v%d' % (vidx))) if not compact else ''
# if show_dims:
# str_repr = '%s\\n%s' % (shape_str(e.dim), str_repr)
label = str_repr
if not compact:
label = '%s = %s' % (var_name, label)
features = ''
# if output_dim.invalid():
# features += " [color=red,style=filled,fillcolor=red]"
# node_def_lines.append(' %s [label="%s%s"] %s;' % (vidx2str(vidx), label_prefix, str_repr, ''))
expr_name = expression_names[e] if compact and expression_names and (e in expression_names) and (expression_names[e] != f_name) else None
nodes.add(GVNode(name, input_dim, label, output_dim, frozenset(children), features, node_type, expr_name))
return nodes | Make a network graph, represented as a set of nodes and a set of edges.
The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string)
# The edges are represented as dict of children to sets of parents: (child: string) -> [(parent: string, features: string)] | Below is the instruction that describes the task:
### Input:
Make a network graph, represented as a set of nodes and a set of edges.
The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string)
# The edges are represented as dict of children to sets of parents: (child: string) -> [(parent: string, features: string)]
### Response:
def make_network_graph(compact, expression_names, lookup_names):
"""
Make a network graph, represented as a set of nodes and a set of edges.
The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string)
# The edges are represented as dict of children to sets of parents: (child: string) -> [(parent: string, features: string)]
"""
nodes = set()
# edges = defaultdict(set) # parent -> (child, extra)
var_name_dict = dict()
if expression_names:
for e in graphviz_items: # e: Expression
if e in expression_names:
var_name_dict[e.vindex] = expression_names[e]
rnn_bldr_name = defaultdict(lambda: chr(len(rnn_bldr_name)+ord('A')))
def vidx2str(vidx): return '%s%s' % ('N', vidx)
for e in graphviz_items: # e: Expression
vidx = e.vindex
f_name = e.name
args = e.args
output_dim = e.dim
input_dim = None # basically just RNNStates use this since everything else has input_dim==output_dim
children = set()
node_type = '2_regular'
if f_name == 'vecInput':
[_dim] = args
arg_strs = []
elif f_name == 'inputVector':
[_v] = args
arg_strs = []
elif f_name == 'matInput':
[_d1, _d2] = args
arg_strs = []
elif f_name == 'inputMatrix':
[_v, _d] = args
arg_strs = []
elif f_name == 'parameters':
[_dim] = args
arg_strs = []
if compact:
if vidx in var_name_dict:
f_name = var_name_dict[vidx]
node_type = '1_param'
elif f_name == 'lookup_parameters':
[_dim] = args
arg_strs = []
if compact:
if vidx in var_name_dict:
f_name = var_name_dict[vidx]
node_type = '1_param'
elif f_name == 'lookup':
[p, idx, update] = args
[_dim] = p.args
if vidx in var_name_dict:
name = var_name_dict[vidx]
else:
name = None
item_name = None
if lookup_names and p in expression_names:
param_name = expression_names[p]
if param_name in lookup_names:
item_name = '\\"%s\\"' % (lookup_names[param_name][idx],)
if compact:
if item_name is not None:
f_name = item_name
elif name is not None:
f_name = '%s[%s]' % (name, idx)
else:
f_name = 'lookup(%s)' % (idx)
arg_strs = []
else:
arg_strs = [var_name_dict.get(p.vindex, 'v%d' % (p.vindex))]
if item_name is not None:
arg_strs.append(item_name)
vocab_size = _dim[0]
arg_strs.extend(['%s' % (idx), '%s' % (vocab_size), 'update' if update else 'fixed'])
#children.add(vidx2str(p.vindex))
#node_type = '1_param'
elif f_name == 'RNNState':
[arg, input_dim, bldr_type, bldr_num, state_idx] = args # arg==input_e
rnn_name = rnn_bldr_name[bldr_num]
if bldr_type.endswith('Builder'):
bldr_type = bldr_type[:-len('Builder')]
f_name = '%s-%s-%s' % (bldr_type, rnn_name, state_idx)
if not compact:
i = arg.vindex
s = var_name_dict.get(i, 'v%d' % (i))
arg_strs = [s]
else:
arg_strs = []
children.add(vidx2str(arg.vindex))
node_type = '3_rnn_state'
else:
arg_strs = []
for arg in args:
if isinstance(arg, Expression):
if not compact:
i = arg.vindex
s = var_name_dict.get(i, 'v%d' % (i))
arg_strs.append(s)
children.add(vidx2str(arg.vindex))
elif isinstance(arg, float) and compact:
s = re.sub('0+$', '', '%.3f' % (arg))
if s == '0.':
s = str(arg)
arg_strs.append(s)
else:
arg_strs.append(str(arg))
# f_name = { ,
# }.get(f_name, f_name)
if compact:
f_name = { 'add': '+',
'sub': '-',
'mul': '*',
'div': '/',
'cadd': '+',
'cmul': '*',
'cdiv': '/',
'scalarsub': '-',
'concatenate': 'cat',
'esum': 'sum',
'emax': 'max',
'emin': 'min',
}.get(f_name, f_name)
if arg_strs:
str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs))
else:
str_repr = f_name
elif f_name == 'add':
[a,b] = arg_strs
str_repr = '%s + %s' % (a,b)
elif f_name == 'sub':
[a,b] = arg_strs
str_repr = '%s - %s' % (a,b)
elif f_name == 'mul':
[a,b] = arg_strs
str_repr = '%s * %s' % (a,b)
elif f_name == 'div':
[a,b] = arg_strs
str_repr = '%s / %s' % (a,b)
elif f_name == 'neg':
[a,] = arg_strs
str_repr = '-%s' % (a)
elif f_name == 'affine_transform':
str_repr = arg_strs[0]
for i in xrange(1, len(arg_strs), 2):
str_repr += ' + %s*%s' % tuple(arg_strs[i:i+2])
else:
if arg_strs is not None:
str_repr = '%s(%s)' % (f_name, ', '.join(arg_strs))
else:
str_repr = f_name
name = vidx2str(vidx)
var_name = '%s' % (var_name_dict.get(vidx, 'v%d' % (vidx))) if not compact else ''
# if show_dims:
# str_repr = '%s\\n%s' % (shape_str(e.dim), str_repr)
label = str_repr
if not compact:
label = '%s = %s' % (var_name, label)
features = ''
# if output_dim.invalid():
# features += " [color=red,style=filled,fillcolor=red]"
# node_def_lines.append(' %s [label="%s%s"] %s;' % (vidx2str(vidx), label_prefix, str_repr, ''))
expr_name = expression_names[e] if compact and expression_names and (e in expression_names) and (expression_names[e] != f_name) else None
nodes.add(GVNode(name, input_dim, label, output_dim, frozenset(children), features, node_type, expr_name))
return nodes |
def create(app_id: int = None,
login: str = None,
password: str = None,
service_token: str = None,
proxies: dict = None) -> API:
"""
Creates an API instance, requires app ID,
login and password or service token to create connection
:param app_id: int: specifies app ID
:param login: str: specifies login, can be phone number or email
:param password: str: specifies password
:param service_token: str: specifies service token
:param proxies: dict: specifies proxies, require http and https proxy
"""
session_ = APISession(app_id,
login,
password,
service_token,
proxies)
return API(session_) | Creates an API instance, requires app ID,
login and password or service token to create connection
:param app_id: int: specifies app ID
:param login: str: specifies login, can be phone number or email
:param password: str: specifies password
:param service_token: str: specifies service token
:param proxies: dict: specifies proxies, require http and https proxy | Below is the instruction that describes the task:
### Input:
Creates an API instance, requires app ID,
login and password or service token to create connection
:param app_id: int: specifies app ID
:param login: str: specifies login, can be phone number or email
:param password: str: specifies password
:param service_token: str: specifies service token
:param proxies: dict: specifies proxies, require http and https proxy
### Response:
def create(app_id: int = None,
login: str = None,
password: str = None,
service_token: str = None,
proxies: dict = None) -> API:
"""
Creates an API instance, requires app ID,
login and password or service token to create connection
:param app_id: int: specifies app ID
:param login: str: specifies login, can be phone number or email
:param password: str: specifies password
:param service_token: str: specifies service token
:param proxies: dict: specifies proxies, require http and https proxy
"""
session_ = APISession(app_id,
login,
password,
service_token,
proxies)
return API(session_) |
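Usage sketch with placeholder credentials (all values are illustrative only):
api = create(app_id=1234567, login="user@example.com", password="s3cret")
# or, with a service token instead of login/password:
api = create(service_token="xxxxxxxxxxxxxxxxxxxx")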
def draw_lnm_samples(**kwargs):
''' Draw samples for uniform-in-log model
Parameters
----------
**kwargs: string
Keyword arguments as model parameters and number of samples
Returns
-------
array
The first mass
array
The second mass
'''
# PDF doesn't match the sampler
nsamples = kwargs.get('nsamples', 1)
min_mass = kwargs.get('min_mass', 5.)
max_mass = kwargs.get('max_mass', 95.)
max_mtotal = min_mass + max_mass
lnmmin = log(min_mass)
lnmmax = log(max_mass)
k = nsamples * int(1.5 + log(1 + 100./nsamples))
aa = np.exp(np.random.uniform(lnmmin, lnmmax, k))
bb = np.exp(np.random.uniform(lnmmin, lnmmax, k))
idx = np.where(aa + bb < max_mtotal)
m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx]
return np.resize(m1, nsamples), np.resize(m2, nsamples) | Draw samples for uniform-in-log model
Parameters
----------
**kwargs: string
Keyword arguments as model parameters and number of samples
Returns
-------
array
The first mass
array
The second mass | Below is the instruction that describes the task:
### Input:
Draw samples for uniform-in-log model
Parameters
----------
**kwargs: string
Keyword arguments as model parameters and number of samples
Returns
-------
array
The first mass
array
The second mass
### Response:
def draw_lnm_samples(**kwargs):
''' Draw samples for uniform-in-log model
Parameters
----------
**kwargs: string
Keyword arguments as model parameters and number of samples
Returns
-------
array
The first mass
array
The second mass
'''
# PDF doesn't match the sampler
nsamples = kwargs.get('nsamples', 1)
min_mass = kwargs.get('min_mass', 5.)
max_mass = kwargs.get('max_mass', 95.)
max_mtotal = min_mass + max_mass
lnmmin = log(min_mass)
lnmmax = log(max_mass)
k = nsamples * int(1.5 + log(1 + 100./nsamples))
aa = np.exp(np.random.uniform(lnmmin, lnmmax, k))
bb = np.exp(np.random.uniform(lnmmin, lnmmax, k))
idx = np.where(aa + bb < max_mtotal)
m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx]
return np.resize(m1, nsamples), np.resize(m2, nsamples) |
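Example draw; by construction m1 >= m2 elementwise and accepted pairs satisfy m1 + m2 < min_mass + max_mass:
m1, m2 = draw_lnm_samples(nsamples=1000, min_mass=5., max_mass=95.)
assert m1.shape == m2.shape == (1000,)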
def loads(astring):
"""Decompress and deserialize string into a Python object via pickle."""
try:
return pickle.loads(lzma.decompress(astring))
except lzma.LZMAError as e:
raise SerializerError(
'Cannot decompress object ("{}")'.format(str(e))
)
except pickle.UnpicklingError as e:
raise SerializerError(
'Cannot restore object ("{}")'.format(str(e))
) | Decompress and deserialize string into a Python object via pickle. | Below is the instruction that describes the task:
### Input:
Decompress and deserialize string into a Python object via pickle.
### Response:
def loads(astring):
"""Decompress and deserialize string into a Python object via pickle."""
try:
return pickle.loads(lzma.decompress(astring))
except lzma.LZMAError as e:
raise SerializerError(
'Cannot decompress object ("{}")'.format(str(e))
)
except pickle.UnpicklingError as e:
raise SerializerError(
'Cannot restore object ("{}")'.format(str(e))
) |
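The matching serialize-and-compress step (the inverse of `loads` above) for a round trip:
import lzma
import pickle
payload = lzma.compress(pickle.dumps({"answer": 42}))
assert loads(payload) == {"answer": 42}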
def profile(name, profile, onlyif=None, unless=None, opts=None, **kwargs):
'''
Create a single instance on a cloud provider, using a salt-cloud profile.
Note that while profiles used by this function do take any configuration
argument that would normally be used to create an instance using a profile,
this state will not verify the state of any of those arguments on an
existing instance. Stateful properties of an instance should be configured
using their own individual state (i.e., cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
profile
The name of the cloud profile to use
onlyif
Only run the state if this condition is true (a command that exits with code 0, or a truthy value)
unless
Do not run the state if this condition is true (a command that exits with code 0, or a truthy value)
kwargs
Any profile override or addition
opts
Any extra opts that need to be used
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, six.string_types):
if not onlyif:
return _valid(name, comment='onlyif condition is false')
elif isinstance(onlyif, six.string_types):
if retcode(onlyif, python_shell=True) != 0:
return _valid(name, comment='onlyif condition is false')
if unless is not None:
if not isinstance(unless, six.string_types):
if unless:
return _valid(name, comment='unless condition is true')
elif isinstance(unless, six.string_types):
if retcode(unless, python_shell=True) == 0:
return _valid(name, comment='unless condition is true')
instance = _get_instance([name])
if instance and not any('Not Actioned' in key for key in instance):
ret['result'] = True
ret['comment'] = 'Already present instance {0}'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be created'.format(name)
return ret
info = __salt__['cloud.profile'](profile, name, vm_overrides=kwargs, opts=opts)
# get either {Error: ''} or {namestring: {Error: ''}}
# which is what we can get from providers returns
main_error = info.get('Error', '')
name_error = ''
if isinstance(info, dict):
subinfo = info.get(name, {})
if isinstance(subinfo, dict):
name_error = subinfo.get('Error', None)
error = main_error or name_error
if info and not error:
node_info = info.get(name)
ret['result'] = True
default_msg = 'Created instance {0} using profile {1}'.format(
name, profile,)
# some providers support changes
if 'changes' in node_info:
ret['changes'] = node_info['changes']
ret['comment'] = node_info.get('comment', default_msg)
else:
ret['changes'] = info
ret['comment'] = default_msg
elif error:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
' using profile {1}: {2}').format(
name,
profile,
'{0}\n{1}\n'.format(main_error, name_error).strip(),
)
else:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
' using profile {1}').format(
name,
profile,
)
return ret | Create a single instance on a cloud provider, using a salt-cloud profile.
Note that while profiles used by this function do take any configuration
argument that would normally be used to create an instance using a profile,
this state will not verify the state of any of those arguments on an
existing instance. Stateful properties of an instance should be configured
using their own individual state (i.e., cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
profile
The name of the cloud profile to use
onlyif
Only run the state if this condition is true (a command that exits with code 0, or a truthy value)
unless
Do not run the state if this condition is true (a command that exits with code 0, or a truthy value)
kwargs
Any profile override or addition
opts
Any extra opts that need to be used | Below is the instruction that describes the task:
### Input:
Create a single instance on a cloud provider, using a salt-cloud profile.
Note that while profiles used by this function do take any configuration
argument that would normally be used to create an instance using a profile,
this state will not verify the state of any of those arguments on an
existing instance. Stateful properties of an instance should be configured
using their own individual state (i.e., cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
profile
The name of the cloud profile to use
onlyif
Only run the state if this condition is true (a command that exits with code 0, or a truthy value)
unless
Do not run the state if this condition is true (a command that exits with code 0, or a truthy value)
kwargs
Any profile override or addition
opts
Any extra opts that need to be used
### Response:
def profile(name, profile, onlyif=None, unless=None, opts=None, **kwargs):
'''
Create a single instance on a cloud provider, using a salt-cloud profile.
Note that while profiles used by this function do take any configuration
argument that would normally be used to create an instance using a profile,
this state will not verify the state of any of those arguments on an
existing instance. Stateful properties of an instance should be configured
using their own individual state (i.e., cloud.tagged, cloud.untagged, etc).
name
The name of the instance to create
profile
The name of the cloud profile to use
onlyif
Only run the state if this condition is true (a command that exits with code 0, or a truthy value)
unless
Do not run the state if this condition is true (a command that exits with code 0, or a truthy value)
kwargs
Any profile override or addition
opts
Any extra opts that need to be used
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
retcode = __salt__['cmd.retcode']
if onlyif is not None:
if not isinstance(onlyif, six.string_types):
if not onlyif:
return _valid(name, comment='onlyif condition is false')
elif isinstance(onlyif, six.string_types):
if retcode(onlyif, python_shell=True) != 0:
return _valid(name, comment='onlyif condition is false')
if unless is not None:
if not isinstance(unless, six.string_types):
if unless:
return _valid(name, comment='unless condition is true')
elif isinstance(unless, six.string_types):
if retcode(unless, python_shell=True) == 0:
return _valid(name, comment='unless condition is true')
instance = _get_instance([name])
if instance and not any('Not Actioned' in key for key in instance):
ret['result'] = True
ret['comment'] = 'Already present instance {0}'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Instance {0} needs to be created'.format(name)
return ret
info = __salt__['cloud.profile'](profile, name, vm_overrides=kwargs, opts=opts)
# get either {Error: ''} or {namestring: {Error: ''}}
# which is what we can get from providers returns
main_error = info.get('Error', '')
name_error = ''
if isinstance(info, dict):
subinfo = info.get(name, {})
if isinstance(subinfo, dict):
name_error = subinfo.get('Error', None)
error = main_error or name_error
if info and not error:
node_info = info.get(name)
ret['result'] = True
default_msg = 'Created instance {0} using profile {1}'.format(
name, profile,)
# some providers support changes
if 'changes' in node_info:
ret['changes'] = node_info['changes']
ret['comment'] = node_info.get('comment', default_msg)
else:
ret['changes'] = info
ret['comment'] = default_msg
elif error:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
' using profile {1}: {2}').format(
name,
profile,
'{0}\n{1}\n'.format(main_error, name_error).strip(),
)
else:
ret['result'] = False
ret['comment'] = ('Failed to create instance {0}'
' using profile {1}').format(
name,
profile,
)
return ret |
def posterior_step(logposts, dim):
"""Finds the last time a chain made a jump > dim/2.
Parameters
----------
logposts : array
1D array of values that are proportional to the log posterior values.
dim : int
The dimension of the parameter space.
Returns
-------
int
The index of the last time the logpost made a jump > dim/2. If that
never happened, returns 0.
"""
if logposts.ndim > 1:
raise ValueError("logposts must be a 1D array")
criteria = dim/2.
dp = numpy.diff(logposts)
indices = numpy.where(dp >= criteria)[0]
if indices.size > 0:
idx = indices[-1] + 1
else:
idx = 0
return idx | Finds the last time a chain made a jump > dim/2.
Parameters
----------
logposts : array
1D array of values that are proportional to the log posterior values.
dim : int
The dimension of the parameter space.
Returns
-------
int
The index of the last time the logpost made a jump > dim/2. If that
never happened, returns 0. | Below is the instruction that describes the task:
### Input:
Finds the last time a chain made a jump > dim/2.
Parameters
----------
logposts : array
1D array of values that are proportional to the log posterior values.
dim : int
The dimension of the parameter space.
Returns
-------
int
The index of the last time the logpost made a jump > dim/2. If that
never happened, returns 0.
### Response:
def posterior_step(logposts, dim):
"""Finds the last time a chain made a jump > dim/2.
Parameters
----------
logposts : array
1D array of values that are proportional to the log posterior values.
dim : int
The dimension of the parameter space.
Returns
-------
int
The index of the last time the logpost made a jump > dim/2. If that
never happened, returns 0.
"""
if logposts.ndim > 1:
raise ValueError("logposts must be a 1D array")
criteria = dim/2.
dp = numpy.diff(logposts)
indices = numpy.where(dp >= criteria)[0]
if indices.size > 0:
idx = indices[-1] + 1
else:
idx = 0
return idx |
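A small worked example: with dim=4 the criterion is 2.0, and the last jump of at least 2.0 occurs going into index 2:
import numpy
logposts = numpy.array([-10.0, -9.5, -2.0, -1.8, -1.7])
posterior_step(logposts, dim=4)   # -> 2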
def _ComputeUniquifier(self, debuggee):
"""Computes debuggee uniquifier.
The debuggee uniquifier has to be identical on all instances. Therefore the
uniquifier should not include any random numbers and should only be based
on inputs that are guaranteed to be the same on all instances.
Args:
debuggee: complete debuggee message without the uniquifier
Returns:
Hex string of SHA1 hash of project information, debuggee labels and
debuglet version.
"""
uniquifier = hashlib.sha1()
# Compute hash of application files if we don't have source context. This
# way we can still distinguish between different deployments.
if ('minorversion' not in debuggee.get('labels', []) and
'sourceContexts' not in debuggee):
uniquifier_computer.ComputeApplicationUniquifier(uniquifier)
return uniquifier.hexdigest() | Computes debuggee uniquifier.
The debuggee uniquifier has to be identical on all instances. Therefore the
uniquifier should not include any random numbers and should only be based
on inputs that are guaranteed to be the same on all instances.
Args:
debuggee: complete debuggee message without the uniquifier
Returns:
Hex string of SHA1 hash of project information, debuggee labels and
debuglet version. | Below is the instruction that describes the task:
### Input:
Computes debuggee uniquifier.
The debuggee uniquifier has to be identical on all instances. Therefore the
uniquifier should not include any random numbers and should only be based
on inputs that are guaranteed to be the same on all instances.
Args:
debuggee: complete debuggee message without the uniquifier
Returns:
Hex string of SHA1 hash of project information, debuggee labels and
debuglet version.
### Response:
def _ComputeUniquifier(self, debuggee):
"""Computes debuggee uniquifier.
The debuggee uniquifier has to be identical on all instances. Therefore the
uniquifier should not include any random numbers and should only be based
on inputs that are guaranteed to be the same on all instances.
Args:
debuggee: complete debuggee message without the uniquifier
Returns:
Hex string of SHA1 hash of project information, debuggee labels and
debuglet version.
"""
uniquifier = hashlib.sha1()
# Compute hash of application files if we don't have source context. This
# way we can still distinguish between different deployments.
if ('minorversion' not in debuggee.get('labels', []) and
'sourceContexts' not in debuggee):
uniquifier_computer.ComputeApplicationUniquifier(uniquifier)
return uniquifier.hexdigest() |
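The hashing pattern itself is plain hashlib; a standalone sketch with a made-up input string (the real inputs are the project information, labels and agent version described above):
import hashlib

uniquifier = hashlib.sha1()
uniquifier.update(b'example-project/example-labels/agent-1.0')  # illustrative bytes only
print(uniquifier.hexdigest())  # 40-character hex digest, identical for identical inputs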
def _synthesize_multiple_python(self, text_file, output_file_path, quit_after=None, backwards=False):
"""
Synthesize multiple fragments via a Python call.
:rtype: tuple (result, (anchors, current_time, num_chars))
"""
self.log(u"Synthesizing multiple via a Python call...")
ret = self._synthesize_multiple_generic(
helper_function=self._synthesize_single_python_helper,
text_file=text_file,
output_file_path=output_file_path,
quit_after=quit_after,
backwards=backwards
)
self.log(u"Synthesizing multiple via a Python call... done")
return ret | Synthesize multiple fragments via a Python call.
:rtype: tuple (result, (anchors, current_time, num_chars)) | Below is the the instruction that describes the task:
### Input:
Synthesize multiple fragments via a Python call.
:rtype: tuple (result, (anchors, current_time, num_chars))
### Response:
def _synthesize_multiple_python(self, text_file, output_file_path, quit_after=None, backwards=False):
"""
Synthesize multiple fragments via a Python call.
:rtype: tuple (result, (anchors, current_time, num_chars))
"""
self.log(u"Synthesizing multiple via a Python call...")
ret = self._synthesize_multiple_generic(
helper_function=self._synthesize_single_python_helper,
text_file=text_file,
output_file_path=output_file_path,
quit_after=quit_after,
backwards=backwards
)
self.log(u"Synthesizing multiple via a Python call... done")
return ret |
def combined(cls, code, path=None, extra_args=None):
""" Compile combined-json with abi,bin,devdoc,userdoc.
@param code: literal solidity code as a string.
@param path: absolute path to solidity-file. Note: code & path are
mutually exclusive!
@param extra_args: Either a space separated string or a list of extra
arguments to be passed to the solidity compiler.
"""
if code and path:
raise ValueError('sourcecode and path are mutually exclusive.')
if path:
contracts = compile_file(path, extra_args=extra_args)
with open(path) as handler:
code = handler.read()
elif code:
contracts = compile_code(code, extra_args=extra_args)
else:
raise ValueError('either code or path needs to be supplied.')
sorted_contracts = []
for name in solidity_names(code):
sorted_contracts.append(
(
name[1],
solidity_get_contract_data(contracts, path, name[1])
)
)
return sorted_contracts | Compile combined-json with abi,bin,devdoc,userdoc.
@param code: literal solidity code as a string.
@param path: absolute path to solidity-file. Note: code & path are
mutually exclusive!
@param extra_args: Either a space separated string or a list of extra
arguments to be passed to the solidity compiler. | Below is the the instruction that describes the task:
### Input:
Compile combined-json with abi,bin,devdoc,userdoc.
@param code: literal solidity code as a string.
@param path: absolute path to solidity-file. Note: code & path are
mutually exclusive!
@param extra_args: Either a space separated string or a list of extra
arguments to be passed to the solidity compiler.
### Response:
def combined(cls, code, path=None, extra_args=None):
""" Compile combined-json with abi,bin,devdoc,userdoc.
@param code: literal solidity code as a string.
@param path: absolute path to solidity-file. Note: code & path are
mutually exclusive!
@param extra_args: Either a space separated string or a list of extra
arguments to be passed to the solidity compiler.
"""
if code and path:
raise ValueError('sourcecode and path are mutually exclusive.')
if path:
contracts = compile_file(path, extra_args=extra_args)
with open(path) as handler:
code = handler.read()
elif code:
contracts = compile_code(code, extra_args=extra_args)
else:
raise ValueError('either code or path needs to be supplied.')
sorted_contracts = []
for name in solidity_names(code):
sorted_contracts.append(
(
name[1],
solidity_get_contract_data(contracts, path, name[1])
)
)
return sorted_contracts |
def get_logger(name="peyotl"):
"""Returns a logger with name set as given. See _read_logging_config for a description of the env var/config
file cascade that controls configuration of the logger.
"""
logger = logging.getLogger(name)
if len(logger.handlers) == 0:
log_init_warnings = []
lc = _read_logging_config(log_init_warnings)
logger.setLevel(lc['level'])
if lc['filepath'] is not None:
log_dir = lc['log_dir']
if log_dir and not os.path.exists(log_dir):
os.makedirs(log_dir)
ch = logging.FileHandler(lc['filepath'])
else:
ch = logging.StreamHandler()
ch.setLevel(lc['level'])
ch.setFormatter(lc['formatter'])
logger.addHandler(ch)
if log_init_warnings:
for w in log_init_warnings:
logger.warn(w)
return logger | Returns a logger with name set as given. See _read_logging_config for a description of the env var/config
file cascade that controls configuration of the logger. | Below is the the instruction that describes the task:
### Input:
Returns a logger with name set as given. See _read_logging_config for a description of the env var/config
file cascade that controls configuration of the logger.
### Response:
def get_logger(name="peyotl"):
"""Returns a logger with name set as given. See _read_logging_config for a description of the env var/config
file cascade that controls configuration of the logger.
"""
logger = logging.getLogger(name)
if len(logger.handlers) == 0:
log_init_warnings = []
lc = _read_logging_config(log_init_warnings)
logger.setLevel(lc['level'])
if lc['filepath'] is not None:
log_dir = lc['log_dir']
if log_dir and not os.path.exists(log_dir):
os.makedirs(log_dir)
ch = logging.FileHandler(lc['filepath'])
else:
ch = logging.StreamHandler()
ch.setLevel(lc['level'])
ch.setFormatter(lc['formatter'])
logger.addHandler(ch)
if log_init_warnings:
for w in log_init_warnings:
logger.warn(w)
return logger |
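A stripped-down sketch of the same attach-handlers-only-once pattern, without the peyotl config cascade; the level, formatter and stream choices here are simplifying assumptions:
import logging
import sys

def get_simple_logger(name, level=logging.INFO):
    logger = logging.getLogger(name)
    if len(logger.handlers) == 0:  # configure only on the first request for this name
        logger.setLevel(level)
        ch = logging.StreamHandler(sys.stderr)
        ch.setLevel(level)
        ch.setFormatter(logging.Formatter('%(levelname)s: %(name)s: %(message)s'))
        logger.addHandler(ch)
    return logger

log = get_simple_logger('demo')
log.info('handler attached once; later calls reuse it')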
def previous_obj(self):
"""Returns a model obj that is the first occurrence of a previous
obj relative to this object's appointment.
Override this method if not an EDC subject model / CRF.
"""
previous_obj = None
if self.previous_visit:
try:
previous_obj = self.model.objects.get(
**{f"{self.model.visit_model_attr()}": self.previous_visit}
)
except ObjectDoesNotExist:
pass
return previous_obj | Returns a model obj that is the first occurrence of a previous
obj relative to this object's appointment.
Override this method if not an EDC subject model / CRF. | Below is the the instruction that describes the task:
### Input:
Returns a model obj that is the first occurrence of a previous
obj relative to this object's appointment.
Override this method if not an EDC subject model / CRF.
### Response:
def previous_obj(self):
"""Returns a model obj that is the first occurrence of a previous
obj relative to this object's appointment.
Override this method if not an EDC subject model / CRF.
"""
previous_obj = None
if self.previous_visit:
try:
previous_obj = self.model.objects.get(
**{f"{self.model.visit_model_attr()}": self.previous_visit}
)
except ObjectDoesNotExist:
pass
return previous_obj |
def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
"""
return self.rank(ascending=True, mask=mask, groupby=groupby) <= N | Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter | Below is the the instruction that describes the task:
### Input:
Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
### Response:
def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
"""
return self.rank(ascending=True, mask=mask, groupby=groupby) <= N |
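The zipline objects are not reproduced here; the ranking idea behind bottom can be illustrated with plain numpy (per-row ascending ranks, keep the N smallest):
import numpy as np

values = np.array([[3.0, 1.0, 2.0, 5.0],
                   [9.0, 7.0, 8.0, 6.0]])
N = 2
ranks = values.argsort(axis=1).argsort(axis=1) + 1  # 1-based ascending rank per row
bottom_n_mask = ranks <= N
print(bottom_n_mask)
# [[False  True  True False]
#  [False  True False  True]]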
def prepare(self):
"""
Returns an aioxmpp.stanza.Message built from the Message and prepared to be sent.
Returns:
aioxmpp.stanza.Message: the message prepared to be sent
"""
msg = aioxmpp.stanza.Message(
to=self.to,
from_=self.sender,
type_=aioxmpp.MessageType.CHAT,
)
msg.body[None] = self.body
# Send metadata using xep-0004: Data Forms (https://xmpp.org/extensions/xep-0004.html)
if len(self.metadata):
data = forms_xso.Data(type_=forms_xso.DataType.FORM)
for name, value in self.metadata.items():
data.fields.append(
forms_xso.Field(
var=name,
type_=forms_xso.FieldType.TEXT_SINGLE,
values=[value],
)
)
if self.thread:
data.fields.append(forms_xso.Field(var="_thread_node",
type_=forms_xso.FieldType.TEXT_SINGLE,
values=[self.thread]))
data.title = SPADE_X_METADATA
msg.xep0004_data = [data]
return msg | Returns an aioxmpp.stanza.Message built from the Message and prepared to be sent.
Returns:
aioxmpp.stanza.Message: the message prepared to be sent | Below is the the instruction that describes the task:
### Input:
Returns an aioxmpp.stanza.Message built from the Message and prepared to be sent.
Returns:
aioxmpp.stanza.Message: the message prepared to be sent
### Response:
def prepare(self):
"""
Returns an aioxmpp.stanza.Message built from the Message and prepared to be sent.
Returns:
aioxmpp.stanza.Message: the message prepared to be sent
"""
msg = aioxmpp.stanza.Message(
to=self.to,
from_=self.sender,
type_=aioxmpp.MessageType.CHAT,
)
msg.body[None] = self.body
# Send metadata using xep-0004: Data Forms (https://xmpp.org/extensions/xep-0004.html)
if len(self.metadata):
data = forms_xso.Data(type_=forms_xso.DataType.FORM)
for name, value in self.metadata.items():
data.fields.append(
forms_xso.Field(
var=name,
type_=forms_xso.FieldType.TEXT_SINGLE,
values=[value],
)
)
if self.thread:
data.fields.append(forms_xso.Field(var="_thread_node",
type_=forms_xso.FieldType.TEXT_SINGLE,
values=[self.thread]))
data.title = SPADE_X_METADATA
msg.xep0004_data = [data]
return msg |
def _updateNonDefaultsForInspector(self, inspectorRegItem, inspector):
""" Store the (non-default) config values for the current inspector in a local dictionary.
This dictionary is later used to store values for persistence.
This function must be called after the inspector was drawn because that may update
some derived config values (e.g. ranges)
"""
if inspectorRegItem and inspector:
key = inspectorRegItem.identifier
logger.debug("_updateNonDefaultsForInspector: {} {}"
.format(key, type(inspector)))
self._inspectorsNonDefaults[key] = inspector.config.getNonDefaultsDict()
else:
logger.debug("_updateNonDefaultsForInspector: no inspector") | Store the (non-default) config values for the current inspector in a local dictionary.
This dictionary is later used to store values for persistence.
This function must be called after the inspector was drawn because that may update
some derived config values (e.g. ranges) | Below is the the instruction that describes the task:
### Input:
Store the (non-default) config values for the current inspector in a local dictionary.
This dictionary is later used to store values for persistence.
This function must be called after the inspector was drawn because that may update
some derived config values (e.g. ranges)
### Response:
def _updateNonDefaultsForInspector(self, inspectorRegItem, inspector):
""" Store the (non-default) config values for the current inspector in a local dictionary.
This dictionary is later used to store values for persistence.
This function must be called after the inspector was drawn because that may update
some derived config values (e.g. ranges)
"""
if inspectorRegItem and inspector:
key = inspectorRegItem.identifier
logger.debug("_updateNonDefaultsForInspector: {} {}"
.format(key, type(inspector)))
self._inspectorsNonDefaults[key] = inspector.config.getNonDefaultsDict()
else:
logger.debug("_updateNonDefaultsForInspector: no inspector") |
def prior_draw(self, N=1):
"""
Draw ``N`` samples from the prior.
"""
p = np.random.ranf(size=(N, self.ndim))
p = (self._upper_right - self._lower_left) * p + self._lower_left
return p | Draw ``N`` samples from the prior. | Below is the the instruction that describes the task:
### Input:
Draw ``N`` samples from the prior.
### Response:
def prior_draw(self, N=1):
"""
Draw ``N`` samples from the prior.
"""
p = np.random.ranf(size=(N, self.ndim))
p = (self._upper_right - self._lower_left) * p + self._lower_left
return p |
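A runnable sketch that feeds prior_draw a minimal, hypothetical container providing the attributes it reads (_lower_left, _upper_right, ndim); it assumes numpy is imported as np, as in the body above, and that prior_draw is in scope as a plain function:
import numpy as np

class UniformBoxPrior:
    def __init__(self, lower_left, upper_right):
        self._lower_left = np.asarray(lower_left, dtype=float)
        self._upper_right = np.asarray(upper_right, dtype=float)
        self.ndim = self._lower_left.size

prior = UniformBoxPrior([0.0, -1.0], [1.0, 1.0])
samples = prior_draw(prior, N=5)  # call it as a plain function with the container as self
print(samples.shape)              # (5, 2); every row lies inside the box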
def set_connections_params(self, harakiri=None, timeout_socket=None):
"""Sets connection-related parameters.
:param int harakiri: Set gateway harakiri timeout (seconds).
:param int timeout_socket: Node socket timeout (seconds). Default: 60.
"""
self._set_aliased('harakiri', harakiri)
self._set_aliased('timeout', timeout_socket)
return self | Sets connection-related parameters.
:param int harakiri: Set gateway harakiri timeout (seconds).
:param int timeout_socket: Node socket timeout (seconds). Default: 60. | Below is the the instruction that describes the task:
### Input:
Sets connection-related parameters.
:param int harakiri: Set gateway harakiri timeout (seconds).
:param int timeout_socket: Node socket timeout (seconds). Default: 60.
### Response:
def set_connections_params(self, harakiri=None, timeout_socket=None):
"""Sets connection-related parameters.
:param int harakiri: Set gateway harakiri timeout (seconds).
:param int timeout_socket: Node socket timeout (seconds). Default: 60.
"""
self._set_aliased('harakiri', harakiri)
self._set_aliased('timeout', timeout_socket)
return self |
def pdf_link(self, link_f, y, Y_metadata=None):
"""
Likelihood function given link(f)
.. math::
p(y_{i}|\\lambda(f_{i})) = \\lambda(f_{i})\\exp (-y\\lambda(f_{i}))
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in exponential distribution
:returns: likelihood evaluated for this point
:rtype: float
"""
assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
log_objective = link_f*np.exp(-y*link_f)
return np.exp(np.sum(np.log(log_objective))) | Likelihood function given link(f)
.. math::
p(y_{i}|\\lambda(f_{i})) = \\lambda(f_{i})\\exp (-y\\lambda(f_{i}))
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in exponential distribution
:returns: likelihood evaluated for this point
:rtype: float | Below is the the instruction that describes the task:
### Input:
Likelihood function given link(f)
.. math::
p(y_{i}|\\lambda(f_{i})) = \\lambda(f_{i})\\exp (-y\\lambda(f_{i}))
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in exponential distribution
:returns: likelihood evaluated for this point
:rtype: float
### Response:
def pdf_link(self, link_f, y, Y_metadata=None):
"""
Likelihood function given link(f)
.. math::
p(y_{i}|\\lambda(f_{i})) = \\lambda(f_{i})\\exp (-y\\lambda(f_{i}))
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in exponential distribution
:returns: likelihood evaluated for this point
:rtype: float
"""
assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
log_objective = link_f*np.exp(-y*link_f)
return np.exp(np.sum(np.log(log_objective))) |
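The arithmetic can be checked outside the class with plain numpy; this just recomputes the product of exponential densities that the method returns:
import numpy as np

link_f = np.array([1.0, 2.0, 0.5])   # rate parameters lambda(f_i)
y = np.array([0.3, 0.1, 2.0])        # observations
pointwise = link_f * np.exp(-y * link_f)       # exponential pdf evaluated per point
joint = np.exp(np.sum(np.log(pointwise)))      # same exp-of-sum-of-logs route as the method
print(np.isclose(joint, np.prod(pointwise)))   # True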
def get_topics(yaml_info):
''' Returns the names of all of the topics in the bag, and prints them
to stdout if requested
'''
# Pull out the topic info
names = []
# Store all of the topics in a dictionary
topics = yaml_info['topics']
for topic in topics:
names.append(topic['topic'])
return names | Returns the names of all of the topics in the bag, and prints them
to stdout if requested | Below is the the instruction that describes the task:
### Input:
Returns the names of all of the topics in the bag, and prints them
to stdout if requested
### Response:
def get_topics(yaml_info):
''' Returns the names of all of the topics in the bag, and prints them
to stdout if requested
'''
# Pull out the topic info
names = []
# Store all of the topics in a dictionary
topics = yaml_info['topics']
for topic in topics:
names.append(topic['topic'])
return names |
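A quick check with a hand-built yaml_info dictionary shaped the way the function expects (a 'topics' list of dicts, each carrying a 'topic' key); the topic names are made up:
yaml_info = {'topics': [{'topic': '/imu'}, {'topic': '/odom'}]}
print(get_topics(yaml_info))  # ['/imu', '/odom']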
def do_bulk_config_update(hostnames):
"""
Given a list of hostnames, update the configs of all the
datanodes, tasktrackers and regionservers on those hosts.
"""
api = ApiResource(CM_HOST, username=CM_USER, password=CM_PASSWD)
hosts = collect_hosts(api, hostnames)
# Set config
for h in hosts:
configure_roles_on_host(api, h) | Given a list of hostnames, update the configs of all the
datanodes, tasktrackers and regionservers on those hosts. | Below is the the instruction that describes the task:
### Input:
Given a list of hostnames, update the configs of all the
datanodes, tasktrackers and regionservers on those hosts.
### Response:
def do_bulk_config_update(hostnames):
"""
Given a list of hostnames, update the configs of all the
datanodes, tasktrackers and regionservers on those hosts.
"""
api = ApiResource(CM_HOST, username=CM_USER, password=CM_PASSWD)
hosts = collect_hosts(api, hostnames)
# Set config
for h in hosts:
configure_roles_on_host(api, h) |
def _insert(self, feature, cursor):
"""
Insert a feature into the database.
"""
try:
cursor.execute(constants._INSERT, feature.astuple())
except sqlite3.ProgrammingError:
cursor.execute(
constants._INSERT, feature.astuple(self.default_encoding)) | Insert a feature into the database. | Below is the the instruction that describes the task:
### Input:
Insert a feature into the database.
### Response:
def _insert(self, feature, cursor):
"""
Insert a feature into the database.
"""
try:
cursor.execute(constants._INSERT, feature.astuple())
except sqlite3.ProgrammingError:
cursor.execute(
constants._INSERT, feature.astuple(self.default_encoding)) |
def terminal_type(cls):
"""
returns darwin, cygwin, cmd, or linux
"""
what = sys.platform
kind = 'UNDEFINED_TERMINAL_TYPE'
if 'linux' in what:
kind = 'linux'
elif 'darwin' in what:
kind = 'darwin'
elif 'cygwin' in what:
kind = 'cygwin'
elif 'windows' in what:
kind = 'windows'
return kind | returns darwin, cygwin, cmd, or linux | Below is the the instruction that describes the task:
### Input:
returns darwin, cygwin, cmd, or linux
### Response:
def terminal_type(cls):
"""
returns darwin, cygwin, cmd, or linux
"""
what = sys.platform
kind = 'UNDEFINED_TERMINAL_TYPE'
if 'linux' in what:
kind = 'linux'
elif 'darwin' in what:
kind = 'darwin'
elif 'cygwin' in what:
kind = 'cygwin'
elif 'windows' in what:
kind = 'windows'
return kind |
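In its original class this is presumably a classmethod; since cls is never used, the function above can be exercised directly. Note that on native Windows sys.platform is 'win32', which none of the branches match:
import sys

print(sys.platform)          # e.g. 'linux', 'darwin', 'cygwin', or 'win32'
print(terminal_type(None))   # cls is unused, so passing None is enough for a quick check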
def navigation(self, id=None):
"""Function decorator for navbar registration.
Convenience function, calls :meth:`.register_element` with ``id`` and
the decorated function as ``elem``.
:param id: ID to pass on. If ``None``, uses the decorated function's
name.
"""
def wrapper(f):
self.register_element(id or f.__name__, f)
return f
return wrapper | Function decorator for navbar registration.
Convenience function, calls :meth:`.register_element` with ``id`` and
the decorated function as ``elem``.
:param id: ID to pass on. If ``None``, uses the decorated function's
name. | Below is the the instruction that describes the task:
### Input:
Function decorator for navbar registration.
Convenience function, calls :meth:`.register_element` with ``id`` and
the decorated function as ``elem``.
:param id: ID to pass on. If ``None``, uses the decorated function's
name.
### Response:
def navigation(self, id=None):
"""Function decorator for navbar registration.
Convenience function, calls :meth:`.register_element` with ``id`` and
the decorated function as ``elem``.
:param id: ID to pass on. If ``None``, uses the decorated function's
name.
"""
def wrapper(f):
self.register_element(id or f.__name__, f)
return f
return wrapper |
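A self-contained sketch with a hypothetical registry class standing in for the real host object; it only needs register_element, which is the method the decorator calls, and it assumes the navigation function above is in scope:
class NavRegistry:
    def __init__(self):
        self.elements = {}

    def register_element(self, id, elem):
        self.elements[id] = elem

nav = NavRegistry()

@navigation(nav)             # no id given, so the function name is used
def mainbar():
    return ['Home', 'About']

print(nav.elements['mainbar']())  # ['Home', 'About']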
def build_factored_variational_loss(model,
observed_time_series,
init_batch_shape=(),
seed=None,
name=None):
"""Build a loss function for variational inference in STS models.
Variational inference searches for the distribution within some family of
approximate posteriors that minimizes a divergence between the approximate
posterior `q(z)` and true posterior `p(z|observed_time_series)`. By converting
inference to optimization, it's generally much faster than sampling-based
inference algorithms such as HMC. The tradeoff is that the approximating
family rarely contains the true posterior, so it may miss important aspects of
posterior structure (in particular, dependence between variables) and should
not be blindly trusted. Results may vary; it's generally wise to compare to
HMC to evaluate whether inference quality is sufficient for your task at hand.
This method constructs a loss function for variational inference using the
Kullback-Leibler divergence `KL[q(z) || p(z|observed_time_series)]`, with an
approximating family given by independent Normal distributions transformed to
the appropriate parameter space for each parameter. Minimizing this loss (the
negative ELBO) maximizes a lower bound on the log model evidence `log
p(observed_time_series)`. This is equivalent to the 'mean-field' method
implemented in [1], and is a standard approach. The resulting posterior
approximations are unimodal; they will tend to underestimate posterior
uncertainty when the true posterior contains multiple modes (the `KL[q||p]`
divergence encourages choosing a single mode) or dependence between variables.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
a mask `Tensor` to specify timesteps with missing observations.
init_batch_shape: Batch shape (Python `tuple`, `list`, or `int`) of initial
states to optimize in parallel.
Default value: `()`. (i.e., just run a single optimization).
seed: Python integer to seed the random number generator.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e., 'build_factored_variational_loss').
Returns:
variational_loss: `float` `Tensor` of shape
`concat([init_batch_shape, model.batch_shape])`, encoding a stochastic
estimate of an upper bound on the negative model evidence `-log p(y)`.
Minimizing this loss performs variational inference; the gap between the
variational bound and the true (generally unknown) model evidence
corresponds to the divergence `KL[q||p]` between the approximate and true
posterior.
variational_distributions: `collections.OrderedDict` giving
the approximate posterior for each model parameter. The keys are
Python `str` parameter names in order, corresponding to
`[param.name for param in model.parameters]`. The values are
`tfd.Distribution` instances with batch shape
`concat([init_batch_shape, model.batch_shape])`; these will typically be
of the form `tfd.TransformedDistribution(tfd.Normal(...),
bijector=param.bijector)`.
#### Examples
Assume we've built a structural time-series model:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
```
To run variational inference, we simply construct the loss and optimize
it:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series)
train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(200):
_, loss_ = sess.run((train_op, variational_loss))
if step % 20 == 0:
print("step {} loss {}".format(step, loss_))
posterior_samples_ = sess.run({
param_name: q.sample(50)
for param_name, q in variational_distributions.items()})
```
As a more complex example, we might try to avoid local optima by optimizing
from multiple initializations in parallel, and selecting the result with the
lowest loss:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series,
init_batch_shape=[10])
train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(200):
_, loss_ = sess.run((train_op, variational_loss))
if step % 20 == 0:
print("step {} losses {}".format(step, loss_))
# Draw multiple samples to reduce Monte Carlo error in the optimized
# variational bounds.
avg_loss = np.mean(
[sess.run(variational_loss) for _ in range(25)], axis=0)
best_posterior_idx = np.argmin(avg_loss, axis=0).astype(np.int32)
```
#### References
[1]: Alp Kucukelbir, Dustin Tran, Rajesh Ranganath, Andrew Gelman, and
David M. Blei. Automatic Differentiation Variational Inference. In
_Journal of Machine Learning Research_, 2017.
https://arxiv.org/abs/1603.00788
"""
with tf.compat.v1.name_scope(
name, 'build_factored_variational_loss',
values=[observed_time_series]) as name:
seed = tfd.SeedStream(
seed, salt='StructuralTimeSeries_build_factored_variational_loss')
variational_distributions = collections.OrderedDict()
variational_samples = []
for param in model.parameters:
def initial_loc_fn(param):
return sample_uniform_initial_state(
param, return_constrained=True,
init_sample_shape=init_batch_shape,
seed=seed())
q = _build_trainable_posterior(param, initial_loc_fn=initial_loc_fn)
variational_distributions[param.name] = q
variational_samples.append(q.sample(seed=seed()))
# Multiple initializations (similar to HMC chains) manifest as an extra
# param batch dimension, so we need to add corresponding batch dimension(s)
# to `observed_time_series`.
observed_time_series = sts_util.pad_batch_dimension_for_multiple_chains(
observed_time_series, model, chain_batch_shape=init_batch_shape)
# Construct the variational bound.
log_prob_fn = model.joint_log_prob(observed_time_series)
expected_log_joint = log_prob_fn(*variational_samples)
entropy = tf.reduce_sum(
input_tensor=[
-q.log_prob(sample) for (q, sample) in zip(
variational_distributions.values(), variational_samples)
],
axis=0)
variational_loss = -(expected_log_joint + entropy) # -ELBO
return variational_loss, variational_distributions | Build a loss function for variational inference in STS models.
Variational inference searches for the distribution within some family of
approximate posteriors that minimizes a divergence between the approximate
posterior `q(z)` and true posterior `p(z|observed_time_series)`. By converting
inference to optimization, it's generally much faster than sampling-based
inference algorithms such as HMC. The tradeoff is that the approximating
family rarely contains the true posterior, so it may miss important aspects of
posterior structure (in particular, dependence between variables) and should
not be blindly trusted. Results may vary; it's generally wise to compare to
HMC to evaluate whether inference quality is sufficient for your task at hand.
This method constructs a loss function for variational inference using the
Kullback-Leibler divergence `KL[q(z) || p(z|observed_time_series)]`, with an
approximating family given by independent Normal distributions transformed to
the appropriate parameter space for each parameter. Minimizing this loss (the
negative ELBO) maximizes a lower bound on the log model evidence `log
p(observed_time_series)`. This is equivalent to the 'mean-field' method
implemented in [1], and is a standard approach. The resulting posterior
approximations are unimodal; they will tend to underestimate posterior
uncertainty when the true posterior contains multiple modes (the `KL[q||p]`
divergence encourages choosing a single mode) or dependence between variables.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
a mask `Tensor` to specify timesteps with missing observations.
init_batch_shape: Batch shape (Python `tuple`, `list`, or `int`) of initial
states to optimize in parallel.
Default value: `()`. (i.e., just run a single optimization).
seed: Python integer to seed the random number generator.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e., 'build_factored_variational_loss').
Returns:
variational_loss: `float` `Tensor` of shape
`concat([init_batch_shape, model.batch_shape])`, encoding a stochastic
estimate of an upper bound on the negative model evidence `-log p(y)`.
Minimizing this loss performs variational inference; the gap between the
variational bound and the true (generally unknown) model evidence
corresponds to the divergence `KL[q||p]` between the approximate and true
posterior.
variational_distributions: `collections.OrderedDict` giving
the approximate posterior for each model parameter. The keys are
Python `str` parameter names in order, corresponding to
`[param.name for param in model.parameters]`. The values are
`tfd.Distribution` instances with batch shape
`concat([init_batch_shape, model.batch_shape])`; these will typically be
of the form `tfd.TransformedDistribution(tfd.Normal(...),
bijector=param.bijector)`.
#### Examples
Assume we've built a structural time-series model:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
```
To run variational inference, we simply construct the loss and optimize
it:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series)
train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(200):
_, loss_ = sess.run((train_op, variational_loss))
if step % 20 == 0:
print("step {} loss {}".format(step, loss_))
posterior_samples_ = sess.run({
param_name: q.sample(50)
for param_name, q in variational_distributions.items()})
```
As a more complex example, we might try to avoid local optima by optimizing
from multiple initializations in parallel, and selecting the result with the
lowest loss:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series,
init_batch_shape=[10])
train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(200):
_, loss_ = sess.run((train_op, variational_loss))
if step % 20 == 0:
print("step {} losses {}".format(step, loss_))
# Draw multiple samples to reduce Monte Carlo error in the optimized
# variational bounds.
avg_loss = np.mean(
[sess.run(variational_loss) for _ in range(25)], axis=0)
best_posterior_idx = np.argmin(avg_loss, axis=0).astype(np.int32)
```
#### References
[1]: Alp Kucukelbir, Dustin Tran, Rajesh Ranganath, Andrew Gelman, and
David M. Blei. Automatic Differentiation Variational Inference. In
_Journal of Machine Learning Research_, 2017.
https://arxiv.org/abs/1603.00788 | Below is the the instruction that describes the task:
### Input:
Build a loss function for variational inference in STS models.
Variational inference searches for the distribution within some family of
approximate posteriors that minimizes a divergence between the approximate
posterior `q(z)` and true posterior `p(z|observed_time_series)`. By converting
inference to optimization, it's generally much faster than sampling-based
inference algorithms such as HMC. The tradeoff is that the approximating
family rarely contains the true posterior, so it may miss important aspects of
posterior structure (in particular, dependence between variables) and should
not be blindly trusted. Results may vary; it's generally wise to compare to
HMC to evaluate whether inference quality is sufficient for your task at hand.
This method constructs a loss function for variational inference using the
Kullback-Leibler divergence `KL[q(z) || p(z|observed_time_series)]`, with an
approximating family given by independent Normal distributions transformed to
the appropriate parameter space for each parameter. Minimizing this loss (the
negative ELBO) maximizes a lower bound on the log model evidence `log
p(observed_time_series)`. This is equivalent to the 'mean-field' method
implemented in [1], and is a standard approach. The resulting posterior
approximations are unimodal; they will tend to underestimate posterior
uncertainty when the true posterior contains multiple modes (the `KL[q||p]`
divergence encourages choosing a single mode) or dependence between variables.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
a mask `Tensor` to specify timesteps with missing observations.
init_batch_shape: Batch shape (Python `tuple`, `list`, or `int`) of initial
states to optimize in parallel.
Default value: `()`. (i.e., just run a single optimization).
seed: Python integer to seed the random number generator.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e., 'build_factored_variational_loss').
Returns:
variational_loss: `float` `Tensor` of shape
`concat([init_batch_shape, model.batch_shape])`, encoding a stochastic
estimate of an upper bound on the negative model evidence `-log p(y)`.
Minimizing this loss performs variational inference; the gap between the
variational bound and the true (generally unknown) model evidence
corresponds to the divergence `KL[q||p]` between the approximate and true
posterior.
variational_distributions: `collections.OrderedDict` giving
the approximate posterior for each model parameter. The keys are
Python `str` parameter names in order, corresponding to
`[param.name for param in model.parameters]`. The values are
`tfd.Distribution` instances with batch shape
`concat([init_batch_shape, model.batch_shape])`; these will typically be
of the form `tfd.TransformedDistribution(tfd.Normal(...),
bijector=param.bijector)`.
#### Examples
Assume we've built a structural time-series model:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
```
To run variational inference, we simply construct the loss and optimize
it:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series)
train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(200):
_, loss_ = sess.run((train_op, variational_loss))
if step % 20 == 0:
print("step {} loss {}".format(step, loss_))
posterior_samples_ = sess.run({
param_name: q.sample(50)
for param_name, q in variational_distributions.items()})
```
As a more complex example, we might try to avoid local optima by optimizing
from multiple initializations in parallel, and selecting the result with the
lowest loss:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series,
init_batch_shape=[10])
train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(200):
_, loss_ = sess.run((train_op, variational_loss))
if step % 20 == 0:
print("step {} losses {}".format(step, loss_))
# Draw multiple samples to reduce Monte Carlo error in the optimized
# variational bounds.
avg_loss = np.mean(
[sess.run(variational_loss) for _ in range(25)], axis=0)
best_posterior_idx = np.argmin(avg_loss, axis=0).astype(np.int32)
```
#### References
[1]: Alp Kucukelbir, Dustin Tran, Rajesh Ranganath, Andrew Gelman, and
David M. Blei. Automatic Differentiation Variational Inference. In
_Journal of Machine Learning Research_, 2017.
https://arxiv.org/abs/1603.00788
### Response:
def build_factored_variational_loss(model,
observed_time_series,
init_batch_shape=(),
seed=None,
name=None):
"""Build a loss function for variational inference in STS models.
Variational inference searches for the distribution within some family of
approximate posteriors that minimizes a divergence between the approximate
posterior `q(z)` and true posterior `p(z|observed_time_series)`. By converting
inference to optimization, it's generally much faster than sampling-based
inference algorithms such as HMC. The tradeoff is that the approximating
family rarely contains the true posterior, so it may miss important aspects of
posterior structure (in particular, dependence between variables) and should
not be blindly trusted. Results may vary; it's generally wise to compare to
HMC to evaluate whether inference quality is sufficient for your task at hand.
This method constructs a loss function for variational inference using the
Kullback-Leibler divergence `KL[q(z) || p(z|observed_time_series)]`, with an
approximating family given by independent Normal distributions transformed to
the appropriate parameter space for each parameter. Minimizing this loss (the
negative ELBO) maximizes a lower bound on the log model evidence `log
p(observed_time_series)`. This is equivalent to the 'mean-field' method
implemented in [1], and is a standard approach. The resulting posterior
approximations are unimodal; they will tend to underestimate posterior
uncertainty when the true posterior contains multiple modes (the `KL[q||p]`
divergence encourages choosing a single mode) or dependence between variables.
Args:
model: An instance of `StructuralTimeSeries` representing a
time-series model. This represents a joint distribution over
time-series and their parameters with batch shape `[b1, ..., bN]`.
observed_time_series: `float` `Tensor` of shape
`concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where
`sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`
dimension may (optionally) be omitted if `num_timesteps > 1`. May
optionally be an instance of `tfp.sts.MaskedTimeSeries`, which includes
a mask `Tensor` to specify timesteps with missing observations.
init_batch_shape: Batch shape (Python `tuple`, `list`, or `int`) of initial
states to optimize in parallel.
Default value: `()`. (i.e., just run a single optimization).
seed: Python integer to seed the random number generator.
name: Python `str` name prefixed to ops created by this function.
Default value: `None` (i.e., 'build_factored_variational_loss').
Returns:
variational_loss: `float` `Tensor` of shape
`concat([init_batch_shape, model.batch_shape])`, encoding a stochastic
estimate of an upper bound on the negative model evidence `-log p(y)`.
Minimizing this loss performs variational inference; the gap between the
variational bound and the true (generally unknown) model evidence
corresponds to the divergence `KL[q||p]` between the approximate and true
posterior.
variational_distributions: `collections.OrderedDict` giving
the approximate posterior for each model parameter. The keys are
Python `str` parameter names in order, corresponding to
`[param.name for param in model.parameters]`. The values are
`tfd.Distribution` instances with batch shape
`concat([init_batch_shape, model.batch_shape])`; these will typically be
of the form `tfd.TransformedDistribution(tfd.Normal(...),
bijector=param.bijector)`.
#### Examples
Assume we've built a structural time-series model:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
```
To run variational inference, we simply construct the loss and optimize
it:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series)
train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(200):
_, loss_ = sess.run((train_op, variational_loss))
if step % 20 == 0:
print("step {} loss {}".format(step, loss_))
posterior_samples_ = sess.run({
param_name: q.sample(50)
for param_name, q in variational_distributions.items()})
```
As a more complex example, we might try to avoid local optima by optimizing
from multiple initializations in parallel, and selecting the result with the
lowest loss:
```python
(variational_loss,
variational_distributions) = tfp.sts.build_factored_variational_loss(
model=model, observed_time_series=observed_time_series,
init_batch_shape=[10])
train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(200):
_, loss_ = sess.run((train_op, variational_loss))
if step % 20 == 0:
print("step {} losses {}".format(step, loss_))
# Draw multiple samples to reduce Monte Carlo error in the optimized
# variational bounds.
avg_loss = np.mean(
[sess.run(variational_loss) for _ in range(25)], axis=0)
best_posterior_idx = np.argmin(avg_loss, axis=0).astype(np.int32)
```
#### References
[1]: Alp Kucukelbir, Dustin Tran, Rajesh Ranganath, Andrew Gelman, and
David M. Blei. Automatic Differentiation Variational Inference. In
_Journal of Machine Learning Research_, 2017.
https://arxiv.org/abs/1603.00788
"""
with tf.compat.v1.name_scope(
name, 'build_factored_variational_loss',
values=[observed_time_series]) as name:
seed = tfd.SeedStream(
seed, salt='StructuralTimeSeries_build_factored_variational_loss')
variational_distributions = collections.OrderedDict()
variational_samples = []
for param in model.parameters:
def initial_loc_fn(param):
return sample_uniform_initial_state(
param, return_constrained=True,
init_sample_shape=init_batch_shape,
seed=seed())
q = _build_trainable_posterior(param, initial_loc_fn=initial_loc_fn)
variational_distributions[param.name] = q
variational_samples.append(q.sample(seed=seed()))
# Multiple initializations (similar to HMC chains) manifest as an extra
# param batch dimension, so we need to add corresponding batch dimension(s)
# to `observed_time_series`.
observed_time_series = sts_util.pad_batch_dimension_for_multiple_chains(
observed_time_series, model, chain_batch_shape=init_batch_shape)
# Construct the variational bound.
log_prob_fn = model.joint_log_prob(observed_time_series)
expected_log_joint = log_prob_fn(*variational_samples)
entropy = tf.reduce_sum(
input_tensor=[
-q.log_prob(sample) for (q, sample) in zip(
variational_distributions.values(), variational_samples)
],
axis=0)
variational_loss = -(expected_log_joint + entropy) # -ELBO
return variational_loss, variational_distributions |
def run_timed(self, **kwargs):
"""
Run the motor for the amount of time specified in `time_sp`
and then stop the motor using the action specified by `stop_action`.
"""
for key in kwargs:
setattr(self, key, kwargs[key])
self.command = self.COMMAND_RUN_TIMED | Run the motor for the amount of time specified in `time_sp`
and then stop the motor using the action specified by `stop_action`. | Below is the the instruction that describes the task:
### Input:
Run the motor for the amount of time specified in `time_sp`
and then stop the motor using the action specified by `stop_action`.
### Response:
def run_timed(self, **kwargs):
"""
Run the motor for the amount of time specified in `time_sp`
and then stop the motor using the action specified by `stop_action`.
"""
for key in kwargs:
setattr(self, key, kwargs[key])
self.command = self.COMMAND_RUN_TIMED |
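The kwargs handling can be seen with a dummy stand-in object (hypothetical; the real class is an ev3dev motor with many more attributes), calling run_timed as a plain function:
class DummyMotor:
    COMMAND_RUN_TIMED = 'run-timed'

m = DummyMotor()
run_timed(m, time_sp=2000, stop_action='brake')
print(m.time_sp, m.stop_action, m.command)  # 2000 brake run-timed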
def parse_template_config(template_config_data):
"""
>>> from tests import doctest_utils
>>> convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
>>> parse_template_config({}) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... 'body': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt',
... 'rest_registration/register/body.html',
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.html',
... 'is_html': True,
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
"""
try:
subject_template_name = template_config_data['subject']
except KeyError:
raise ImproperlyConfigured("No 'subject' key found")
body_template_name = template_config_data.get('body')
text_body_template_name = template_config_data.get('text_body')
html_body_template_name = template_config_data.get('html_body')
is_html_body = template_config_data.get('is_html')
convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
if html_body_template_name and text_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=text_body_template_name,
html_body_template_name=html_body_template_name,
text_body_processor=identity,
)
elif html_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=html_body_template_name,
html_body_template_name=html_body_template_name,
text_body_processor=convert_html_to_text,
)
elif text_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=text_body_template_name,
html_body_template_name=None,
text_body_processor=identity,
)
elif body_template_name:
if is_html_body:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=body_template_name,
html_body_template_name=body_template_name,
text_body_processor=convert_html_to_text,
)
else:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=body_template_name,
html_body_template_name=None,
text_body_processor=identity,
)
else:
raise ImproperlyConfigured(
'Could not parse template config data: {template_config_data}'.format( # noqa: E501
template_config_data=template_config_data))
_validate_template_name_existence(config.subject_template_name)
_validate_template_name_existence(config.text_body_template_name)
if config.html_body_template_name:
_validate_template_name_existence(config.html_body_template_name)
assert callable(config.text_body_processor)
return config | >>> from tests import doctest_utils
>>> convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
>>> parse_template_config({}) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... 'body': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt',
... 'rest_registration/register/body.html',
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.html',
... 'is_html': True,
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK | Below is the the instruction that describes the task:
### Input:
>>> from tests import doctest_utils
>>> convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
>>> parse_template_config({}) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... 'body': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt',
... 'rest_registration/register/body.html',
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.html',
... 'is_html': True,
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
### Response:
def parse_template_config(template_config_data):
"""
>>> from tests import doctest_utils
>>> convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
>>> parse_template_config({}) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... 'body': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt',
... 'rest_registration/register/body.html',
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.html',
... 'is_html': True,
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
"""
try:
subject_template_name = template_config_data['subject']
except KeyError:
raise ImproperlyConfigured("No 'subject' key found")
body_template_name = template_config_data.get('body')
text_body_template_name = template_config_data.get('text_body')
html_body_template_name = template_config_data.get('html_body')
is_html_body = template_config_data.get('is_html')
convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
if html_body_template_name and text_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=text_body_template_name,
html_body_template_name=html_body_template_name,
text_body_processor=identity,
)
elif html_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=html_body_template_name,
html_body_template_name=html_body_template_name,
text_body_processor=convert_html_to_text,
)
elif text_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=text_body_template_name,
html_body_template_name=None,
text_body_processor=identity,
)
elif body_template_name:
if is_html_body:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=body_template_name,
html_body_template_name=body_template_name,
text_body_processor=convert_html_to_text,
)
else:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=body_template_name,
html_body_template_name=None,
text_body_processor=identity,
)
else:
raise ImproperlyConfigured(
'Could not parse template config data: {template_config_data}'.format( # noqa: E501
template_config_data=template_config_data))
_validate_template_name_existence(config.subject_template_name)
_validate_template_name_existence(config.text_body_template_name)
if config.html_body_template_name:
_validate_template_name_existence(config.html_body_template_name)
assert callable(config.text_body_processor)
return config |
def UpdateSNMPObjs():
""" Function that does the actual data update. """
global threadingString
LogMsg("Beginning data update.")
data = ""
# Obtain the data by calling an external command. We don't use
# subprocess.check_output() here for compatibility with Python versions
# older than 2.7.
LogMsg("Calling external command \"sleep 5; date\".")
proc = subprocess.Popen(
"sleep 5; date", shell=True, env={ "LANG": "C" },
stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
output = proc.communicate()[0].splitlines()[0]
rc = proc.poll()
if rc != 0:
LogMsg("An error occured executing the command: {0}".format(output))
return
msg = "Updating \"threadingString\" object with data \"{0}\"."
LogMsg(msg.format(output))
threadingString.update(output)
LogMsg("Data update done, exiting thread.") | Function that does the actual data update. | Below is the the instruction that describes the task:
### Input:
Function that does the actual data update.
### Response:
def UpdateSNMPObjs():
""" Function that does the actual data update. """
global threadingString
LogMsg("Beginning data update.")
data = ""
# Obtain the data by calling an external command. We don't use
# subprocess.check_output() here for compatibility with Python versions
# older than 2.7.
LogMsg("Calling external command \"sleep 5; date\".")
proc = subprocess.Popen(
"sleep 5; date", shell=True, env={ "LANG": "C" },
stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
output = proc.communicate()[0].splitlines()[0]
rc = proc.poll()
if rc != 0:
LogMsg("An error occured executing the command: {0}".format(output))
return
msg = "Updating \"threadingString\" object with data \"{0}\"."
LogMsg(msg.format(output))
threadingString.update(output)
LogMsg("Data update done, exiting thread.") |
def cmd_takeoff(self, args):
'''take off'''
if ( len(args) != 1):
print("Usage: takeoff ALTITUDE_IN_METERS")
return
if (len(args) == 1):
altitude = float(args[0])
print("Take Off started")
self.master.mav.command_long_send(
self.settings.target_system, # target_system
mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL, # target_component
mavutil.mavlink.MAV_CMD_NAV_TAKEOFF, # command
0, # confirmation
0, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
altitude) | take off | Below is the the instruction that describes the task:
### Input:
take off
### Response:
def cmd_takeoff(self, args):
'''take off'''
if ( len(args) != 1):
print("Usage: takeoff ALTITUDE_IN_METERS")
return
if (len(args) == 1):
altitude = float(args[0])
print("Take Off started")
self.master.mav.command_long_send(
self.settings.target_system, # target_system
mavutil.mavlink.MAV_COMP_ID_SYSTEM_CONTROL, # target_component
mavutil.mavlink.MAV_CMD_NAV_TAKEOFF, # command
0, # confirmation
0, # param1
0, # param2
0, # param3
0, # param4
0, # param5
0, # param6
altitude) |
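A short invocation sketch; `module` is a hypothetical, already-initialized instance of the MAVProxy-style module class that owns this handler (the class itself is not shown in this entry):

# MAVProxy would dispatch the console command "takeoff 10" roughly like this:
module.cmd_takeoff(["10"])   # sends MAV_CMD_NAV_TAKEOFF with a 10 m altitude
module.cmd_takeoff([])       # wrong arity: prints the usage message and returns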
def is_pa_terminal(cls, ball_tally, strike_tally, pitch_res, event_cd):
"""
Is PA terminal
        :param ball_tally: Ball tally
        :param strike_tally: Strike tally
:param pitch_res: pitching result(Retrosheet format)
:param event_cd: Event code
:return: FLG(T or F)
"""
if RetroSheet.is_pa_terminal(ball_tally, strike_tally, pitch_res, event_cd):
return MlbamConst.FLG_TRUE
return MlbamConst.FLG_FALSE | Is PA terminal
        :param ball_tally: Ball tally
        :param strike_tally: Strike tally
:param pitch_res: pitching result(Retrosheet format)
:param event_cd: Event code
:return: FLG(T or F) | Below is the the instruction that describes the task:
### Input:
Is PA terminal
        :param ball_tally: Ball tally
        :param strike_tally: Strike tally
:param pitch_res: pitching result(Retrosheet format)
:param event_cd: Event code
:return: FLG(T or F)
### Response:
def is_pa_terminal(cls, ball_tally, strike_tally, pitch_res, event_cd):
"""
Is PA terminal
        :param ball_tally: Ball tally
        :param strike_tally: Strike tally
:param pitch_res: pitching result(Retrosheet format)
:param event_cd: Event code
:return: FLG(T or F)
"""
if RetroSheet.is_pa_terminal(ball_tally, strike_tally, pitch_res, event_cd):
return MlbamConst.FLG_TRUE
return MlbamConst.FLG_FALSE |
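A usage sketch; `AtBat` is a hypothetical name for the enclosing class, and the argument values are illustrative (full count, swinging strike, Retrosheet strikeout event code 3):

flag = AtBat.is_pa_terminal(ball_tally=3, strike_tally=2, pitch_res='S', event_cd=3)
print(flag)  # MlbamConst.FLG_TRUE ('T') if RetroSheet judges the plate appearance terminal, else FLG_FALSE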
def doDup(self, WHAT={}, **params):
"""This function will perform the command -dup."""
if hasattr(WHAT, '_modified'):
for key, value in WHAT._modified():
if WHAT.__new2old__.has_key(key):
self._addDBParam(WHAT.__new2old__[key].encode('utf-8'), value)
else:
self._addDBParam(key, value)
self._addDBParam('RECORDID', WHAT.RECORDID)
self._addDBParam('MODID', WHAT.MODID)
elif type(WHAT) == dict:
for key in WHAT:
self._addDBParam(key, WHAT[key])
else:
raise FMError, 'Python Runtime: Object type (%s) given to function doDup as argument WHAT cannot be used.' % type(WHAT)
if self._layout == '':
raise FMError, 'No layout was selected'
for key in params:
self._addDBParam(key, params[key])
if self._checkRecordID() == 0:
raise FMError, 'RecordID is missing'
return self._doAction('-dup') | This function will perform the command -dup. | Below is the the instruction that describes the task:
### Input:
This function will perform the command -dup.
### Response:
def doDup(self, WHAT={}, **params):
"""This function will perform the command -dup."""
if hasattr(WHAT, '_modified'):
for key, value in WHAT._modified():
if WHAT.__new2old__.has_key(key):
self._addDBParam(WHAT.__new2old__[key].encode('utf-8'), value)
else:
self._addDBParam(key, value)
self._addDBParam('RECORDID', WHAT.RECORDID)
self._addDBParam('MODID', WHAT.MODID)
elif type(WHAT) == dict:
for key in WHAT:
self._addDBParam(key, WHAT[key])
else:
raise FMError, 'Python Runtime: Object type (%s) given to function doDup as argument WHAT cannot be used.' % type(WHAT)
if self._layout == '':
raise FMError, 'No layout was selected'
for key in params:
self._addDBParam(key, params[key])
if self._checkRecordID() == 0:
raise FMError, 'RecordID is missing'
return self._doAction('-dup') |
def get_pulls_review_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.IssueComment.IssueComment,
self._requester,
self.url + "/pulls/comments",
url_parameters
) | :calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment` | Below is the the instruction that describes the task:
### Input:
:calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
### Response:
def get_pulls_review_comments(self, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /repos/:owner/:repo/pulls/comments <http://developer.github.com/v3/pulls/comments>`_
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.PullRequestComment.PullRequestComment`
"""
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.IssueComment.IssueComment,
self._requester,
self.url + "/pulls/comments",
url_parameters
) |
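A usage sketch with PyGithub; the access token and repository name are placeholders:

from github import Github

gh = Github("YOUR_ACCESS_TOKEN")            # placeholder token
repo = gh.get_repo("octocat/Hello-World")   # placeholder repository
# Iterate over all pull-request review comments, oldest first.
for comment in repo.get_pulls_review_comments(sort="created", direction="asc"):
    print(comment.body)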
def _p_o(self, o):
"""
Returns the output probability for symbol o from all hidden states
Parameters
----------
o : float
A single observation.
Return
------
p_o : ndarray (N)
p_o[i] is the probability density of the observation o from state i emission distribution
Examples
--------
Create an observation model.
>>> output_model = GaussianOutputModel(nstates=3, means=[-1, 0, 1], sigmas=[0.5, 1, 2])
Compute the output probability of a single observation from all hidden states.
>>> observation = 0
>>> p_o = output_model._p_o(observation)
"""
if self.__impl__ == self.__IMPL_C__:
return gc.p_o(o, self.means, self.sigmas, out=None, dtype=type(o))
elif self.__impl__ == self.__IMPL_PYTHON__:
if np.any(self.sigmas < np.finfo(self.sigmas.dtype).eps):
raise RuntimeError('at least one sigma is too small to continue.')
C = 1.0 / (np.sqrt(2.0 * np.pi) * self.sigmas)
Pobs = C * np.exp(-0.5 * ((o-self.means)/self.sigmas)**2)
return Pobs
else:
raise RuntimeError('Implementation '+str(self.__impl__)+' not available') | Returns the output probability for symbol o from all hidden states
Parameters
----------
o : float
A single observation.
Return
------
p_o : ndarray (N)
p_o[i] is the probability density of the observation o from state i emission distribution
Examples
--------
Create an observation model.
>>> output_model = GaussianOutputModel(nstates=3, means=[-1, 0, 1], sigmas=[0.5, 1, 2])
Compute the output probability of a single observation from all hidden states.
>>> observation = 0
>>> p_o = output_model._p_o(observation) | Below is the the instruction that describes the task:
### Input:
Returns the output probability for symbol o from all hidden states
Parameters
----------
o : float
A single observation.
Return
------
p_o : ndarray (N)
p_o[i] is the probability density of the observation o from state i emission distribution
Examples
--------
Create an observation model.
>>> output_model = GaussianOutputModel(nstates=3, means=[-1, 0, 1], sigmas=[0.5, 1, 2])
Compute the output probability of a single observation from all hidden states.
>>> observation = 0
>>> p_o = output_model._p_o(observation)
### Response:
def _p_o(self, o):
"""
Returns the output probability for symbol o from all hidden states
Parameters
----------
o : float
A single observation.
Return
------
p_o : ndarray (N)
p_o[i] is the probability density of the observation o from state i emission distribution
Examples
--------
Create an observation model.
>>> output_model = GaussianOutputModel(nstates=3, means=[-1, 0, 1], sigmas=[0.5, 1, 2])
Compute the output probability of a single observation from all hidden states.
>>> observation = 0
>>> p_o = output_model._p_o(observation)
"""
if self.__impl__ == self.__IMPL_C__:
return gc.p_o(o, self.means, self.sigmas, out=None, dtype=type(o))
elif self.__impl__ == self.__IMPL_PYTHON__:
if np.any(self.sigmas < np.finfo(self.sigmas.dtype).eps):
raise RuntimeError('at least one sigma is too small to continue.')
C = 1.0 / (np.sqrt(2.0 * np.pi) * self.sigmas)
Pobs = C * np.exp(-0.5 * ((o-self.means)/self.sigmas)**2)
return Pobs
else:
raise RuntimeError('Implementation '+str(self.__impl__)+' not available') |
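The pure-Python branch is simply the per-state Gaussian density evaluated at o; a standalone NumPy sketch of that computation, independent of the class and its C backend:

import numpy as np

def gaussian_p_o(o, means, sigmas):
    # Probability density of a scalar observation o under each state's Gaussian.
    means = np.asarray(means, dtype=float)
    sigmas = np.asarray(sigmas, dtype=float)
    C = 1.0 / (np.sqrt(2.0 * np.pi) * sigmas)
    return C * np.exp(-0.5 * ((o - means) / sigmas) ** 2)

print(gaussian_p_o(0.0, means=[-1, 0, 1], sigmas=[0.5, 1, 2]))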
def upgrade(self):
"""Upgrade the config file.
"""
warn('Upgrading ' + self.filename)
if self.backup_config(self.filename):
return self.write_default_config(self.filename)
return False | Upgrade the config file. | Below is the the instruction that describes the task:
### Input:
Upgrade the config file.
### Response:
def upgrade(self):
"""Upgrade the config file.
"""
warn('Upgrading ' + self.filename)
if self.backup_config(self.filename):
return self.write_default_config(self.filename)
return False |
async def _execute(self, appt):
'''
Fire off the task to make the storm query
'''
user = self.core.auth.user(appt.useriden)
if user is None:
logger.warning('Unknown user %s in stored appointment', appt.useriden)
await self._markfailed(appt)
return
await self.core.boss.execute(self._runJob(user, appt), f'Agenda {appt.iden}', user) | Fire off the task to make the storm query | Below is the the instruction that describes the task:
### Input:
Fire off the task to make the storm query
### Response:
async def _execute(self, appt):
'''
Fire off the task to make the storm query
'''
user = self.core.auth.user(appt.useriden)
if user is None:
logger.warning('Unknown user %s in stored appointment', appt.useriden)
await self._markfailed(appt)
return
await self.core.boss.execute(self._runJob(user, appt), f'Agenda {appt.iden}', user) |
def shift(ol,**kwargs):
'''
from elist.jprint import pobj
from elist.elist import *
ol = [1,2,3,4]
id(ol)
rslt = shift(ol)
pobj(rslt)
ol
id(ol)
id(rslt['list'])
####
ol = [1,2,3,4]
id(ol)
rslt = shift(ol,mode="original")
rslt
ol
id(ol)
'''
if('mode' in kwargs):
mode = kwargs['mode']
else:
mode = "new"
length = ol.__len__()
rslt = pop(ol,0,mode=mode)
return(rslt) | from elist.jprint import pobj
from elist.elist import *
ol = [1,2,3,4]
id(ol)
rslt = shift(ol)
pobj(rslt)
ol
id(ol)
id(rslt['list'])
####
ol = [1,2,3,4]
id(ol)
rslt = shift(ol,mode="original")
rslt
ol
id(ol) | Below is the the instruction that describes the task:
### Input:
from elist.jprint import pobj
from elist.elist import *
ol = [1,2,3,4]
id(ol)
rslt = shift(ol)
pobj(rslt)
ol
id(ol)
id(rslt['list'])
####
ol = [1,2,3,4]
id(ol)
rslt = shift(ol,mode="original")
rslt
ol
id(ol)
### Response:
def shift(ol,**kwargs):
'''
from elist.jprint import pobj
from elist.elist import *
ol = [1,2,3,4]
id(ol)
rslt = shift(ol)
pobj(rslt)
ol
id(ol)
id(rslt['list'])
####
ol = [1,2,3,4]
id(ol)
rslt = shift(ol,mode="original")
rslt
ol
id(ol)
'''
if('mode' in kwargs):
mode = kwargs['mode']
else:
mode = "new"
length = ol.__len__()
rslt = pop(ol,0,mode=mode)
return(rslt) |
def std_velocity(particle, social, state):
"""
Standard particle velocity update according to the equation:
:math:`v_{ij}(t+1) &= \\omega v_{ij}(t) + \
c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)]\\:+ \
c_2 r_{2j}(t)[\\hat{y}_{ij}(t) - x_{ij}(t)],\\;\\;\
\\forall\\; j \\in\\; \\{1,...,n\\}`
If a v_max parameter is supplied (state.params['v_max'] is not None) the
returned velocity is clamped to v_max.
Args:
particle (cipy.algorithms.pso.types.Particle): Particle to update the
velocity for.
social (numpy.array): The social best for the
particle.
state (cipy.algorithms.pso.types.State): The PSO algorithm state.
Returns:
numpy.array: The calculated velocity, clamped to state.params['v_max'].
"""
inertia = state.params['inertia']
c_1, c_2 = state.params['c_1'], state.params['c_2']
v_max = state.params['v_max']
size = particle.position.size
c1r1 = __acceleration__(state.rng, c_1, size)
c2r2 = __acceleration__(state.rng, c_2, size)
velocity = __std_velocity_equation__(inertia, c1r1, c2r2, particle, social)
return __clamp__(velocity, v_max) | Standard particle velocity update according to the equation:
:math:`v_{ij}(t+1) &= \\omega v_{ij}(t) + \
c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)]\\:+ \
c_2 r_{2j}(t)[\\hat{y}_{ij}(t) - x_{ij}(t)],\\;\\;\
\\forall\\; j \\in\\; \\{1,...,n\\}`
If a v_max parameter is supplied (state.params['v_max'] is not None) the
returned velocity is clamped to v_max.
Args:
particle (cipy.algorithms.pso.types.Particle): Particle to update the
velocity for.
social (numpy.array): The social best for the
particle.
state (cipy.algorithms.pso.types.State): The PSO algorithm state.
Returns:
numpy.array: The calculated velocity, clamped to state.params['v_max']. | Below is the the instruction that describes the task:
### Input:
Standard particle velocity update according to the equation:
:math:`v_{ij}(t+1) &= \\omega v_{ij}(t) + \
c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)]\\:+ \
c_2 r_{2j}(t)[\\hat{y}_{ij}(t) - x_{ij}(t)],\\;\\;\
\\forall\\; j \\in\\; \\{1,...,n\\}`
If a v_max parameter is supplied (state.params['v_max'] is not None) the
returned velocity is clamped to v_max.
Args:
particle (cipy.algorithms.pso.types.Particle): Particle to update the
velocity for.
social (numpy.array): The social best for the
particle.
state (cipy.algorithms.pso.types.State): The PSO algorithm state.
Returns:
numpy.array: The calculated velocity, clamped to state.params['v_max'].
### Response:
def std_velocity(particle, social, state):
"""
Standard particle velocity update according to the equation:
:math:`v_{ij}(t+1) &= \\omega v_{ij}(t) + \
c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)]\\:+ \
c_2 r_{2j}(t)[\\hat{y}_{ij}(t) - x_{ij}(t)],\\;\\;\
\\forall\\; j \\in\\; \\{1,...,n\\}`
If a v_max parameter is supplied (state.params['v_max'] is not None) the
returned velocity is clamped to v_max.
Args:
particle (cipy.algorithms.pso.types.Particle): Particle to update the
velocity for.
social (numpy.array): The social best for the
particle.
state (cipy.algorithms.pso.types.State): The PSO algorithm state.
Returns:
numpy.array: The calculated velocity, clamped to state.params['v_max'].
"""
inertia = state.params['inertia']
c_1, c_2 = state.params['c_1'], state.params['c_2']
v_max = state.params['v_max']
size = particle.position.size
c1r1 = __acceleration__(state.rng, c_1, size)
c2r2 = __acceleration__(state.rng, c_2, size)
velocity = __std_velocity_equation__(inertia, c1r1, c2r2, particle, social)
return __clamp__(velocity, v_max) |
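A self-contained sketch of the same update rule on plain NumPy arrays, leaving out the library's Particle/State types; the inertia and acceleration defaults are common PSO choices, not values taken from this library:

import numpy as np

def std_velocity_update(velocity, position, pbest, gbest,
                        inertia=0.7298, c_1=1.4962, c_2=1.4962,
                        v_max=None, rng=None):
    # w*v + c1*r1*(pbest - x) + c2*r2*(gbest - x), optionally clamped to [-v_max, v_max].
    rng = np.random.default_rng() if rng is None else rng
    size = position.size
    new_v = (inertia * velocity
             + c_1 * rng.random(size) * (pbest - position)
             + c_2 * rng.random(size) * (gbest - position))
    return new_v if v_max is None else np.clip(new_v, -v_max, v_max)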
def diff(self):
"""The Difference between a PDA and a DFA"""
self.mmb.complement(self.alphabet)
self.mmb.minimize()
print 'start intersection'
self.mmc = self._intesect()
print 'end intersection'
return self.mmc | The Difference between a PDA and a DFA | Below is the the instruction that describes the task:
### Input:
The Difference between a PDA and a DFA
### Response:
def diff(self):
"""The Difference between a PDA and a DFA"""
self.mmb.complement(self.alphabet)
self.mmb.minimize()
print 'start intersection'
self.mmc = self._intesect()
print 'end intersection'
return self.mmc |
def write_out_sitemap(self, opath):
"""
Banana banana
"""
if opath not in self.written_out_sitemaps:
Extension.formatted_sitemap = self.formatter.format_navigation(
self.app.project)
if Extension.formatted_sitemap:
escaped_sitemap = Extension.formatted_sitemap.replace(
'\\', '\\\\').replace('"', '\\"').replace('\n', '')
js_wrapper = 'sitemap_downloaded_cb("%s");' % escaped_sitemap
with open(opath, 'w') as _:
_.write(js_wrapper)
self.written_out_sitemaps.add(opath) | Banana banana | Below is the the instruction that describes the task:
### Input:
Banana banana
### Response:
def write_out_sitemap(self, opath):
"""
Banana banana
"""
if opath not in self.written_out_sitemaps:
Extension.formatted_sitemap = self.formatter.format_navigation(
self.app.project)
if Extension.formatted_sitemap:
escaped_sitemap = Extension.formatted_sitemap.replace(
'\\', '\\\\').replace('"', '\\"').replace('\n', '')
js_wrapper = 'sitemap_downloaded_cb("%s");' % escaped_sitemap
with open(opath, 'w') as _:
_.write(js_wrapper)
self.written_out_sitemaps.add(opath) |
def parse(date_string, date_formats=None, languages=None, locales=None, region=None, settings=None):
"""Parse date and time from given date string.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str|unicode
:param date_formats:
A list of format strings using directives as given
`here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_.
The parser applies formats one by one, taking into account the detected languages/locales.
:type date_formats: list
:param languages:
A list of language codes, e.g. ['en', 'es', 'zh-Hant'].
If locales are not given, languages and region are used to construct locales for translation.
:type languages: list
:param locales:
A list of locale codes, e.g. ['fr-PF', 'qu-EC', 'af-NA'].
The parser uses locales to translate date string.
:type locales: list
:param region:
A region code, e.g. 'IN', '001', 'NE'.
If locales are not given, languages and region are used to construct locales for translation.
:type region: str|unicode
:param settings:
Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`.
:type settings: dict
:return: Returns :class:`datetime <datetime.datetime>` representing parsed date if successful, else returns None
:rtype: :class:`datetime <datetime.datetime>`.
:raises: ValueError - Unknown Language
"""
parser = _default_parser
if any([languages, locales, region, not settings._default]):
parser = DateDataParser(languages=languages, locales=locales,
region=region, settings=settings)
data = parser.get_date_data(date_string, date_formats)
if data:
return data['date_obj'] | Parse date and time from given date string.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str|unicode
:param date_formats:
A list of format strings using directives as given
`here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_.
The parser applies formats one by one, taking into account the detected languages/locales.
:type date_formats: list
:param languages:
A list of language codes, e.g. ['en', 'es', 'zh-Hant'].
If locales are not given, languages and region are used to construct locales for translation.
:type languages: list
:param locales:
A list of locale codes, e.g. ['fr-PF', 'qu-EC', 'af-NA'].
The parser uses locales to translate date string.
:type locales: list
:param region:
A region code, e.g. 'IN', '001', 'NE'.
If locales are not given, languages and region are used to construct locales for translation.
:type region: str|unicode
:param settings:
Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`.
:type settings: dict
:return: Returns :class:`datetime <datetime.datetime>` representing parsed date if successful, else returns None
:rtype: :class:`datetime <datetime.datetime>`.
:raises: ValueError - Unknown Language | Below is the the instruction that describes the task:
### Input:
Parse date and time from given date string.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str|unicode
:param date_formats:
A list of format strings using directives as given
`here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_.
The parser applies formats one by one, taking into account the detected languages/locales.
:type date_formats: list
:param languages:
A list of language codes, e.g. ['en', 'es', 'zh-Hant'].
If locales are not given, languages and region are used to construct locales for translation.
:type languages: list
:param locales:
A list of locale codes, e.g. ['fr-PF', 'qu-EC', 'af-NA'].
The parser uses locales to translate date string.
:type locales: list
:param region:
A region code, e.g. 'IN', '001', 'NE'.
If locales are not given, languages and region are used to construct locales for translation.
:type region: str|unicode
:param settings:
Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`.
:type settings: dict
:return: Returns :class:`datetime <datetime.datetime>` representing parsed date if successful, else returns None
:rtype: :class:`datetime <datetime.datetime>`.
:raises: ValueError - Unknown Language
### Response:
def parse(date_string, date_formats=None, languages=None, locales=None, region=None, settings=None):
"""Parse date and time from given date string.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str|unicode
:param date_formats:
A list of format strings using directives as given
`here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_.
The parser applies formats one by one, taking into account the detected languages/locales.
:type date_formats: list
:param languages:
A list of language codes, e.g. ['en', 'es', 'zh-Hant'].
If locales are not given, languages and region are used to construct locales for translation.
:type languages: list
:param locales:
A list of locale codes, e.g. ['fr-PF', 'qu-EC', 'af-NA'].
The parser uses locales to translate date string.
:type locales: list
:param region:
A region code, e.g. 'IN', '001', 'NE'.
If locales are not given, languages and region are used to construct locales for translation.
:type region: str|unicode
:param settings:
Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`.
:type settings: dict
:return: Returns :class:`datetime <datetime.datetime>` representing parsed date if successful, else returns None
:rtype: :class:`datetime <datetime.datetime>`.
:raises: ValueError - Unknown Language
"""
parser = _default_parser
if any([languages, locales, region, not settings._default]):
parser = DateDataParser(languages=languages, locales=locales,
region=region, settings=settings)
data = parser.get_date_data(date_string, date_formats)
if data:
return data['date_obj'] |
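Typical calls (standard dateparser usage; relative inputs depend on the current time):

import dateparser

print(dateparser.parse("12/12/12"))                       # datetime(2012, 12, 12, 0, 0)
print(dateparser.parse("2 hours ago", languages=["en"]))  # relative to now
print(dateparser.parse("Le 11 Décembre 2014 à 09:00"))    # non-English input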
def _calculate_hour_and_minute(float_hour):
"""Calculate hour and minutes as integers from a float hour."""
hour, minute = int(float_hour), int(round((float_hour - int(float_hour)) * 60))
if minute == 60:
return hour + 1, 0
else:
return hour, minute | Calculate hour and minutes as integers from a float hour. | Below is the the instruction that describes the task:
### Input:
Calculate hour and minutes as integers from a float hour.
### Response:
def _calculate_hour_and_minute(float_hour):
"""Calculate hour and minutes as integers from a float hour."""
hour, minute = int(float_hour), int(round((float_hour - int(float_hour)) * 60))
if minute == 60:
return hour + 1, 0
else:
return hour, minute |
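Two worked values, assuming the function above is in scope; the second exercises the minute-rollover branch:

print(_calculate_hour_and_minute(10.5))    # (10, 30)
print(_calculate_hour_and_minute(9.9999))  # (10, 0): round(0.9999 * 60) == 60, so the hour rolls over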
def setSignalHeader(self, edfsignal, channel_info):
"""
Sets the parameter for signal edfsignal.
channel_info should be a dict with
these values:
'label' : channel label (string, <= 16 characters, must be unique)
'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : sample frequency in hertz (int)
'physical_max' : maximum physical value (float)
'physical_min' : minimum physical value (float)
'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)
'digital_min' : minimum digital value (int, -2**15 <= x < 2**15)
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal] = channel_info
self.update_header() | Sets the parameter for signal edfsignal.
channel_info should be a dict with
these values:
'label' : channel label (string, <= 16 characters, must be unique)
'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : sample frequency in hertz (int)
'physical_max' : maximum physical value (float)
'physical_min' : minimum physical value (float)
'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)
'digital_min' : minimum digital value (int, -2**15 <= x < 2**15) | Below is the the instruction that describes the task:
### Input:
Sets the parameter for signal edfsignal.
channel_info should be a dict with
these values:
'label' : channel label (string, <= 16 characters, must be unique)
'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : sample frequency in hertz (int)
'physical_max' : maximum physical value (float)
'physical_min' : minimum physical value (float)
'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)
'digital_min' : minimum digital value (int, -2**15 <= x < 2**15)
### Response:
def setSignalHeader(self, edfsignal, channel_info):
"""
Sets the parameter for signal edfsignal.
channel_info should be a dict with
these values:
'label' : channel label (string, <= 16 characters, must be unique)
'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : sample frequency in hertz (int)
'physical_max' : maximum physical value (float)
'physical_min' : minimum physical value (float)
'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)
'digital_min' : minimum digital value (int, -2**15 <= x < 2**15)
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal] = channel_info
self.update_header() |
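A sketch of a channel_info dict matching the documented keys; the values are illustrative and `writer` stands for an already-open EDF writer object:

channel_info = {
    'label': 'EEG Fpz-Cz',
    'dimension': 'uV',
    'sample_rate': 100,
    'physical_max': 250.0,
    'physical_min': -250.0,
    'digital_max': 32767,
    'digital_min': -32768,
}
# writer.setSignalHeader(0, channel_info)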
def export(self, output=Mimetypes.PLAINTEXT, exclude=None, **kwargs):
""" Export the collection item in the Mimetype required.
..note:: If current implementation does not have special mimetypes, reuses default_export method
:param output: Mimetype to export to (Uses Mimetypes)
:type output: str
:param exclude: Informations to exclude. Specific to implementations
:type exclude: [str]
:return: Object using a different representation
"""
return self.getTextualNode().export(output, exclude) | Export the collection item in the Mimetype required.
..note:: If current implementation does not have special mimetypes, reuses default_export method
:param output: Mimetype to export to (Uses Mimetypes)
:type output: str
:param exclude: Informations to exclude. Specific to implementations
:type exclude: [str]
:return: Object using a different representation | Below is the the instruction that describes the task:
### Input:
Export the collection item in the Mimetype required.
..note:: If current implementation does not have special mimetypes, reuses default_export method
:param output: Mimetype to export to (Uses Mimetypes)
:type output: str
:param exclude: Informations to exclude. Specific to implementations
:type exclude: [str]
:return: Object using a different representation
### Response:
def export(self, output=Mimetypes.PLAINTEXT, exclude=None, **kwargs):
""" Export the collection item in the Mimetype required.
..note:: If current implementation does not have special mimetypes, reuses default_export method
:param output: Mimetype to export to (Uses Mimetypes)
:type output: str
:param exclude: Informations to exclude. Specific to implementations
:type exclude: [str]
:return: Object using a different representation
"""
return self.getTextualNode().export(output, exclude) |
def _compute_soil_amplification(cls, C, vs30, pga_rock, imt):
"""
Compute soil amplification (5th, 6th, and 7th terms in equation 1,
page 1706) and add the B/C site condition as implemented by NSHMP.
Call
:meth:`AtkinsonBoore2003SInterNSHMP2008._compute_soil_amplification`
"""
return AtkinsonBoore2003SInterNSHMP2008._compute_soil_amplification(
C, vs30, pga_rock, imt) | Compute soil amplification (5th, 6th, and 7th terms in equation 1,
page 1706) and add the B/C site condition as implemented by NSHMP.
Call
:meth:`AtkinsonBoore2003SInterNSHMP2008._compute_soil_amplification` | Below is the the instruction that describes the task:
### Input:
Compute soil amplification (5th, 6th, and 7th terms in equation 1,
page 1706) and add the B/C site condition as implemented by NSHMP.
Call
:meth:`AtkinsonBoore2003SInterNSHMP2008._compute_soil_amplification`
### Response:
def _compute_soil_amplification(cls, C, vs30, pga_rock, imt):
"""
Compute soil amplification (5th, 6th, and 7th terms in equation 1,
page 1706) and add the B/C site condition as implemented by NSHMP.
Call
:meth:`AtkinsonBoore2003SInterNSHMP2008._compute_soil_amplification`
"""
return AtkinsonBoore2003SInterNSHMP2008._compute_soil_amplification(
C, vs30, pga_rock, imt) |
def generate_api_doc(self, uri):
'''Make autodoc documentation template string for a module
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
head : string
Module name, table of contents.
body : string
Function and class docstrings.
'''
# get the names of all classes and functions
functions, classes = self._parse_module_with_import(uri)
if not len(functions) and not len(classes) and DEBUG:
print('WARNING: Empty -', uri) # dbg
# Make a shorter version of the uri that omits the package name for
# titles
uri_short = re.sub(r'^%s\.' % self.package_name,'',uri)
head = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
body = ''
# Set the chapter title to read 'module' for all modules except for the
# main packages
if '.' in uri_short:
title = 'Module: :mod:`' + uri_short + '`'
head += title + '\n' + self.rst_section_levels[2] * len(title)
else:
title = ':mod:`' + uri_short + '`'
head += title + '\n' + self.rst_section_levels[1] * len(title)
head += '\n.. automodule:: ' + uri + '\n'
head += '\n.. currentmodule:: ' + uri + '\n'
body += '\n.. currentmodule:: ' + uri + '\n'
for c in classes:
body += '\n:class:`' + c + '`\n' \
+ self.rst_section_levels[3] * \
(len(c)+9) + '\n\n'
body += '\n.. autoclass:: ' + c + '\n'
# must NOT exclude from index to keep cross-refs working
body += ' :members:\n' \
' :undoc-members:\n' \
' :show-inheritance:\n' \
'\n' \
' .. automethod:: __init__\n\n'
head += '.. autosummary::\n\n'
for f in classes + functions:
head += ' ' + f + '\n'
head += '\n'
for f in functions:
# must NOT exclude from index to keep cross-refs working
body += f + '\n'
body += self.rst_section_levels[3] * len(f) + '\n'
body += '\n.. autofunction:: ' + f + '\n\n'
return head, body | Make autodoc documentation template string for a module
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
head : string
Module name, table of contents.
body : string
Function and class docstrings. | Below is the the instruction that describes the task:
### Input:
Make autodoc documentation template string for a module
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
head : string
Module name, table of contents.
body : string
Function and class docstrings.
### Response:
def generate_api_doc(self, uri):
'''Make autodoc documentation template string for a module
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
head : string
Module name, table of contents.
body : string
Function and class docstrings.
'''
# get the names of all classes and functions
functions, classes = self._parse_module_with_import(uri)
if not len(functions) and not len(classes) and DEBUG:
print('WARNING: Empty -', uri) # dbg
# Make a shorter version of the uri that omits the package name for
# titles
uri_short = re.sub(r'^%s\.' % self.package_name,'',uri)
head = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
body = ''
# Set the chapter title to read 'module' for all modules except for the
# main packages
if '.' in uri_short:
title = 'Module: :mod:`' + uri_short + '`'
head += title + '\n' + self.rst_section_levels[2] * len(title)
else:
title = ':mod:`' + uri_short + '`'
head += title + '\n' + self.rst_section_levels[1] * len(title)
head += '\n.. automodule:: ' + uri + '\n'
head += '\n.. currentmodule:: ' + uri + '\n'
body += '\n.. currentmodule:: ' + uri + '\n'
for c in classes:
body += '\n:class:`' + c + '`\n' \
+ self.rst_section_levels[3] * \
(len(c)+9) + '\n\n'
body += '\n.. autoclass:: ' + c + '\n'
# must NOT exclude from index to keep cross-refs working
body += ' :members:\n' \
' :undoc-members:\n' \
' :show-inheritance:\n' \
'\n' \
' .. automethod:: __init__\n\n'
head += '.. autosummary::\n\n'
for f in classes + functions:
head += ' ' + f + '\n'
head += '\n'
for f in functions:
# must NOT exclude from index to keep cross-refs working
body += f + '\n'
body += self.rst_section_levels[3] * len(f) + '\n'
body += '\n.. autofunction:: ' + f + '\n\n'
return head, body |
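A usage sketch, assuming `apigen` is an initialized instance of the docwriter class this method belongs to and 'mypackage.submodule' is a stand-in module path:

head, body = apigen.generate_api_doc('mypackage.submodule')
with open('mypackage.submodule.rst', 'w') as fobj:
    fobj.write(head + body)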
def read_seal_status(self):
"""Read the seal status of the Vault.
This is an unauthenticated endpoint.
Supported methods:
GET: /sys/seal-status. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict
"""
api_path = '/v1/sys/seal-status'
response = self._adapter.get(
url=api_path,
)
return response.json() | Read the seal status of the Vault.
This is an unauthenticated endpoint.
Supported methods:
GET: /sys/seal-status. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Read the seal status of the Vault.
This is an unauthenticated endpoint.
Supported methods:
GET: /sys/seal-status. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict
### Response:
def read_seal_status(self):
"""Read the seal status of the Vault.
This is an unauthenticated endpoint.
Supported methods:
GET: /sys/seal-status. Produces: 200 application/json
:return: The JSON response of the request.
:rtype: dict
"""
api_path = '/v1/sys/seal-status'
response = self._adapter.get(
url=api_path,
)
return response.json() |
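A usage sketch with the hvac client; the Vault address is a placeholder, and on recent hvac releases this call is exposed under client.sys:

import hvac

client = hvac.Client(url="http://127.0.0.1:8200")  # placeholder Vault address
status = client.sys.read_seal_status()
print(status["sealed"], status.get("t"), status.get("n"))  # sealed flag, threshold, key shares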
def _init_metadata(self, **kwargs):
"""Initialize form metadata"""
osid_objects.OsidSourceableForm._init_metadata(self)
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._copyright_registration_default = self._mdata['copyright_registration']['default_string_values'][0]
update_display_text_defaults(self._mdata['copyright'], self._locale_map)
self._copyright_default = dict(self._mdata['copyright']['default_string_values'][0])
update_display_text_defaults(self._mdata['title'], self._locale_map)
self._title_default = dict(self._mdata['title']['default_string_values'][0])
self._distribute_verbatim_default = self._mdata['distribute_verbatim']['default_boolean_values'][0]
self._created_date_default = self._mdata['created_date']['default_date_time_values'][0]
self._distribute_alterations_default = self._mdata['distribute_alterations']['default_boolean_values'][0]
update_display_text_defaults(self._mdata['principal_credit_string'], self._locale_map)
self._principal_credit_string_default = dict(self._mdata['principal_credit_string']['default_string_values'][0])
self._published_date_default = self._mdata['published_date']['default_date_time_values'][0]
self._source_default = self._mdata['source']['default_id_values'][0]
self._provider_links_default = self._mdata['provider_links']['default_id_values']
self._public_domain_default = self._mdata['public_domain']['default_boolean_values'][0]
self._distribute_compositions_default = self._mdata['distribute_compositions']['default_boolean_values'][0]
self._composition_default = self._mdata['composition']['default_id_values'][0]
self._published_default = self._mdata['published']['default_boolean_values'][0] | Initialize form metadata | Below is the the instruction that describes the task:
### Input:
Initialize form metadata
### Response:
def _init_metadata(self, **kwargs):
"""Initialize form metadata"""
osid_objects.OsidSourceableForm._init_metadata(self)
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._copyright_registration_default = self._mdata['copyright_registration']['default_string_values'][0]
update_display_text_defaults(self._mdata['copyright'], self._locale_map)
self._copyright_default = dict(self._mdata['copyright']['default_string_values'][0])
update_display_text_defaults(self._mdata['title'], self._locale_map)
self._title_default = dict(self._mdata['title']['default_string_values'][0])
self._distribute_verbatim_default = self._mdata['distribute_verbatim']['default_boolean_values'][0]
self._created_date_default = self._mdata['created_date']['default_date_time_values'][0]
self._distribute_alterations_default = self._mdata['distribute_alterations']['default_boolean_values'][0]
update_display_text_defaults(self._mdata['principal_credit_string'], self._locale_map)
self._principal_credit_string_default = dict(self._mdata['principal_credit_string']['default_string_values'][0])
self._published_date_default = self._mdata['published_date']['default_date_time_values'][0]
self._source_default = self._mdata['source']['default_id_values'][0]
self._provider_links_default = self._mdata['provider_links']['default_id_values']
self._public_domain_default = self._mdata['public_domain']['default_boolean_values'][0]
self._distribute_compositions_default = self._mdata['distribute_compositions']['default_boolean_values'][0]
self._composition_default = self._mdata['composition']['default_id_values'][0]
self._published_default = self._mdata['published']['default_boolean_values'][0] |
def read_slaext(self,filename,params=None,force=False,timerange=None,datatype=None,**kwargs):
"""
Read AVISO Along-Track SLAEXT regional products
        :return outStr: Output data structure containing all recorded parameters as specified by NetCDF file PARAMETER list.
:author: Renaud Dussurget
"""
self.message(2,'Reading SLAext data ({0})'.format(datatype))
self.message(2,'Loading %s' % (filename))
#Open file
self._filename = filename
try:
self._ncfile = ncfile(self._filename, "r")
except Exception,e:
self.warning(1, repr(e))
return {}
#Get delimiter
if os.path.basename(filename).count('.') > os.path.basename(filename).count('_'): delim='.'
else : delim = '_'
        #Get sat name
splitted=os.path.basename(filename).split(delim)
if (datatype == 'DT') | (datatype == 'NRT') : sat_name = splitted[2] if splitted[0] == 'nrt' else splitted[3]
if datatype == 'PISTACH' : sat_name = 'J2'
#Get list of recorded parameters:
par_list=[i.encode() for i in self._ncfile.variables.keys()]
for i in ['time','longitude','latitude'] : par_list.pop(par_list.index(i))
nparam=len(par_list)
self.message(2,'Recorded parameters : '+str(nparam)+' -> '+str(par_list))
lon = self.load_ncVar('longitude',**kwargs)
lon['data'] = recale(lon['data'], degrees=True, zero_2pi=True) #shift longitudes
lat = self.load_ncVar('latitude',**kwargs)
#Extract within limits
ind, flag = in_limits(lon['data'],lat['data'],limit=self.limit)
dim_lon = lon['_dimensions']
lat = lat['data'].compress(flag)
lon = lon['data'].compress(flag)
dist=cumulative_distance(lat, lon)
sz=np.shape(lon)
ndims=np.size(sz)
id=np.repeat(sat_name,sz)
date = self.load_ncVar('time',time=ind,**kwargs)
dimStr = date['_dimensions']
date=date['data']
outStr=varStr(dimensions=dimStr)
outStr.update({'lon':lon})
outStr.update({'lat':lat})
outStr.update({'date':date})
outStr.update({'id':id})
#{'_dimensions':dimStr,'lon':lon,'lat':lat,'date':date}
for param in par_list :
dumVar = self.load_ncVar(param,time=ind,**kwargs) #Load variables
dimStr=dumVar['_dimensions']
#update dimensions
curDim = [str(dimname) for dimname in dimStr.keys()[1:]] #[str(dimname) for dimname in self._ncfile.variables['LONGITUDE'].dimensions]
curDimval = [dimStr[dim] for dim in curDim] #[len(self._ncfile.dimensions[dimname]) for dimname in curDim]
flag = [(np.array(dimname) == outStr['_dimensions'].keys()).sum() == 0 for dimname in curDim] #find dimensions to update
dimUpdate = np.array(curDim).compress(flag)
for enum in enumerate(dimUpdate) :
self.message(3, 'Appending dimensions {0}:{1} to dataStructure'.format(enum[1],np.array(curDimval).compress(flag)[enum[0]]))
outStr['_dimensions'].update({enum[1]:np.array(curDimval).compress(flag)[enum[0]]}) #Append new dimension
if not isinstance(outStr['_dimensions'],dimStr) : outStr['_dimensions']['_ndims']+=1 #update dimension counts
cmd = 'dumStr = {\''+param.lower()+'\':dumVar[\'data\']}'
self.message(4, 'exec : '+cmd)
exec(cmd)
outStr.update(dumStr)
self._ncfile.close()
return outStr | Read AVISO Along-Track SLAEXT regional products
        :return outStr: Output data structure containing all recorded parameters as specified by NetCDF file PARAMETER list.
:author: Renaud Dussurget | Below is the the instruction that describes the task:
### Input:
Read AVISO Along-Track SLAEXT regional products
        :return outStr: Output data structure containing all recorded parameters as specified by NetCDF file PARAMETER list.
:author: Renaud Dussurget
### Response:
def read_slaext(self,filename,params=None,force=False,timerange=None,datatype=None,**kwargs):
"""
Read AVISO Along-Track SLAEXT regional products
        :return outStr: Output data structure containing all recorded parameters as specified by NetCDF file PARAMETER list.
:author: Renaud Dussurget
"""
self.message(2,'Reading SLAext data ({0})'.format(datatype))
self.message(2,'Loading %s' % (filename))
#Open file
self._filename = filename
try:
self._ncfile = ncfile(self._filename, "r")
except Exception,e:
self.warning(1, repr(e))
return {}
#Get delimiter
if os.path.basename(filename).count('.') > os.path.basename(filename).count('_'): delim='.'
else : delim = '_'
        #Get sat name
splitted=os.path.basename(filename).split(delim)
if (datatype == 'DT') | (datatype == 'NRT') : sat_name = splitted[2] if splitted[0] == 'nrt' else splitted[3]
if datatype == 'PISTACH' : sat_name = 'J2'
#Get list of recorded parameters:
par_list=[i.encode() for i in self._ncfile.variables.keys()]
for i in ['time','longitude','latitude'] : par_list.pop(par_list.index(i))
nparam=len(par_list)
self.message(2,'Recorded parameters : '+str(nparam)+' -> '+str(par_list))
lon = self.load_ncVar('longitude',**kwargs)
lon['data'] = recale(lon['data'], degrees=True, zero_2pi=True) #shift longitudes
lat = self.load_ncVar('latitude',**kwargs)
#Extract within limits
ind, flag = in_limits(lon['data'],lat['data'],limit=self.limit)
dim_lon = lon['_dimensions']
lat = lat['data'].compress(flag)
lon = lon['data'].compress(flag)
dist=cumulative_distance(lat, lon)
sz=np.shape(lon)
ndims=np.size(sz)
id=np.repeat(sat_name,sz)
date = self.load_ncVar('time',time=ind,**kwargs)
dimStr = date['_dimensions']
date=date['data']
outStr=varStr(dimensions=dimStr)
outStr.update({'lon':lon})
outStr.update({'lat':lat})
outStr.update({'date':date})
outStr.update({'id':id})
#{'_dimensions':dimStr,'lon':lon,'lat':lat,'date':date}
for param in par_list :
dumVar = self.load_ncVar(param,time=ind,**kwargs) #Load variables
dimStr=dumVar['_dimensions']
#update dimensions
curDim = [str(dimname) for dimname in dimStr.keys()[1:]] #[str(dimname) for dimname in self._ncfile.variables['LONGITUDE'].dimensions]
curDimval = [dimStr[dim] for dim in curDim] #[len(self._ncfile.dimensions[dimname]) for dimname in curDim]
flag = [(np.array(dimname) == outStr['_dimensions'].keys()).sum() == 0 for dimname in curDim] #find dimensions to update
dimUpdate = np.array(curDim).compress(flag)
for enum in enumerate(dimUpdate) :
self.message(3, 'Appending dimensions {0}:{1} to dataStructure'.format(enum[1],np.array(curDimval).compress(flag)[enum[0]]))
outStr['_dimensions'].update({enum[1]:np.array(curDimval).compress(flag)[enum[0]]}) #Append new dimension
if not isinstance(outStr['_dimensions'],dimStr) : outStr['_dimensions']['_ndims']+=1 #update dimension counts
cmd = 'dumStr = {\''+param.lower()+'\':dumVar[\'data\']}'
self.message(4, 'exec : '+cmd)
exec(cmd)
outStr.update(dumStr)
self._ncfile.close()
return outStr |
def read_flags_from_files(self, argv, force_gnu=True):
"""Processes command line args, but also allow args to be read from file.
Args:
argv: [str], a list of strings, usually sys.argv[1:], which may contain
one or more flagfile directives of the form --flagfile="./filename".
Note that the name of the program (sys.argv[0]) should be omitted.
force_gnu: bool, if False, --flagfile parsing obeys the
FLAGS.is_gnu_getopt() value. If True, ignore the value and always
follow gnu_getopt semantics.
Returns:
A new list which has the original list combined with what we read
from any flagfile(s).
Raises:
IllegalFlagValueError: Raised when --flagfile is provided with no
argument.
This function is called by FLAGS(argv).
It scans the input list for a flag that looks like:
--flagfile=<somefile>. Then it opens <somefile>, reads all valid key
and value pairs and inserts them into the input list in exactly the
place where the --flagfile arg is found.
Note that your application's flags are still defined the usual way
using absl.flags DEFINE_flag() type functions.
Notes (assuming we're getting a commandline of some sort as our input):
--> For duplicate flags, the last one we hit should "win".
--> Since flags that appear later win, a flagfile's settings can be "weak"
if the --flagfile comes at the beginning of the argument sequence,
and it can be "strong" if the --flagfile comes at the end.
--> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
It will be expanded in exactly the spot where it is found.
--> In a flagfile, a line beginning with # or // is a comment.
--> Entirely blank lines _should_ be ignored.
"""
rest_of_args = argv
new_argv = []
while rest_of_args:
current_arg = rest_of_args[0]
rest_of_args = rest_of_args[1:]
if self._is_flag_file_directive(current_arg):
# This handles the case of -(-)flagfile foo. In this case the
# next arg really is part of this one.
if current_arg == '--flagfile' or current_arg == '-flagfile':
if not rest_of_args:
raise _exceptions.IllegalFlagValueError(
'--flagfile with no argument')
flag_filename = os.path.expanduser(rest_of_args[0])
rest_of_args = rest_of_args[1:]
else:
# This handles the case of (-)-flagfile=foo.
flag_filename = self._extract_filename(current_arg)
new_argv.extend(self._get_flag_file_lines(flag_filename))
else:
new_argv.append(current_arg)
# Stop parsing after '--', like getopt and gnu_getopt.
if current_arg == '--':
break
# Stop parsing after a non-flag, like getopt.
if not current_arg.startswith('-'):
if not force_gnu and not self.__dict__['__use_gnu_getopt']:
break
else:
if ('=' not in current_arg and
rest_of_args and not rest_of_args[0].startswith('-')):
# If this is an occurrence of a legitimate --x y, skip the value
# so that it won't be mistaken for a standalone arg.
fl = self._flags()
name = current_arg.lstrip('-')
if name in fl and not fl[name].boolean:
current_arg = rest_of_args[0]
rest_of_args = rest_of_args[1:]
new_argv.append(current_arg)
if rest_of_args:
new_argv.extend(rest_of_args)
return new_argv | Processes command line args, but also allow args to be read from file.
Args:
argv: [str], a list of strings, usually sys.argv[1:], which may contain
one or more flagfile directives of the form --flagfile="./filename".
Note that the name of the program (sys.argv[0]) should be omitted.
force_gnu: bool, if False, --flagfile parsing obeys the
FLAGS.is_gnu_getopt() value. If True, ignore the value and always
follow gnu_getopt semantics.
Returns:
A new list which has the original list combined with what we read
from any flagfile(s).
Raises:
IllegalFlagValueError: Raised when --flagfile is provided with no
argument.
This function is called by FLAGS(argv).
It scans the input list for a flag that looks like:
--flagfile=<somefile>. Then it opens <somefile>, reads all valid key
and value pairs and inserts them into the input list in exactly the
place where the --flagfile arg is found.
Note that your application's flags are still defined the usual way
using absl.flags DEFINE_flag() type functions.
Notes (assuming we're getting a commandline of some sort as our input):
--> For duplicate flags, the last one we hit should "win".
--> Since flags that appear later win, a flagfile's settings can be "weak"
if the --flagfile comes at the beginning of the argument sequence,
and it can be "strong" if the --flagfile comes at the end.
--> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
It will be expanded in exactly the spot where it is found.
--> In a flagfile, a line beginning with # or // is a comment.
--> Entirely blank lines _should_ be ignored. | Below is the the instruction that describes the task:
### Input:
Processes command line args, but also allow args to be read from file.
Args:
argv: [str], a list of strings, usually sys.argv[1:], which may contain
one or more flagfile directives of the form --flagfile="./filename".
Note that the name of the program (sys.argv[0]) should be omitted.
force_gnu: bool, if False, --flagfile parsing obeys the
FLAGS.is_gnu_getopt() value. If True, ignore the value and always
follow gnu_getopt semantics.
Returns:
A new list which has the original list combined with what we read
from any flagfile(s).
Raises:
IllegalFlagValueError: Raised when --flagfile is provided with no
argument.
This function is called by FLAGS(argv).
It scans the input list for a flag that looks like:
--flagfile=<somefile>. Then it opens <somefile>, reads all valid key
and value pairs and inserts them into the input list in exactly the
place where the --flagfile arg is found.
Note that your application's flags are still defined the usual way
using absl.flags DEFINE_flag() type functions.
Notes (assuming we're getting a commandline of some sort as our input):
--> For duplicate flags, the last one we hit should "win".
--> Since flags that appear later win, a flagfile's settings can be "weak"
if the --flagfile comes at the beginning of the argument sequence,
and it can be "strong" if the --flagfile comes at the end.
--> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
It will be expanded in exactly the spot where it is found.
--> In a flagfile, a line beginning with # or // is a comment.
--> Entirely blank lines _should_ be ignored.
### Response:
def read_flags_from_files(self, argv, force_gnu=True):
"""Processes command line args, but also allow args to be read from file.
Args:
argv: [str], a list of strings, usually sys.argv[1:], which may contain
one or more flagfile directives of the form --flagfile="./filename".
Note that the name of the program (sys.argv[0]) should be omitted.
force_gnu: bool, if False, --flagfile parsing obeys the
FLAGS.is_gnu_getopt() value. If True, ignore the value and always
follow gnu_getopt semantics.
Returns:
A new list which has the original list combined with what we read
from any flagfile(s).
Raises:
IllegalFlagValueError: Raised when --flagfile is provided with no
argument.
This function is called by FLAGS(argv).
It scans the input list for a flag that looks like:
--flagfile=<somefile>. Then it opens <somefile>, reads all valid key
and value pairs and inserts them into the input list in exactly the
place where the --flagfile arg is found.
Note that your application's flags are still defined the usual way
using absl.flags DEFINE_flag() type functions.
Notes (assuming we're getting a commandline of some sort as our input):
--> For duplicate flags, the last one we hit should "win".
--> Since flags that appear later win, a flagfile's settings can be "weak"
if the --flagfile comes at the beginning of the argument sequence,
and it can be "strong" if the --flagfile comes at the end.
--> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
It will be expanded in exactly the spot where it is found.
--> In a flagfile, a line beginning with # or // is a comment.
--> Entirely blank lines _should_ be ignored.
"""
rest_of_args = argv
new_argv = []
while rest_of_args:
current_arg = rest_of_args[0]
rest_of_args = rest_of_args[1:]
if self._is_flag_file_directive(current_arg):
# This handles the case of -(-)flagfile foo. In this case the
# next arg really is part of this one.
if current_arg == '--flagfile' or current_arg == '-flagfile':
if not rest_of_args:
raise _exceptions.IllegalFlagValueError(
'--flagfile with no argument')
flag_filename = os.path.expanduser(rest_of_args[0])
rest_of_args = rest_of_args[1:]
else:
# This handles the case of (-)-flagfile=foo.
flag_filename = self._extract_filename(current_arg)
new_argv.extend(self._get_flag_file_lines(flag_filename))
else:
new_argv.append(current_arg)
# Stop parsing after '--', like getopt and gnu_getopt.
if current_arg == '--':
break
# Stop parsing after a non-flag, like getopt.
if not current_arg.startswith('-'):
if not force_gnu and not self.__dict__['__use_gnu_getopt']:
break
else:
if ('=' not in current_arg and
rest_of_args and not rest_of_args[0].startswith('-')):
# If this is an occurrence of a legitimate --x y, skip the value
# so that it won't be mistaken for a standalone arg.
fl = self._flags()
name = current_arg.lstrip('-')
if name in fl and not fl[name].boolean:
current_arg = rest_of_args[0]
rest_of_args = rest_of_args[1:]
new_argv.append(current_arg)
if rest_of_args:
new_argv.extend(rest_of_args)
return new_argv |
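A minimal sketch of how the method above is exercised through absl's global flag registry; the flag name, the flagfile path, and its contents are placeholders invented for illustration, and the file is assumed to exist.
from absl import flags

flags.DEFINE_string('greeting', 'hello', 'hypothetical example flag')
FLAGS = flags.FLAGS

# example.cfg is assumed to contain a line such as:  --greeting=from-file
argv = ['--flagfile=example.cfg', '--greeting=from-cli']
expanded = FLAGS.read_flags_from_files(argv, force_gnu=True)
# The flagfile lines are spliced in where --flagfile appeared, so the later
# --greeting=from-cli wins once the expanded argv is finally parsed.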
def annotation_spec_set_path(cls, project, annotation_spec_set):
"""Return a fully-qualified annotation_spec_set string."""
return google.api_core.path_template.expand(
"projects/{project}/annotationSpecSets/{annotation_spec_set}",
project=project,
annotation_spec_set=annotation_spec_set,
) | Return a fully-qualified annotation_spec_set string. | Below is the the instruction that describes the task:
### Input:
Return a fully-qualified annotation_spec_set string.
### Response:
def annotation_spec_set_path(cls, project, annotation_spec_set):
"""Return a fully-qualified annotation_spec_set string."""
return google.api_core.path_template.expand(
"projects/{project}/annotationSpecSets/{annotation_spec_set}",
project=project,
annotation_spec_set=annotation_spec_set,
) |
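A hedged usage sketch, assuming the google-cloud-datalabeling client library; the project and annotation-spec-set IDs are placeholders.
from google.cloud import datalabeling_v1beta1

client_cls = datalabeling_v1beta1.DataLabelingServiceClient
path = client_cls.annotation_spec_set_path('my-project', 'my-spec-set')
# path == 'projects/my-project/annotationSpecSets/my-spec-set'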
def authenticate(self, request, identification, password=None, check_password=True):
"""
Authenticates a user through the combination email/username with
password.
:param request:
The authenticate() method of authentication backends requires
request as the first positional argument from Django 2.1.
:param identification:
A string containing the username or e-mail of the user that is
trying to authenticate.
    :param password:
Optional string containing the password for the user.
:param check_password:
Boolean that defines if the password should be checked for this
user. Always keep this ``True``. This is only used by userena at
activation when a user opens a page with a secret hash.
:return: The signed in :class:`User`.
"""
User = get_user_model()
try:
django.core.validators.validate_email(identification)
try: user = User.objects.get(email__iexact=identification)
except User.DoesNotExist: return None
except django.core.validators.ValidationError:
try: user = User.objects.get(username__iexact=identification)
except User.DoesNotExist: return None
if check_password:
if user.check_password(password):
return user
return None
else: return user | Authenticates a user through the combination email/username with
password.
:param request:
The authenticate() method of authentication backends requires
request as the first positional argument from Django 2.1.
:param identification:
A string containing the username or e-mail of the user that is
trying to authenticate.
    :param password:
Optional string containing the password for the user.
:param check_password:
Boolean that defines if the password should be checked for this
user. Always keep this ``True``. This is only used by userena at
activation when a user opens a page with a secret hash.
:return: The signed in :class:`User`. | Below is the the instruction that describes the task:
### Input:
Authenticates a user through the combination email/username with
password.
:param request:
The authenticate() method of authentication backends requires
request as the first positional argument from Django 2.1.
:param identification:
A string containing the username or e-mail of the user that is
trying to authenticate.
    :param password:
Optional string containing the password for the user.
:param check_password:
Boolean that defines if the password should be checked for this
user. Always keep this ``True``. This is only used by userena at
activation when a user opens a page with a secret hash.
:return: The signed in :class:`User`.
### Response:
def authenticate(self, request, identification, password=None, check_password=True):
"""
Authenticates a user through the combination email/username with
password.
:param request:
The authenticate() method of authentication backends requires
request as the first positional argument from Django 2.1.
:param identification:
A string containing the username or e-mail of the user that is
trying to authenticate.
    :param password:
Optional string containing the password for the user.
:param check_password:
Boolean that defines if the password should be checked for this
user. Always keep this ``True``. This is only used by userena at
activation when a user opens a page with a secret hash.
:return: The signed in :class:`User`.
"""
User = get_user_model()
try:
django.core.validators.validate_email(identification)
try: user = User.objects.get(email__iexact=identification)
except User.DoesNotExist: return None
except django.core.validators.ValidationError:
try: user = User.objects.get(username__iexact=identification)
except User.DoesNotExist: return None
if check_password:
if user.check_password(password):
return user
return None
else: return user |
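A hedged sketch of using the backend from Django: the dotted settings path below is an assumption, and either a username or an e-mail address can be passed as identification.
# settings.py (assumed dotted path)
AUTHENTICATION_BACKENDS = ['userena.backends.UserenaAuthenticationBackend']

# elsewhere, e.g. inside a view
from django.contrib.auth import authenticate
user = authenticate(request=None,
                    identification='alice@example.com',  # or just 'alice'
                    password='s3cret')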
def start(self):
""" Start or restart (the :meth:`~pypot.primitive.primitive.Primitive.stop` method will automatically be called) the primitive. """
if not self.robot._primitive_manager.running:
raise RuntimeError('Cannot run a primitive when the sync is stopped!')
StoppableThread.start(self)
self.wait_to_start()
logger.info("Primitive %s started.", self) | Start or restart (the :meth:`~pypot.primitive.primitive.Primitive.stop` method will automatically be called) the primitive. | Below is the the instruction that describes the task:
### Input:
Start or restart (the :meth:`~pypot.primitive.primitive.Primitive.stop` method will automatically be called) the primitive.
### Response:
def start(self):
""" Start or restart (the :meth:`~pypot.primitive.primitive.Primitive.stop` method will automatically be called) the primitive. """
if not self.robot._primitive_manager.running:
raise RuntimeError('Cannot run a primitive when the sync is stopped!')
StoppableThread.start(self)
self.wait_to_start()
logger.info("Primitive %s started.", self) |
def to_json(self):
"""
:return: str
"""
json_dict = self.to_json_basic()
json_dict['ds'] = self._ds
return json.dumps(json_dict) | :return: str | Below is the the instruction that describes the task:
### Input:
:return: str
### Response:
def to_json(self):
"""
:return: str
"""
json_dict = self.to_json_basic()
json_dict['ds'] = self._ds
return json.dumps(json_dict) |
def update_record(self, msg_id, rec):
"""Update the data in an existing record."""
query = "UPDATE %s SET "%self.table
sets = []
keys = sorted(rec.keys())
values = []
for key in keys:
sets.append('%s = ?'%key)
values.append(rec[key])
query += ', '.join(sets)
query += ' WHERE msg_id == ?'
values.append(msg_id)
self._db.execute(query, values) | Update the data in an existing record. | Below is the the instruction that describes the task:
### Input:
Update the data in an existing record.
### Response:
def update_record(self, msg_id, rec):
"""Update the data in an existing record."""
query = "UPDATE %s SET "%self.table
sets = []
keys = sorted(rec.keys())
values = []
for key in keys:
sets.append('%s = ?'%key)
values.append(rec[key])
query += ', '.join(sets)
query += ' WHERE msg_id == ?'
values.append(msg_id)
self._db.execute(query, values) |
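The same UPDATE pattern as a standalone sqlite3 sketch; the table and column names are placeholders, not taken from the original class.
import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE tasks (msg_id TEXT PRIMARY KEY, status TEXT, retries INTEGER)')
db.execute('INSERT INTO tasks VALUES (?, ?, ?)', ('abc', 'pending', 0))

rec = {'status': 'done', 'retries': 1}
keys = sorted(rec)
query = 'UPDATE tasks SET %s WHERE msg_id == ?' % ', '.join('%s = ?' % k for k in keys)
db.execute(query, [rec[k] for k in keys] + ['abc'])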
def flush(table='filter', chain='', family='ipv4'):
'''
Flush the chain in the specified table, flush all chains in the specified
table if chain is not specified.
CLI Example:
.. code-block:: bash
salt '*' nftables.flush filter
salt '*' nftables.flush filter input
IPv6:
salt '*' nftables.flush filter input family=ipv6
'''
ret = {'comment': 'Failed to flush rules from chain {0} in table {1}.'.format(chain, table),
'result': False}
res = check_table(table, family=family)
if not res['result']:
return res
nft_family = _NFTABLES_FAMILIES[family]
if chain:
res = check_chain(table, chain, family=family)
if not res['result']:
return res
cmd = '{0} flush chain {1} {2} {3}'.\
format(_nftables_cmd(), nft_family, table, chain)
comment = 'from chain {0} in table {1} in family {2}.'.\
format(chain, table, family)
else:
cmd = '{0} flush table {1} {2}'.\
format(_nftables_cmd(), nft_family, table)
comment = 'from table {0} in family {1}.'.\
format(table, family)
out = __salt__['cmd.run'](cmd, python_shell=False)
if len(out) == 0:
ret['result'] = True
ret['comment'] = 'Flushed rules {0}'.format(comment)
else:
ret['comment'] = 'Failed to flush rules {0}'.format(comment)
return ret | Flush the chain in the specified table, flush all chains in the specified
table if chain is not specified.
CLI Example:
.. code-block:: bash
salt '*' nftables.flush filter
salt '*' nftables.flush filter input
IPv6:
salt '*' nftables.flush filter input family=ipv6 | Below is the the instruction that describes the task:
### Input:
Flush the chain in the specified table, flush all chains in the specified
table if chain is not specified.
CLI Example:
.. code-block:: bash
salt '*' nftables.flush filter
salt '*' nftables.flush filter input
IPv6:
salt '*' nftables.flush filter input family=ipv6
### Response:
def flush(table='filter', chain='', family='ipv4'):
'''
Flush the chain in the specified table, flush all chains in the specified
table if chain is not specified.
CLI Example:
.. code-block:: bash
salt '*' nftables.flush filter
salt '*' nftables.flush filter input
IPv6:
salt '*' nftables.flush filter input family=ipv6
'''
ret = {'comment': 'Failed to flush rules from chain {0} in table {1}.'.format(chain, table),
'result': False}
res = check_table(table, family=family)
if not res['result']:
return res
nft_family = _NFTABLES_FAMILIES[family]
if chain:
res = check_chain(table, chain, family=family)
if not res['result']:
return res
cmd = '{0} flush chain {1} {2} {3}'.\
format(_nftables_cmd(), nft_family, table, chain)
comment = 'from chain {0} in table {1} in family {2}.'.\
format(chain, table, family)
else:
cmd = '{0} flush table {1} {2}'.\
format(_nftables_cmd(), nft_family, table)
comment = 'from table {0} in family {1}.'.\
format(table, family)
out = __salt__['cmd.run'](cmd, python_shell=False)
if len(out) == 0:
ret['result'] = True
ret['comment'] = 'Flushed rules {0}'.format(comment)
else:
ret['comment'] = 'Failed to flush rules {0}'.format(comment)
return ret |
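A hedged sketch of calling the function from other Salt code through the __salt__ dunder; the table and chain names are placeholders, and `log` is the module-level logger Salt modules conventionally define.
res = __salt__['nftables.flush']('filter', 'input', family='ipv4')
if not res['result']:
    log.error('nftables flush failed: %s', res['comment'])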
def _get_index_points(self, index_points=None):
"""Return `index_points` if not None, else `self._index_points`.
Args:
index_points: if given, this is what is returned; else,
`self._index_points`
Returns:
index_points: the given arg, if not None, else the class member
`self._index_points`.
    Raises:
ValueError: if `index_points` and `self._index_points` are both `None`.
"""
if self._index_points is None and index_points is None:
raise ValueError(
'This GaussianProcess instance was not instantiated with a value for '
'index_points. One must therefore be provided when calling sample, '
'log_prob, and other such methods. In particular, one can\'t compute '
        'KL divergences to/from an instance of `GaussianProcess` with '
'unspecified `index_points` directly. Instead, use the '
'`get_marginal_distribution` function, which takes `index_points` as '
'an argument and returns a `Normal` or '
'`MultivariateNormalLinearOperator` instance, whose KL can be '
'computed.')
return index_points if index_points is not None else self._index_points | Return `index_points` if not None, else `self._index_points`.
Args:
index_points: if given, this is what is returned; else,
`self._index_points`
Returns:
index_points: the given arg, if not None, else the class member
`self._index_points`.
    Raises:
ValueError: if `index_points` and `self._index_points` are both `None`. | Below is the the instruction that describes the task:
### Input:
Return `index_points` if not None, else `self._index_points`.
Args:
index_points: if given, this is what is returned; else,
`self._index_points`
Returns:
index_points: the given arg, if not None, else the class member
`self._index_points`.
    Raises:
ValueError: if `index_points` and `self._index_points` are both `None`.
### Response:
def _get_index_points(self, index_points=None):
"""Return `index_points` if not None, else `self._index_points`.
Args:
index_points: if given, this is what is returned; else,
`self._index_points`
Returns:
index_points: the given arg, if not None, else the class member
`self._index_points`.
    Raises:
ValueError: if `index_points` and `self._index_points` are both `None`.
"""
if self._index_points is None and index_points is None:
raise ValueError(
'This GaussianProcess instance was not instantiated with a value for '
'index_points. One must therefore be provided when calling sample, '
'log_prob, and other such methods. In particular, one can\'t compute '
'KL divergences to/from an instance of `GaussianProccess` with '
'unspecified `index_points` directly. Instead, use the '
'`get_marginal_distribution` function, which takes `index_points` as '
'an argument and returns a `Normal` or '
'`MultivariateNormalLinearOperator` instance, whose KL can be '
'computed.')
return index_points if index_points is not None else self._index_points |
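A hedged TensorFlow Probability sketch of the behaviour described above: a GaussianProcess built without index_points must be given them per call, for example through get_marginal_distribution. The kernel choice and points are arbitrary, and module paths can differ between TFP versions.
import numpy as np
import tensorflow_probability as tfp

kernel = tfp.math.psd_kernels.ExponentiatedQuadratic()
gp = tfp.distributions.GaussianProcess(kernel)  # no index_points supplied
pts = np.linspace(-1., 1., 5, dtype=np.float64)[:, np.newaxis]
marginal = gp.get_marginal_distribution(index_points=pts)  # fine
# gp.sample() without index_points would raise the ValueError shown above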
def extend_dirichlet(p):
"""
extend_dirichlet(p)
Concatenates 1-sum(p) to the end of p and returns.
"""
if len(np.shape(p)) > 1:
return np.hstack((p, np.atleast_2d(1. - np.sum(p))))
else:
return np.hstack((p, 1. - np.sum(p))) | extend_dirichlet(p)
Concatenates 1-sum(p) to the end of p and returns. | Below is the the instruction that describes the task:
### Input:
extend_dirichlet(p)
Concatenates 1-sum(p) to the end of p and returns.
### Response:
def extend_dirichlet(p):
"""
extend_dirichlet(p)
Concatenates 1-sum(p) to the end of p and returns.
"""
if len(np.shape(p)) > 1:
return np.hstack((p, np.atleast_2d(1. - np.sum(p))))
else:
return np.hstack((p, 1. - np.sum(p))) |
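A quick illustration of the two branches above, using the helper as defined.
import numpy as np

extend_dirichlet(np.array([0.2, 0.3]))
# -> array([0.2, 0.3, 0.5])

extend_dirichlet(np.array([[0.1, 0.2]]))
# -> array([[0.1, 0.2, 0.7]])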
def table(tab):
"""Access IPTables transactionally in a uniform way.
Ensures all access is done without autocommit and that only the outer
most task commits, and also ensures we refresh once and commit once.
"""
global open_tables
if tab in open_tables:
yield open_tables[tab]
else:
open_tables[tab] = iptc.Table(tab)
open_tables[tab].refresh()
open_tables[tab].autocommit = False
yield open_tables[tab]
open_tables[tab].commit()
del open_tables[tab] | Access IPTables transactionally in a uniform way.
Ensures all access is done without autocommit and that only the outer
most task commits, and also ensures we refresh once and commit once. | Below is the the instruction that describes the task:
### Input:
Access IPTables transactionally in a uniform way.
Ensures all access is done without autocommit and that only the outer
most task commits, and also ensures we refresh once and commit once.
### Response:
def table(tab):
"""Access IPTables transactionally in a uniform way.
Ensures all access is done without autocommit and that only the outer
most task commits, and also ensures we refresh once and commit once.
"""
global open_tables
if tab in open_tables:
yield open_tables[tab]
else:
open_tables[tab] = iptc.Table(tab)
open_tables[tab].refresh()
open_tables[tab].autocommit = False
yield open_tables[tab]
open_tables[tab].commit()
del open_tables[tab] |
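A hedged usage sketch, assuming the generator above is wrapped with contextlib.contextmanager (as its yield-based structure suggests), python-iptables (iptc) is installed, and the process has the privileges iptables requires.
with table('filter') as filt:
    for chain in filt.chains:
        print(chain.name)
    with table('filter') as same_filt:  # nested use reuses the open table,
        assert same_filt is filt        # so no second refresh or commit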
def text(self, txt, x, y, width=None, height=1000000, outline=False, draw=True, **kwargs):
'''
Draws a string of text according to current font settings.
:param txt: Text to output
:param x: x-coordinate of the top left corner
:param y: y-coordinate of the top left corner
:param width: text width
:param height: text height
:param outline: If True draws outline text (defaults to False)
:param draw: Set to False to inhibit immediate drawing (defaults to True)
:return: Path object representing the text.
'''
txt = self.Text(txt, x, y, width, height, outline=outline, ctx=None, **kwargs)
if outline:
path = txt.path
if draw:
path.draw()
return path
else:
return txt | Draws a string of text according to current font settings.
:param txt: Text to output
:param x: x-coordinate of the top left corner
:param y: y-coordinate of the top left corner
:param width: text width
:param height: text height
:param outline: If True draws outline text (defaults to False)
:param draw: Set to False to inhibit immediate drawing (defaults to True)
:return: Path object representing the text. | Below is the the instruction that describes the task:
### Input:
Draws a string of text according to current font settings.
:param txt: Text to output
:param x: x-coordinate of the top left corner
:param y: y-coordinate of the top left corner
:param width: text width
:param height: text height
:param outline: If True draws outline text (defaults to False)
:param draw: Set to False to inhibit immediate drawing (defaults to True)
:return: Path object representing the text.
### Response:
def text(self, txt, x, y, width=None, height=1000000, outline=False, draw=True, **kwargs):
'''
Draws a string of text according to current font settings.
:param txt: Text to output
:param x: x-coordinate of the top left corner
:param y: y-coordinate of the top left corner
:param width: text width
:param height: text height
:param outline: If True draws outline text (defaults to False)
:param draw: Set to False to inhibit immediate drawing (defaults to True)
:return: Path object representing the text.
'''
txt = self.Text(txt, x, y, width, height, outline=outline, ctx=None, **kwargs)
if outline:
path = txt.path
if draw:
path.draw()
return path
else:
return txt |
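A hedged sketch in a Shoebot/NodeBox-style script, where the canvas methods are exposed as top-level functions; the font helper, coordinates, and wording are assumptions.
font("Helvetica", 24)                 # assumed font helper
text("Hello, world", 20, 40)          # drawn immediately (draw defaults to True)
p = text("Outlined", 20, 80, outline=True, draw=False)
p.draw()                              # the returned Path can be drawn later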
def form_valid(self, form):
"""First call the parent's form valid then let the user know it worked."""
form_valid_from_parent = super(HostCreate, self).form_valid(form)
messages.success(self.request, 'Host {} Successfully Created'.format(self.object))
return form_valid_from_parent | First call the parent's form valid then let the user know it worked. | Below is the the instruction that describes the task:
### Input:
First call the parent's form valid then let the user know it worked.
### Response:
def form_valid(self, form):
"""First call the parent's form valid then let the user know it worked."""
form_valid_from_parent = super(HostCreate, self).form_valid(form)
messages.success(self.request, 'Host {} Successfully Created'.format(self.object))
return form_valid_from_parent |
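For comparison, a hedged sketch of Django's built-in SuccessMessageMixin, which covers the same need declaratively; the model and field names are placeholders.
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic.edit import CreateView

class HostCreateAlt(SuccessMessageMixin, CreateView):
    model = Host                      # placeholder model
    fields = ['name']
    success_message = 'Host %(name)s Successfully Created'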
def zrlist(self, name_start, name_end, limit=10):
"""
Return a list of the top ``limit`` zset's name between ``name_start`` and
``name_end`` in descending order
.. note:: The range is (``name_start``, ``name_end``]. The ``name_start``
isn't in the range, but ``name_end`` is.
:param string name_start: The lower bound(not included) of zset names to
be returned, empty string ``''`` means +inf
:param string name_end: The upper bound(included) of zset names to be
returned, empty string ``''`` means -inf
:param int limit: number of elements will be returned.
:return: a list of zset's name
:rtype: list
    >>> ssdb.zrlist('zset_ ', 'zset_z', 10)
    ['zset_2', 'zset_1']
    >>> ssdb.zrlist('zset_ ', '', 3)
    ['zset_2', 'zset_1']
    >>> ssdb.zrlist('', 'aaa_not_exist', 10)
[]
"""
limit = get_positive_integer('limit', limit)
return self.execute_command('zrlist', name_start, name_end, limit) | Return a list of the top ``limit`` zset's name between ``name_start`` and
``name_end`` in descending order
.. note:: The range is (``name_start``, ``name_end``]. The ``name_start``
isn't in the range, but ``name_end`` is.
:param string name_start: The lower bound(not included) of zset names to
be returned, empty string ``''`` means +inf
:param string name_end: The upper bound(included) of zset names to be
returned, empty string ``''`` means -inf
:param int limit: number of elements will be returned.
:return: a list of zset's name
:rtype: list
    >>> ssdb.zrlist('zset_ ', 'zset_z', 10)
    ['zset_2', 'zset_1']
    >>> ssdb.zrlist('zset_ ', '', 3)
    ['zset_2', 'zset_1']
    >>> ssdb.zrlist('', 'aaa_not_exist', 10)
[] | Below is the the instruction that describes the task:
### Input:
Return a list of the top ``limit`` zset's name between ``name_start`` and
``name_end`` in descending order
.. note:: The range is (``name_start``, ``name_end``]. The ``name_start``
isn't in the range, but ``name_end`` is.
:param string name_start: The lower bound(not included) of zset names to
be returned, empty string ``''`` means +inf
:param string name_end: The upper bound(included) of zset names to be
returned, empty string ``''`` means -inf
:param int limit: number of elements will be returned.
:return: a list of zset's name
:rtype: list
    >>> ssdb.zrlist('zset_ ', 'zset_z', 10)
    ['zset_2', 'zset_1']
    >>> ssdb.zrlist('zset_ ', '', 3)
    ['zset_2', 'zset_1']
    >>> ssdb.zrlist('', 'aaa_not_exist', 10)
[]
### Response:
def zrlist(self, name_start, name_end, limit=10):
"""
Return a list of the top ``limit`` zset's name between ``name_start`` and
``name_end`` in descending order
.. note:: The range is (``name_start``, ``name_end``]. The ``name_start``
isn't in the range, but ``name_end`` is.
:param string name_start: The lower bound(not included) of zset names to
be returned, empty string ``''`` means +inf
:param string name_end: The upper bound(included) of zset names to be
returned, empty string ``''`` means -inf
:param int limit: number of elements will be returned.
:return: a list of zset's name
:rtype: list
    >>> ssdb.zrlist('zset_ ', 'zset_z', 10)
    ['zset_2', 'zset_1']
    >>> ssdb.zrlist('zset_ ', '', 3)
    ['zset_2', 'zset_1']
    >>> ssdb.zrlist('', 'aaa_not_exist', 10)
[]
"""
limit = get_positive_integer('limit', limit)
return self.execute_command('zrlist', name_start, name_end, limit) |
def bool_element(element, name, default=True):
"""
Returns the bool value of an element, or a default if it's not defined
:param element: The XML Element object
:type element: etree._Element
:param name: The name of the element to evaluate
:type name: str
:param default: The default value to return if the element is not defined
:type default: bool
"""
element_value = element.find(name)
if element_value is not None:
return element_value.text == 'true'
return default | Returns the bool value of an element, or a default if it's not defined
:param element: The XML Element object
:type element: etree._Element
:param name: The name of the element to evaluate
:type name: str
:param default: The default value to return if the element is not defined
:type default: bool | Below is the the instruction that describes the task:
### Input:
Returns the bool value of an element, or a default if it's not defined
:param element: The XML Element object
:type element: etree._Element
:param name: The name of the element to evaluate
:type name: str
:param default: The default value to return if the element is not defined
:type default: bool
### Response:
def bool_element(element, name, default=True):
"""
Returns the bool value of an element, or a default if it's not defined
:param element: The XML Element object
:type element: etree._Element
:param name: The name of the element to evaluate
:type name: str
:param default: The default value to return if the element is not defined
:type default: bool
"""
element_value = element.find(name)
if element_value is not None:
return element_value.text == 'true'
return default |
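A minimal sketch with lxml, calling the helper defined above; the XML fragment and element names are made up.
from lxml import etree

root = etree.fromstring('<item><enabled>true</enabled></item>')
bool_element(root, 'enabled')                 # -> True
bool_element(root, 'missing', default=False)  # -> False
bool_element(root, 'missing')                 # -> True (the default default)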
def show_hc(kwargs=None, call=None):
'''
Show the details of an existing health check.
CLI Example:
.. code-block:: bash
salt-cloud -f show_hc gce name=hc
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_hc function must be called with -f or --function.'
)
if not kwargs or 'name' not in kwargs:
log.error(
'Must specify name of health check.'
)
return False
conn = get_conn()
return _expand_item(conn.ex_get_healthcheck(kwargs['name'])) | Show the details of an existing health check.
CLI Example:
.. code-block:: bash
salt-cloud -f show_hc gce name=hc | Below is the the instruction that describes the task:
### Input:
Show the details of an existing health check.
CLI Example:
.. code-block:: bash
salt-cloud -f show_hc gce name=hc
### Response:
def show_hc(kwargs=None, call=None):
'''
Show the details of an existing health check.
CLI Example:
.. code-block:: bash
salt-cloud -f show_hc gce name=hc
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_hc function must be called with -f or --function.'
)
if not kwargs or 'name' not in kwargs:
log.error(
'Must specify name of health check.'
)
return False
conn = get_conn()
return _expand_item(conn.ex_get_healthcheck(kwargs['name'])) |