| code | docstring | text |
|---|---|---|
| stringlengths 75–104k | stringlengths 1–46.9k | stringlengths 164–112k |
def context_id(self):
"""Return this Async's Context Id if it exists."""
if not self._context_id:
self._context_id = self._get_context_id()
self.update_options(context_id=self._context_id)
return self._context_id | Return this Async's Context Id if it exists. | Below is the instruction that describes the task:
### Input:
Return this Async's Context Id if it exists.
### Response:
def context_id(self):
"""Return this Async's Context Id if it exists."""
if not self._context_id:
self._context_id = self._get_context_id()
self.update_options(context_id=self._context_id)
return self._context_id |
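A hypothetical stand-alone sketch of the compute-once-then-cache pattern used by context_id above; the Async class, _get_context_id and update_options below are placeholders rather than the original library's implementations:

```python
class Async:
    def __init__(self):
        self._context_id = None
        self._options = {}

    def _get_context_id(self):
        return 'ctx-123'              # placeholder for the real id generation

    def update_options(self, **kwargs):
        self._options.update(kwargs)  # placeholder for the real option store

    def context_id(self):
        if not self._context_id:
            self._context_id = self._get_context_id()
            self.update_options(context_id=self._context_id)
        return self._context_id

task = Async()
print(task.context_id())   # 'ctx-123', computed and stored on first call
print(task.context_id())   # cached value returned on later calls
```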
def losses_by_period(losses, return_periods, num_events=None, eff_time=None):
"""
:param losses: array of simulated losses
:param return_periods: return periods of interest
:param num_events: the number of events (>= to the number of losses)
:param eff_time: investigation_time * ses_per_logic_tree_path
:returns: interpolated losses for the return periods, possibly with NaN
NB: the return periods must be ordered integers >= 1. The interpolated
losses are defined inside the interval min_time < time < eff_time
where min_time = eff_time /num_events. Outside the interval they
have NaN values. Here is an example:
>>> losses = [3, 2, 3.5, 4, 3, 23, 11, 2, 1, 4, 5, 7, 8, 9, 13]
>>> losses_by_period(losses, [1, 2, 5, 10, 20, 50, 100], 20)
array([ nan, nan, 0. , 3.5, 8. , 13. , 23. ])
If num_events is not passed, it is inferred from the number of losses;
if eff_time is not passed, it is inferred from the longest return period.
"""
if len(losses) == 0: # zero-curve
return numpy.zeros(len(return_periods))
if num_events is None:
num_events = len(losses)
elif num_events < len(losses):
raise ValueError(
'There are not enough events (%d) to compute the loss curve '
'from %d losses' % (num_events, len(losses)))
if eff_time is None:
eff_time = return_periods[-1]
losses = numpy.sort(losses)
num_zeros = num_events - len(losses)
if num_zeros:
losses = numpy.concatenate(
[numpy.zeros(num_zeros, losses.dtype), losses])
periods = eff_time / numpy.arange(num_events, 0., -1)
rperiods = [rp if periods[0] <= rp <= periods[-1] else numpy.nan
for rp in return_periods]
curve = numpy.interp(numpy.log(rperiods), numpy.log(periods), losses)
return curve | :param losses: array of simulated losses
:param return_periods: return periods of interest
:param num_events: the number of events (>= to the number of losses)
:param eff_time: investigation_time * ses_per_logic_tree_path
:returns: interpolated losses for the return periods, possibly with NaN
NB: the return periods must be ordered integers >= 1. The interpolated
losses are defined inside the interval min_time < time < eff_time
where min_time = eff_time /num_events. Outside the interval they
have NaN values. Here is an example:
>>> losses = [3, 2, 3.5, 4, 3, 23, 11, 2, 1, 4, 5, 7, 8, 9, 13]
>>> losses_by_period(losses, [1, 2, 5, 10, 20, 50, 100], 20)
array([ nan, nan, 0. , 3.5, 8. , 13. , 23. ])
If num_events is not passed, it is inferred from the number of losses;
if eff_time is not passed, it is inferred from the longest return period. | Below is the instruction that describes the task:
### Input:
:param losses: array of simulated losses
:param return_periods: return periods of interest
:param num_events: the number of events (>= to the number of losses)
:param eff_time: investigation_time * ses_per_logic_tree_path
:returns: interpolated losses for the return periods, possibly with NaN
NB: the return periods must be ordered integers >= 1. The interpolated
losses are defined inside the interval min_time < time < eff_time
where min_time = eff_time /num_events. Outside the interval they
have NaN values. Here is an example:
>>> losses = [3, 2, 3.5, 4, 3, 23, 11, 2, 1, 4, 5, 7, 8, 9, 13]
>>> losses_by_period(losses, [1, 2, 5, 10, 20, 50, 100], 20)
array([ nan, nan, 0. , 3.5, 8. , 13. , 23. ])
If num_events is not passed, it is inferred from the number of losses;
if eff_time is not passed, it is inferred from the longest return period.
### Response:
def losses_by_period(losses, return_periods, num_events=None, eff_time=None):
"""
:param losses: array of simulated losses
:param return_periods: return periods of interest
:param num_events: the number of events (>= to the number of losses)
:param eff_time: investigation_time * ses_per_logic_tree_path
:returns: interpolated losses for the return periods, possibly with NaN
NB: the return periods must be ordered integers >= 1. The interpolated
losses are defined inside the interval min_time < time < eff_time
where min_time = eff_time /num_events. Outside the interval they
have NaN values. Here is an example:
>>> losses = [3, 2, 3.5, 4, 3, 23, 11, 2, 1, 4, 5, 7, 8, 9, 13]
>>> losses_by_period(losses, [1, 2, 5, 10, 20, 50, 100], 20)
array([ nan, nan, 0. , 3.5, 8. , 13. , 23. ])
If num_events is not passed, it is inferred from the number of losses;
if eff_time is not passed, it is inferred from the longest return period.
"""
if len(losses) == 0: # zero-curve
return numpy.zeros(len(return_periods))
if num_events is None:
num_events = len(losses)
elif num_events < len(losses):
raise ValueError(
'There are not enough events (%d) to compute the loss curve '
'from %d losses' % (num_events, len(losses)))
if eff_time is None:
eff_time = return_periods[-1]
losses = numpy.sort(losses)
num_zeros = num_events - len(losses)
if num_zeros:
losses = numpy.concatenate(
[numpy.zeros(num_zeros, losses.dtype), losses])
periods = eff_time / numpy.arange(num_events, 0., -1)
rperiods = [rp if periods[0] <= rp <= periods[-1] else numpy.nan
for rp in return_periods]
curve = numpy.interp(numpy.log(rperiods), numpy.log(periods), losses)
return curve |
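The interpolation step above can be checked with plain numpy; this sketch reruns the docstring example (15 losses, num_events=20, eff_time defaulting to the longest return period, 100) without importing the original module:

```python
import numpy as np

losses = np.sort([3, 2, 3.5, 4, 3, 23, 11, 2, 1, 4, 5, 7, 8, 9, 13])
num_events, eff_time = 20, 100
return_periods = [1, 2, 5, 10, 20, 50, 100]

# pad with zeros for the events that produced no loss at all
losses = np.concatenate([np.zeros(num_events - len(losses)), losses])
# empirical return periods of the sorted losses: eff_time / rank, from 5.0 up to 100.0
periods = eff_time / np.arange(num_events, 0., -1)
# return periods outside [eff_time/num_events, eff_time] become NaN
rperiods = [rp if periods[0] <= rp <= periods[-1] else np.nan
            for rp in return_periods]
print(np.interp(np.log(rperiods), np.log(periods), losses))
# [ nan  nan  0.   3.5  8.  13.  23. ]
```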
def make_figure(plots):
"""
:param plots: list of pairs (task_name, memory array)
"""
# NB: matplotlib is imported inside since it is a costly import
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel('tasks')
ax.set_ylabel('GB')
start = 0
for task_name, mem in plots:
ax.plot(range(start, start + len(mem)), mem, label=task_name)
start += len(mem)
ax.legend()
return plt | :param plots: list of pairs (task_name, memory array) | Below is the instruction that describes the task:
### Input:
:param plots: list of pairs (task_name, memory array)
### Response:
def make_figure(plots):
"""
:param plots: list of pairs (task_name, memory array)
"""
# NB: matplotlib is imported inside since it is a costly import
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel('tasks')
ax.set_ylabel('GB')
start = 0
for task_name, mem in plots:
ax.plot(range(start, start + len(mem)), mem, label=task_name)
start += len(mem)
ax.legend()
return plt |
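A hedged usage sketch, assuming make_figure is importable from its (unshown) module; each (task_name, memory array) pair occupies its own stretch of the x axis, and the memory figures here are made up:

```python
import numpy as np

plots = [
    ('classical',   np.array([1.2, 1.5, 1.4, 1.8])),   # tasks 0-3, peak GB each
    ('postprocess', np.array([0.6, 0.7])),             # tasks 4-5
]
plt = make_figure(plots)           # returns the matplotlib.pyplot module
plt.savefig('memory_by_task.png')
```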
def get_catalogs_by_query(self, catalog_query):
"""Gets a list of ``Catalogs`` matching the given catalog query.
arg: catalog_query (osid.cataloging.CatalogQuery): the
catalog query
return: (osid.cataloging.CatalogList) - the returned
``CatalogList``
raise: NullArgument - ``catalog_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``catalog_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinQuerySession.get_bins_by_query_template
if self._catalog_session is not None:
return self._catalog_session.get_catalogs_by_query(catalog_query)
query_terms = dict(catalog_query._query_terms)
collection = JSONClientValidated('cataloging',
collection='Catalog',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
return objects.CatalogList(result, runtime=self._runtime) | Gets a list of ``Catalogs`` matching the given catalog query.
arg: catalog_query (osid.cataloging.CatalogQuery): the
catalog query
return: (osid.cataloging.CatalogList) - the returned
``CatalogList``
raise: NullArgument - ``catalog_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``catalog_query`` is not of this service
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets a list of ``Catalogs`` matching the given catalog query.
arg: catalog_query (osid.cataloging.CatalogQuery): the
catalog query
return: (osid.cataloging.CatalogList) - the returned
``CatalogList``
raise: NullArgument - ``catalog_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``catalog_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_catalogs_by_query(self, catalog_query):
"""Gets a list of ``Catalogs`` matching the given catalog query.
arg: catalog_query (osid.cataloging.CatalogQuery): the
catalog query
return: (osid.cataloging.CatalogList) - the returned
``CatalogList``
raise: NullArgument - ``catalog_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``catalog_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinQuerySession.get_bins_by_query_template
if self._catalog_session is not None:
return self._catalog_session.get_catalogs_by_query(catalog_query)
query_terms = dict(catalog_query._query_terms)
collection = JSONClientValidated('cataloging',
collection='Catalog',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
return objects.CatalogList(result, runtime=self._runtime) |
def refresh(self) -> None:
"""Prepare the actual |anntools.SeasonalANN| object for calculations.
Despite all automated refreshings explained in the general
documentation on class |anntools.SeasonalANN|, it is still possible
to destroy the inner consistency of a |anntools.SeasonalANN| instance,
as it stores its |anntools.ANN| objects by reference. This is shown
by the following example:
>>> from hydpy import SeasonalANN, ann
>>> seasonalann = SeasonalANN(None)
>>> seasonalann.simulationstep = '1d'
>>> jan = ann(nmb_inputs=1, nmb_neurons=(1,), nmb_outputs=1,
... weights_input=0.0, weights_output=0.0,
... intercepts_hidden=0.0, intercepts_output=1.0)
>>> seasonalann(_1_1_12=jan)
>>> jan.nmb_inputs, jan.nmb_outputs = 2, 3
>>> jan.nmb_inputs, jan.nmb_outputs
(2, 3)
>>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
(1, 1)
Due to the C level implementation of the mathematical core of
both |anntools.ANN| and |anntools.SeasonalANN| in module |annutils|,
such an inconsistency might result in a program crash without any
informative error message. Whenever you are afraid some
inconsistency might have crept in, and you want to repair it,
call method |anntools.SeasonalANN.refresh| explicitly:
>>> seasonalann.refresh()
>>> jan.nmb_inputs, jan.nmb_outputs
(2, 3)
>>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
(2, 3)
"""
# pylint: disable=unsupported-assignment-operation
if self._do_refresh:
if self.anns:
self.__sann = annutils.SeasonalANN(self.anns)
setattr(self.fastaccess, self.name, self._sann)
self._set_shape((None, self._sann.nmb_anns))
if self._sann.nmb_anns > 1:
self._interp()
else:
self._sann.ratios[:, 0] = 1.
self.verify()
else:
self.__sann = None | Prepare the actual |anntools.SeasonalANN| object for calculations.
Despite all automated refreshings explained in the general
documentation on class |anntools.SeasonalANN|, it is still possible
to destroy the inner consistency of a |anntools.SeasonalANN| instance,
as it stores its |anntools.ANN| objects by reference. This is shown
by the following example:
>>> from hydpy import SeasonalANN, ann
>>> seasonalann = SeasonalANN(None)
>>> seasonalann.simulationstep = '1d'
>>> jan = ann(nmb_inputs=1, nmb_neurons=(1,), nmb_outputs=1,
... weights_input=0.0, weights_output=0.0,
... intercepts_hidden=0.0, intercepts_output=1.0)
>>> seasonalann(_1_1_12=jan)
>>> jan.nmb_inputs, jan.nmb_outputs = 2, 3
>>> jan.nmb_inputs, jan.nmb_outputs
(2, 3)
>>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
(1, 1)
Due to the C level implementation of the mathematical core of
both |anntools.ANN| and |anntools.SeasonalANN| in module |annutils|,
such an inconsistency might result in a program crash without any
informative error message. Whenever you are afraid some
inconsistency might have crept in, and you want to repair it,
call method |anntools.SeasonalANN.refresh| explicitly:
>>> seasonalann.refresh()
>>> jan.nmb_inputs, jan.nmb_outputs
(2, 3)
>>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
(2, 3) | Below is the instruction that describes the task:
### Input:
Prepare the actual |anntools.SeasonalANN| object for calculations.
Despite all automated refreshings explained in the general
documentation on class |anntools.SeasonalANN|, it is still possible
to destroy the inner consistency of a |anntools.SeasonalANN| instance,
as it stores its |anntools.ANN| objects by reference. This is shown
by the following example:
>>> from hydpy import SeasonalANN, ann
>>> seasonalann = SeasonalANN(None)
>>> seasonalann.simulationstep = '1d'
>>> jan = ann(nmb_inputs=1, nmb_neurons=(1,), nmb_outputs=1,
... weights_input=0.0, weights_output=0.0,
... intercepts_hidden=0.0, intercepts_output=1.0)
>>> seasonalann(_1_1_12=jan)
>>> jan.nmb_inputs, jan.nmb_outputs = 2, 3
>>> jan.nmb_inputs, jan.nmb_outputs
(2, 3)
>>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
(1, 1)
Due to the C level implementation of the mathematical core of
both |anntools.ANN| and |anntools.SeasonalANN| in module |annutils|,
such an inconsistency might result in a program crash without any
informative error message. Whenever you are afraid some
inconsistency might have crept in, and you want to repair it,
call method |anntools.SeasonalANN.refresh| explicitly:
>>> seasonalann.refresh()
>>> jan.nmb_inputs, jan.nmb_outputs
(2, 3)
>>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
(2, 3)
### Response:
def refresh(self) -> None:
"""Prepare the actual |anntools.SeasonalANN| object for calculations.
Despite all automated refreshings explained in the general
documentation on class |anntools.SeasonalANN|, it is still possible
to destroy the inner consistency of a |anntools.SeasonalANN| instance,
as it stores its |anntools.ANN| objects by reference. This is shown
by the following example:
>>> from hydpy import SeasonalANN, ann
>>> seasonalann = SeasonalANN(None)
>>> seasonalann.simulationstep = '1d'
>>> jan = ann(nmb_inputs=1, nmb_neurons=(1,), nmb_outputs=1,
... weights_input=0.0, weights_output=0.0,
... intercepts_hidden=0.0, intercepts_output=1.0)
>>> seasonalann(_1_1_12=jan)
>>> jan.nmb_inputs, jan.nmb_outputs = 2, 3
>>> jan.nmb_inputs, jan.nmb_outputs
(2, 3)
>>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
(1, 1)
Due to the C level implementation of the mathematical core of
both |anntools.ANN| and |anntools.SeasonalANN| in module |annutils|,
such an inconsistency might result in a program crash without any
informative error message. Whenever you are afraid some
inconsistency might have crept in, and you want to repair it,
call method |anntools.SeasonalANN.refresh| explicitly:
>>> seasonalann.refresh()
>>> jan.nmb_inputs, jan.nmb_outputs
(2, 3)
>>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
(2, 3)
"""
# pylint: disable=unsupported-assignment-operation
if self._do_refresh:
if self.anns:
self.__sann = annutils.SeasonalANN(self.anns)
setattr(self.fastaccess, self.name, self._sann)
self._set_shape((None, self._sann.nmb_anns))
if self._sann.nmb_anns > 1:
self._interp()
else:
self._sann.ratios[:, 0] = 1.
self.verify()
else:
self.__sann = None |
def update_resources(self, dstpath, names=None, languages=None):
""" Update or add manifest resource in dll/exe file dstpath """
UpdateManifestResourcesFromXML(dstpath, self.toprettyxml(), names,
languages) | Update or add manifest resource in dll/exe file dstpath | Below is the instruction that describes the task:
### Input:
Update or add manifest resource in dll/exe file dstpath
### Response:
def update_resources(self, dstpath, names=None, languages=None):
""" Update or add manifest resource in dll/exe file dstpath """
UpdateManifestResourcesFromXML(dstpath, self.toprettyxml(), names,
languages) |
def get_sonos_playlist_by_attr(self, attr_name, match):
"""Return the first Sonos Playlist DidlPlaylistContainer that
matches the attribute specified.
Args:
attr_name (str): DidlPlaylistContainer attribute to compare. The
most useful being: 'title' and 'item_id'.
match (str): Value to match.
Returns:
(:class:`~.soco.data_structures.DidlPlaylistContainer`): The
first matching playlist object.
Raises:
(AttributeError): If indicated attribute name does not exist.
(ValueError): If a match can not be found.
Example::
device.get_sonos_playlist_by_attr('title', 'Foo')
device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
"""
for sonos_playlist in self.get_sonos_playlists():
if getattr(sonos_playlist, attr_name) == match:
return sonos_playlist
raise ValueError('No match on "{0}" for value "{1}"'.format(attr_name,
match)) | Return the first Sonos Playlist DidlPlaylistContainer that
matches the attribute specified.
Args:
attr_name (str): DidlPlaylistContainer attribute to compare. The
most useful being: 'title' and 'item_id'.
match (str): Value to match.
Returns:
(:class:`~.soco.data_structures.DidlPlaylistContainer`): The
first matching playlist object.
Raises:
(AttributeError): If indicated attribute name does not exist.
(ValueError): If a match can not be found.
Example::
device.get_sonos_playlist_by_attr('title', 'Foo')
device.get_sonos_playlist_by_attr('item_id', 'SQ:3') | Below is the instruction that describes the task:
### Input:
Return the first Sonos Playlist DidlPlaylistContainer that
matches the attribute specified.
Args:
attr_name (str): DidlPlaylistContainer attribute to compare. The
most useful being: 'title' and 'item_id'.
match (str): Value to match.
Returns:
(:class:`~.soco.data_structures.DidlPlaylistContainer`): The
first matching playlist object.
Raises:
(AttributeError): If indicated attribute name does not exist.
(ValueError): If a match can not be found.
Example::
device.get_sonos_playlist_by_attr('title', 'Foo')
device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
### Response:
def get_sonos_playlist_by_attr(self, attr_name, match):
"""Return the first Sonos Playlist DidlPlaylistContainer that
matches the attribute specified.
Args:
attr_name (str): DidlPlaylistContainer attribute to compare. The
most useful being: 'title' and 'item_id'.
match (str): Value to match.
Returns:
(:class:`~.soco.data_structures.DidlPlaylistContainer`): The
first matching playlist object.
Raises:
(AttributeError): If indicated attribute name does not exist.
(ValueError): If a match can not be found.
Example::
device.get_sonos_playlist_by_attr('title', 'Foo')
device.get_sonos_playlist_by_attr('item_id', 'SQ:3')
"""
for sonos_playlist in self.get_sonos_playlists():
if getattr(sonos_playlist, attr_name) == match:
return sonos_playlist
raise ValueError('No match on "{0}" for value "{1}"'.format(attr_name,
match)) |
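The lookup is a generic first-match-by-attribute pattern; here is a SoCo-free sketch of it, with a hypothetical Playlist stand-in instead of DidlPlaylistContainer objects:

```python
def first_by_attr(items, attr_name, match):
    """Return the first item whose attribute equals match, else raise ValueError."""
    for item in items:
        if getattr(item, attr_name) == match:   # AttributeError for unknown attributes
            return item
    raise ValueError('No match on "{0}" for value "{1}"'.format(attr_name, match))

class Playlist:
    def __init__(self, title, item_id):
        self.title, self.item_id = title, item_id

playlists = [Playlist('Foo', 'SQ:1'), Playlist('Bar', 'SQ:3')]
print(first_by_attr(playlists, 'item_id', 'SQ:3').title)   # Bar
```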
def check_datasource_perms(self, datasource_type=None, datasource_id=None):
"""
Check if user can access a cached response from explore_json.
This function takes `self` since it must have the same signature as
the decorated method.
"""
form_data = get_form_data()[0]
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data)
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
security_manager.assert_datasource_permission(viz_obj.datasource) | Check if user can access a cached response from explore_json.
This function takes `self` since it must have the same signature as
the decorated method. | Below is the instruction that describes the task:
### Input:
Check if user can access a cached response from explore_json.
This function takes `self` since it must have the same signature as
the decorated method.
### Response:
def check_datasource_perms(self, datasource_type=None, datasource_id=None):
"""
Check if user can access a cached response from explore_json.
This function takes `self` since it must have the same signature as
the decorated method.
"""
form_data = get_form_data()[0]
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data)
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
security_manager.assert_datasource_permission(viz_obj.datasource) |
def check_overlap(a, b):
"""Check for wavelength overlap between two spectra.
.. note::
Generalized from
:meth:`pysynphot.spectrum.SpectralElement.check_overlap`.
Parameters
----------
a, b : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
Typically a source spectrum, spectral element, observation,
or bandpass from observation mode.
Returns
-------
result : {'full', 'partial', 'none'}
Full, partial, or no overlap.
Raises
------
AttributeError
Given spectrum does not have flux or throughput.
"""
if a.isAnalytic or b.isAnalytic:
#then it's defined everywhere
result = 'full'
else:
#get the wavelength arrays
waves = list()
for x in (a, b):
if hasattr(x,'throughput'):
wv = x.wave[np.where(x.throughput != 0)]
elif hasattr(x,'flux'):
wv = x.wave
else:
raise AttributeError("neither flux nor throughput in %s"%x)
waves.append(wv)
#get the endpoints
a1,a2 = waves[0].min(), waves[0].max()
b1,b2 = waves[1].min(), waves[1].max()
#do the comparison
if (a1>=b1 and a2<=b2):
result = 'full'
elif (a2<b1) or (b2<a1):
result = 'none'
else:
result = 'partial'
return result | Check for wavelength overlap between two spectra.
.. note::
Generalized from
:meth:`pysynphot.spectrum.SpectralElement.check_overlap`.
Parameters
----------
a, b : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
Typically a source spectrum, spectral element, observation,
or bandpass from observation mode.
Returns
-------
result : {'full', 'partial', 'none'}
Full, partial, or no overlap.
Raises
------
AttributeError
Given spectrum does not have flux or throughput. | Below is the instruction that describes the task:
### Input:
Check for wavelength overlap between two spectra.
.. note::
Generalized from
:meth:`pysynphot.spectrum.SpectralElement.check_overlap`.
Parameters
----------
a, b : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
Typically a source spectrum, spectral element, observation,
or bandpass from observation mode.
Returns
-------
result : {'full', 'partial', 'none'}
Full, partial, or no overlap.
Raises
------
AttributeError
Given spectrum does not have flux or throughput.
### Response:
def check_overlap(a, b):
"""Check for wavelength overlap between two spectra.
.. note::
Generalized from
:meth:`pysynphot.spectrum.SpectralElement.check_overlap`.
Parameters
----------
a, b : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
Typically a source spectrum, spectral element, observation,
or bandpass from observation mode.
Returns
-------
result : {'full', 'partial', 'none'}
Full, partial, or no overlap.
Raises
------
AttributeError
Given spectrum does not have flux or throughput.
"""
if a.isAnalytic or b.isAnalytic:
#then it's defined everywhere
result = 'full'
else:
#get the wavelength arrays
waves = list()
for x in (a, b):
if hasattr(x,'throughput'):
wv = x.wave[np.where(x.throughput != 0)]
elif hasattr(x,'flux'):
wv = x.wave
else:
raise AttributeError("neither flux nor throughput in %s"%x)
waves.append(wv)
#get the endpoints
a1,a2 = waves[0].min(), waves[0].max()
b1,b2 = waves[1].min(), waves[1].max()
#do the comparison
if (a1>=b1 and a2<=b2):
result = 'full'
elif (a2<b1) or (b2<a1):
result = 'none'
else:
result = 'partial'
return result |
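The endpoint comparison at the end of check_overlap reduces to plain interval logic; the toy function below mirrors it without pysynphot objects (the wavelength ranges are made up):

```python
def overlap_status(a_range, b_range):
    a1, a2 = a_range
    b1, b2 = b_range
    if a1 >= b1 and a2 <= b2:
        return 'full'      # a is entirely covered by b
    elif a2 < b1 or b2 < a1:
        return 'none'      # the ranges never touch
    return 'partial'       # anything else overlaps only in part

print(overlap_status((5000, 6000), (3000, 11000)))   # full
print(overlap_status((5000, 6000), (5500, 11000)))   # partial
print(overlap_status((5000, 6000), (8000, 11000)))   # none
```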
def with_port(self, port):
"""Return a new URL with port replaced.
Clear port to default if None is passed.
"""
# N.B. doesn't cleanup query/fragment
if port is not None and not isinstance(port, int):
raise TypeError("port should be int or None, got {}".format(type(port)))
if not self.is_absolute():
raise ValueError("port replacement is not allowed " "for relative URLs")
val = self._val
return URL(
self._val._replace(
netloc=self._make_netloc(
val.username, val.password, val.hostname, port, encode=False
)
),
encoded=True,
) | Return a new URL with port replaced.
Clear port to default if None is passed. | Below is the instruction that describes the task:
### Input:
Return a new URL with port replaced.
Clear port to default if None is passed.
### Response:
def with_port(self, port):
"""Return a new URL with port replaced.
Clear port to default if None is passed.
"""
# N.B. doesn't cleanup query/fragment
if port is not None and not isinstance(port, int):
raise TypeError("port should be int or None, got {}".format(type(port)))
if not self.is_absolute():
raise ValueError("port replacement is not allowed " "for relative URLs")
val = self._val
return URL(
self._val._replace(
netloc=self._make_netloc(
val.username, val.password, val.hostname, port, encode=False
)
),
encoded=True,
) |
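This appears to match yarl's URL.with_port; assuming that library, a usage sketch (note the query string is kept untouched, as the comment in the code says):

```python
from yarl import URL

url = URL('http://example.com/path?x=1')
print(url.with_port(8080))    # http://example.com:8080/path?x=1
print(url.with_port(None))    # http://example.com/path?x=1  (back to the scheme default)

try:
    url.with_port('8080')     # only int or None is accepted
except TypeError as exc:
    print(exc)                # port should be int or None, got <class 'str'>
```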
def getElementDeclaration(cls, namespaceURI, name, isref=False, lazy=False):
'''Grab an element declaration, returns a typecode instance
representation or a typecode class definition. An element
reference has its own facets, and is local so it will not be
cached.
Parameters:
namespaceURI --
name --
isref -- if element reference, return class definition.
'''
key = (namespaceURI, name)
if isref:
klass = cls.elements.get(key,None)
if klass is not None and lazy is True:
return _Mirage(klass)
return klass
typecode = cls.element_typecode_cache.get(key, None)
if typecode is None:
tcls = cls.elements.get(key,None)
if tcls is not None:
typecode = cls.element_typecode_cache[key] = tcls()
typecode.typed = False
return typecode | Grab an element declaration, returns a typecode instance
representation or a typecode class definition. An element
reference has its own facets, and is local so it will not be
cached.
Parameters:
namespaceURI --
name --
isref -- if element reference, return class definition. | Below is the instruction that describes the task:
### Input:
Grab an element declaration, returns a typecode instance
representation or a typecode class definition. An element
reference has its own facets, and is local so it will not be
cached.
Parameters:
namespaceURI --
name --
isref -- if element reference, return class definition.
### Response:
def getElementDeclaration(cls, namespaceURI, name, isref=False, lazy=False):
'''Grab an element declaration, returns a typecode instance
representation or a typecode class definition. An element
reference has its own facets, and is local so it will not be
cached.
Parameters:
namespaceURI --
name --
isref -- if element reference, return class definition.
'''
key = (namespaceURI, name)
if isref:
klass = cls.elements.get(key,None)
if klass is not None and lazy is True:
return _Mirage(klass)
return klass
typecode = cls.element_typecode_cache.get(key, None)
if typecode is None:
tcls = cls.elements.get(key,None)
if tcls is not None:
typecode = cls.element_typecode_cache[key] = tcls()
typecode.typed = False
return typecode |
def get_layer_output(self, name):
'''Get symbolic output of a layer.'''
if not name in self._f_layer_outputs:
layer = self.net.get_layer(name)
self._f_layer_outputs[name] = layer.output
return self._f_layer_outputs[name] | Get symbolic output of a layer. | Below is the instruction that describes the task:
### Input:
Get symbolic output of a layer.
### Response:
def get_layer_output(self, name):
'''Get symbolic output of a layer.'''
if not name in self._f_layer_outputs:
layer = self.net.get_layer(name)
self._f_layer_outputs[name] = layer.output
return self._f_layer_outputs[name] |
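A minimal sketch of the same caching idea against a Keras-style model, assuming self.net behaves like keras.Model, whose get_layer(name).output is the symbolic tensor being stored; the toy network is made up:

```python
from tensorflow import keras

inputs = keras.Input(shape=(4,))
hidden = keras.layers.Dense(8, name='hidden')(inputs)
outputs = keras.layers.Dense(1, name='out')(hidden)
model = keras.Model(inputs, outputs)

_cache = {}
def get_layer_output(name):
    if name not in _cache:                         # look it up only once per layer
        _cache[name] = model.get_layer(name).output
    return _cache[name]

print(get_layer_output('hidden'))   # symbolic output tensor of the Dense layer
```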
def run_healthchecks(self):
"""
Runs all registered healthchecks and returns a list of
HealthcheckResponse.
"""
if not self._registry_loaded:
self.load_healthchecks()
def get_healthcheck_name(hc):
if hasattr(hc, 'name'):
return hc.name
return hc.__name__
responses = []
for healthcheck in self._registry:
try:
if inspect.isclass(healthcheck):
healthcheck = healthcheck()
response = healthcheck()
if isinstance(response, bool):
response = HealthcheckResponse(
name=get_healthcheck_name(healthcheck),
status=response,
)
except Exception as e:
response = HealthcheckResponse(
name=get_healthcheck_name(healthcheck),
status=False,
exception=str(e),
exception_class=e.__class__.__name__,
)
responses.append(response)
return responses | Runs all registered healthchecks and returns a list of
HealthcheckResponse. | Below is the instruction that describes the task:
### Input:
Runs all registered healthchecks and returns a list of
HealthcheckResponse.
### Response:
def run_healthchecks(self):
"""
Runs all registered healthchecks and returns a list of
HealthcheckResponse.
"""
if not self._registry_loaded:
self.load_healthchecks()
def get_healthcheck_name(hc):
if hasattr(hc, 'name'):
return hc.name
return hc.__name__
responses = []
for healthcheck in self._registry:
try:
if inspect.isclass(healthcheck):
healthcheck = healthcheck()
response = healthcheck()
if isinstance(response, bool):
response = HealthcheckResponse(
name=get_healthcheck_name(healthcheck),
status=response,
)
except Exception as e:
response = HealthcheckResponse(
name=get_healthcheck_name(healthcheck),
status=False,
exception=str(e),
exception_class=e.__class__.__name__,
)
responses.append(response)
return responses |
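run_healthchecks accepts callables in two shapes; the hypothetical checks below illustrate both (the names and the simulated failure are made up, and registration is left to the host project's registry):

```python
def database_reachable():
    # a bare bool is wrapped into HealthcheckResponse(name='database_reachable', status=True)
    return True

class CacheCheck:
    name = 'cache'                         # used instead of __name__ when present

    def __call__(self):
        raise RuntimeError('redis down')   # caught and reported with status=False
```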
def mean_harmonic(self):
'Return the harmonic mean of DataStruct.price'
res = self.price.groupby(level=1
).apply(lambda x: statistics.harmonic_mean(x))
res.name = 'mean_harmonic'
return res | Return the harmonic mean of DataStruct.price | Below is the instruction that describes the task:
### Input:
Return the harmonic mean of DataStruct.price
### Response:
def mean_harmonic(self):
'Return the harmonic mean of DataStruct.price'
res = self.price.groupby(level=1
).apply(lambda x: statistics.harmonic_mean(x))
res.name = 'mean_harmonic'
return res |
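For reference, statistics.harmonic_mean is the standard-library call used above; a quick numeric check with made-up prices:

```python
import statistics

prices = [10, 20, 40]
# harmonic mean = 3 / (1/10 + 1/20 + 1/40) = 3 / 0.175
print(statistics.harmonic_mean(prices))   # 17.142857142857142
print(statistics.mean(prices))            # 23.333333333333332 (never smaller)
```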
def create_api_key(self):
"""Create API Key for API access."""
apikeys = self.client.get_api_keys()
for key in apikeys['items']:
if key['name'] == self.app_name:
self.log.info("Key %s already exists", self.app_name)
break
else:
self.client.create_api_key(
name=self.app_name, enabled=True, stageKeys=[{
'restApiId': self.api_id,
'stageName': self.env
}])
self.log.info("Successfully created API Key %s. Look in the AWS console for the key", self.app_name) | Create API Key for API access. | Below is the the instruction that describes the task:
### Input:
Create API Key for API access.
### Response:
def create_api_key(self):
"""Create API Key for API access."""
apikeys = self.client.get_api_keys()
for key in apikeys['items']:
if key['name'] == self.app_name:
self.log.info("Key %s already exists", self.app_name)
break
else:
self.client.create_api_key(
name=self.app_name, enabled=True, stageKeys=[{
'restApiId': self.api_id,
'stageName': self.env
}])
self.log.info("Successfully created API Key %s. Look in the AWS console for the key", self.app_name) |
def atq(tag=None):
'''
List all queued and running jobs or only those with
an optional 'tag'.
CLI Example:
.. code-block:: bash
salt '*' at.atq
salt '*' at.atq [tag]
salt '*' at.atq [job number]
'''
jobs = []
# Shim to produce output similar to what __virtual__() should do
# but __salt__ isn't available in __virtual__()
# Tested on CentOS 5.8
if __grains__['os_family'] == 'RedHat':
output = _cmd('at', '-l')
else:
output = _cmd('atq')
if output is None:
return '\'at.atq\' is not available.'
# No jobs so return
if output == '':
return {'jobs': jobs}
# Jobs created with at.at() will use the following
# comment to denote a tagged job.
job_kw_regex = re.compile(r'^### SALT: (\w+)')
# Split each job into a dictionary and handle
# pulling out tags or only listing jobs with a certain
# tag
for line in output.splitlines():
job_tag = ''
# Redhat/CentOS
if __grains__['os_family'] == 'RedHat':
job, spec = line.split('\t')
specs = spec.split()
elif __grains__['os'] == 'OpenBSD':
if line.startswith(' Rank'):
continue
else:
tmp = line.split()
timestr = ' '.join(tmp[1:5])
job = tmp[6]
specs = datetime.datetime(*(time.strptime(timestr, '%b %d, %Y '
'%H:%M')[0:5])).isoformat().split('T')
specs.append(tmp[7])
specs.append(tmp[5])
elif __grains__['os'] == 'FreeBSD':
if line.startswith('Date'):
continue
else:
tmp = line.split()
timestr = ' '.join(tmp[1:6])
job = tmp[8]
specs = datetime.datetime(*(time.strptime(timestr,
'%b %d %H:%M:%S %Z %Y')[0:5])).isoformat().split('T')
specs.append(tmp[7])
specs.append(tmp[6])
else:
job, spec = line.split('\t')
tmp = spec.split()
timestr = ' '.join(tmp[0:5])
specs = datetime.datetime(*(time.strptime(timestr)
[0:5])).isoformat().split('T')
specs.append(tmp[5])
specs.append(tmp[6])
# Search for any tags
atc_out = _cmd('at', '-c', job)
for line in atc_out.splitlines():
tmp = job_kw_regex.match(line)
if tmp:
job_tag = tmp.groups()[0]
if __grains__['os'] in BSD:
job = six.text_type(job)
else:
job = int(job)
# If a tag is supplied, only list jobs with that tag
if tag:
# TODO: Looks like there is a difference between salt and salt-call
# If I don't wrap job in an int(), it fails on salt but works on
# salt-call. With the int(), it fails with salt-call but not salt.
if tag == job_tag or tag == job:
jobs.append({'job': job, 'date': specs[0], 'time': specs[1],
'queue': specs[2], 'user': specs[3], 'tag': job_tag})
else:
jobs.append({'job': job, 'date': specs[0], 'time': specs[1],
'queue': specs[2], 'user': specs[3], 'tag': job_tag})
return {'jobs': jobs} | List all queued and running jobs or only those with
an optional 'tag'.
CLI Example:
.. code-block:: bash
salt '*' at.atq
salt '*' at.atq [tag]
salt '*' at.atq [job number] | Below is the instruction that describes the task:
### Input:
List all queued and running jobs or only those with
an optional 'tag'.
CLI Example:
.. code-block:: bash
salt '*' at.atq
salt '*' at.atq [tag]
salt '*' at.atq [job number]
### Response:
def atq(tag=None):
'''
List all queued and running jobs or only those with
an optional 'tag'.
CLI Example:
.. code-block:: bash
salt '*' at.atq
salt '*' at.atq [tag]
salt '*' at.atq [job number]
'''
jobs = []
# Shim to produce output similar to what __virtual__() should do
# but __salt__ isn't available in __virtual__()
# Tested on CentOS 5.8
if __grains__['os_family'] == 'RedHat':
output = _cmd('at', '-l')
else:
output = _cmd('atq')
if output is None:
return '\'at.atq\' is not available.'
# No jobs so return
if output == '':
return {'jobs': jobs}
# Jobs created with at.at() will use the following
# comment to denote a tagged job.
job_kw_regex = re.compile(r'^### SALT: (\w+)')
# Split each job into a dictionary and handle
# pulling out tags or only listing jobs with a certain
# tag
for line in output.splitlines():
job_tag = ''
# Redhat/CentOS
if __grains__['os_family'] == 'RedHat':
job, spec = line.split('\t')
specs = spec.split()
elif __grains__['os'] == 'OpenBSD':
if line.startswith(' Rank'):
continue
else:
tmp = line.split()
timestr = ' '.join(tmp[1:5])
job = tmp[6]
specs = datetime.datetime(*(time.strptime(timestr, '%b %d, %Y '
'%H:%M')[0:5])).isoformat().split('T')
specs.append(tmp[7])
specs.append(tmp[5])
elif __grains__['os'] == 'FreeBSD':
if line.startswith('Date'):
continue
else:
tmp = line.split()
timestr = ' '.join(tmp[1:6])
job = tmp[8]
specs = datetime.datetime(*(time.strptime(timestr,
'%b %d %H:%M:%S %Z %Y')[0:5])).isoformat().split('T')
specs.append(tmp[7])
specs.append(tmp[6])
else:
job, spec = line.split('\t')
tmp = spec.split()
timestr = ' '.join(tmp[0:5])
specs = datetime.datetime(*(time.strptime(timestr)
[0:5])).isoformat().split('T')
specs.append(tmp[5])
specs.append(tmp[6])
# Search for any tags
atc_out = _cmd('at', '-c', job)
for line in atc_out.splitlines():
tmp = job_kw_regex.match(line)
if tmp:
job_tag = tmp.groups()[0]
if __grains__['os'] in BSD:
job = six.text_type(job)
else:
job = int(job)
# If a tag is supplied, only list jobs with that tag
if tag:
# TODO: Looks like there is a difference between salt and salt-call
# If I don't wrap job in an int(), it fails on salt but works on
# salt-call. With the int(), it fails with salt-call but not salt.
if tag == job_tag or tag == job:
jobs.append({'job': job, 'date': specs[0], 'time': specs[1],
'queue': specs[2], 'user': specs[3], 'tag': job_tag})
else:
jobs.append({'job': job, 'date': specs[0], 'time': specs[1],
'queue': specs[2], 'user': specs[3], 'tag': job_tag})
return {'jobs': jobs} |
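The tag is recovered from the at(1) job script via the '### SALT:' comment; a small stand-alone check of that regex (the script text is made up):

```python
import re

job_kw_regex = re.compile(r'^### SALT: (\w+)')

script = "#!/bin/sh\n### SALT: nightly_backup\ntar czf /tmp/etc.tgz /etc\n"
for line in script.splitlines():
    match = job_kw_regex.match(line)
    if match:
        print(match.groups()[0])   # nightly_backup
```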
def get_current_selection(self, i=None):
"""Get the :class:`TaskFileInfo` for the file selected in the active tab
:param i: If None, returns selection of active tab. If 0, assetselection. If 1, shotselection
:type i:
:returns: The taskfile info in the currently active tab
:rtype: :class:`TaskFileInfo` | None
:raises: None
"""
taskfile = None
if (i is None and self.selection_tabw.currentIndex() == 0) or (i is not None and i == 0):
indexes = self.assetverbrws.selected_indexes(0)
if indexes and indexes[0].isValid():
item = indexes[0].internalPointer()
taskfile = item.internal_data()
elif (i is None and self.selection_tabw.currentIndex() == 1) or (i is not None and i == 1):
indexes = self.shotverbrws.selected_indexes(0)
if indexes and indexes[0].isValid():
item = indexes[0].internalPointer()
taskfile = item.internal_data()
return taskfile | Get the :class:`TaskFileInfo` for the file selected in the active tab
:param i: If None, returns selection of active tab. If 0, assetselection. If 1, shotselection
:type i:
:returns: The taskfile info in the currently active tab
:rtype: :class:`TaskFileInfo` | None
:raises: None | Below is the instruction that describes the task:
### Input:
Get the :class:`TaskFileInfo` for the file selected in the active tab
:param i: If None, returns selection of active tab. If 0, assetselection. If 1, shotselection
:type i:
:returns: The taskfile info in the currently active tab
:rtype: :class:`TaskFileInfo` | None
:raises: None
### Response:
def get_current_selection(self, i=None):
"""Get the :class:`TaskFileInfo` for the file selected in the active tab
:param i: If None, returns selection of active tab. If 0, assetselection. If 1, shotselection
:type i:
:returns: The taskfile info in the currently active tab
:rtype: :class:`TaskFileInfo` | None
:raises: None
"""
taskfile = None
if (i is None and self.selection_tabw.currentIndex() == 0) or (i is not None and i == 0):
indexes = self.assetverbrws.selected_indexes(0)
if indexes and indexes[0].isValid():
item = indexes[0].internalPointer()
taskfile = item.internal_data()
elif (i is None and self.selection_tabw.currentIndex() == 1) or (i is not None and i == 1):
indexes = self.shotverbrws.selected_indexes(0)
if indexes and indexes[0].isValid():
item = indexes[0].internalPointer()
taskfile = item.internal_data()
return taskfile |
def make_plot(self):
"""Make the horizon plot.
"""
self.get_contour_values()
# sets levels of main contour plot
colors1 = ['blue', 'green', 'red', 'purple', 'orange',
'gold', 'magenta']
# set contour value. Default is SNR_CUT.
self.snr_contour_value = (self.SNR_CUT if self.snr_contour_value is None
else self.snr_contour_value)
# plot contours
for j in range(len(self.zvals)):
hz = self.axis.contour(self.xvals[j], self.yvals[j],
self.zvals[j], np.array([self.snr_contour_value]),
colors=colors1[j], linewidths=1., linestyles='solid')
# plot invisible lines for purpose of creating a legend
if self.legend_labels != []:
# plot a curve off of the grid with same color for legend label.
self.axis.plot([0.1, 0.2], [0.1, 0.2], color=colors1[j],
label=self.legend_labels[j])
if self.add_legend:
self.axis.legend(**self.legend_kwargs)
return | Make the horizon plot. | Below is the instruction that describes the task:
### Input:
Make the horizon plot.
### Response:
def make_plot(self):
"""Make the horizon plot.
"""
self.get_contour_values()
# sets levels of main contour plot
colors1 = ['blue', 'green', 'red', 'purple', 'orange',
'gold', 'magenta']
# set contour value. Default is SNR_CUT.
self.snr_contour_value = (self.SNR_CUT if self.snr_contour_value is None
else self.snr_contour_value)
# plot contours
for j in range(len(self.zvals)):
hz = self.axis.contour(self.xvals[j], self.yvals[j],
self.zvals[j], np.array([self.snr_contour_value]),
colors=colors1[j], linewidths=1., linestyles='solid')
# plot invisible lines for purpose of creating a legend
if self.legend_labels != []:
# plot a curve off of the grid with same color for legend label.
self.axis.plot([0.1, 0.2], [0.1, 0.2], color=colors1[j],
label=self.legend_labels[j])
if self.add_legend:
self.axis.legend(**self.legend_kwargs)
return |
def ani_depthplot2(ani_file='rmag_anisotropy.txt', meas_file='magic_measurements.txt', samp_file='er_samples.txt', age_file=None, sum_file=None, fmt='svg', dmin=-1, dmax=-1, depth_scale='sample_core_depth', dir_path='.'):
"""
returns matplotlib figure with anisotropy data plotted against depth
available depth scales: 'sample_composite_depth', 'sample_core_depth', or 'age' (you must provide an age file to use this option)
"""
pcol = 4
tint = 9
plots = 0
# format files to use full path
# os.path.join(dir_path, ani_file)
ani_file = pmag.resolve_file_name(ani_file, dir_path)
if not os.path.isfile(ani_file):
print("Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file))
return False, "Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file)
# os.path.join(dir_path, meas_file)
meas_file = pmag.resolve_file_name(meas_file, dir_path)
if age_file:
if not os.path.isfile(age_file):
print(
'Warning: you have provided an invalid age file. Attempting to use sample file instead')
age_file = None
depth_scale = 'sample_core_depth'
# os.path.join(dir_path, samp_file)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
else:
# os.path.join(dir_path, age_file)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
depth_scale = 'age'
print(
'Warning: you have provided an er_ages format file, which will take precedence over er_samples')
else:
samp_file = pmag.resolve_file_name(samp_file, dir_path)
label = 1
if sum_file:
sum_file = os.path.join(dir_path, sum_file)
dmin, dmax = float(dmin), float(dmax)
# get data read in
isbulk = 0 # tests if there are bulk susceptibility measurements
AniData, file_type = pmag.magic_read(ani_file) # read in tensor elements
if not age_file:
# read in sample depth info from er_sample.txt format file
Samps, file_type = pmag.magic_read(samp_file)
else:
# read in sample age info from er_ages.txt format file
Samps, file_type = pmag.magic_read(samp_file)
age_unit = Samps[0]['age_unit']
for s in Samps:
# change to upper case for every sample name
s['er_sample_name'] = s['er_sample_name'].upper()
Meas, file_type = pmag.magic_read(meas_file)
# print 'meas_file', meas_file
# print 'file_type', file_type
if file_type == 'magic_measurements':
isbulk = 1
Data = []
Bulks = []
BulkDepths = []
for rec in AniData:
# look for depth record for this sample
samprecs = pmag.get_dictitem(Samps, 'er_sample_name',
rec['er_sample_name'].upper(), 'T')
# see if there are non-blank depth data
sampdepths = pmag.get_dictitem(samprecs, depth_scale, '', 'F')
if dmax != -1:
# fishes out records within depth bounds
sampdepths = pmag.get_dictitem(
sampdepths, depth_scale, dmax, 'max')
sampdepths = pmag.get_dictitem(
sampdepths, depth_scale, dmin, 'min')
if len(sampdepths) > 0: # if there are any....
# set the core depth of this record
rec['core_depth'] = sampdepths[0][depth_scale]
Data.append(rec) # fish out data with core_depth
if isbulk: # if there are bulk data
chis = pmag.get_dictitem(
Meas, 'er_specimen_name', rec['er_specimen_name'], 'T')
# get the non-zero values for this specimen
chis = pmag.get_dictitem(
chis, 'measurement_chi_volume', '', 'F')
if len(chis) > 0: # if there are any....
# put in microSI
Bulks.append(
1e6 * float(chis[0]['measurement_chi_volume']))
BulkDepths.append(float(sampdepths[0][depth_scale]))
if len(Bulks) > 0: # set min and max bulk values
bmin = min(Bulks)
bmax = max(Bulks)
xlab = "Depth (m)"
if len(Data) > 0:
location = Data[0]['er_location_name']
else:
return False, 'no data to plot'
# collect the data for plotting tau V3_inc and V1_dec
Depths, Tau1, Tau2, Tau3, V3Incs, P, V1Decs = [], [], [], [], [], [], []
F23s = []
Axs = [] # collect the plot ids
# START HERE
if len(Bulks) > 0:
pcol += 1
# get all the s1 values from Data as floats
s1 = pmag.get_dictkey(Data, 'anisotropy_s1', 'f')
s2 = pmag.get_dictkey(Data, 'anisotropy_s2', 'f')
s3 = pmag.get_dictkey(Data, 'anisotropy_s3', 'f')
s4 = pmag.get_dictkey(Data, 'anisotropy_s4', 'f')
s5 = pmag.get_dictkey(Data, 'anisotropy_s5', 'f')
s6 = pmag.get_dictkey(Data, 'anisotropy_s6', 'f')
nmeas = pmag.get_dictkey(Data, 'anisotropy_n', 'int')
sigma = pmag.get_dictkey(Data, 'anisotropy_sigma', 'f')
Depths = pmag.get_dictkey(Data, 'core_depth', 'f')
# Ss=np.array([s1,s4,s5,s4,s2,s6,s5,s6,s3]).transpose() # make an array
Ss = np.array([s1, s2, s3, s4, s5, s6]).transpose() # make an array
# Ts=np.reshape(Ss,(len(Ss),3,-1)) # and re-shape to be n-length array of
# 3x3 sub-arrays
for k in range(len(Depths)):
# tau,Evecs= pmag.tauV(Ts[k]) # get the sorted eigenvalues and eigenvectors
# v3=pmag.cart2dir(Evecs[2])[1] # convert to inclination of the minimum
# eigenvector
fpars = pmag.dohext(nmeas[k] - 6, sigma[k], Ss[k])
V3Incs.append(fpars['v3_inc'])
V1Decs.append(fpars['v1_dec'])
Tau1.append(fpars['t1'])
Tau2.append(fpars['t2'])
Tau3.append(fpars['t3'])
P.append(old_div(Tau1[-1], Tau3[-1]))
F23s.append(fpars['F23'])
if len(Depths) > 0:
if dmax == -1:
dmax = max(Depths)
dmin = min(Depths)
tau_min = 1
for t in Tau3:
if t > 0 and t < tau_min:
tau_min = t
tau_max = max(Tau1)
# tau_min=min(Tau3)
P_max = max(P)
P_min = min(P)
# dmax=dmax+.05*dmax
# dmin=dmin-.05*dmax
main_plot = plt.figure(1, figsize=(10, 8)) # make the figure
version_num = pmag.get_version()
plt.figtext(.02, .01, version_num) # attach the pmagpy version number
ax = plt.subplot(1, pcol, 1) # make the first column
Axs.append(ax)
ax.plot(Tau1, Depths, 'rs')
ax.plot(Tau2, Depths, 'b^')
ax.plot(Tau3, Depths, 'ko')
if sum_file:
core_depth_key, core_label_key, Cores = read_core_csv_file(
sum_file)
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
ax.axis([tau_min, tau_max, dmax, dmin])
ax.set_xlabel('Eigenvalues')
if depth_scale == 'sample_core_depth':
ax.set_ylabel('Depth (mbsf)')
elif depth_scale == 'age':
ax.set_ylabel('Age (' + age_unit + ')')
else:
ax.set_ylabel('Depth (mcd)')
ax2 = plt.subplot(1, pcol, 2) # make the second column
ax2.plot(P, Depths, 'rs')
ax2.axis([P_min, P_max, dmax, dmin])
ax2.set_xlabel('P')
ax2.set_title(location)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
Axs.append(ax2)
ax3 = plt.subplot(1, pcol, 3)
Axs.append(ax3)
ax3.plot(V3Incs, Depths, 'ko')
ax3.axis([0, 90, dmax, dmin])
ax3.set_xlabel('V3 Inclination')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
ax4 = plt.subplot(1, np.abs(pcol), 4)
Axs.append(ax4)
ax4.plot(V1Decs, Depths, 'rs')
ax4.axis([0, 360, dmax, dmin])
ax4.set_xlabel('V1 Declination')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth >= dmin and depth <= dmax:
plt.plot([0, 360], [depth, depth], 'b--')
if pcol == 4 and label == 1:
plt.text(360, depth + tint, core[core_label_key])
# ax5=plt.subplot(1,np.abs(pcol),5)
# Axs.append(ax5)
# ax5.plot(F23s,Depths,'rs')
# bounds=ax5.axis()
# ax5.axis([bounds[0],bounds[1],dmax,dmin])
# ax5.set_xlabel('F_23')
# ax5.semilogx()
# if sum_file:
# for core in Cores:
# depth=float(core[core_depth_key])
# if depth>=dmin and depth<=dmax:
# plt.plot([bounds[0],bounds[1]],[depth,depth],'b--')
# if pcol==5 and label==1:plt.text(bounds[1],depth+tint,core[core_label_key])
# if pcol==6:
if pcol == 5:
# ax6=plt.subplot(1,pcol,6)
ax6 = plt.subplot(1, pcol, 5)
Axs.append(ax6)
ax6.plot(Bulks, BulkDepths, 'bo')
ax6.axis([bmin - 1, 1.1 * bmax, dmax, dmin])
ax6.set_xlabel('Bulk Susc. (uSI)')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth >= dmin and depth <= dmax:
plt.plot([0, bmax], [depth, depth], 'b--')
if label == 1:
plt.text(1.1 * bmax, depth + tint,
core[core_label_key])
for x in Axs:
# this makes the x-tick labels more reasonable - they were
# overcrowded using the defaults
pmagplotlib.delticks(x)
fig_name = location + '_ani_depthplot.' + fmt
return main_plot, fig_name
else:
return False, "No data to plot" | returns matplotlib figure with anisotropy data plotted against depth
available depth scales: 'sample_composite_depth', 'sample_core_depth', or 'age' (you must provide an age file to use this option) | Below is the instruction that describes the task:
### Input:
returns matplotlib figure with anisotropy data plotted against depth
available depth scales: 'sample_composite_depth', 'sample_core_depth', or 'age' (you must provide an age file to use this option)
### Response:
def ani_depthplot2(ani_file='rmag_anisotropy.txt', meas_file='magic_measurements.txt', samp_file='er_samples.txt', age_file=None, sum_file=None, fmt='svg', dmin=-1, dmax=-1, depth_scale='sample_core_depth', dir_path='.'):
"""
returns matplotlib figure with anisotropy data plotted against depth
available depth scales: 'sample_composite_depth', 'sample_core_depth', or 'age' (you must provide an age file to use this option)
"""
pcol = 4
tint = 9
plots = 0
# format files to use full path
# os.path.join(dir_path, ani_file)
ani_file = pmag.resolve_file_name(ani_file, dir_path)
if not os.path.isfile(ani_file):
print("Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file))
return False, "Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file)
# os.path.join(dir_path, meas_file)
meas_file = pmag.resolve_file_name(meas_file, dir_path)
if age_file:
if not os.path.isfile(age_file):
print(
'Warning: you have provided an invalid age file. Attempting to use sample file instead')
age_file = None
depth_scale = 'sample_core_depth'
# os.path.join(dir_path, samp_file)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
else:
# os.path.join(dir_path, age_file)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
depth_scale = 'age'
print(
'Warning: you have provided an er_ages format file, which will take precedence over er_samples')
else:
samp_file = pmag.resolve_file_name(samp_file, dir_path)
label = 1
if sum_file:
sum_file = os.path.join(dir_path, sum_file)
dmin, dmax = float(dmin), float(dmax)
# get data read in
isbulk = 0 # tests if there are bulk susceptibility measurements
AniData, file_type = pmag.magic_read(ani_file) # read in tensor elements
if not age_file:
# read in sample depth info from er_sample.txt format file
Samps, file_type = pmag.magic_read(samp_file)
else:
# read in sample age info from er_ages.txt format file
Samps, file_type = pmag.magic_read(samp_file)
age_unit = Samps[0]['age_unit']
for s in Samps:
# change to upper case for every sample name
s['er_sample_name'] = s['er_sample_name'].upper()
Meas, file_type = pmag.magic_read(meas_file)
# print 'meas_file', meas_file
# print 'file_type', file_type
if file_type == 'magic_measurements':
isbulk = 1
Data = []
Bulks = []
BulkDepths = []
for rec in AniData:
# look for depth record for this sample
samprecs = pmag.get_dictitem(Samps, 'er_sample_name',
rec['er_sample_name'].upper(), 'T')
# see if there are non-blank depth data
sampdepths = pmag.get_dictitem(samprecs, depth_scale, '', 'F')
if dmax != -1:
# fishes out records within depth bounds
sampdepths = pmag.get_dictitem(
sampdepths, depth_scale, dmax, 'max')
sampdepths = pmag.get_dictitem(
sampdepths, depth_scale, dmin, 'min')
if len(sampdepths) > 0: # if there are any....
# set the core depth of this record
rec['core_depth'] = sampdepths[0][depth_scale]
Data.append(rec) # fish out data with core_depth
if isbulk: # if there are bulk data
chis = pmag.get_dictitem(
Meas, 'er_specimen_name', rec['er_specimen_name'], 'T')
# get the non-zero values for this specimen
chis = pmag.get_dictitem(
chis, 'measurement_chi_volume', '', 'F')
if len(chis) > 0: # if there are any....
# put in microSI
Bulks.append(
1e6 * float(chis[0]['measurement_chi_volume']))
BulkDepths.append(float(sampdepths[0][depth_scale]))
if len(Bulks) > 0: # set min and max bulk values
bmin = min(Bulks)
bmax = max(Bulks)
xlab = "Depth (m)"
if len(Data) > 0:
location = Data[0]['er_location_name']
else:
return False, 'no data to plot'
# collect the data for plotting tau V3_inc and V1_dec
Depths, Tau1, Tau2, Tau3, V3Incs, P, V1Decs = [], [], [], [], [], [], []
F23s = []
Axs = [] # collect the plot ids
# START HERE
if len(Bulks) > 0:
pcol += 1
# get all the s1 values from Data as floats
s1 = pmag.get_dictkey(Data, 'anisotropy_s1', 'f')
s2 = pmag.get_dictkey(Data, 'anisotropy_s2', 'f')
s3 = pmag.get_dictkey(Data, 'anisotropy_s3', 'f')
s4 = pmag.get_dictkey(Data, 'anisotropy_s4', 'f')
s5 = pmag.get_dictkey(Data, 'anisotropy_s5', 'f')
s6 = pmag.get_dictkey(Data, 'anisotropy_s6', 'f')
nmeas = pmag.get_dictkey(Data, 'anisotropy_n', 'int')
sigma = pmag.get_dictkey(Data, 'anisotropy_sigma', 'f')
Depths = pmag.get_dictkey(Data, 'core_depth', 'f')
# Ss=np.array([s1,s4,s5,s4,s2,s6,s5,s6,s3]).transpose() # make an array
Ss = np.array([s1, s2, s3, s4, s5, s6]).transpose() # make an array
# Ts=np.reshape(Ss,(len(Ss),3,-1)) # and re-shape to be n-length array of
# 3x3 sub-arrays
for k in range(len(Depths)):
# tau,Evecs= pmag.tauV(Ts[k]) # get the sorted eigenvalues and eigenvectors
# v3=pmag.cart2dir(Evecs[2])[1] # convert to inclination of the minimum
# eigenvector
fpars = pmag.dohext(nmeas[k] - 6, sigma[k], Ss[k])
V3Incs.append(fpars['v3_inc'])
V1Decs.append(fpars['v1_dec'])
Tau1.append(fpars['t1'])
Tau2.append(fpars['t2'])
Tau3.append(fpars['t3'])
P.append(old_div(Tau1[-1], Tau3[-1]))
F23s.append(fpars['F23'])
if len(Depths) > 0:
if dmax == -1:
dmax = max(Depths)
dmin = min(Depths)
tau_min = 1
for t in Tau3:
if t > 0 and t < tau_min:
tau_min = t
tau_max = max(Tau1)
# tau_min=min(Tau3)
P_max = max(P)
P_min = min(P)
# dmax=dmax+.05*dmax
# dmin=dmin-.05*dmax
main_plot = plt.figure(1, figsize=(10, 8)) # make the figure
version_num = pmag.get_version()
plt.figtext(.02, .01, version_num) # attach the pmagpy version number
ax = plt.subplot(1, pcol, 1) # make the first column
Axs.append(ax)
ax.plot(Tau1, Depths, 'rs')
ax.plot(Tau2, Depths, 'b^')
ax.plot(Tau3, Depths, 'ko')
if sum_file:
core_depth_key, core_label_key, Cores = read_core_csv_file(
sum_file)
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
ax.axis([tau_min, tau_max, dmax, dmin])
ax.set_xlabel('Eigenvalues')
if depth_scale == 'sample_core_depth':
ax.set_ylabel('Depth (mbsf)')
elif depth_scale == 'age':
ax.set_ylabel('Age (' + age_unit + ')')
else:
ax.set_ylabel('Depth (mcd)')
ax2 = plt.subplot(1, pcol, 2) # make the second column
ax2.plot(P, Depths, 'rs')
ax2.axis([P_min, P_max, dmax, dmin])
ax2.set_xlabel('P')
ax2.set_title(location)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
Axs.append(ax2)
ax3 = plt.subplot(1, pcol, 3)
Axs.append(ax3)
ax3.plot(V3Incs, Depths, 'ko')
ax3.axis([0, 90, dmax, dmin])
ax3.set_xlabel('V3 Inclination')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
ax4 = plt.subplot(1, np.abs(pcol), 4)
Axs.append(ax4)
ax4.plot(V1Decs, Depths, 'rs')
ax4.axis([0, 360, dmax, dmin])
ax4.set_xlabel('V1 Declination')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth >= dmin and depth <= dmax:
plt.plot([0, 360], [depth, depth], 'b--')
if pcol == 4 and label == 1:
plt.text(360, depth + tint, core[core_label_key])
# ax5=plt.subplot(1,np.abs(pcol),5)
# Axs.append(ax5)
# ax5.plot(F23s,Depths,'rs')
# bounds=ax5.axis()
# ax5.axis([bounds[0],bounds[1],dmax,dmin])
# ax5.set_xlabel('F_23')
# ax5.semilogx()
# if sum_file:
# for core in Cores:
# depth=float(core[core_depth_key])
# if depth>=dmin and depth<=dmax:
# plt.plot([bounds[0],bounds[1]],[depth,depth],'b--')
# if pcol==5 and label==1:plt.text(bounds[1],depth+tint,core[core_label_key])
# if pcol==6:
if pcol == 5:
# ax6=plt.subplot(1,pcol,6)
ax6 = plt.subplot(1, pcol, 5)
Axs.append(ax6)
ax6.plot(Bulks, BulkDepths, 'bo')
ax6.axis([bmin - 1, 1.1 * bmax, dmax, dmin])
ax6.set_xlabel('Bulk Susc. (uSI)')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth >= dmin and depth <= dmax:
plt.plot([0, bmax], [depth, depth], 'b--')
if label == 1:
plt.text(1.1 * bmax, depth + tint,
core[core_label_key])
for x in Axs:
# this makes the x-tick labels more reasonable - they were
# overcrowded using the defaults
pmagplotlib.delticks(x)
fig_name = location + '_ani_depthplot.' + fmt
return main_plot, fig_name
else:
return False, "No data to plot" |
def factor_cumulative_returns(factor_data,
period,
long_short=True,
group_neutral=False,
equal_weight=False,
quantiles=None,
groups=None):
"""
Simulate a portfolio using the input factor and return the cumulative
returns of the simulated portfolio
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to,
and (optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
period : string
'factor_data' column name corresponding to the 'period' returns to be
used in the computation of portfolio returns
long_short : bool, optional
if True then simulates a dollar neutral long-short portfolio
- see performance.create_pyfolio_input for more details
group_neutral : bool, optional
If True then simulates a group neutral portfolio
- see performance.create_pyfolio_input for more details
equal_weight : bool, optional
Control the assets weights:
- see performance.create_pyfolio_input for more details
quantiles: sequence[int], optional
Use only specific quantiles in the computation. By default all
quantiles are used
groups: sequence[string], optional
Use only specific groups in the computation. By default all groups
are used
Returns
-------
Cumulative returns series : pd.Series
Example:
2015-07-16 09:30:00 -0.012143
2015-07-16 12:30:00 0.012546
2015-07-17 09:30:00 0.045350
2015-07-17 12:30:00 0.065897
2015-07-20 09:30:00 0.030957
"""
fwd_ret_cols = utils.get_forward_returns_columns(factor_data.columns)
if period not in fwd_ret_cols:
raise ValueError("Period '%s' not found" % period)
todrop = list(fwd_ret_cols)
todrop.remove(period)
portfolio_data = factor_data.drop(todrop, axis=1)
if quantiles is not None:
portfolio_data = portfolio_data[portfolio_data['factor_quantile'].isin(
quantiles)]
if groups is not None:
portfolio_data = portfolio_data[portfolio_data['group'].isin(groups)]
returns = \
factor_returns(portfolio_data, long_short, group_neutral, equal_weight)
return cumulative_returns(returns[period], period) | Simulate a portfolio using the input factor and return the cumulative
returns of the simulated portfolio
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to,
and (optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
period : string
'factor_data' column name corresponding to the 'period' returns to be
used in the computation of portfolio returns
long_short : bool, optional
if True then simulates a dollar neutral long-short portfolio
- see performance.create_pyfolio_input for more details
group_neutral : bool, optional
If True then simulates a group neutral portfolio
- see performance.create_pyfolio_input for more details
equal_weight : bool, optional
Control the assets weights:
- see performance.create_pyfolio_input for more details
quantiles: sequence[int], optional
Use only specific quantiles in the computation. By default all
quantiles are used
groups: sequence[string], optional
Use only specific groups in the computation. By default all groups
are used
Returns
-------
Cumulative returns series : pd.Series
Example:
2015-07-16 09:30:00 -0.012143
2015-07-16 12:30:00 0.012546
2015-07-17 09:30:00 0.045350
2015-07-17 12:30:00 0.065897
2015-07-20 09:30:00 0.030957 | Below is the the instruction that describes the task:
### Input:
Simulate a portfolio using the input factor and return the cumulative
returns of the simulated portfolio
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to,
and (optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
period : string
'factor_data' column name corresponding to the 'period' returns to be
used in the computation of portfolio returns
long_short : bool, optional
if True then simulates a dollar neutral long-short portfolio
- see performance.create_pyfolio_input for more details
group_neutral : bool, optional
If True then simulates a group neutral portfolio
- see performance.create_pyfolio_input for more details
equal_weight : bool, optional
Control the assets weights:
- see performance.create_pyfolio_input for more details
quantiles: sequence[int], optional
Use only specific quantiles in the computation. By default all
quantiles are used
groups: sequence[string], optional
Use only specific groups in the computation. By default all groups
are used
Returns
-------
Cumulative returns series : pd.Series
Example:
2015-07-16 09:30:00 -0.012143
2015-07-16 12:30:00 0.012546
2015-07-17 09:30:00 0.045350
2015-07-17 12:30:00 0.065897
2015-07-20 09:30:00 0.030957
### Response:
def factor_cumulative_returns(factor_data,
period,
long_short=True,
group_neutral=False,
equal_weight=False,
quantiles=None,
groups=None):
"""
Simulate a portfolio using the input factor and return the cumulative
returns of the simulated portfolio
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to,
and (optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
period : string
'factor_data' column name corresponding to the 'period' returns to be
used in the computation of portfolio returns
long_short : bool, optional
if True then simulates a dollar neutral long-short portfolio
- see performance.create_pyfolio_input for more details
group_neutral : bool, optional
If True then simulates a group neutral portfolio
- see performance.create_pyfolio_input for more details
equal_weight : bool, optional
Control the assets weights:
- see performance.create_pyfolio_input for more details
quantiles: sequence[int], optional
Use only specific quantiles in the computation. By default all
quantiles are used
groups: sequence[string], optional
Use only specific groups in the computation. By default all groups
are used
Returns
-------
Cumulative returns series : pd.Series
Example:
2015-07-16 09:30:00 -0.012143
2015-07-16 12:30:00 0.012546
2015-07-17 09:30:00 0.045350
2015-07-17 12:30:00 0.065897
2015-07-20 09:30:00 0.030957
"""
fwd_ret_cols = utils.get_forward_returns_columns(factor_data.columns)
if period not in fwd_ret_cols:
raise ValueError("Period '%s' not found" % period)
todrop = list(fwd_ret_cols)
todrop.remove(period)
portfolio_data = factor_data.drop(todrop, axis=1)
if quantiles is not None:
portfolio_data = portfolio_data[portfolio_data['factor_quantile'].isin(
quantiles)]
if groups is not None:
portfolio_data = portfolio_data[portfolio_data['group'].isin(groups)]
returns = \
factor_returns(portfolio_data, long_short, group_neutral, equal_weight)
return cumulative_returns(returns[period], period) |
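A short usage sketch for the function above. The factor Series and prices DataFrame are assumed to be user-supplied, and the period label must name one of the forward-return columns alphalens actually produced (exact column names vary between alphalens versions):
from alphalens import utils

# factor: MultiIndex (date, asset) Series; prices: wide DataFrame of close prices
factor_data = utils.get_clean_factor_and_forward_returns(factor, prices,
                                                         periods=(1, 5))
# cumulative returns of a dollar-neutral portfolio built from the extreme quantiles
cum_rets = factor_cumulative_returns(factor_data, period='1D',
                                     long_short=True, quantiles=[1, 5])
print(cum_rets.tail())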
def get_chrom_for_transcript(self, transcript_id, hgnc_id):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "application/json"}
self.attempt = 0
ext = "/overlap/id/{}?feature=gene".format(transcript_id)
r = self.ensembl_request(ext, headers)
for gene in json.loads(r):
if gene["external_name"] == hgnc_id:
return gene["seq_region_name"]
return None | obtain the chromosome for a transcript from ensembl | Below is the the instruction that describes the task:
### Input:
obtain the chromosome for a transcript from ensembl
### Response:
def get_chrom_for_transcript(self, transcript_id, hgnc_id):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "application/json"}
self.attempt = 0
ext = "/overlap/id/{}?feature=gene".format(transcript_id)
r = self.ensembl_request(ext, headers)
for gene in json.loads(r):
if gene["external_name"] == hgnc_id:
return gene["seq_region_name"]
return None |
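A hypothetical call sketch; the method belongs to an Ensembl REST helper class (the class name below is made up) that supplies ensembl_request():
lookup = EnsemblRestLookup()  # placeholder for the class that defines ensembl_request()
chrom = lookup.get_chrom_for_transcript("ENST00000380152", "BRCA2")
print(chrom)  # expected "13", the chromosome carrying BRCA2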
def _state(self):
"""The internal state of the object.
The API responses are not consistent, so each call performs several
retries and merges the responses into the internally saved state,
refreshing the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_ | The internal state of the object.
The API responses are not consistent, so each call performs several
retries and merges the responses into the internally saved state,
refreshing the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state. | Below is the the instruction that describes the task:
### Input:
The internal state of the object.
The API responses are not consistent, so each call performs several
retries and merges the responses into the internally saved state,
refreshing the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
### Response:
def _state(self):
"""The internal state of the object.
The API responses are not consistent, so each call performs several
retries and merges the responses into the internally saved state,
refreshing the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_ |
def summary_by_datacenter(self):
"""Summary of the networks on the account, grouped by data center.
The resultant dictionary is primarily useful for statistical purposes.
It contains count information rather than raw data. If you want raw
information, see the :func:`list_vlans` method instead.
:returns: A dictionary keyed by data center with the data containing a
set of counts for subnets, hardware, virtual servers, and
other objects residing within that data center.
"""
datacenters = collections.defaultdict(lambda: {
'hardware_count': 0,
'public_ip_count': 0,
'subnet_count': 0,
'virtual_guest_count': 0,
'vlan_count': 0,
})
for vlan in self.list_vlans():
name = utils.lookup(vlan, 'primaryRouter', 'datacenter', 'name')
datacenters[name]['vlan_count'] += 1
datacenters[name]['public_ip_count'] += (
vlan['totalPrimaryIpAddressCount'])
datacenters[name]['subnet_count'] += vlan['subnetCount']
# NOTE(kmcdonald): Only count hardware/guests once
if vlan.get('networkSpace') == 'PRIVATE':
datacenters[name]['hardware_count'] += (
vlan['hardwareCount'])
datacenters[name]['virtual_guest_count'] += (
vlan['virtualGuestCount'])
return dict(datacenters) | Summary of the networks on the account, grouped by data center.
The resultant dictionary is primarily useful for statistical purposes.
It contains count information rather than raw data. If you want raw
information, see the :func:`list_vlans` method instead.
:returns: A dictionary keyed by data center with the data containing a
set of counts for subnets, hardware, virtual servers, and
other objects residing within that data center. | Below is the the instruction that describes the task:
### Input:
Summary of the networks on the account, grouped by data center.
The resultant dictionary is primarily useful for statistical purposes.
It contains count information rather than raw data. If you want raw
information, see the :func:`list_vlans` method instead.
:returns: A dictionary keyed by data center with the data containing a
set of counts for subnets, hardware, virtual servers, and
other objects residing within that data center.
### Response:
def summary_by_datacenter(self):
"""Summary of the networks on the account, grouped by data center.
The resultant dictionary is primarily useful for statistical purposes.
It contains count information rather than raw data. If you want raw
information, see the :func:`list_vlans` method instead.
:returns: A dictionary keyed by data center with the data containing a
set of counts for subnets, hardware, virtual servers, and
other objects residing within that data center.
"""
datacenters = collections.defaultdict(lambda: {
'hardware_count': 0,
'public_ip_count': 0,
'subnet_count': 0,
'virtual_guest_count': 0,
'vlan_count': 0,
})
for vlan in self.list_vlans():
name = utils.lookup(vlan, 'primaryRouter', 'datacenter', 'name')
datacenters[name]['vlan_count'] += 1
datacenters[name]['public_ip_count'] += (
vlan['totalPrimaryIpAddressCount'])
datacenters[name]['subnet_count'] += vlan['subnetCount']
# NOTE(kmcdonald): Only count hardware/guests once
if vlan.get('networkSpace') == 'PRIVATE':
datacenters[name]['hardware_count'] += (
vlan['hardwareCount'])
datacenters[name]['virtual_guest_count'] += (
vlan['virtualGuestCount'])
return dict(datacenters) |
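A hedged usage sketch with the SoftLayer client this manager wraps; credentials are read from the local SoftLayer configuration or environment:
import SoftLayer

client = SoftLayer.create_client_from_env()
mgr = SoftLayer.NetworkManager(client)
for name, counts in mgr.summary_by_datacenter().items():
    # counts holds the per-datacenter vlan/subnet/hardware/guest totals
    print(name, counts['vlan_count'], counts['subnet_count'])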
def pwd(self, **kwargs):
"""
Returns the cwd
Optional kwargs:
node = <node>
If specified, return only the directory name at depth <node>.
"""
b_node = False
node = 0
for key,val in kwargs.items():
if key == 'node':
b_node = True
node = int(val)
str_path = self.cwd()
if b_node:
l_path = str_path.split('/')
if len(l_path) >= node+1:
str_path = str_path.split('/')[node]
return str_path | Returns the cwd
Optional kwargs:
node = <node>
If specified, return only the directory name at depth <node>. | Below is the the instruction that describes the task:
### Input:
Returns the cwd
Optional kwargs:
node = <node>
If specified, return only the directory name at depth <node>.
### Response:
def pwd(self, **kwargs):
"""
Returns the cwd
Optional kwargs:
node = <node>
If specified, return only the directory name at depth <node>.
"""
b_node = False
node = 0
for key,val in kwargs.items():
if key == 'node':
b_node = True
node = int(val)
str_path = self.cwd()
if b_node:
l_path = str_path.split('/')
if len(l_path) >= node+1:
str_path = str_path.split('/')[node]
return str_path |
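Because the method only relies on self.cwd(), its behaviour is easy to sketch with a hypothetical stub object:
class _PathStub:
    pwd = pwd  # reuse the function above as a method

    def cwd(self):
        return "/data/run01/output"

s = _PathStub()
print(s.pwd())        # /data/run01/output
print(s.pwd(node=2))  # run01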
def ls_command(
endpoint_plus_path,
recursive_depth_limit,
recursive,
long_output,
show_hidden,
filter_val,
):
"""
Executor for `globus ls`
"""
endpoint_id, path = endpoint_plus_path
# do autoactivation before the `ls` call so that recursive invocations
# won't do this repeatedly, and won't have to instantiate new clients
client = get_client()
autoactivate(client, endpoint_id, if_expires_in=60)
# create the query parameters to send to operation_ls
ls_params = {"show_hidden": int(show_hidden)}
if path:
ls_params["path"] = path
if filter_val:
# this char has special meaning in the LS API's filter clause
# can't be part of the pattern (but we don't support globbing across
# dir structures anyway)
if "/" in filter_val:
raise click.UsageError('--filter cannot contain "/"')
# format into a simple filter clause which operates on filenames
ls_params["filter"] = "name:{}".format(filter_val)
# get the `ls` result
if recursive:
# NOTE:
# --recursive and --filter have an interplay that some users may find
# surprising
# if we're asked to change or "improve" the behavior in the future, we
# could do so with "type:dir" or "type:file" filters added in, and
# potentially work out some viable behavior based on what people want
res = client.recursive_operation_ls(
endpoint_id, depth=recursive_depth_limit, **ls_params
)
else:
res = client.operation_ls(endpoint_id, **ls_params)
def cleaned_item_name(item):
return item["name"] + ("/" if item["type"] == "dir" else "")
# and then print it, per formatting rules
formatted_print(
res,
fields=[
("Permissions", "permissions"),
("User", "user"),
("Group", "group"),
("Size", "size"),
("Last Modified", "last_modified"),
("File Type", "type"),
("Filename", cleaned_item_name),
],
simple_text=(
None
if long_output or is_verbose() or not outformat_is_text()
else "\n".join(cleaned_item_name(x) for x in res)
),
json_converter=iterable_response_to_dict,
) | Executor for `globus ls` | Below is the the instruction that describes the task:
### Input:
Executor for `globus ls`
### Response:
def ls_command(
endpoint_plus_path,
recursive_depth_limit,
recursive,
long_output,
show_hidden,
filter_val,
):
"""
Executor for `globus ls`
"""
endpoint_id, path = endpoint_plus_path
# do autoactivation before the `ls` call so that recursive invocations
# won't do this repeatedly, and won't have to instantiate new clients
client = get_client()
autoactivate(client, endpoint_id, if_expires_in=60)
# create the query parameters to send to operation_ls
ls_params = {"show_hidden": int(show_hidden)}
if path:
ls_params["path"] = path
if filter_val:
# this char has special meaning in the LS API's filter clause
# can't be part of the pattern (but we don't support globbing across
# dir structures anyway)
if "/" in filter_val:
raise click.UsageError('--filter cannot contain "/"')
# format into a simple filter clause which operates on filenames
ls_params["filter"] = "name:{}".format(filter_val)
# get the `ls` result
if recursive:
# NOTE:
# --recursive and --filter have an interplay that some users may find
# surprising
# if we're asked to change or "improve" the behavior in the future, we
# could do so with "type:dir" or "type:file" filters added in, and
# potentially work out some viable behavior based on what people want
res = client.recursive_operation_ls(
endpoint_id, depth=recursive_depth_limit, **ls_params
)
else:
res = client.operation_ls(endpoint_id, **ls_params)
def cleaned_item_name(item):
return item["name"] + ("/" if item["type"] == "dir" else "")
# and then print it, per formatting rules
formatted_print(
res,
fields=[
("Permissions", "permissions"),
("User", "user"),
("Group", "group"),
("Size", "size"),
("Last Modified", "last_modified"),
("File Type", "type"),
("Filename", cleaned_item_name),
],
simple_text=(
None
if long_output or is_verbose() or not outformat_is_text()
else "\n".join(cleaned_item_name(x) for x in res)
),
json_converter=iterable_response_to_dict,
) |
def _nama(self):
"""Mengembalikan representasi string untuk nama entri ini.
:returns: String representasi nama entri
:rtype: str
"""
hasil = self.nama
if self.nomor:
hasil += " [{}]".format(self.nomor)
if self.kata_dasar:
hasil = " » ".join(self.kata_dasar) + " » " + hasil
return hasil | Return the string representation of this entry's name.
:returns: String representation of the entry name
:rtype: str | Below is the the instruction that describes the task:
### Input:
Return the string representation of this entry's name.
:returns: String representation of the entry name
:rtype: str
### Response:
def _nama(self):
"""Mengembalikan representasi string untuk nama entri ini.
:returns: String representasi nama entri
:rtype: str
"""
hasil = self.nama
if self.nomor:
hasil += " [{}]".format(self.nomor)
if self.kata_dasar:
hasil = " » ".join(self.kata_dasar) + " » " + hasil
return hasil |
def calculate_auc_covar(auc_structure1, auc_structure2):
"""
determine AUC covariance due to actives (covar_a) and decoys (covar_d)
:param auc_structure1: list [(id, best_score, best_query, status, fpf, tpf), ...,]
:param auc_structure2: list [(id, best_score, best_query, status, fpf, tpf), ...,]
:return (covar_a, covar_d): tuple
"""
# split data by activity class
actives1, decoys1 = splitter(auc_structure1)
actives2, decoys2 = splitter(auc_structure2)
# covariance due to actives = E[{fpf2 - E(fpf2)a} * {fpf1 - E(fpf1)a}]a
fpf1 = [x[4] for x in actives1]
fpf2 = [x[4] for x in actives2]
covara = np.cov(fpf1,fpf2)[0][1]
# covariance due to decoys = E[{tpf2 - E(tpf2)d} * {tpf1 - E(tpf1)d}]
tpf1 = [x[5] for x in decoys1]
tpf2 = [x[5] for x in decoys2]
covard = np.cov(tpf1,tpf2)[0][1] # this is only compatible with versions >= 1.5
return covara, covard | determine AUC covariance due to actives (covar_a) and decoys (covar_d)
:param auc_structure1: list [(id, best_score, best_query, status, fpf, tpf), ...,]
:param auc_structure2: list [(id, best_score, best_query, status, fpf, tpf), ...,]
:return (covar_a, covar_d): tuple | Below is the the instruction that describes the task:
### Input:
determine AUC covariance due to actives (covar_a) and decoys (covar_d)
:param auc_structure1: list [(id, best_score, best_query, status, fpf, tpf), ...,]
:param auc_structure2: list [(id, best_score, best_query, status, fpf, tpf), ...,]
:return (covar_a, covar_d): tuple
### Response:
def calculate_auc_covar(auc_structure1, auc_structure2):
"""
determine AUC covariance due to actives (covar_a) and decoys (covar_d)
:param auc_structure1: list [(id, best_score, best_query, status, fpf, tpf), ...,]
:param auc_structure2: list [(id, best_score, best_query, status, fpf, tpf), ...,]
:return (covar_a, covar_d): tuple
"""
# split data by activity class
actives1, decoys1 = splitter(auc_structure1)
actives2, decoys2 = splitter(auc_structure2)
# covariance due to actives = E[{fpf2 - E(fpf2)a} * {fpf1 - E(fpf1)a}]a
fpf1 = [x[4] for x in actives1]
fpf2 = [x[4] for x in actives2]
covara = np.cov(fpf1,fpf2)[0][1]
# covariance due to decoys = E[{tpf2 - E(tpf2)d} * {tpf1 - E(tpf1)d}]
tpf1 = [x[5] for x in decoys1]
tpf2 = [x[5] for x in decoys2]
covard = np.cov(tpf1,tpf2)[0][1] # this is only compatible with versions >= 1.5
return covara, covard |
def _read_bytes_from_framed_body(self, b):
"""Reads the requested number of bytes from a streaming framed message body.
:param int b: Number of bytes to read
:returns: Bytes read from source stream and decrypted
:rtype: bytes
"""
plaintext = b""
final_frame = False
_LOGGER.debug("collecting %d bytes", b)
while len(plaintext) < b and not final_frame:
_LOGGER.debug("Reading frame")
frame_data, final_frame = deserialize_frame(
stream=self.source_stream, header=self._header, verifier=self.verifier
)
_LOGGER.debug("Read complete for frame %d", frame_data.sequence_number)
if frame_data.sequence_number != self.last_sequence_number + 1:
raise SerializationError("Malformed message: frames out of order")
self.last_sequence_number += 1
aad_content_string = aws_encryption_sdk.internal.utils.get_aad_content_string(
content_type=self._header.content_type, is_final_frame=frame_data.final_frame
)
associated_data = assemble_content_aad(
message_id=self._header.message_id,
aad_content_string=aad_content_string,
seq_num=frame_data.sequence_number,
length=len(frame_data.ciphertext),
)
plaintext += decrypt(
algorithm=self._header.algorithm,
key=self._derived_data_key,
encrypted_data=frame_data,
associated_data=associated_data,
)
plaintext_length = len(plaintext)
_LOGGER.debug("bytes collected: %d", plaintext_length)
if final_frame:
_LOGGER.debug("Reading footer")
self.footer = deserialize_footer(stream=self.source_stream, verifier=self.verifier)
return plaintext | Reads the requested number of bytes from a streaming framed message body.
:param int b: Number of bytes to read
:returns: Bytes read from source stream and decrypted
:rtype: bytes | Below is the the instruction that describes the task:
### Input:
Reads the requested number of bytes from a streaming framed message body.
:param int b: Number of bytes to read
:returns: Bytes read from source stream and decrypted
:rtype: bytes
### Response:
def _read_bytes_from_framed_body(self, b):
"""Reads the requested number of bytes from a streaming framed message body.
:param int b: Number of bytes to read
:returns: Bytes read from source stream and decrypted
:rtype: bytes
"""
plaintext = b""
final_frame = False
_LOGGER.debug("collecting %d bytes", b)
while len(plaintext) < b and not final_frame:
_LOGGER.debug("Reading frame")
frame_data, final_frame = deserialize_frame(
stream=self.source_stream, header=self._header, verifier=self.verifier
)
_LOGGER.debug("Read complete for frame %d", frame_data.sequence_number)
if frame_data.sequence_number != self.last_sequence_number + 1:
raise SerializationError("Malformed message: frames out of order")
self.last_sequence_number += 1
aad_content_string = aws_encryption_sdk.internal.utils.get_aad_content_string(
content_type=self._header.content_type, is_final_frame=frame_data.final_frame
)
associated_data = assemble_content_aad(
message_id=self._header.message_id,
aad_content_string=aad_content_string,
seq_num=frame_data.sequence_number,
length=len(frame_data.ciphertext),
)
plaintext += decrypt(
algorithm=self._header.algorithm,
key=self._derived_data_key,
encrypted_data=frame_data,
associated_data=associated_data,
)
plaintext_length = len(plaintext)
_LOGGER.debug("bytes collected: %d", plaintext_length)
if final_frame:
_LOGGER.debug("Reading footer")
self.footer = deserialize_footer(stream=self.source_stream, verifier=self.verifier)
return plaintext |
def format_string(self, fmat_string):
"""
Takes a string containing 0 or more {variables} and formats it
according to this instance's attributes.
:param fmat_string: A string, e.g. '{name}-foo.txt'
:type fmat_string: ``str``
:return: The string formatted according to this instance. E.g.
'production-runtime-foo.txt'
:rtype: ``str``
"""
try:
return fmat_string.format(**vars(self))
except KeyError as e:
raise ValueError('Invalid format string: {0}. Instance has no '
'attribute {1}.'.format(repr(fmat_string),
repr(e))) | Takes a string containing 0 or more {variables} and formats it
according to this instance's attributes.
:param fmat_string: A string, e.g. '{name}-foo.txt'
:type fmat_string: ``str``
:return: The string formatted according to this instance. E.g.
'production-runtime-foo.txt'
:rtype: ``str`` | Below is the the instruction that describes the task:
### Input:
Takes a string containing 0 or more {variables} and formats it
according to this instance's attributes.
:param fmat_string: A string, e.g. '{name}-foo.txt'
:type fmat_string: ``str``
:return: The string formatted according to this instance. E.g.
'production-runtime-foo.txt'
:rtype: ``str``
### Response:
def format_string(self, fmat_string):
"""
Takes a string containing 0 or more {variables} and formats it
according to this instance's attributes.
:param fmat_string: A string, e.g. '{name}-foo.txt'
:type fmat_string: ``str``
:return: The string formatted according to this instance. E.g.
'production-runtime-foo.txt'
:rtype: ``str``
"""
try:
return fmat_string.format(**vars(self))
except KeyError as e:
raise ValueError('Invalid format string: {0}. Instance has no '
'attribute {1}.'.format(repr(fmat_string),
repr(e))) |
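Since the method only reads vars(self), a plain namespace object is enough to demonstrate it (the attribute names below are made up):
from types import SimpleNamespace

env = SimpleNamespace(name="production", stage="runtime")
print(format_string(env, "{name}-{stage}-foo.txt"))  # production-runtime-foo.txt
# format_string(env, "{missing}.txt") raises ValueError naming the absent attribute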
def for_web(self, data):
"""
Convert data to web output.
Parameters
----------
data : array
Returns
-------
web data : array
"""
rgba = self._prepare_array_for_png(data)
data = ma.masked_where(rgba == self.nodata, rgba)
return memory_file(data, self.profile()), 'image/png' | Convert data to web output.
Parameters
----------
data : array
Returns
-------
web data : array | Below is the the instruction that describes the task:
### Input:
Convert data to web output.
Parameters
----------
data : array
Returns
-------
web data : array
### Response:
def for_web(self, data):
"""
Convert data to web output.
Parameters
----------
data : array
Returns
-------
web data : array
"""
rgba = self._prepare_array_for_png(data)
data = ma.masked_where(rgba == self.nodata, rgba)
return memory_file(data, self.profile()), 'image/png' |
def persist(self, context):
"""Update or insert the session document into the configured collection"""
D = self._Document
document = context.session[self.name]
D.get_collection().replace_one(D.id == document.id, document, True) | Update or insert the session document into the configured collection | Below is the the instruction that describes the task:
### Input:
Update or insert the session document into the configured collection
### Response:
def persist(self, context):
"""Update or insert the session document into the configured collection"""
D = self._Document
document = context.session[self.name]
D.get_collection().replace_one(D.id == document.id, document, True) |
def preview(self, components=None, ask=0):
"""
Inspects differences between the last deployment and the current code state.
"""
ask = int(ask)
self.init()
component_order, plan_funcs = self.get_component_funcs(components=components)
print('\n%i changes found for host %s.\n' % (len(component_order), self.genv.host_string))
if component_order and plan_funcs:
if self.verbose:
print('These components have changed:\n')
for component in sorted(component_order):
print((' '*4)+component)
print('Deployment plan for host %s:\n' % self.genv.host_string)
for func_name, _ in plan_funcs:
print(success_str((' '*4)+func_name))
if component_order:
print()
if ask and self.genv.host_string == self.genv.hosts[-1]:
if component_order:
if not raw_input('Begin deployment? [yn] ').strip().lower().startswith('y'):
sys.exit(0)
else:
sys.exit(0) | Inspects differences between the last deployment and the current code state. | Below is the the instruction that describes the task:
### Input:
Inspects differences between the last deployment and the current code state.
### Response:
def preview(self, components=None, ask=0):
"""
Inspects differences between the last deployment and the current code state.
"""
ask = int(ask)
self.init()
component_order, plan_funcs = self.get_component_funcs(components=components)
print('\n%i changes found for host %s.\n' % (len(component_order), self.genv.host_string))
if component_order and plan_funcs:
if self.verbose:
print('These components have changed:\n')
for component in sorted(component_order):
print((' '*4)+component)
print('Deployment plan for host %s:\n' % self.genv.host_string)
for func_name, _ in plan_funcs:
print(success_str((' '*4)+func_name))
if component_order:
print()
if ask and self.genv.host_string == self.genv.hosts[-1]:
if component_order:
if not raw_input('Begin deployment? [yn] ').strip().lower().startswith('y'):
sys.exit(0)
else:
sys.exit(0) |
def dn(self, fraction, n=None):
r'''Computes the diameter at which a specified `fraction` of the
distribution falls under. Utilizes a bounded solver to search for the
desired diameter.
Parameters
----------
fraction : float
Fraction of the distribution which should be under the calculated
diameter, [-]
n : int, optional
None (for the `order` specified when the distribution was created),
0 (number), 1 (length), 2 (area), 3 (volume/mass),
or any integer, [-]
Returns
-------
d : float
Particle size diameter, [m]
Examples
--------
>>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
>>> psd.dn(.5)
5e-06
>>> psd.dn(1)
0.00029474365335233776
>>> psd.dn(0)
0.0
'''
if fraction == 1.0:
# Avoid returning the maximum value of the search interval
fraction = 1.0 - epsilon
if fraction < 0:
raise ValueError('Fraction must be more than 0')
elif fraction == 0: # pragma: no cover
if self.truncated:
return self.d_min
return 0.0
# Solve to float precision limit - works well, but is there a real
# point when with mpmath it would never happen?
# dist.cdf(dist.dn(0)-1e-35) == 0
# dist.cdf(dist.dn(0)-1e-36) == input
# dn(0) == 1.9663615597466143e-20
# def err(d):
# cdf = self.cdf(d, n=n)
# if cdf == 0:
# cdf = -1
# return cdf
# return brenth(err, self.d_minimum, self.d_excessive, maxiter=1000, xtol=1E-200)
elif fraction > 1:
raise ValueError('Fraction must be less than or equal to 1')
# As the dn may be incredibly small, it is required for the absolute
# tolerance to not be happy - it needs to continue iterating as long
# as necessary to pin down the answer
return brenth(lambda d:self.cdf(d, n=n) -fraction,
self.d_minimum, self.d_excessive, maxiter=1000, xtol=1E-200) | r'''Computes the diameter at which a specified `fraction` of the
distribution falls under. Utilizes a bounded solver to search for the
desired diameter.
Parameters
----------
fraction : float
Fraction of the distribution which should be under the calculated
diameter, [-]
n : int, optional
None (for the `order` specified when the distribution was created),
0 (number), 1 (length), 2 (area), 3 (volume/mass),
or any integer, [-]
Returns
-------
d : float
Particle size diameter, [m]
Examples
--------
>>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
>>> psd.dn(.5)
5e-06
>>> psd.dn(1)
0.00029474365335233776
>>> psd.dn(0)
0.0 | Below is the the instruction that describes the task:
### Input:
r'''Computes the diameter at which a specified `fraction` of the
distribution falls under. Utilizes a bounded solver to search for the
desired diameter.
Parameters
----------
fraction : float
Fraction of the distribution which should be under the calculated
diameter, [-]
n : int, optional
None (for the `order` specified when the distribution was created),
0 (number), 1 (length), 2 (area), 3 (volume/mass),
or any integer, [-]
Returns
-------
d : float
Particle size diameter, [m]
Examples
--------
>>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
>>> psd.dn(.5)
5e-06
>>> psd.dn(1)
0.00029474365335233776
>>> psd.dn(0)
0.0
### Response:
def dn(self, fraction, n=None):
r'''Computes the diameter at which a specified `fraction` of the
distribution falls under. Utilizes a bounded solver to search for the
desired diameter.
Parameters
----------
fraction : float
Fraction of the distribution which should be under the calculated
diameter, [-]
n : int, optional
None (for the `order` specified when the distribution was created),
0 (number), 1 (length), 2 (area), 3 (volume/mass),
or any integer, [-]
Returns
-------
d : float
Particle size diameter, [m]
Examples
--------
>>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3)
>>> psd.dn(.5)
5e-06
>>> psd.dn(1)
0.00029474365335233776
>>> psd.dn(0)
0.0
'''
if fraction == 1.0:
# Avoid returning the maximum value of the search interval
fraction = 1.0 - epsilon
if fraction < 0:
raise ValueError('Fraction must be more than 0')
elif fraction == 0: # pragma: no cover
if self.truncated:
return self.d_min
return 0.0
# Solve to float precision limit - works well, but is there a real
# point when with mpmath it would never happen?
# dist.cdf(dist.dn(0)-1e-35) == 0
# dist.cdf(dist.dn(0)-1e-36) == input
# dn(0) == 1.9663615597466143e-20
# def err(d):
# cdf = self.cdf(d, n=n)
# if cdf == 0:
# cdf = -1
# return cdf
# return brenth(err, self.d_minimum, self.d_excessive, maxiter=1000, xtol=1E-200)
elif fraction > 1:
raise ValueError('Fraction must be less than or equal to 1')
# As the dn may be incredibly small, it is required for the absolute
# tolerance to not be happy - it needs to continue iterating as long
# as necessary to pin down the answer
return brenth(lambda d:self.cdf(d, n=n) -fraction,
self.d_minimum, self.d_excessive, maxiter=1000, xtol=1E-200) |
def _new(self, dx_hash, media_type=None, **kwargs):
"""
:param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes.
:type dx_hash: dict
:param media_type: Internet Media Type
:type media_type: string
Creates a new remote file with media type *media_type*, if given.
"""
if media_type is not None:
dx_hash["media"] = media_type
resp = dxpy.api.file_new(dx_hash, **kwargs)
self.set_ids(resp["id"], dx_hash["project"]) | :param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes.
:type dx_hash: dict
:param media_type: Internet Media Type
:type media_type: string
Creates a new remote file with media type *media_type*, if given. | Below is the the instruction that describes the task:
### Input:
:param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes.
:type dx_hash: dict
:param media_type: Internet Media Type
:type media_type: string
Creates a new remote file with media type *media_type*, if given.
### Response:
def _new(self, dx_hash, media_type=None, **kwargs):
"""
:param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes.
:type dx_hash: dict
:param media_type: Internet Media Type
:type media_type: string
Creates a new remote file with media type *media_type*, if given.
"""
if media_type is not None:
dx_hash["media"] = media_type
resp = dxpy.api.file_new(dx_hash, **kwargs)
self.set_ids(resp["id"], dx_hash["project"]) |
def pad(self, pad):
"""
Pad this tile by an equal amount on each side as specified by pad
>>> Tile(10).pad(2)
Tile [-2, -2, -2] -> [12, 12, 12] ([14, 14, 14])
>>> Tile(10).pad([1,2,3])
Tile [-1, -2, -3] -> [11, 12, 13] ([12, 14, 16])
"""
tile = self.copy()
tile.l -= pad
tile.r += pad
return tile | Pad this tile by an equal amount on each side as specified by pad
>>> Tile(10).pad(2)
Tile [-2, -2, -2] -> [12, 12, 12] ([14, 14, 14])
>>> Tile(10).pad([1,2,3])
Tile [-1, -2, -3] -> [11, 12, 13] ([12, 14, 16]) | Below is the the instruction that describes the task:
### Input:
Pad this tile by an equal amount on each side as specified by pad
>>> Tile(10).pad(2)
Tile [-2, -2, -2] -> [12, 12, 12] ([14, 14, 14])
>>> Tile(10).pad([1,2,3])
Tile [-1, -2, -3] -> [11, 12, 13] ([12, 14, 16])
### Response:
def pad(self, pad):
"""
Pad this tile by an equal amount on each side as specified by pad
>>> Tile(10).pad(2)
Tile [-2, -2, -2] -> [12, 12, 12] ([14, 14, 14])
>>> Tile(10).pad([1,2,3])
Tile [-1, -2, -3] -> [11, 12, 13] ([12, 14, 16])
"""
tile = self.copy()
tile.l -= pad
tile.r += pad
return tile |
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_port_description(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
remote_port_description = ET.SubElement(lldp_neighbor_detail, "remote-port-description")
remote_port_description.text = kwargs.pop('remote_port_description')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_port_description(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
remote_port_description = ET.SubElement(lldp_neighbor_detail, "remote-port-description")
remote_port_description.text = kwargs.pop('remote_port_description')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def verify2(self):
"""Last verification step.
The derived keys (output, input) are returned here.
"""
output_key = hkdf_expand('MediaRemote-Salt',
'MediaRemote-Write-Encryption-Key',
self._shared)
input_key = hkdf_expand('MediaRemote-Salt',
'MediaRemote-Read-Encryption-Key',
self._shared)
log_binary(_LOGGER, 'Keys', Output=output_key, Input=input_key)
return output_key, input_key | Last verification step.
The derived keys (output, input) are returned here. | Below is the the instruction that describes the task:
### Input:
Last verification step.
The derived keys (output, input) are returned here.
### Response:
def verify2(self):
"""Last verification step.
The derived keys (output, input) are returned here.
"""
output_key = hkdf_expand('MediaRemote-Salt',
'MediaRemote-Write-Encryption-Key',
self._shared)
input_key = hkdf_expand('MediaRemote-Salt',
'MediaRemote-Read-Encryption-Key',
self._shared)
log_binary(_LOGGER, 'Keys', Output=output_key, Input=input_key)
return output_key, input_key |
def save(self):
"""Convert to JSON.
Returns
-------
`dict`
JSON data.
"""
data = super().save()
data['end_chars'] = self.end_chars
data['default_end'] = self.default_end
return data | Convert to JSON.
Returns
-------
`dict`
JSON data. | Below is the the instruction that describes the task:
### Input:
Convert to JSON.
Returns
-------
`dict`
JSON data.
### Response:
def save(self):
"""Convert to JSON.
Returns
-------
`dict`
JSON data.
"""
data = super().save()
data['end_chars'] = self.end_chars
data['default_end'] = self.default_end
return data |
def TextWidget(*args, **kw):
"""Forces a parameter value to be text"""
kw['value'] = str(kw['value'])
kw.pop('options', None)
return TextInput(*args,**kw) | Forces a parameter value to be text | Below is the the instruction that describes the task:
### Input:
Forces a parameter value to be text
### Response:
def TextWidget(*args, **kw):
"""Forces a parameter value to be text"""
kw['value'] = str(kw['value'])
kw.pop('options', None)
return TextInput(*args,**kw) |
def is_data_value(searchpath, searchtree, dtype = None, empty_is_false = False):
"""
Follow searchpath through the datatree in searchtree
and report if there exists a value of type dtype
searchpath is a list of keys/indices
If dtype is None check for any value
you can also supply a tuple to dtype
"""
if isinstance(searchpath, (str, unicode, int)):
searchpath = [searchpath]
if not isinstance(searchpath, (list, tuple)):
return False
for d in searchpath:
if isinstance(searchtree, dict):
if not d in searchtree.keys():
return False
elif isinstance(searchtree, (list, tuple)):
if (not isinstance(d, int) or (d >= 0 and d >= len(searchtree)) or (d < 0 and -d > len(searchtree))):
return False
else:
return False
searchtree = searchtree[d]
if dtype == None and not (empty_is_false and searchtree == None):
return True
if empty_is_false and searchtree in (None, "", {}, []):
return False
if isinstance(dtype, tuple):
dtype = list(dtype)
elif not isinstance(dtype, list):
dtype = [dtype]
if float in dtype and not int in dtype:
dtype.append(int)
if str in dtype or unicode in dtype or 'string' in dtype:
for dtp in (str, unicode, 'string'):
while dtp in dtype:
dtype.remove(dtp)
dtype.extend([str, unicode])
if list in dtype or tuple in dtype or 'list' in dtype:
for dtp in (list, tuple, 'list'):
while dtp in dtype:
dtype.remove(dtp)
dtype.extend([list, tuple])
dtype = tuple(dtype)
return bool(isinstance(searchtree, dtype)) | Follow searchpath through the datatree in searchtree
and report if there exists a value of type dtype
searchpath is a list of keys/indices
If dtype is None check for any value
you can also supply a tuple to dtype | Below is the the instruction that describes the task:
### Input:
Follow searchpath through the datatree in searchtree
and report if there exists a value of type dtype
searchpath is a list of keys/indices
If dtype is None check for any value
you can also supply a tuple to dtype
### Response:
def is_data_value(searchpath, searchtree, dtype = None, empty_is_false = False):
"""
Follow searchpath through the datatree in searchtree
and report if there exists a value of type dtype
searchpath is a list of keys/indices
If dtype is None check for any value
you can also supply a tuple to dtype
"""
if isinstance(searchpath, (str, unicode, int)):
searchpath = [searchpath]
if not isinstance(searchpath, (list, tuple)):
return False
for d in searchpath:
if isinstance(searchtree, dict):
if not d in searchtree.keys():
return False
elif isinstance(searchtree, (list, tuple)):
if (not isinstance(d, int) or (d >= 0 and d >= len(searchtree)) or (d < 0 and -d > len(searchtree))):
return False
else:
return False
searchtree = searchtree[d]
if dtype == None and not (empty_is_false and searchtree == None):
return True
if empty_is_false and searchtree in (None, "", {}, []):
return False
if isinstance(dtype, tuple):
dtype = list(dtype)
elif not isinstance(dtype, list):
dtype = [dtype]
if float in dtype and not int in dtype:
dtype.append(int)
if str in dtype or unicode in dtype or 'string' in dtype:
for dtp in (str, unicode, 'string'):
while dtp in dtype:
dtype.remove(dtp)
dtype.extend([str, unicode])
if list in dtype or tuple in dtype or 'list' in dtype:
for dtp in (list, tuple, 'list'):
while dtp in dtype:
dtype.remove(dtp)
dtype.extend([list, tuple])
dtype = tuple(dtype)
return bool(isinstance(searchtree, dtype)) |
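A self-contained sketch with hypothetical data; the helper is Python 2-era code (it references unicode), so alias unicode to str when trying it in the same module on Python 3:
unicode = str  # Python 3 shim for this Python 2-era helper

config = {"channels": [{"name": "BBC1", "id": 7}, {"name": ""}]}
print(is_data_value(["channels", 0, "name"], config, str))   # True
print(is_data_value(["channels", 1, "name"], config, str,
                    empty_is_false=True))                    # False: value is empty
print(is_data_value(["channels", 2, "name"], config))        # False: index out of range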
def __apply_nested_option(self, option_name, field_names, set_operation):
"""Apply nested options to nested fields"""
# Split nested field names on the first dot.
nested_fields = [name.split('.', 1) for name in field_names if '.' in name]
# Partition the nested field names by parent field.
nested_options = defaultdict(list)
for parent, nested_names in nested_fields:
nested_options[parent].append(nested_names)
# Apply the nested field options.
for key, options in iter(nested_options.items()):
new_options = self.set_class(options)
original_options = getattr(self.declared_fields[key], option_name, ())
if original_options:
if set_operation == 'union':
new_options |= self.set_class(original_options)
if set_operation == 'intersection':
new_options &= self.set_class(original_options)
setattr(self.declared_fields[key], option_name, new_options) | Apply nested options to nested fields | Below is the the instruction that describes the task:
### Input:
Apply nested options to nested fields
### Response:
def __apply_nested_option(self, option_name, field_names, set_operation):
"""Apply nested options to nested fields"""
# Split nested field names on the first dot.
nested_fields = [name.split('.', 1) for name in field_names if '.' in name]
# Partition the nested field names by parent field.
nested_options = defaultdict(list)
for parent, nested_names in nested_fields:
nested_options[parent].append(nested_names)
# Apply the nested field options.
for key, options in iter(nested_options.items()):
new_options = self.set_class(options)
original_options = getattr(self.declared_fields[key], option_name, ())
if original_options:
if set_operation == 'union':
new_options |= self.set_class(original_options)
if set_operation == 'intersection':
new_options &= self.set_class(original_options)
setattr(self.declared_fields[key], option_name, new_options) |
def _postprocess_options(dbg, opts):
''' Handle options (`opts') that feed into the debugger (`dbg')'''
# Set dbg.settings['printset']
print_events = []
if opts.fntrace: print_events = ['c_call', 'c_return', 'call', 'return']
# if opts.linetrace: print_events += ['line']
if len(print_events):
dbg.settings['printset'] = frozenset(print_events)
pass
for setting in ('basename', 'different',):
dbg.settings[setting] = getattr(opts, setting)
pass
dbg.settings['highlight'] = 'plain'
Mdebugger.debugger_obj = dbg
return | Handle options (`opts') that feed into the debugger (`dbg') | Below is the the instruction that describes the task:
### Input:
Handle options (`opts') that feed into the debugger (`dbg')
### Response:
def _postprocess_options(dbg, opts):
''' Handle options (`opts') that feed into the debugger (`dbg')'''
# Set dbg.settings['printset']
print_events = []
if opts.fntrace: print_events = ['c_call', 'c_return', 'call', 'return']
# if opts.linetrace: print_events += ['line']
if len(print_events):
dbg.settings['printset'] = frozenset(print_events)
pass
for setting in ('basename', 'different',):
dbg.settings[setting] = getattr(opts, setting)
pass
dbg.settings['highlight'] = 'plain'
Mdebugger.debugger_obj = dbg
return |
def add_definition_tags(self, tags, project, definition_id):
"""AddDefinitionTags.
[Preview API] Adds multiple tags to a definition.
:param [str] tags: The tags to add.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
:rtype: [str]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
content = self._serialize.body(tags, '[str]')
response = self._send(http_method='POST',
location_id='cb894432-134a-4d31-a839-83beceaace4b',
version='5.0-preview.2',
route_values=route_values,
content=content)
return self._deserialize('[str]', self._unwrap_collection(response)) | AddDefinitionTags.
[Preview API] Adds multiple tags to a definition.
:param [str] tags: The tags to add.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
:rtype: [str] | Below is the the instruction that describes the task:
### Input:
AddDefinitionTags.
[Preview API] Adds multiple tags to a definition.
:param [str] tags: The tags to add.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
:rtype: [str]
### Response:
def add_definition_tags(self, tags, project, definition_id):
"""AddDefinitionTags.
[Preview API] Adds multiple tags to a definition.
:param [str] tags: The tags to add.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
:rtype: [str]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
content = self._serialize.body(tags, '[str]')
response = self._send(http_method='POST',
location_id='cb894432-134a-4d31-a839-83beceaace4b',
version='5.0-preview.2',
route_values=route_values,
content=content)
return self._deserialize('[str]', self._unwrap_collection(response)) |
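A hedged invocation sketch through the azure-devops Python SDK this client ships with; the organisation URL, token, project and definition id are placeholders:
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url="https://dev.azure.com/myorg",
                        creds=BasicAuthentication("", "<personal-access-token>"))
build_client = connection.clients.get_build_client()
tags = build_client.add_definition_tags(["nightly", "linux"],
                                        project="MyProject", definition_id=42)
print(tags)  # the definition's tags after the add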
def collect_reponames():
"""
Try to figure out a list of repos to consider by default from the contents of the working directory.
"""
reponames = []
#try to figure out the repo from git repo in current directory
try:
with open(os.devnull) as devnull:
remote_data = subprocess.check_output(["git","remote","-v","show"],stderr=devnull)
branches = {}
for line in remote_data.decode('utf-8').split("\n"):
if line.strip() == "":
continue
remote_match = re_mote.match(line)
if not remote_match is None:
branches[remote_match.group(1)] = remote_match.group(5)
if len(branches) > 0:
if "origin" in branches:
reponames.append(branches["origin"])
else:
reponames.append(branches.values()[0])
except OSError:
pass
except subprocess.CalledProcessError:
pass
#scan html files for further repos to consider
for fname in glob.iglob("*.html"):
fid = open(fname,"r","utf8")
#check the second line for the repo marker
fid.readline()
line = fid.readline()
match = re.match(repo_marker_re,line)
if not match is None:
reponames.append(match.group(1))
reponames = list(set(reponames))
return reponames | Try to figure out a list of repos to consider by default from the contents of the working directory. | Below is the the instruction that describes the task:
### Input:
Try to figure out a list of repos to consider by default from the contents of the working directory.
### Response:
def collect_reponames():
"""
Try to figure out a list of repos to consider by default from the contents of the working directory.
"""
reponames = []
#try to figure out the repo from git repo in current directory
try:
with open(os.devnull) as devnull:
remote_data = subprocess.check_output(["git","remote","-v","show"],stderr=devnull)
branches = {}
for line in remote_data.decode('utf-8').split("\n"):
if line.strip() == "":
continue
remote_match = re_mote.match(line)
if not remote_match is None:
branches[remote_match.group(1)] = remote_match.group(5)
if len(branches) > 0:
if "origin" in branches:
reponames.append(branches["origin"])
else:
reponames.append(branches.values()[0])
except OSError:
pass
except subprocess.CalledProcessError:
pass
#scan html files for further repos to consider
for fname in glob.iglob("*.html"):
fid = open(fname,"r","utf8")
#check the second line for the repo marker
fid.readline()
line = fid.readline()
match = re.match(repo_marker_re,line)
if not match is None:
reponames.append(match.group(1))
reponames = list(set(reponames))
return reponames |
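Usage is a single call; run it from inside a git checkout (or a directory holding the generated *.html reports):
if __name__ == "__main__":
    # prints the de-duplicated repository names inferred from `git remote` and *.html files
    for repo in collect_reponames():
        print(repo)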
def remove_user(self, name, session=None):
"""**DEPRECATED**: Remove user `name` from this :class:`Database`.
User `name` will no longer have permissions to access this
:class:`Database`.
.. note:: remove_user is deprecated and will be removed in PyMongo
4.0. Use the dropUser command instead::
db.command("dropUser", "user")
:Parameters:
- `name`: the name of the user to remove
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter. Deprecated remove_user.
"""
warnings.warn("remove_user is deprecated and will be removed in "
"PyMongo 4.0. Use db.command with dropUser "
"instead", DeprecationWarning, stacklevel=2)
cmd = SON([("dropUser", name)])
# Don't send {} as writeConcern.
if self.write_concern.acknowledged and self.write_concern.document:
cmd["writeConcern"] = self.write_concern.document
self.command(cmd, session=session) | **DEPRECATED**: Remove user `name` from this :class:`Database`.
User `name` will no longer have permissions to access this
:class:`Database`.
.. note:: remove_user is deprecated and will be removed in PyMongo
4.0. Use the dropUser command instead::
db.command("dropUser", "user")
:Parameters:
- `name`: the name of the user to remove
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter. Deprecated remove_user. | Below is the the instruction that describes the task:
### Input:
**DEPRECATED**: Remove user `name` from this :class:`Database`.
User `name` will no longer have permissions to access this
:class:`Database`.
.. note:: remove_user is deprecated and will be removed in PyMongo
4.0. Use the dropUser command instead::
db.command("dropUser", "user")
:Parameters:
- `name`: the name of the user to remove
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter. Deprecated remove_user.
### Response:
def remove_user(self, name, session=None):
"""**DEPRECATED**: Remove user `name` from this :class:`Database`.
User `name` will no longer have permissions to access this
:class:`Database`.
.. note:: remove_user is deprecated and will be removed in PyMongo
4.0. Use the dropUser command instead::
db.command("dropUser", "user")
:Parameters:
- `name`: the name of the user to remove
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter. Deprecated remove_user.
"""
warnings.warn("remove_user is deprecated and will be removed in "
"PyMongo 4.0. Use db.command with dropUser "
"instead", DeprecationWarning, stacklevel=2)
cmd = SON([("dropUser", name)])
# Don't send {} as writeConcern.
if self.write_concern.acknowledged and self.write_concern.document:
cmd["writeConcern"] = self.write_concern.document
self.command(cmd, session=session) |
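Because the method is deprecated, here is a sketch of the replacement its docstring recommends (connection details are placeholders):
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
db = client["app_db"]
db.command("dropUser", "report_user")  # preferred over the deprecated db.remove_user(...)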
def render(self, name=None, template=None, context={}):
'''Render Template meta from jinja2 templates.
'''
if isinstance(template, Template):
_template = template
else:
_template = Template.objects.get(name=name)
# Maybe cache or save local ?
response = self.env.from_string(
_template.content).render(context)
return response | Render Template meta from jinja2 templates. | Below is the the instruction that describes the task:
### Input:
Render Template meta from jinja2 templates.
### Response:
def render(self, name=None, template=None, context={}):
'''Render Template meta from jinja2 templates.
'''
if isinstance(template, Template):
_template = template
else:
_template = Template.objects.get(name=name)
# Maybe cache or save local ?
response = self.env.from_string(
_template.content).render(context)
return response |
def slew(self, value):
'''move to a given position in the file'''
if float(value) != self.filepos:
pos = float(value) * self.filesize
self.mlog.f.seek(int(pos))
self.find_message() | move to a given position in the file | Below is the the instruction that describes the task:
### Input:
move to a given position in the file
### Response:
def slew(self, value):
'''move to a given position in the file'''
if float(value) != self.filepos:
pos = float(value) * self.filesize
self.mlog.f.seek(int(pos))
self.find_message() |
def do_mode(self, target, msg, nick, send):
"""reop and handle guard violations."""
mode_changes = irc.modes.parse_channel_modes(msg)
with self.data_lock:
for change in mode_changes:
if change[1] == 'v':
self.voiced[target][change[2]] = True if change[0] == '+' else False
if change[1] == 'o':
self.opers[target][change[2]] = True if change[0] == '+' else False
# reop
# FIXME: handle -o+o msbobBot msbobBot
if [x for x in mode_changes if self.check_mode(x)]:
send("%s: :(" % nick, target=target)
# Assume bot admins know what they're doing.
if not self.is_admin(None, nick):
send("OP %s" % target, target='ChanServ')
send("UNBAN %s" % target, target='ChanServ')
if len(self.guarded) > 0:
# if user is guarded and quieted, devoiced, or deopped, fix that
regex = r"(.*(-v|-o|\+q|\+b)[^ ]*) (%s)" % "|".join(self.guarded)
match = re.search(regex, msg)
if match and nick not in [match.group(3), self.connection.real_nickname]:
modestring = "+voe-qb %s" % (" ".join([match.group(3)] * 5))
self.connection.mode(target, modestring)
send('Mode %s on %s by the guard system' % (modestring, target), target=self.config['core']['ctrlchan']) | reop and handle guard violations. | Below is the the instruction that describes the task:
### Input:
reop and handle guard violations.
### Response:
def do_mode(self, target, msg, nick, send):
"""reop and handle guard violations."""
mode_changes = irc.modes.parse_channel_modes(msg)
with self.data_lock:
for change in mode_changes:
if change[1] == 'v':
self.voiced[target][change[2]] = True if change[0] == '+' else False
if change[1] == 'o':
self.opers[target][change[2]] = True if change[0] == '+' else False
# reop
# FIXME: handle -o+o msbobBot msbobBot
if [x for x in mode_changes if self.check_mode(x)]:
send("%s: :(" % nick, target=target)
# Assume bot admins know what they're doing.
if not self.is_admin(None, nick):
send("OP %s" % target, target='ChanServ')
send("UNBAN %s" % target, target='ChanServ')
if len(self.guarded) > 0:
# if user is guarded and quieted, devoiced, or deopped, fix that
regex = r"(.*(-v|-o|\+q|\+b)[^ ]*) (%s)" % "|".join(self.guarded)
match = re.search(regex, msg)
if match and nick not in [match.group(3), self.connection.real_nickname]:
modestring = "+voe-qb %s" % (" ".join([match.group(3)] * 5))
self.connection.mode(target, modestring)
send('Mode %s on %s by the guard system' % (modestring, target), target=self.config['core']['ctrlchan']) |
def randomWalkFunction(requestContext, name, step=60):
"""
Short Alias: randomWalk()
Returns a random walk starting at 0. This is great for testing when there
is no real data in whisper.
Example::
&target=randomWalk("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == x(t-1)+random()-0.5, and x(0) == 0.
Accepts an optional second argument as step parameter (default step is
60 sec).
"""
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
current = 0
while when < requestContext["endTime"]:
values.append(current)
current += random.random() - 0.5
when += delta
return [TimeSeries(
name, int(epoch(requestContext["startTime"])),
int(epoch(requestContext["endTime"])),
step, values)] | Short Alias: randomWalk()
Returns a random walk starting at 0. This is great for testing when there
is no real data in whisper.
Example::
&target=randomWalk("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == x(t-1)+random()-0.5, and x(0) == 0.
Accepts an optional second argument as step parameter (default step is
60 sec). | Below is the the instruction that describes the task:
### Input:
Short Alias: randomWalk()
Returns a random walk starting at 0. This is great for testing when there
is no real data in whisper.
Example::
&target=randomWalk("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == x(t-1)+random()-0.5, and x(0) == 0.
Accepts an optional second argument as step parameter (default step is
60 sec).
### Response:
def randomWalkFunction(requestContext, name, step=60):
"""
Short Alias: randomWalk()
Returns a random walk starting at 0. This is great for testing when there
is no real data in whisper.
Example::
&target=randomWalk("The.time.series")
This would create a series named "The.time.series" that contains points
where x(t) == x(t-1)+random()-0.5, and x(0) == 0.
Accepts an optional second argument as step parameter (default step is
60 sec).
"""
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
current = 0
while when < requestContext["endTime"]:
values.append(current)
current += random.random() - 0.5
when += delta
return [TimeSeries(
name, int(epoch(requestContext["startTime"])),
int(epoch(requestContext["endTime"])),
step, values)] |
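A small self-contained sketch of the recurrence described above (x(0) == 0 and x(t) == x(t-1) + random() - 0.5), kept separate from the TimeSeries plumbing:

import random

def random_walk_values(n_points):
    # x(0) == 0, then each step adds random() - 0.5
    values, current = [], 0.0
    for _ in range(n_points):
        values.append(current)
        current += random.random() - 0.5
    return values

print(random_walk_values(5))           # e.g. [0.0, 0.23, -0.11, 0.05, 0.41]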
def unpause_topic(self, topic):
"""Resume message flow to channels of an existing, paused, topic."""
nsq.assert_valid_topic_name(topic)
return self._request('POST', '/topic/unpause', fields={'topic': topic}) | Resume message flow to channels of an existing, paused, topic. | Below is the the instruction that describes the task:
### Input:
Resume message flow to channels of an existing, paused, topic.
### Response:
def unpause_topic(self, topic):
"""Resume message flow to channels of an existing, paused, topic."""
nsq.assert_valid_topic_name(topic)
return self._request('POST', '/topic/unpause', fields={'topic': topic}) |
def send_script_async(self, conn_id, data, progress_callback, callback):
"""Asynchronously send a script to this IOTile device
Args:
conn_id (int): A unique identifier that will refer to this connection
data (string): the script to send to the device
progress_callback (callable): A function to be called with status on our progress, called as:
progress_callback(done_count, total_count)
callback (callable): A callback for when we have finished sending the script. The callback will be called as:
callback(connection_id, adapter_id, success, failure_reason)
'connection_id': the connection id
'adapter_id': this adapter's id
'success': a bool indicating whether we received a response to our attempted RPC
'failure_reason': a string with the reason for the failure if success == False
"""
def _on_finished(_name, _retval, exception):
if exception is not None:
callback(conn_id, self.id, False, str(exception))
return
callback(conn_id, self.id, True, None)
self._control_thread.command(JLinkControlThread.SEND_SCRIPT, _on_finished, self._device_info, self._control_info, data, progress_callback) | Asynchronously send a script to this IOTile device
Args:
conn_id (int): A unique identifier that will refer to this connection
data (string): the script to send to the device
progress_callback (callable): A function to be called with status on our progress, called as:
progress_callback(done_count, total_count)
callback (callable): A callback for when we have finished sending the script. The callback will be called as:
callback(connection_id, adapter_id, success, failure_reason)
'connection_id': the connection id
'adapter_id': this adapter's id
'success': a bool indicating whether we received a response to our attempted RPC
'failure_reason': a string with the reason for the failure if success == False | Below is the the instruction that describes the task:
### Input:
Asynchronously send a script to this IOTile device
Args:
conn_id (int): A unique identifier that will refer to this connection
data (string): the script to send to the device
progress_callback (callable): A function to be called with status on our progress, called as:
progress_callback(done_count, total_count)
callback (callable): A callback for when we have finished sending the script. The callback will be called as:
callback(connection_id, adapter_id, success, failure_reason)
'connection_id': the connection id
'adapter_id': this adapter's id
'success': a bool indicating whether we received a response to our attempted RPC
'failure_reason': a string with the reason for the failure if success == False
### Response:
def send_script_async(self, conn_id, data, progress_callback, callback):
"""Asynchronously send a script to this IOTile device
Args:
conn_id (int): A unique identifier that will refer to this connection
data (string): the script to send to the device
progress_callback (callable): A function to be called with status on our progress, called as:
progress_callback(done_count, total_count)
callback (callable): A callback for when we have finished sending the script. The callback will be called as:
callback(connection_id, adapter_id, success, failure_reason)
'connection_id': the connection id
'adapter_id': this adapter's id
'success': a bool indicating whether we received a response to our attempted RPC
'failure_reason': a string with the reason for the failure if success == False
"""
def _on_finished(_name, _retval, exception):
if exception is not None:
callback(conn_id, self.id, False, str(exception))
return
callback(conn_id, self.id, True, None)
self._control_thread.command(JLinkControlThread.SEND_SCRIPT, _on_finished, self._device_info, self._control_info, data, progress_callback) |
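A caller-side sketch for the method above; the adapter instance, connection id and script payload are assumptions, only the two callback signatures come from the docstring:

def on_progress(done_count, total_count):
    print("sent %d/%d chunks" % (done_count, total_count))

def on_complete(connection_id, adapter_id, success, failure_reason):
    if success:
        print("script accepted on connection %s" % connection_id)
    else:
        print("script rejected: %s" % failure_reason)

# Assuming `adapter` is a connected adapter exposing send_script_async,
# `conn_id` came from its connect call and `script_bytes` is the payload:
# adapter.send_script_async(conn_id, script_bytes, on_progress, on_complete)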
def _apply_updates(self, gradients):
"""Apply AdaGrad update to parameters.
Parameters
----------
gradients
Returns
-------
"""
if not hasattr(self, 'optimizers'):
self.optimizers = \
{obj: AdaGradOptimizer(self.learning_rate)
for obj in ['W', 'C', 'bw', 'bc']}
self.W -= self.optimizers['W'].get_step(gradients['W'])
self.C -= self.optimizers['C'].get_step(gradients['C'])
self.bw -= self.optimizers['bw'].get_step(gradients['bw'])
self.bc -= self.optimizers['bc'].get_step(gradients['bc']) | Apply AdaGrad update to parameters.
Parameters
----------
gradients
Returns
------- | Below is the the instruction that describes the task:
### Input:
Apply AdaGrad update to parameters.
Parameters
----------
gradients
Returns
-------
### Response:
def _apply_updates(self, gradients):
"""Apply AdaGrad update to parameters.
Parameters
----------
gradients
Returns
-------
"""
if not hasattr(self, 'optimizers'):
self.optimizers = \
{obj: AdaGradOptimizer(self.learning_rate)
for obj in ['W', 'C', 'bw', 'bc']}
self.W -= self.optimizers['W'].get_step(gradients['W'])
self.C -= self.optimizers['C'].get_step(gradients['C'])
self.bw -= self.optimizers['bw'].get_step(gradients['bw'])
self.bc -= self.optimizers['bc'].get_step(gradients['bc']) |
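The AdaGradOptimizer.get_step helper used above is not shown in this entry; the following is only a plausible stand-in for it, implementing the usual AdaGrad rule (scale the gradient by the root of the accumulated squared gradients), not the project's actual class:

import numpy as np

class AdaGradOptimizer(object):
    """Sketch: step = learning_rate * g / (sqrt(sum of g**2 so far) + eps)."""
    def __init__(self, learning_rate, eps=1e-8):
        self.learning_rate = learning_rate
        self.eps = eps
        self._accum = None                         # running sum of squared gradients

    def get_step(self, gradient):
        gradient = np.asarray(gradient, dtype=float)
        if self._accum is None:
            self._accum = np.zeros_like(gradient)
        self._accum += gradient ** 2
        return self.learning_rate * gradient / (np.sqrt(self._accum) + self.eps)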
def configure(self, width, height):
"""See :meth:`set_window_size`."""
self._imgwin_set = True
self.set_window_size(width, height) | See :meth:`set_window_size`. | Below is the the instruction that describes the task:
### Input:
See :meth:`set_window_size`.
### Response:
def configure(self, width, height):
"""See :meth:`set_window_size`."""
self._imgwin_set = True
self.set_window_size(width, height) |
def merge_entities(self, from_entity_ids, to_entity_id, force=False, mount_point=DEFAULT_MOUNT_POINT):
"""Merge many entities into one entity.
Supported methods:
POST: /{mount_point}/entity/merge. Produces: 204 (empty body)
:param from_entity_ids: Entity IDs which needs to get merged.
:type from_entity_ids: array
:param to_entity_id: Entity ID into which all the other entities need to get merged.
:type to_entity_id: str | unicode
:param force: Setting this will follow the 'mine' strategy for merging MFA secrets. If there are secrets of the
same type both in entities that are merged from and in entity into which all others are getting merged,
secrets in the destination will be unaltered. If not set, this API will throw an error containing all the
conflicts.
:type force: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {
'from_entity_ids': from_entity_ids,
'to_entity_id': to_entity_id,
'force': force,
}
api_path = '/v1/{mount_point}/entity/merge'.format(mount_point=mount_point)
return self._adapter.post(
url=api_path,
json=params,
) | Merge many entities into one entity.
Supported methods:
POST: /{mount_point}/entity/merge. Produces: 204 (empty body)
:param from_entity_ids: Entity IDs which needs to get merged.
:type from_entity_ids: array
:param to_entity_id: Entity ID into which all the other entities need to get merged.
:type to_entity_id: str | unicode
:param force: Setting this will follow the 'mine' strategy for merging MFA secrets. If there are secrets of the
same type both in entities that are merged from and in entity into which all others are getting merged,
secrets in the destination will be unaltered. If not set, this API will throw an error containing all the
conflicts.
:type force: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response | Below is the the instruction that describes the task:
### Input:
Merge many entities into one entity.
Supported methods:
POST: /{mount_point}/entity/merge. Produces: 204 (empty body)
:param from_entity_ids: Entity IDs which needs to get merged.
:type from_entity_ids: array
:param to_entity_id: Entity ID into which all the other entities need to get merged.
:type to_entity_id: str | unicode
:param force: Setting this will follow the 'mine' strategy for merging MFA secrets. If there are secrets of the
same type both in entities that are merged from and in entity into which all others are getting merged,
secrets in the destination will be unaltered. If not set, this API will throw an error containing all the
conflicts.
:type force: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
### Response:
def merge_entities(self, from_entity_ids, to_entity_id, force=False, mount_point=DEFAULT_MOUNT_POINT):
"""Merge many entities into one entity.
Supported methods:
POST: /{mount_point}/entity/merge. Produces: 204 (empty body)
:param from_entity_ids: Entity IDs which needs to get merged.
:type from_entity_ids: array
:param to_entity_id: Entity ID into which all the other entities need to get merged.
:type to_entity_id: str | unicode
:param force: Setting this will follow the 'mine' strategy for merging MFA secrets. If there are secrets of the
same type both in entities that are merged from and in entity into which all others are getting merged,
secrets in the destination will be unaltered. If not set, this API will throw an error containing all the
conflicts.
:type force: bool
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {
'from_entity_ids': from_entity_ids,
'to_entity_id': to_entity_id,
'force': force,
}
api_path = '/v1/{mount_point}/entity/merge'.format(mount_point=mount_point)
return self._adapter.post(
url=api_path,
json=params,
) |
def execute_command(self, generator, write_concern, session):
"""Execute using write commands.
"""
# nModified is only reported for write commands, not legacy ops.
full_result = {
"writeErrors": [],
"writeConcernErrors": [],
"nInserted": 0,
"nUpserted": 0,
"nMatched": 0,
"nModified": 0,
"nRemoved": 0,
"upserted": [],
}
op_id = _randint()
def retryable_bulk(session, sock_info, retryable):
self._execute_command(
generator, write_concern, session, sock_info, op_id,
retryable, full_result)
client = self.collection.database.client
with client._tmp_session(session) as s:
client._retry_with_session(
self.is_retryable, retryable_bulk, s, self)
if full_result["writeErrors"] or full_result["writeConcernErrors"]:
_raise_bulk_write_error(full_result)
return full_result | Execute using write commands. | Below is the the instruction that describes the task:
### Input:
Execute using write commands.
### Response:
def execute_command(self, generator, write_concern, session):
"""Execute using write commands.
"""
# nModified is only reported for write commands, not legacy ops.
full_result = {
"writeErrors": [],
"writeConcernErrors": [],
"nInserted": 0,
"nUpserted": 0,
"nMatched": 0,
"nModified": 0,
"nRemoved": 0,
"upserted": [],
}
op_id = _randint()
def retryable_bulk(session, sock_info, retryable):
self._execute_command(
generator, write_concern, session, sock_info, op_id,
retryable, full_result)
client = self.collection.database.client
with client._tmp_session(session) as s:
client._retry_with_session(
self.is_retryable, retryable_bulk, s, self)
if full_result["writeErrors"] or full_result["writeConcernErrors"]:
_raise_bulk_write_error(full_result)
return full_result |
def OnCellFontSize(self, event):
"""Cell font size event handler"""
with undo.group(_("Font size")):
self.grid.actions.set_attr("pointsize", event.size)
self.grid.ForceRefresh()
self.grid.update_attribute_toolbar()
event.Skip() | Cell font size event handler | Below is the the instruction that describes the task:
### Input:
Cell font size event handler
### Response:
def OnCellFontSize(self, event):
"""Cell font size event handler"""
with undo.group(_("Font size")):
self.grid.actions.set_attr("pointsize", event.size)
self.grid.ForceRefresh()
self.grid.update_attribute_toolbar()
event.Skip() |
def history_json(self, nb=0):
"""Return the history in ISO JSON format"""
return [(i[0].isoformat(), i[1]) for i in self._history[-nb:]] | Return the history in ISO JSON format | Below is the the instruction that describes the task:
### Input:
Return the history in ISO JSON format
### Response:
def history_json(self, nb=0):
"""Return the history in ISO JSON format"""
return [(i[0].isoformat(), i[1]) for i in self._history[-nb:]] |
def _writeBlock(block, blockID):
'''writes the block to a file with the id'''
with open("blockIDs.txt", "a") as fp:
fp.write("blockID: " + str(blockID) + "\n")
sentences = ""
for sentence in block:
sentences += sentence+","
fp.write("block sentences: "+sentences[:-1]+"\n")
fp.write("\n") | writes the block to a file with the id | Below is the the instruction that describes the task:
### Input:
writes the block to a file with the id
### Response:
def _writeBlock(block, blockID):
'''writes the block to a file with the id'''
with open("blockIDs.txt", "a") as fp:
fp.write("blockID: " + str(blockID) + "\n")
sentences = ""
for sentence in block:
sentences += sentence+","
fp.write("block sentences: "+sentences[:-1]+"\n")
fp.write("\n") |
def _calc_dimension(self, dim_val, dim_max, font_dim):
"Calculate final pos and size (auto, absolute in pixels & relative)"
if dim_val is None:
return -1 # let wx automatic pos/size
elif isinstance(dim_val, int):
return dim_val # use fixed pixel value (absolute)
elif isinstance(dim_val, basestring):
if dim_val.endswith("%"):
# percentage, relative to parent max size:
dim_val = int(dim_val[:-1])
dim_val = dim_val / 100.0 * dim_max
elif dim_val.endswith("em"):
# use current font size (supports fractions):
dim_val = float(dim_val[:-2])
dim_val = dim_val * font_dim
elif dim_val.endswith("px"):
# fixed pixels
dim_val = dim_val[:-2]
elif dim_val == "" or dim_val == "auto":
dim_val = -1
return int(dim_val) | Calculate final pos and size (auto, absolute in pixels & relative) | Below is the the instruction that describes the task:
### Input:
Calculate final pos and size (auto, absolute in pixels & relative)
### Response:
def _calc_dimension(self, dim_val, dim_max, font_dim):
"Calculate final pos and size (auto, absolute in pixels & relative)"
if dim_val is None:
return -1 # let wx automatic pos/size
elif isinstance(dim_val, int):
return dim_val # use fixed pixel value (absolute)
elif isinstance(dim_val, basestring):
if dim_val.endswith("%"):
# percentage, relative to parent max size:
dim_val = int(dim_val[:-1])
dim_val = dim_val / 100.0 * dim_max
elif dim_val.endswith("em"):
# use current font size (supports fractions):
dim_val = float(dim_val[:-2])
dim_val = dim_val * font_dim
elif dim_val.endswith("px"):
# fixed pixels
dim_val = dim_val[:-2]
elif dim_val == "" or dim_val == "auto":
dim_val = -1
return int(dim_val) |
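Worked examples of the three unit forms handled above, assuming a 400 pixel parent dimension and a 10 pixel font (the numbers are illustrative only):

parent_max, font_px = 400, 10
assert int(50 / 100.0 * parent_max) == 200     # "50%"  -> fraction of the parent size
assert int(2.0 * font_px) == 20                # "2em"  -> multiples of the font size
assert int("120") == 120                       # "120px" -> fixed pixel count
# "" or "auto" (or None) -> -1, i.e. let wx pick the position or size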
def upload(self):
"""
upload via the method configured
:return:
"""
if self.upload_method == "setup":
self.upload_by_setup()
if self.upload_method == "twine":
self.upload_by_twine()
if self.upload_method == "gemfury":
self.upload_by_gemfury() | upload via the method configured
:return: | Below is the the instruction that describes the task:
### Input:
upload via the method configured
:return:
### Response:
def upload(self):
"""
upload via the method configured
:return:
"""
if self.upload_method == "setup":
self.upload_by_setup()
if self.upload_method == "twine":
self.upload_by_twine()
if self.upload_method == "gemfury":
self.upload_by_gemfury() |
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src) | Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over. | Below is the the instruction that describes the task:
### Input:
Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
### Response:
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src) |
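A brief usage sketch, assuming the move() defined above is in scope; the temporary paths are placeholders, and the point is only that a same-filesystem move degenerates to os.rename while a cross-device move falls back to copy-then-delete:

import os, tempfile

src_dir = tempfile.mkdtemp()
dst_dir = tempfile.mkdtemp()
src_file = os.path.join(src_dir, "a.txt")
open(src_file, "w").close()

move(src_file, dst_dir)                                   # ends up at dst_dir/a.txt
print(os.path.exists(os.path.join(dst_dir, "a.txt")))     # True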
def _write_cdx_header(self):
'''Write the CDX header.
It writes the fields:
1. a: original URL
2. b: UNIX timestamp
3. m: MIME Type from the HTTP Content-type
4. s: response code
5. k: new style checksum
6. S: raw file record size
7. V: offset in raw file
8. g: filename of raw file
9. u: record ID
'''
with open(self._cdx_filename, mode='a', encoding='utf-8') as out_file:
out_file.write(self.CDX_DELIMINATOR)
out_file.write(self.CDX_DELIMINATOR.join((
'CDX',
'a', 'b', 'm', 's',
'k', 'S', 'V', 'g',
'u'
)))
out_file.write('\n') | Write the CDX header.
It writes the fields:
1. a: original URL
2. b: UNIX timestamp
3. m: MIME Type from the HTTP Content-type
4. s: response code
5. k: new style checksum
6. S: raw file record size
7. V: offset in raw file
8. g: filename of raw file
9. u: record ID | Below is the the instruction that describes the task:
### Input:
Write the CDX header.
It writes the fields:
1. a: original URL
2. b: UNIX timestamp
3. m: MIME Type from the HTTP Content-type
4. s: response code
5. k: new style checksum
6. S: raw file record size
7. V: offset in raw file
8. g: filename of raw file
9. u: record ID
### Response:
def _write_cdx_header(self):
'''Write the CDX header.
It writes the fields:
1. a: original URL
2. b: UNIX timestamp
3. m: MIME Type from the HTTP Content-type
4. s: response code
5. k: new style checksum
6. S: raw file record size
7. V: offset in raw file
8. g: filename of raw file
9. u: record ID
'''
with open(self._cdx_filename, mode='a', encoding='utf-8') as out_file:
out_file.write(self.CDX_DELIMINATOR)
out_file.write(self.CDX_DELIMINATOR.join((
'CDX',
'a', 'b', 'm', 's',
'k', 'S', 'V', 'g',
'u'
)))
out_file.write('\n') |
def _secret_yaml(loader, node):
"""Load secrets and embed it into the configuration YAML."""
fname = os.path.join(os.path.dirname(loader.name), "secrets.yaml")
try:
with open(fname, encoding="utf-8") as secret_file:
secrets = YAML(typ="safe").load(secret_file)
except FileNotFoundError:
raise ValueError("Secrets file {} not found".format(fname)) from None
try:
return secrets[node.value]
except KeyError:
raise ValueError("Secret {} not found".format(node.value)) from None | Load secrets and embed it into the configuration YAML. | Below is the the instruction that describes the task:
### Input:
Load secrets and embed it into the configuration YAML.
### Response:
def _secret_yaml(loader, node):
"""Load secrets and embed it into the configuration YAML."""
fname = os.path.join(os.path.dirname(loader.name), "secrets.yaml")
try:
with open(fname, encoding="utf-8") as secret_file:
secrets = YAML(typ="safe").load(secret_file)
except FileNotFoundError:
raise ValueError("Secrets file {} not found".format(fname)) from None
try:
return secrets[node.value]
except KeyError:
raise ValueError("Secret {} not found".format(node.value)) from None |
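The constructor above still has to be registered for a YAML tag before it fires; one plausible wiring, assuming PyYAML as the outer loader and !secret as the tag (both are assumptions, the entry itself only shows ruamel.yaml being used to read secrets.yaml):

import yaml  # PyYAML, assumed here

yaml.SafeLoader.add_constructor("!secret", _secret_yaml)
with open("configuration.yaml", encoding="utf-8") as config_file:   # hypothetical file
    config = yaml.load(config_file, Loader=yaml.SafeLoader)
# a value written as `password: !secret db_password` is now resolved from
# the secrets.yaml sitting next to the loaded file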
def push_channel(self, content, channel, content_url=None):
'''Push a notification to a Pushed channel.
Param: content -> content of Pushed notification message
channel -> string identifying a Pushed channel
content_url (optional) -> enrich message with URL
Returns Shipment ID as string
'''
parameters = {
'app_key': self.app_key,
'app_secret': self.app_secret,
'target_alias': channel
}
return self._push(content, 'channel', parameters, content_url) | Push a notification to a Pushed channel.
Param: content -> content of Pushed notification message
channel -> string identifying a Pushed channel
content_url (optional) -> enrich message with URL
Returns Shipment ID as string | Below is the the instruction that describes the task:
### Input:
Push a notification to a Pushed channel.
Param: content -> content of Pushed notification message
channel -> string identifying a Pushed channel
content_url (optional) -> enrich message with URL
Returns Shipment ID as string
### Response:
def push_channel(self, content, channel, content_url=None):
'''Push a notification to a Pushed channel.
Param: content -> content of Pushed notification message
channel -> string identifying a Pushed channel
content_url (optional) -> enrich message with URL
Returns Shipment ID as string
'''
parameters = {
'app_key': self.app_key,
'app_secret': self.app_secret,
'target_alias': channel
}
return self._push(content, 'channel', parameters, content_url) |
def has_frames(self, destination):
"""
Whether specified queue has any frames.
@param destination: The queue name (destination).
@type destination: C{str}
@return: Whether there are any frames in the specified queue.
@rtype: C{bool}
"""
session = meta.Session()
sel = select([model.frames_table.c.message_id]).where(
model.frames_table.c.destination == destination)
result = session.execute(sel)
first = result.fetchone()
return first is not None | Whether specified queue has any frames.
@param destination: The queue name (destinationination).
@type destination: C{str}
@return: Whether there are any frames in the specified queue.
@rtype: C{bool} | Below is the the instruction that describes the task:
### Input:
Whether specified queue has any frames.
@param destination: The queue name (destinationination).
@type destination: C{str}
@return: Whether there are any frames in the specified queue.
@rtype: C{bool}
### Response:
def has_frames(self, destination):
"""
Whether specified queue has any frames.
@param destination: The queue name (destination).
@type destination: C{str}
@return: Whether there are any frames in the specified queue.
@rtype: C{bool}
"""
session = meta.Session()
sel = select([model.frames_table.c.message_id]).where(
model.frames_table.c.destination == destination)
result = session.execute(sel)
first = result.fetchone()
return first is not None |
def move_top_cards(self, other, number=1):
"""
Move the top `number` of cards to the top of some `other` deck.
By default only one card will be moved if `number` is not specified.
"""
other.cards.append(reversed(self.cards[-number:])) | Move the top `number` of cards to the top of some `other` deck.
By default only one card will be moved if `number` is not specified. | Below is the the instruction that describes the task:
### Input:
Move the top `number` of cards to the top of some `other` deck.
By default only one card will be moved if `number` is not specified.
### Response:
def move_top_cards(self, other, number=1):
"""
Move the top `number` of cards to the top of some `other` deck.
By default only one card will be moved if `number` is not specified.
"""
other.cards.append(reversed(self.cards[-number:])) |
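As written, the body above appends a single reversed-iterator object to other.cards and never removes the cards from the source deck, which does not quite match the docstring; a corrected sketch, assuming cards is a plain list on both decks, might be:

def move_top_cards_fixed(self, other, number=1):
    moved = self.cards[-number:]               # take the top `number` cards
    del self.cards[-number:]                   # remove them from this deck
    other.cards.extend(reversed(moved))        # lay them onto the other deck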
def set_onscreen_message(self, text, redraw=True):
"""Called by a subclass to update the onscreen message.
Parameters
----------
text : str
The text to show in the display.
"""
width, height = self.get_window_size()
font = self.t_.get('onscreen_font', 'sans serif')
font_size = self.t_.get('onscreen_font_size', None)
if font_size is None:
font_size = self._calc_font_size(width)
# TODO: need some way to accurately estimate text extents
# without actually putting text on the canvas
ht, wd = font_size, font_size
if text is not None:
wd = len(text) * font_size * 1.1
x = (width // 2) - (wd // 2)
y = ((height // 3) * 2) - (ht // 2)
tag = '_$onscreen_msg'
canvas = self.get_private_canvas()
try:
message = canvas.get_object_by_tag(tag)
if text is None:
message.text = ''
else:
message.x = x
message.y = y
message.text = text
message.fontsize = font_size
except KeyError:
if text is None:
text = ''
Text = canvas.get_draw_class('text')
canvas.add(Text(x, y, text=text,
font=font, fontsize=font_size,
color=self.img_fg, coord='window'),
tag=tag, redraw=False)
if redraw:
canvas.update_canvas(whence=3) | Called by a subclass to update the onscreen message.
Parameters
----------
text : str
The text to show in the display. | Below is the the instruction that describes the task:
### Input:
Called by a subclass to update the onscreen message.
Parameters
----------
text : str
The text to show in the display.
### Response:
def set_onscreen_message(self, text, redraw=True):
"""Called by a subclass to update the onscreen message.
Parameters
----------
text : str
The text to show in the display.
"""
width, height = self.get_window_size()
font = self.t_.get('onscreen_font', 'sans serif')
font_size = self.t_.get('onscreen_font_size', None)
if font_size is None:
font_size = self._calc_font_size(width)
# TODO: need some way to accurately estimate text extents
# without actually putting text on the canvas
ht, wd = font_size, font_size
if text is not None:
wd = len(text) * font_size * 1.1
x = (width // 2) - (wd // 2)
y = ((height // 3) * 2) - (ht // 2)
tag = '_$onscreen_msg'
canvas = self.get_private_canvas()
try:
message = canvas.get_object_by_tag(tag)
if text is None:
message.text = ''
else:
message.x = x
message.y = y
message.text = text
message.fontsize = font_size
except KeyError:
if text is None:
text = ''
Text = canvas.get_draw_class('text')
canvas.add(Text(x, y, text=text,
font=font, fontsize=font_size,
color=self.img_fg, coord='window'),
tag=tag, redraw=False)
if redraw:
canvas.update_canvas(whence=3) |
def by_geopoint(self, lat, long):
"""
Perform a Yelp Neighborhood API Search based on a geopoint.
Args:
lat - geopoint latitude
long - geopoint longitude
"""
header, content = self._http_request(self.BASE_URL, lat=lat, long=long)
return json.loads(content) | Perform a Yelp Neighborhood API Search based on a geopoint.
Args:
lat - geopoint latitude
long - geopoint longitude | Below is the the instruction that describes the task:
### Input:
Perform a Yelp Neighborhood API Search based on a geopoint.
Args:
lat - geopoint latitude
long - geopoint longitude
### Response:
def by_geopoint(self, lat, long):
"""
Perform a Yelp Neighborhood API Search based on a geopoint.
Args:
lat - geopoint latitude
long - geopoint longitude
"""
header, content = self._http_request(self.BASE_URL, lat=lat, long=long)
return json.loads(content) |
def login(self, user=None, password=None, restrict_login=None):
"""
Attempt to log in using the given username and password. Subsequent
method calls will use this username and password. Returns False if
login fails, otherwise returns some kind of login info - typically
either a numeric userid, or a dict of user info.
If user is not set, the value of Bugzilla.user will be used. If *that*
is not set, ValueError will be raised. If login fails, BugzillaError
will be raised.
The login session can be restricted to current user IP address
with restrict_login argument. (Bugzilla 4.4+)
This method will be called implicitly at the end of connect() if user
and password are both set. So under most circumstances you won't need
to call this yourself.
"""
if self.api_key:
raise ValueError("cannot login when using an API key")
if user:
self.user = user
if password:
self.password = password
if not self.user:
raise ValueError("missing username")
if not self.password:
raise ValueError("missing password")
if restrict_login:
log.info("logging in with restrict_login=True")
try:
ret = self._login(self.user, self.password, restrict_login)
self.password = ''
log.info("login successful for user=%s", self.user)
return ret
except Fault as e:
raise BugzillaError("Login failed: %s" % str(e.faultString)) | Attempt to log in using the given username and password. Subsequent
method calls will use this username and password. Returns False if
login fails, otherwise returns some kind of login info - typically
either a numeric userid, or a dict of user info.
If user is not set, the value of Bugzilla.user will be used. If *that*
is not set, ValueError will be raised. If login fails, BugzillaError
will be raised.
The login session can be restricted to current user IP address
with restrict_login argument. (Bugzilla 4.4+)
This method will be called implicitly at the end of connect() if user
and password are both set. So under most circumstances you won't need
to call this yourself. | Below is the the instruction that describes the task:
### Input:
Attempt to log in using the given username and password. Subsequent
method calls will use this username and password. Returns False if
login fails, otherwise returns some kind of login info - typically
either a numeric userid, or a dict of user info.
If user is not set, the value of Bugzilla.user will be used. If *that*
is not set, ValueError will be raised. If login fails, BugzillaError
will be raised.
The login session can be restricted to current user IP address
with restrict_login argument. (Bugzilla 4.4+)
This method will be called implicitly at the end of connect() if user
and password are both set. So under most circumstances you won't need
to call this yourself.
### Response:
def login(self, user=None, password=None, restrict_login=None):
"""
Attempt to log in using the given username and password. Subsequent
method calls will use this username and password. Returns False if
login fails, otherwise returns some kind of login info - typically
either a numeric userid, or a dict of user info.
If user is not set, the value of Bugzilla.user will be used. If *that*
is not set, ValueError will be raised. If login fails, BugzillaError
will be raised.
The login session can be restricted to current user IP address
with restrict_login argument. (Bugzilla 4.4+)
This method will be called implicitly at the end of connect() if user
and password are both set. So under most circumstances you won't need
to call this yourself.
"""
if self.api_key:
raise ValueError("cannot login when using an API key")
if user:
self.user = user
if password:
self.password = password
if not self.user:
raise ValueError("missing username")
if not self.password:
raise ValueError("missing password")
if restrict_login:
log.info("logging in with restrict_login=True")
try:
ret = self._login(self.user, self.password, restrict_login)
self.password = ''
log.info("login successful for user=%s", self.user)
return ret
except Fault as e:
raise BugzillaError("Login failed: %s" % str(e.faultString)) |
def make_request(self, resource, params=None):
"""
Performs the API request. Most methods are a wrapper around this one.
"""
if params is None:
params = {}
url = self.request_url(resource)
params['format'] = 'json'
r = self.session.get(url=url, params=params)
r.raise_for_status()
return r | Performs the API request. Most methods are a wrapper around this one. | Below is the the instruction that describes the task:
### Input:
Performs the API request. Most methods are a wrapper around this one.
### Response:
def make_request(self, resource, params=None):
"""
Performs the API request. Most methods are a wrapper around this one.
"""
if params is None:
params = {}
url = self.request_url(resource)
params['format'] = 'json'
r = self.session.get(url=url, params=params)
r.raise_for_status()
return r |
def boottime():
"""Returns boot time if remotely possible, or None if not."""
global __boottime
if __boottime is None:
up = uptime()
if up is None:
return None
if __boottime is None:
_boottime_linux()
if datetime is None:
raise RuntimeError('datetime module required.')
return datetime.fromtimestamp(__boottime or time.time() - up) | Returns boot time if remotely possible, or None if not. | Below is the the instruction that describes the task:
### Input:
Returns boot time if remotely possible, or None if not.
### Response:
def boottime():
"""Returns boot time if remotely possible, or None if not."""
global __boottime
if __boottime is None:
up = uptime()
if up is None:
return None
if __boottime is None:
_boottime_linux()
if datetime is None:
raise RuntimeError('datetime module required.')
return datetime.fromtimestamp(__boottime or time.time() - up) |
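The fallback in the return statement above is simply 'boot time equals now minus uptime'; a tiny illustration of that arithmetic with an assumed uptime value:

import time
from datetime import datetime

uptime_seconds = 12345.0                       # assumed uptime, for illustration only
boot = datetime.fromtimestamp(time.time() - uptime_seconds)
print(boot)                                    # roughly 3 hours 25 minutes in the past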
def set_params(self, deep=False, force=False, **parameters):
"""
sets an object's parameters
Parameters
----------
deep : boolean, default: False
when True, also sets non-user-facing parameters
force : boolean, default: False
when True, also sets parameters that the object does not already
have
**parameters : parameters to set
Returns
------
self
"""
param_names = self.get_params(deep=deep).keys()
for parameter, value in parameters.items():
if (parameter in param_names
or force
or (hasattr(self, parameter) and parameter == parameter.strip('_'))):
setattr(self, parameter, value)
return self | sets an object's parameters
Parameters
----------
deep : boolean, default: False
when True, also sets non-user-facing parameters
force : boolean, default: False
when True, also sets parameters that the object does not already
have
**parameters : parameters to set
Returns
------
self | Below is the the instruction that describes the task:
### Input:
sets an object's parameters
Parameters
----------
deep : boolean, default: False
when True, also sets non-user-facing parameters
force : boolean, default: False
when True, also sets parameters that the object does not already
have
**parameters : parameters to set
Returns
------
self
### Response:
def set_params(self, deep=False, force=False, **parameters):
"""
sets an object's parameters
Parameters
----------
deep : boolean, default: False
when True, also sets non-user-facing parameters
force : boolean, default: False
when True, also sets parameters that the object does not already
have
**parameters : parameters to set
Returns
------
self
"""
param_names = self.get_params(deep=deep).keys()
for parameter, value in parameters.items():
if (parameter in param_names
or force
or (hasattr(self, parameter) and parameter == parameter.strip('_'))):
setattr(self, parameter, value)
return self |
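A usage sketch of the rules above; Term is a hypothetical class that reuses the function shown here (assumed to be available at module scope) and reports a single known parameter:

class Term(object):
    set_params = set_params                    # reuse the function defined above
    def get_params(self, deep=False):
        return {'penalty': 'l2'}

t = Term()
t.set_params(penalty='l1')                     # known parameter -> updated
t.set_params(verbose=True)                     # unknown and force=False -> ignored
t.set_params(verbose=True, force=True)         # force=True -> attribute set anyway
print(t.penalty, t.verbose)                    # l1 True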
def read_line(self, sep=six.b('\n')):
"""Read the data stream until a given separator is found (default \n)
:param sep: Separator to read until. Must be of the bytes type (str in python 2,
bytes in python 3)
:return: The str of the data read until sep
"""
start = 0
while not self._stream.closed:
loc = self._buffer.find(sep, start)
if loc >= 0:
return self._pop(loc + len(sep))
else:
start = len(self._buffer)
self._buffer += self._stream.read(self._chunk_size)
return six.b('') | Read the data stream until a given separator is found (default \n)
:param sep: Separator to read until. Must be of the bytes type (str in python 2,
bytes in python 3)
:return: The str of the data read until sep | Below is the the instruction that describes the task:
### Input:
Read the data stream until a given separator is found (default \n)
:param sep: Separator to read until. Must be of the bytes type (str in python 2,
bytes in python 3)
:return: The str of the data read until sep
### Response:
def read_line(self, sep=six.b('\n')):
"""Read the data stream until a given separator is found (default \n)
:param sep: Separator to read until. Must be of the bytes type (str in python 2,
bytes in python 3)
:return: The str of the data read until sep
"""
start = 0
while not self._stream.closed:
loc = self._buffer.find(sep, start)
if loc >= 0:
return self._pop(loc + len(sep))
else:
start = len(self._buffer)
self._buffer += self._stream.read(self._chunk_size)
return six.b('') |
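A small self-contained exercise of the buffering logic above; LineReader is a hypothetical holder that supplies the _stream, _buffer, _chunk_size and _pop members the method relies on, and it assumes read_line itself is available at module scope:

import io
import six

class LineReader(object):
    def __init__(self, stream, chunk_size=8):
        self._stream = stream                  # binary stream to drain
        self._buffer = six.b('')               # bytes read but not yet returned
        self._chunk_size = chunk_size

    def _pop(self, n):
        data, self._buffer = self._buffer[:n], self._buffer[n:]
        return data

    read_line = read_line                      # reuse the method defined above

reader = LineReader(io.BytesIO(six.b('alpha\nbeta\n')))
print(reader.read_line())                      # b'alpha\n'
print(reader.read_line())                      # b'beta\n'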
def by_median_household_income(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.median_household_income.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by median household income.
"""
return self.query(
median_household_income_lower=lower,
median_household_income_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | Search zipcode information by median household income. | Below is the the instruction that describes the task:
### Input:
Search zipcode information by median household income.
### Response:
def by_median_household_income(self,
lower=-1,
upper=2 ** 31,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.median_household_income.name,
ascending=False,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by median household income.
"""
return self.query(
median_household_income_lower=lower,
median_household_income_upper=upper,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) |
def on_left_click(self, event, grid, choices):
"""
creates popup menu when user clicks on the column
if that column is in the list of choices that get a drop-down menu.
allows user to edit the column, but only from available values
"""
row, col = event.GetRow(), event.GetCol()
if col == 0 and self.grid.name != 'ages':
default_val = self.grid.GetCellValue(row, col)
msg = "Choose a new name for {}.\nThe new value will propagate throughout the contribution.".format(default_val)
dia = wx.TextEntryDialog(self.grid, msg,
"Rename {}".format(self.grid.name, default_val),
default_val)
res = dia.ShowModal()
if res == wx.ID_OK:
new_val = dia.GetValue()
# update the contribution with new name
self.contribution.rename_item(self.grid.name,
default_val, new_val)
# don't propagate changes if we are just assigning a new name
# and not really renaming
# (i.e., if a blank row was added then named)
if default_val == '':
self.grid.SetCellValue(row, 0, new_val)
return
# update the current grid with new name
for row in range(self.grid.GetNumberRows()):
cell_value = self.grid.GetCellValue(row, 0)
if cell_value == default_val:
self.grid.SetCellValue(row, 0, new_val)
else:
continue
return
color = self.grid.GetCellBackgroundColour(event.GetRow(), event.GetCol())
# allow user to cherry-pick cells for editing.
# gets selection of meta key for mac, ctrl key for pc
if event.ControlDown() or event.MetaDown():
row, col = event.GetRow(), event.GetCol()
if (row, col) not in self.dispersed_selection:
self.dispersed_selection.append((row, col))
self.grid.SetCellBackgroundColour(row, col, 'light blue')
else:
self.dispersed_selection.remove((row, col))
self.grid.SetCellBackgroundColour(row, col, color)# 'white'
self.grid.ForceRefresh()
return
if event.ShiftDown(): # allow user to highlight multiple consecutive cells in a column
previous_col = self.grid.GetGridCursorCol()
previous_row = self.grid.GetGridCursorRow()
col = event.GetCol()
row = event.GetRow()
if col != previous_col:
return
else:
if row > previous_row:
row_range = list(range(previous_row, row+1))
else:
row_range = list(range(row, previous_row+1))
for r in row_range:
self.grid.SetCellBackgroundColour(r, col, 'light blue')
self.selection.append((r, col))
self.grid.ForceRefresh()
return
selection = False
if self.dispersed_selection:
is_dispersed = True
selection = self.dispersed_selection
if self.selection:
is_dispersed = False
selection = self.selection
try:
col = event.GetCol()
row = event.GetRow()
except AttributeError:
row, col = selection[0][0], selection[0][1]
self.grid.SetGridCursor(row, col)
if col in list(choices.keys()): # column should have a pop-up menu
menu = wx.Menu()
two_tiered = choices[col][1]
choices = choices[col][0]
if not two_tiered: # menu is one tiered
if 'CLEAR cell of all values' not in choices:
choices.insert(0, 'CLEAR cell of all values')
for choice in choices:
if not choice:
choice = " " # prevents error if choice is an empty string
menuitem = menu.Append(wx.ID_ANY, str(choice))
self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem)
self.show_menu(event, menu)
else: # menu is two_tiered
clear = menu.Append(-1, 'CLEAR cell of all values')
self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), clear)
for choice in sorted(choices.items()):
submenu = wx.Menu()
for item in choice[1]:
menuitem = submenu.Append(-1, str(item))
self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem)
menu.Append(-1, choice[0], submenu)
self.show_menu(event, menu)
if selection:
# re-whiten the cells that were previously highlighted
for row, col in selection:
self.grid.SetCellBackgroundColour(row, col, self.col_color)
self.dispersed_selection = []
self.selection = []
self.grid.ForceRefresh() | creates popup menu when user clicks on the column
if that column is in the list of choices that get a drop-down menu.
allows user to edit the column, but only from available values | Below is the the instruction that describes the task:
### Input:
creates popup menu when user clicks on the column
if that column is in the list of choices that get a drop-down menu.
allows user to edit the column, but only from available values
### Response:
def on_left_click(self, event, grid, choices):
"""
creates popup menu when user clicks on the column
if that column is in the list of choices that get a drop-down menu.
allows user to edit the column, but only from available values
"""
row, col = event.GetRow(), event.GetCol()
if col == 0 and self.grid.name != 'ages':
default_val = self.grid.GetCellValue(row, col)
msg = "Choose a new name for {}.\nThe new value will propagate throughout the contribution.".format(default_val)
dia = wx.TextEntryDialog(self.grid, msg,
"Rename {}".format(self.grid.name, default_val),
default_val)
res = dia.ShowModal()
if res == wx.ID_OK:
new_val = dia.GetValue()
# update the contribution with new name
self.contribution.rename_item(self.grid.name,
default_val, new_val)
# don't propagate changes if we are just assigning a new name
# and not really renaming
# (i.e., if a blank row was added then named)
if default_val == '':
self.grid.SetCellValue(row, 0, new_val)
return
# update the current grid with new name
for row in range(self.grid.GetNumberRows()):
cell_value = self.grid.GetCellValue(row, 0)
if cell_value == default_val:
self.grid.SetCellValue(row, 0, new_val)
else:
continue
return
color = self.grid.GetCellBackgroundColour(event.GetRow(), event.GetCol())
# allow user to cherry-pick cells for editing.
# gets selection of meta key for mac, ctrl key for pc
if event.ControlDown() or event.MetaDown():
row, col = event.GetRow(), event.GetCol()
if (row, col) not in self.dispersed_selection:
self.dispersed_selection.append((row, col))
self.grid.SetCellBackgroundColour(row, col, 'light blue')
else:
self.dispersed_selection.remove((row, col))
self.grid.SetCellBackgroundColour(row, col, color)# 'white'
self.grid.ForceRefresh()
return
if event.ShiftDown(): # allow user to highlight multiple consecutive cells in a column
previous_col = self.grid.GetGridCursorCol()
previous_row = self.grid.GetGridCursorRow()
col = event.GetCol()
row = event.GetRow()
if col != previous_col:
return
else:
if row > previous_row:
row_range = list(range(previous_row, row+1))
else:
row_range = list(range(row, previous_row+1))
for r in row_range:
self.grid.SetCellBackgroundColour(r, col, 'light blue')
self.selection.append((r, col))
self.grid.ForceRefresh()
return
selection = False
if self.dispersed_selection:
is_dispersed = True
selection = self.dispersed_selection
if self.selection:
is_dispersed = False
selection = self.selection
try:
col = event.GetCol()
row = event.GetRow()
except AttributeError:
row, col = selection[0][0], selection[0][1]
self.grid.SetGridCursor(row, col)
if col in list(choices.keys()): # column should have a pop-up menu
menu = wx.Menu()
two_tiered = choices[col][1]
choices = choices[col][0]
if not two_tiered: # menu is one tiered
if 'CLEAR cell of all values' not in choices:
choices.insert(0, 'CLEAR cell of all values')
for choice in choices:
if not choice:
choice = " " # prevents error if choice is an empty string
menuitem = menu.Append(wx.ID_ANY, str(choice))
self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem)
self.show_menu(event, menu)
else: # menu is two_tiered
clear = menu.Append(-1, 'CLEAR cell of all values')
self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), clear)
for choice in sorted(choices.items()):
submenu = wx.Menu()
for item in choice[1]:
menuitem = submenu.Append(-1, str(item))
self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem)
menu.Append(-1, choice[0], submenu)
self.show_menu(event, menu)
if selection:
# re-whiten the cells that were previously highlighted
for row, col in selection:
self.grid.SetCellBackgroundColour(row, col, self.col_color)
self.dispersed_selection = []
self.selection = []
self.grid.ForceRefresh() |
def get_engine(name):
"""
get an engine from string (engine class without Engine)
"""
name = name.capitalize() + 'Engine'
if name in globals():
return globals()[name]
raise KeyError("engine '%s' does not exist" % name) | get an engine from string (engine class without Engine) | Below is the the instruction that describes the task:
### Input:
get an engine from string (engine class without Engine)
### Response:
def get_engine(name):
"""
get an engine from string (engine class without Engine)
"""
name = name.capitalize() + 'Engine'
if name in globals():
return globals()[name]
raise KeyError("engine '%s' does not exist" % name) |
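A usage sketch; SqliteEngine is a hypothetical class defined only so that globals() of the module holding get_engine contains a name matching the 'capitalize plus Engine suffix' convention:

class SqliteEngine(object):
    """Hypothetical engine class living in the same module as get_engine."""

print(get_engine('sqlite') is SqliteEngine)    # True: 'sqlite' -> 'SqliteEngine'
# get_engine('nosuch') raises KeyError("engine 'NosuchEngine' does not exist")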
def register_extension_method(ext, base, *args, **kwargs):
"""Register the given extension method as a public attribute of the given base.
README: The expected protocol here is that the given extension method is an unbound function.
It will be bound to the specified base as a method, and then set as a public attribute of that
base.
"""
bound_method = create_bound_method(ext.plugin, base)
setattr(base, ext.name.lstrip('_'), bound_method) | Register the given extension method as a public attribute of the given base.
README: The expected protocol here is that the given extension method is an unbound function.
It will be bound to the specified base as a method, and then set as a public attribute of that
base. | Below is the the instruction that describes the task:
### Input:
Register the given extension method as a public attribute of the given base.
README: The expected protocol here is that the given extension method is an unbound function.
It will be bound to the specified base as a method, and then set as a public attribute of that
base.
### Response:
def register_extension_method(ext, base, *args, **kwargs):
"""Register the given extension method as a public attribute of the given base.
README: The expected protocol here is that the given extension method is an unbound function.
It will be bound to the specified base as a method, and then set as a public attribute of that
base.
"""
bound_method = create_bound_method(ext.plugin, base)
setattr(base, ext.name.lstrip('_'), bound_method) |
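create_bound_method is not shown in this entry; in practice it is usually just types.MethodType (six ships a helper of the same name), so a self-contained equivalent of the bind-and-rename step could look like this, with Client and _ping as made-up stand-ins for the plugin machinery:

import types

class Client(object):
    pass

def _ping(self):
    return "pong from %r" % self

client = Client()
bound = types.MethodType(_ping, client)        # bind the plain function to the instance
setattr(client, "_ping".lstrip('_'), bound)    # expose it without the leading underscore
print(client.ping())                           # pong from <Client object at 0x...>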
def _viewdata_to_view(self, p_data):
"""
Converts a dictionary describing a view to an actual UIView instance.
"""
sorter = Sorter(p_data['sortexpr'], p_data['groupexpr'])
filters = []
if not p_data['show_all']:
filters.append(DependencyFilter(self.todolist))
filters.append(RelevanceFilter())
filters.append(HiddenTagFilter())
filters += get_filter_list(p_data['filterexpr'].split())
return UIView(sorter, filters, self.todolist, p_data) | Converts a dictionary describing a view to an actual UIView instance. | Below is the the instruction that describes the task:
### Input:
Converts a dictionary describing a view to an actual UIView instance.
### Response:
def _viewdata_to_view(self, p_data):
"""
Converts a dictionary describing a view to an actual UIView instance.
"""
sorter = Sorter(p_data['sortexpr'], p_data['groupexpr'])
filters = []
if not p_data['show_all']:
filters.append(DependencyFilter(self.todolist))
filters.append(RelevanceFilter())
filters.append(HiddenTagFilter())
filters += get_filter_list(p_data['filterexpr'].split())
return UIView(sorter, filters, self.todolist, p_data) |
def namespace(self, namespace):
"""Setter method; for a description see the getter method."""
# pylint: disable=attribute-defined-outside-init
self._namespace = _ensure_unicode(namespace)
if self._namespace is not None:
# In Python 3, a byte string cannot be stripped by a unicode char
# Therefore, the stripping needs to be done after the unicode
# conversion.
self._namespace = self._namespace.strip('/') | Setter method; for a description see the getter method. | Below is the the instruction that describes the task:
### Input:
Setter method; for a description see the getter method.
### Response:
def namespace(self, namespace):
"""Setter method; for a description see the getter method."""
# pylint: disable=attribute-defined-outside-init
self._namespace = _ensure_unicode(namespace)
if self._namespace is not None:
# In Python 3, a byte string cannot be stripped by a unicode char
# Therefore, the stripping needs to be done after the unicode
# conversion.
self._namespace = self._namespace.strip('/') |
def _extract_model_params(self, defaults, **kwargs):
"""this method allows django managers use `objects.get_or_create` and
`objects.update_or_create` on a hashable object.
"""
obj = kwargs.pop(self.object_property_name, None)
if obj is not None:
kwargs['object_hash'] = self.model._compute_hash(obj)
lookup, params = super()._extract_model_params(defaults, **kwargs)
if obj is not None:
params[self.object_property_name] = obj
del params['object_hash']
return lookup, params | this method allows django managers use `objects.get_or_create` and
`objects.update_or_create` on a hashable object. | Below is the the instruction that describes the task:
### Input:
this method allows django managers use `objects.get_or_create` and
`objects.update_or_create` on a hashable object.
### Response:
def _extract_model_params(self, defaults, **kwargs):
"""this method allows django managers use `objects.get_or_create` and
`objects.update_or_create` on a hashable object.
"""
obj = kwargs.pop(self.object_property_name, None)
if obj is not None:
kwargs['object_hash'] = self.model._compute_hash(obj)
lookup, params = super()._extract_model_params(defaults, **kwargs)
if obj is not None:
params[self.object_property_name] = obj
del params['object_hash']
return lookup, params |
def add_line(self, line='', *, empty=False):
"""Adds a line to the current page.
If the line exceeds the :attr:`max_size` then an exception
is raised.
Parameters
-----------
line: :class:`str`
The line to add.
empty: :class:`bool`
Indicates if another empty line should be added.
Raises
------
RuntimeError
The line was too big for the current :attr:`max_size`.
"""
max_page_size = self.max_size - self._prefix_len - 2
if len(line) > max_page_size:
raise RuntimeError('Line exceeds maximum page size %s' % (max_page_size))
if self._count + len(line) + 1 > self.max_size:
self.close_page()
self._count += len(line) + 1
self._current_page.append(line)
if empty:
self._current_page.append('')
self._count += 1 | Adds a line to the current page.
If the line exceeds the :attr:`max_size` then an exception
is raised.
Parameters
-----------
line: :class:`str`
The line to add.
empty: :class:`bool`
Indicates if another empty line should be added.
Raises
------
RuntimeError
The line was too big for the current :attr:`max_size`. | Below is the the instruction that describes the task:
### Input:
Adds a line to the current page.
If the line exceeds the :attr:`max_size` then an exception
is raised.
Parameters
-----------
line: :class:`str`
The line to add.
empty: :class:`bool`
Indicates if another empty line should be added.
Raises
------
RuntimeError
The line was too big for the current :attr:`max_size`.
### Response:
def add_line(self, line='', *, empty=False):
"""Adds a line to the current page.
If the line exceeds the :attr:`max_size` then an exception
is raised.
Parameters
-----------
line: :class:`str`
The line to add.
empty: :class:`bool`
Indicates if another empty line should be added.
Raises
------
RuntimeError
The line was too big for the current :attr:`max_size`.
"""
max_page_size = self.max_size - self._prefix_len - 2
if len(line) > max_page_size:
raise RuntimeError('Line exceeds maximum page size %s' % (max_page_size))
if self._count + len(line) + 1 > self.max_size:
self.close_page()
self._count += len(line) + 1
self._current_page.append(line)
if empty:
self._current_page.append('')
self._count += 1 |
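This reads like the page-buffer helper from discord.py's command extension; assuming that is the class it belongs to, a hedged usage sketch:

from discord.ext import commands

paginator = commands.Paginator(prefix='```', suffix='```', max_size=500)
for i in range(200):
    paginator.add_line("row %d" % i)   # close_page() is triggered automatically when a page fills up
for page in paginator.pages:
    assert len(page) <= 500            # every rendered page respects max_size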
def create_js_pay_params(self, **package):
"""
签名 js 需要的参数
详情请参考 支付开发文档
::
wxclient.create_js_pay_params(
body=标题, out_trade_no=本地订单号, total_fee=价格单位分,
notify_url=通知url,
spbill_create_ip=建议为支付人ip,
)
:param package: 需要签名的的参数
:return: 支付需要的对象
"""
pay_param, sign, sign_type = self._pay_sign_dict(
package=self.create_js_pay_package(**package)
)
pay_param['paySign'] = sign
pay_param['signType'] = sign_type
# 腾讯这个还得转成大写 JS 才认
for key in ['appId', 'timeStamp', 'nonceStr']:
pay_param[key] = str(pay_param.pop(key.lower()))
return pay_param | 签名 js 需要的参数
详情请参考 支付开发文档
::
wxclient.create_js_pay_params(
body=标题, out_trade_no=本地订单号, total_fee=价格单位分,
notify_url=通知url,
spbill_create_ip=建议为支付人ip,
)
:param package: 需要签名的的参数
:return: 支付需要的对象 | Below is the the instruction that describes the task:
### Input:
签名 js 需要的参数
详情请参考 支付开发文档
::
wxclient.create_js_pay_params(
body=标题, out_trade_no=本地订单号, total_fee=价格单位分,
notify_url=通知url,
spbill_create_ip=建议为支付人ip,
)
:param package: 需要签名的的参数
:return: 支付需要的对象
### Response:
def create_js_pay_params(self, **package):
"""
签名 js 需要的参数
详情请参考 支付开发文档
::
wxclient.create_js_pay_params(
body=标题, out_trade_no=本地订单号, total_fee=价格单位分,
notify_url=通知url,
spbill_create_ip=建议为支付人ip,
)
:param package: 需要签名的的参数
:return: 支付需要的对象
"""
pay_param, sign, sign_type = self._pay_sign_dict(
package=self.create_js_pay_package(**package)
)
pay_param['paySign'] = sign
pay_param['signType'] = sign_type
# 腾讯这个还得转成大写 JS 才认
for key in ['appId', 'timeStamp', 'nonceStr']:
pay_param[key] = str(pay_param.pop(key.lower()))
return pay_param |
def _on_bytes_read(self, num_bytes_read):
"""
Record our progress so we can validate that we receive all the data
:param num_bytes_read: int: number of bytes we received as part of one chunk
"""
self.actual_bytes_read += num_bytes_read
if self.actual_bytes_read > self.bytes_to_read:
raise TooLargeChunkDownloadError(self.actual_bytes_read, self.bytes_to_read, self.local_path)
self.download_context.send_processed_message(num_bytes_read) | Record our progress so we can validate that we receive all the data
:param num_bytes_read: int: number of bytes we received as part of one chunk | Below is the the instruction that describes the task:
### Input:
Record our progress so we can validate that we receive all the data
:param num_bytes_read: int: number of bytes we received as part of one chunk
### Response:
def _on_bytes_read(self, num_bytes_read):
"""
Record our progress so we can validate that we receive all the data
:param num_bytes_read: int: number of bytes we received as part of one chunk
"""
self.actual_bytes_read += num_bytes_read
if self.actual_bytes_read > self.bytes_to_read:
raise TooLargeChunkDownloadError(self.actual_bytes_read, self.bytes_to_read, self.local_path)
self.download_context.send_processed_message(num_bytes_read) |
def _get_method_full_name(func):
"""
Return fully qualified function name.
This method will attempt to find "full name" of the given function object. This full name is either of
the form "<class name>.<method name>" if the function is a class method, or "<module name>.<func name>"
if it's a regular function. Thus, this is an attempt to back-port func.__qualname__ to Python 2.
:param func: a function object.
:returns: string with the function's full name as explained above.
"""
# Python 3.3 already has this information available...
if hasattr(func, "__qualname__"): return func.__qualname__
module = inspect.getmodule(func)
if module is None:
return "?.%s" % getattr(func, "__name__", "?")
for cls_name in dir(module):
cls = getattr(module, cls_name)
if not inspect.isclass(cls): continue
for method_name in dir(cls):
cls_method = getattr(cls, method_name)
if cls_method == func:
return "%s.%s" % (cls_name, method_name)
if hasattr(func, "__name__"):
return "%s.%s" % (module.__name__, func.__name__)
return "<unknown>" | Return fully qualified function name.
This method will attempt to find "full name" of the given function object. This full name is either of
the form "<class name>.<method name>" if the function is a class method, or "<module name>.<func name>"
if it's a regular function. Thus, this is an attempt to back-port func.__qualname__ to Python 2.
:param func: a function object.
:returns: string with the function's full name as explained above. | Below is the the instruction that describes the task:
### Input:
Return fully qualified function name.
This method will attempt to find "full name" of the given function object. This full name is either of
the form "<class name>.<method name>" if the function is a class method, or "<module name>.<func name>"
if it's a regular function. Thus, this is an attempt to back-port func.__qualname__ to Python 2.
:param func: a function object.
:returns: string with the function's full name as explained above.
### Response:
def _get_method_full_name(func):
"""
Return fully qualified function name.
This method will attempt to find "full name" of the given function object. This full name is either of
the form "<class name>.<method name>" if the function is a class method, or "<module name>.<func name>"
if it's a regular function. Thus, this is an attempt to back-port func.__qualname__ to Python 2.
:param func: a function object.
:returns: string with the function's full name as explained above.
"""
# Python 3.3 already has this information available...
if hasattr(func, "__qualname__"): return func.__qualname__
module = inspect.getmodule(func)
if module is None:
return "?.%s" % getattr(func, "__name__", "?")
for cls_name in dir(module):
cls = getattr(module, cls_name)
if not inspect.isclass(cls): continue
for method_name in dir(cls):
cls_method = getattr(cls, method_name)
if cls_method == func:
return "%s.%s" % (cls_name, method_name)
if hasattr(func, "__name__"):
return "%s.%s" % (module.__name__, func.__name__)
return "<unknown>" |
def check_argument_list(kernel_name, kernel_string, args):
""" raise an exception if a kernel arguments do not match host arguments """
kernel_arguments = list()
collected_errors = list()
for iterator in re.finditer(kernel_name + "[ \n\t]*" + "\(", kernel_string):
kernel_start = iterator.end()
kernel_end = kernel_string.find(")", kernel_start)
if kernel_start != 0:
kernel_arguments.append(kernel_string[kernel_start:kernel_end].split(","))
for arguments_set, arguments in enumerate(kernel_arguments):
collected_errors.append(list())
if len(arguments) != len(args):
collected_errors[arguments_set].append("Kernel and host argument lists do not match in size.")
continue
for (i, arg) in enumerate(args):
kernel_argument = arguments[i]
if not isinstance(arg, (numpy.ndarray, numpy.generic)):
raise TypeError("Argument at position " + str(i) + " of type: " + str(type(arg)) + " should be of type numpy.ndarray or numpy scalar")
correct = True
if isinstance(arg, numpy.ndarray) and not "*" in kernel_argument:
correct = False #array is passed to non-pointer kernel argument
if correct and check_argument_type(str(arg.dtype), kernel_argument, i):
continue
collected_errors[arguments_set].append("Argument at position " + str(i) + " of dtype: " + str(arg.dtype) +
" does not match " + kernel_argument + ".")
if not collected_errors[arguments_set]:
# We assume that if there is a possible list of arguments that matches with the provided one
# it is the right one
return
for errors in collected_errors:
warnings.warn(errors[0], UserWarning) | raise an exception if a kernel arguments do not match host arguments | Below is the the instruction that describes the task:
### Input:
raise an exception if a kernel arguments do not match host arguments
### Response:
def check_argument_list(kernel_name, kernel_string, args):
""" raise an exception if a kernel arguments do not match host arguments """
kernel_arguments = list()
collected_errors = list()
for iterator in re.finditer(kernel_name + "[ \n\t]*" + "\(", kernel_string):
kernel_start = iterator.end()
kernel_end = kernel_string.find(")", kernel_start)
if kernel_start != 0:
kernel_arguments.append(kernel_string[kernel_start:kernel_end].split(","))
for arguments_set, arguments in enumerate(kernel_arguments):
collected_errors.append(list())
if len(arguments) != len(args):
collected_errors[arguments_set].append("Kernel and host argument lists do not match in size.")
continue
for (i, arg) in enumerate(args):
kernel_argument = arguments[i]
if not isinstance(arg, (numpy.ndarray, numpy.generic)):
raise TypeError("Argument at position " + str(i) + " of type: " + str(type(arg)) + " should be of type numpy.ndarray or numpy scalar")
correct = True
if isinstance(arg, numpy.ndarray) and not "*" in kernel_argument:
correct = False #array is passed to non-pointer kernel argument
if correct and check_argument_type(str(arg.dtype), kernel_argument, i):
continue
collected_errors[arguments_set].append("Argument at position " + str(i) + " of dtype: " + str(arg.dtype) +
" does not match " + kernel_argument + ".")
if not collected_errors[arguments_set]:
# We assume that if there is a possible list of arguments that matches with the provided one
# it is the right one
return
for errors in collected_errors:
warnings.warn(errors[0], UserWarning) |
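A sketch of how such a check is typically driven; the CUDA kernel is illustrative, check_argument_list (and its check_argument_type companion) is assumed to be importable, and float32 is assumed to map to the kernel's float* parameters:

import numpy
# assuming: from kernel_tuner.util import check_argument_list

kernel_string = """
__global__ void vector_add(float *c, float *a, float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
"""

size = 10000
n = numpy.int32(size)
a = numpy.random.randn(size).astype(numpy.float32)
b = numpy.random.randn(size).astype(numpy.float32)
c = numpy.zeros_like(a)

# Types line up with the kernel signature, so no warning is emitted.
check_argument_list("vector_add", kernel_string, [c, a, b, n])

# A plain Python float is rejected outright.
try:
    check_argument_list("vector_add", kernel_string, [c, a, b, 1.0])
except TypeError as exc:
    print(exc)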
def _print(self, *data, **kw):
"""
_print(self, *data, sep=' ', end='\n', file=None)
Alternative 'print' function that prints back into the SSH channel.
"""
# Pop keyword-only arguments. (We cannot use the syntax from the
# signature. Otherwise, Python2 will give a syntax error message when
# installing.)
sep = kw.pop('sep', ' ')
end = kw.pop('end', '\n')
_ = kw.pop('file', None)
assert not kw, 'Too many keyword-only arguments'
data = sep.join(map(str, data))
self._chan.write(data + end) | _print(self, *data, sep=' ', end='\n', file=None)
Alternative 'print' function that prints back into the SSH channel. | Below is the the instruction that describes the task:
### Input:
_print(self, *data, sep=' ', end='\n', file=None)
Alternative 'print' function that prints back into the SSH channel.
### Response:
def _print(self, *data, **kw):
"""
_print(self, *data, sep=' ', end='\n', file=None)
Alternative 'print' function that prints back into the SSH channel.
"""
# Pop keyword-only arguments. (We cannot use the syntax from the
# signature. Otherwise, Python2 will give a syntax error message when
# installing.)
sep = kw.pop('sep', ' ')
end = kw.pop('end', '\n')
_ = kw.pop('file', None)
assert not kw, 'Too many keyword-only arguments'
data = sep.join(map(str, data))
self._chan.write(data + end) |
def get_work_kind(self):
"""
We'll have a kind_slug like 'movies'.
We need to translate that into a work `kind` like 'movie'.
"""
slugs_to_kinds = {v:k for k,v in Work.KIND_SLUGS.items()}
return slugs_to_kinds.get(self.kind_slug, None) | We'll have a kind_slug like 'movies'.
We need to translate that into a work `kind` like 'movie'. | Below is the the instruction that describes the task:
### Input:
We'll have a kind_slug like 'movies'.
We need to translate that into a work `kind` like 'movie'.
### Response:
def get_work_kind(self):
"""
We'll have a kind_slug like 'movies'.
We need to translate that into a work `kind` like 'movie'.
"""
slugs_to_kinds = {v:k for k,v in Work.KIND_SLUGS.items()}
return slugs_to_kinds.get(self.kind_slug, None) |
def writeToken(self):
"""
Store details of the current connection in the named file.
This can be used by :meth:`readToken` to re-authenticate at a later time.
"""
# Write token file privately.
with os.fdopen(os.open(self.tokenFile, os.O_WRONLY | os.O_CREAT, 0o600), "w") as f:
# When opening files via os, truncation must be done manually.
f.truncate()
f.write(self.userId + "\n")
f.write(self.tokens["skype"] + "\n")
f.write(str(int(time.mktime(self.tokenExpiry["skype"].timetuple()))) + "\n")
f.write(self.tokens["reg"] + "\n")
f.write(str(int(time.mktime(self.tokenExpiry["reg"].timetuple()))) + "\n")
f.write(self.msgsHost + "\n") | Store details of the current connection in the named file.
This can be used by :meth:`readToken` to re-authenticate at a later time. | Below is the the instruction that describes the task:
### Input:
Store details of the current connection in the named file.
This can be used by :meth:`readToken` to re-authenticate at a later time.
### Response:
def writeToken(self):
"""
Store details of the current connection in the named file.
This can be used by :meth:`readToken` to re-authenticate at a later time.
"""
# Write token file privately.
with os.fdopen(os.open(self.tokenFile, os.O_WRONLY | os.O_CREAT, 0o600), "w") as f:
# When opening files via os, truncation must be done manually.
f.truncate()
f.write(self.userId + "\n")
f.write(self.tokens["skype"] + "\n")
f.write(str(int(time.mktime(self.tokenExpiry["skype"].timetuple()))) + "\n")
f.write(self.tokens["reg"] + "\n")
f.write(str(int(time.mktime(self.tokenExpiry["reg"].timetuple()))) + "\n")
f.write(self.msgsHost + "\n") |
def strip(self, col: str):
"""
Remove leading and trailing white spaces in a column's values
:param col: name of the column
:type col: str
:example: ``ds.strip("mycol")``
"""
def remove_ws(row):
val = str(row[col])
if " " in val.startswith(" "):
row[col] = val.strip()
return row
try:
self.df.apply(remove_ws)
except Exception as e:
self.err(e, "Can not remove white space in column")
return
self.ok("White space removed in column values") | Remove leading and trailing white spaces in a column's values
:param col: name of the column
:type col: str
:example: ``ds.strip("mycol")`` | Below is the the instruction that describes the task:
### Input:
Remove leading and trailing white spaces in a column's values
:param col: name of the column
:type col: str
:example: ``ds.strip("mycol")``
### Response:
def strip(self, col: str):
"""
Remove leading and trailing white spaces in a column's values
:param col: name of the column
:type col: str
:example: ``ds.strip("mycol")``
"""
def remove_ws(row):
val = str(row[col])
if " " in val.startswith(" "):
row[col] = val.strip()
return row
try:
self.df.apply(remove_ws)
except Exception as e:
self.err(e, "Can not remove white space in column")
return
self.ok("White space removed in column values") |
def create_hitor_calibration(output_filename, plot_pixel_calibrations=False):
'''Generating HitOr calibration file (_calibration.h5) from raw data file and plotting of calibration data.
Parameters
----------
output_filename : string
Input raw data file name.
plot_pixel_calibrations : bool, iterable
If True, genearating additional pixel calibration plots. If list of column and row tuples (from 1 to 80 / 336), print selected pixels.
Returns
-------
nothing
'''
logging.info('Analyze HitOR calibration data and plot results of %s', output_filename)
with AnalyzeRawData(raw_data_file=output_filename, create_pdf=True) as analyze_raw_data: # Interpret the raw data file
analyze_raw_data.create_occupancy_hist = False # too many scan parameters to do in ram histogramming
analyze_raw_data.create_hit_table = True
analyze_raw_data.create_tdc_hist = True
analyze_raw_data.align_at_tdc = True # align events at TDC words, first word of event has to be a tdc word
analyze_raw_data.interpret_word_table()
analyze_raw_data.interpreter.print_summary()
analyze_raw_data.plot_histograms()
n_injections = analyze_raw_data.n_injections # use later
meta_data = analyze_raw_data.out_file_h5.root.meta_data[:]
scan_parameters_dict = get_scan_parameter(meta_data)
inner_loop_parameter_values = scan_parameters_dict[next(reversed(scan_parameters_dict))] # inner loop parameter name is unknown
scan_parameter_names = scan_parameters_dict.keys()
# col_row_combinations = get_unique_scan_parameter_combinations(analyze_raw_data.out_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True)
meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameter_names)
scan_parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameter_names)
event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number'])
event_ranges_per_parameter = np.column_stack((scan_parameter_values, event_number_ranges))
if analyze_raw_data.out_file_h5.root.Hits.nrows == 0:
raise AnalysisError("Found no hits.")
hits = analyze_raw_data.out_file_h5.root.Hits[:]
event_numbers = hits['event_number'].copy() # create contigous array, otherwise np.searchsorted too slow, http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays
output_filename = os.path.splitext(output_filename)[0]
with tb.open_file(output_filename + "_calibration.h5", mode="w") as calibration_data_file:
logging.info('Create calibration')
calibration_data = np.full(shape=(80, 336, len(inner_loop_parameter_values), 4), fill_value=np.nan, dtype='f4') # result of the calibration is a histogram with col_index, row_index, plsrDAC value, mean discrete tot, rms discrete tot, mean tot from TDC, rms tot from TDC
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(event_ranges_per_parameter), term_width=80)
progress_bar.start()
for index, (actual_scan_parameter_values, event_start, event_stop) in enumerate(event_ranges_per_parameter):
if event_stop is None: # happens for the last chunk
event_stop = hits[-1]['event_number'] + 1
array_index = np.searchsorted(event_numbers, np.array([event_start, event_stop]))
actual_hits = hits[array_index[0]:array_index[1]]
for item_index, item in enumerate(scan_parameter_names):
if item == "column":
actual_col = actual_scan_parameter_values[item_index]
elif item == "row":
actual_row = actual_scan_parameter_values[item_index]
elif item == "PlsrDAC":
plser_dac = actual_scan_parameter_values[item_index]
else:
raise ValueError("Unknown scan parameter %s" % item)
# Only pixel of actual column/row should be in the actual data chunk but since FIFO is not cleared for each scan step due to speed reasons and there might be noisy pixels this is not always the case
n_wrong_pixel = np.count_nonzero(np.logical_or(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row))
if n_wrong_pixel != 0:
logging.warning('%d hit(s) from other pixels for scan parameters %s', n_wrong_pixel, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))
actual_hits = actual_hits[np.logical_and(actual_hits['column'] == actual_col, actual_hits['row'] == actual_row)] # Only take data from selected pixel
actual_tdc_hits = actual_hits[(actual_hits['event_status'] & 0b0000111110011100) == 0b0000000100000000] # only take hits from good events (one TDC word only, no error)
actual_tot_hits = actual_hits[(actual_hits['event_status'] & 0b0000100010011100) == 0b0000000000000000] # only take hits from good events for tot
tot, tdc = actual_tot_hits['tot'], actual_tdc_hits['TDC']
if tdc.shape[0] < n_injections:
logging.info('%d of %d expected TDC hits for scan parameters %s', tdc.shape[0], n_injections, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))
if tot.shape[0] < n_injections:
logging.info('%d of %d expected hits for scan parameters %s', tot.shape[0], n_injections, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))
inner_loop_scan_parameter_index = np.where(plser_dac == inner_loop_parameter_values)[0][0] # translate the scan parameter value to an index for the result histogram
# numpy mean and std return nan if array is empty
calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 0] = np.mean(tot)
calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 1] = np.mean(tdc)
calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 2] = np.std(tot)
calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 3] = np.std(tdc)
progress_bar.update(index)
progress_bar.finish()
calibration_data_out = calibration_data_file.create_carray(calibration_data_file.root, name='HitOrCalibration', title='Hit OR calibration data', atom=tb.Atom.from_dtype(calibration_data.dtype), shape=calibration_data.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
calibration_data_out[:] = calibration_data
calibration_data_out.attrs.dimensions = scan_parameter_names
calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values
calibration_data_out.flush()
# with PdfPages(output_filename + "_calibration.pdf") as output_pdf:
plot_scurves(calibration_data[:, :, :, 0], inner_loop_parameter_values, "ToT calibration", "ToT", 15, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf)
plot_scurves(calibration_data[:, :, :, 1], inner_loop_parameter_values, "TDC calibration", "TDC [ns]", None, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf)
tot_mean_all_pix = np.nanmean(calibration_data[:, :, :, 0], axis=(0, 1))
tot_error_all_pix = np.nanstd(calibration_data[:, :, :, 0], axis=(0, 1))
tdc_mean_all_pix = np.nanmean(calibration_data[:, :, :, 1], axis=(0, 1))
tdc_error_all_pix = np.nanstd(calibration_data[:, :, :, 1], axis=(0, 1))
plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_all_pix, tot_error=tot_error_all_pix, tdc_mean=tdc_mean_all_pix, tdc_error=tdc_error_all_pix, filename=analyze_raw_data.output_pdf, title="Mean charge calibration of %d pixel(s)" % np.count_nonzero(~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2)))
# plotting individual pixels
if plot_pixel_calibrations is True:
# selecting pixels with non-nan entries
col_row_non_nan = np.nonzero(~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2))
plot_pixel_calibrations = np.dstack(col_row_non_nan)[0]
elif plot_pixel_calibrations is False:
plot_pixel_calibrations = np.array([], dtype=np.int)
else: # assuming list of column / row tuples
plot_pixel_calibrations = np.array(plot_pixel_calibrations) - 1
# generate index array
pixel_indices = np.arange(plot_pixel_calibrations.shape[0])
plot_n_pixels = 10 # number of pixels at the beginning, center and end of the array
np.random.seed(0)
# select random pixels
if pixel_indices.size - 2 * plot_n_pixels >= 0:
random_pixel_indices = np.sort(np.random.choice(pixel_indices[plot_n_pixels:-plot_n_pixels], min(plot_n_pixels, pixel_indices.size - 2 * plot_n_pixels), replace=False))
else:
random_pixel_indices = np.array([], dtype=np.int)
selected_pixel_indices = np.unique(np.hstack([pixel_indices[:plot_n_pixels], random_pixel_indices, pixel_indices[-plot_n_pixels:]]))
# plotting individual pixels
for (column, row) in plot_pixel_calibrations[selected_pixel_indices]:
logging.info("Plotting charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1))
tot_mean_single_pix = calibration_data[column, row, :, 0]
tot_std_single_pix = calibration_data[column, row, :, 2]
tdc_mean_single_pix = calibration_data[column, row, :, 1]
tdc_std_single_pix = calibration_data[column, row, :, 3]
plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_single_pix, tot_error=tot_std_single_pix, tdc_mean=tdc_mean_single_pix, tdc_error=tdc_std_single_pix, filename=analyze_raw_data.output_pdf, title="Charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1)) | Generating HitOr calibration file (_calibration.h5) from raw data file and plotting of calibration data.
Parameters
----------
output_filename : string
Input raw data file name.
plot_pixel_calibrations : bool, iterable
If True, genearating additional pixel calibration plots. If list of column and row tuples (from 1 to 80 / 336), print selected pixels.
Returns
-------
nothing | Below is the the instruction that describes the task:
### Input:
Generating HitOr calibration file (_calibration.h5) from raw data file and plotting of calibration data.
Parameters
----------
output_filename : string
Input raw data file name.
plot_pixel_calibrations : bool, iterable
If True, genearating additional pixel calibration plots. If list of column and row tuples (from 1 to 80 / 336), print selected pixels.
Returns
-------
nothing
### Response:
def create_hitor_calibration(output_filename, plot_pixel_calibrations=False):
'''Generating HitOr calibration file (_calibration.h5) from raw data file and plotting of calibration data.
Parameters
----------
output_filename : string
Input raw data file name.
plot_pixel_calibrations : bool, iterable
If True, genearating additional pixel calibration plots. If list of column and row tuples (from 1 to 80 / 336), print selected pixels.
Returns
-------
nothing
'''
logging.info('Analyze HitOR calibration data and plot results of %s', output_filename)
with AnalyzeRawData(raw_data_file=output_filename, create_pdf=True) as analyze_raw_data: # Interpret the raw data file
analyze_raw_data.create_occupancy_hist = False # too many scan parameters to do in ram histogramming
analyze_raw_data.create_hit_table = True
analyze_raw_data.create_tdc_hist = True
analyze_raw_data.align_at_tdc = True # align events at TDC words, first word of event has to be a tdc word
analyze_raw_data.interpret_word_table()
analyze_raw_data.interpreter.print_summary()
analyze_raw_data.plot_histograms()
n_injections = analyze_raw_data.n_injections # use later
meta_data = analyze_raw_data.out_file_h5.root.meta_data[:]
scan_parameters_dict = get_scan_parameter(meta_data)
inner_loop_parameter_values = scan_parameters_dict[next(reversed(scan_parameters_dict))] # inner loop parameter name is unknown
scan_parameter_names = scan_parameters_dict.keys()
# col_row_combinations = get_unique_scan_parameter_combinations(analyze_raw_data.out_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True)
meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameter_names)
scan_parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameter_names)
event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number'])
event_ranges_per_parameter = np.column_stack((scan_parameter_values, event_number_ranges))
if analyze_raw_data.out_file_h5.root.Hits.nrows == 0:
raise AnalysisError("Found no hits.")
hits = analyze_raw_data.out_file_h5.root.Hits[:]
event_numbers = hits['event_number'].copy() # create contigous array, otherwise np.searchsorted too slow, http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays
output_filename = os.path.splitext(output_filename)[0]
with tb.open_file(output_filename + "_calibration.h5", mode="w") as calibration_data_file:
logging.info('Create calibration')
calibration_data = np.full(shape=(80, 336, len(inner_loop_parameter_values), 4), fill_value=np.nan, dtype='f4') # result of the calibration is a histogram with col_index, row_index, plsrDAC value, mean discrete tot, rms discrete tot, mean tot from TDC, rms tot from TDC
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(event_ranges_per_parameter), term_width=80)
progress_bar.start()
for index, (actual_scan_parameter_values, event_start, event_stop) in enumerate(event_ranges_per_parameter):
if event_stop is None: # happens for the last chunk
event_stop = hits[-1]['event_number'] + 1
array_index = np.searchsorted(event_numbers, np.array([event_start, event_stop]))
actual_hits = hits[array_index[0]:array_index[1]]
for item_index, item in enumerate(scan_parameter_names):
if item == "column":
actual_col = actual_scan_parameter_values[item_index]
elif item == "row":
actual_row = actual_scan_parameter_values[item_index]
elif item == "PlsrDAC":
plser_dac = actual_scan_parameter_values[item_index]
else:
raise ValueError("Unknown scan parameter %s" % item)
# Only pixel of actual column/row should be in the actual data chunk but since FIFO is not cleared for each scan step due to speed reasons and there might be noisy pixels this is not always the case
n_wrong_pixel = np.count_nonzero(np.logical_or(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row))
if n_wrong_pixel != 0:
logging.warning('%d hit(s) from other pixels for scan parameters %s', n_wrong_pixel, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))
actual_hits = actual_hits[np.logical_and(actual_hits['column'] == actual_col, actual_hits['row'] == actual_row)] # Only take data from selected pixel
actual_tdc_hits = actual_hits[(actual_hits['event_status'] & 0b0000111110011100) == 0b0000000100000000] # only take hits from good events (one TDC word only, no error)
actual_tot_hits = actual_hits[(actual_hits['event_status'] & 0b0000100010011100) == 0b0000000000000000] # only take hits from good events for tot
tot, tdc = actual_tot_hits['tot'], actual_tdc_hits['TDC']
if tdc.shape[0] < n_injections:
logging.info('%d of %d expected TDC hits for scan parameters %s', tdc.shape[0], n_injections, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))
if tot.shape[0] < n_injections:
logging.info('%d of %d expected hits for scan parameters %s', tot.shape[0], n_injections, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))
inner_loop_scan_parameter_index = np.where(plser_dac == inner_loop_parameter_values)[0][0] # translate the scan parameter value to an index for the result histogram
# numpy mean and std return nan if array is empty
calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 0] = np.mean(tot)
calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 1] = np.mean(tdc)
calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 2] = np.std(tot)
calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 3] = np.std(tdc)
progress_bar.update(index)
progress_bar.finish()
calibration_data_out = calibration_data_file.create_carray(calibration_data_file.root, name='HitOrCalibration', title='Hit OR calibration data', atom=tb.Atom.from_dtype(calibration_data.dtype), shape=calibration_data.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
calibration_data_out[:] = calibration_data
calibration_data_out.attrs.dimensions = scan_parameter_names
calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values
calibration_data_out.flush()
# with PdfPages(output_filename + "_calibration.pdf") as output_pdf:
plot_scurves(calibration_data[:, :, :, 0], inner_loop_parameter_values, "ToT calibration", "ToT", 15, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf)
plot_scurves(calibration_data[:, :, :, 1], inner_loop_parameter_values, "TDC calibration", "TDC [ns]", None, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf)
tot_mean_all_pix = np.nanmean(calibration_data[:, :, :, 0], axis=(0, 1))
tot_error_all_pix = np.nanstd(calibration_data[:, :, :, 0], axis=(0, 1))
tdc_mean_all_pix = np.nanmean(calibration_data[:, :, :, 1], axis=(0, 1))
tdc_error_all_pix = np.nanstd(calibration_data[:, :, :, 1], axis=(0, 1))
plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_all_pix, tot_error=tot_error_all_pix, tdc_mean=tdc_mean_all_pix, tdc_error=tdc_error_all_pix, filename=analyze_raw_data.output_pdf, title="Mean charge calibration of %d pixel(s)" % np.count_nonzero(~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2)))
# plotting individual pixels
if plot_pixel_calibrations is True:
# selecting pixels with non-nan entries
col_row_non_nan = np.nonzero(~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2))
plot_pixel_calibrations = np.dstack(col_row_non_nan)[0]
elif plot_pixel_calibrations is False:
plot_pixel_calibrations = np.array([], dtype=np.int)
else: # assuming list of column / row tuples
plot_pixel_calibrations = np.array(plot_pixel_calibrations) - 1
# generate index array
pixel_indices = np.arange(plot_pixel_calibrations.shape[0])
plot_n_pixels = 10 # number of pixels at the beginning, center and end of the array
np.random.seed(0)
# select random pixels
if pixel_indices.size - 2 * plot_n_pixels >= 0:
random_pixel_indices = np.sort(np.random.choice(pixel_indices[plot_n_pixels:-plot_n_pixels], min(plot_n_pixels, pixel_indices.size - 2 * plot_n_pixels), replace=False))
else:
random_pixel_indices = np.array([], dtype=np.int)
selected_pixel_indices = np.unique(np.hstack([pixel_indices[:plot_n_pixels], random_pixel_indices, pixel_indices[-plot_n_pixels:]]))
# plotting individual pixels
for (column, row) in plot_pixel_calibrations[selected_pixel_indices]:
logging.info("Plotting charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1))
tot_mean_single_pix = calibration_data[column, row, :, 0]
tot_std_single_pix = calibration_data[column, row, :, 2]
tdc_mean_single_pix = calibration_data[column, row, :, 1]
tdc_std_single_pix = calibration_data[column, row, :, 3]
plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_single_pix, tot_error=tot_std_single_pix, tdc_mean=tdc_mean_single_pix, tdc_error=tdc_std_single_pix, filename=analyze_raw_data.output_pdf, title="Charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1)) |
def set_dimensional_calibrations(self, dimensional_calibrations: typing.List[CalibrationModule.Calibration]) -> None:
"""Set the dimensional calibrations.
:param dimensional_calibrations: A list of calibrations, must match the dimensions of the data.
.. versionadded:: 1.0
Scriptable: Yes
"""
self.__data_item.set_dimensional_calibrations(dimensional_calibrations) | Set the dimensional calibrations.
:param dimensional_calibrations: A list of calibrations, must match the dimensions of the data.
.. versionadded:: 1.0
Scriptable: Yes | Below is the the instruction that describes the task:
### Input:
Set the dimensional calibrations.
:param dimensional_calibrations: A list of calibrations, must match the dimensions of the data.
.. versionadded:: 1.0
Scriptable: Yes
### Response:
def set_dimensional_calibrations(self, dimensional_calibrations: typing.List[CalibrationModule.Calibration]) -> None:
"""Set the dimensional calibrations.
:param dimensional_calibrations: A list of calibrations, must match the dimensions of the data.
.. versionadded:: 1.0
Scriptable: Yes
"""
self.__data_item.set_dimensional_calibrations(dimensional_calibrations) |
def pp_event(seq):
"""Returns pretty representation of an Event or keypress"""
if isinstance(seq, Event):
return str(seq)
# Get the original sequence back if seq is a pretty name already
rev_curses = dict((v, k) for k, v in CURSES_NAMES.items())
rev_curtsies = dict((v, k) for k, v in CURTSIES_NAMES.items())
if seq in rev_curses:
seq = rev_curses[seq]
elif seq in rev_curtsies:
seq = rev_curtsies[seq]
pretty = curtsies_name(seq)
if pretty != seq:
return pretty
return repr(seq).lstrip('u')[1:-1] | Returns pretty representation of an Event or keypress | Below is the the instruction that describes the task:
### Input:
Returns pretty representation of an Event or keypress
### Response:
def pp_event(seq):
"""Returns pretty representation of an Event or keypress"""
if isinstance(seq, Event):
return str(seq)
# Get the original sequence back if seq is a pretty name already
rev_curses = dict((v, k) for k, v in CURSES_NAMES.items())
rev_curtsies = dict((v, k) for k, v in CURTSIES_NAMES.items())
if seq in rev_curses:
seq = rev_curses[seq]
elif seq in rev_curtsies:
seq = rev_curtsies[seq]
pretty = curtsies_name(seq)
if pretty != seq:
return pretty
return repr(seq).lstrip('u')[1:-1] |
def create_business_rules(self, hosts, services, hostgroups, servicegroups,
macromodulations, timeperiods, running=False):
# pylint: disable=too-many-locals
"""Create business rules if necessary (cmd contains bp_rule)
:param hosts: Hosts object to look for objects
:type hosts: alignak.objects.host.Hosts
:param services: Services object to look for objects
:type services: alignak.objects.service.Services
:param running: flag used in eval_cor_pattern function
:type running: bool
:return: None
"""
cmdcall = getattr(self, 'check_command', None)
# If we do not have a command, we bailout
if cmdcall is None:
return
# we get our base command, like
# bp_rule!(host,svc & host, svc) -> bp_rule
cmd = cmdcall.call
elts = cmd.split('!')
base_cmd = elts[0]
# If it's bp_rule, we got a rule :)
if base_cmd == 'bp_rule':
self.got_business_rule = True
rule = ''
if len(elts) >= 2:
rule = '!'.join(elts[1:])
# Only (re-)evaluate the business rule if it has never been
# evaluated before, or it contains a macro.
if re.match(r"\$[\w\d_-]+\$", rule) or self.business_rule is None:
macroresolver = MacroResolver()
data = self.get_data_for_checks(hosts)
rule = macroresolver.resolve_simple_macros_in_string(rule, data,
macromodulations,
timeperiods)
prev = getattr(self, "processed_business_rule", "")
if rule == prev:
# Business rule did not changed (no macro was modulated)
return
fact = DependencyNodeFactory(self)
node = fact.eval_cor_pattern(rule, hosts, services,
hostgroups, servicegroups, running)
self.processed_business_rule = rule
self.business_rule = node | Create business rules if necessary (cmd contains bp_rule)
:param hosts: Hosts object to look for objects
:type hosts: alignak.objects.host.Hosts
:param services: Services object to look for objects
:type services: alignak.objects.service.Services
:param running: flag used in eval_cor_pattern function
:type running: bool
:return: None | Below is the the instruction that describes the task:
### Input:
Create business rules if necessary (cmd contains bp_rule)
:param hosts: Hosts object to look for objects
:type hosts: alignak.objects.host.Hosts
:param services: Services object to look for objects
:type services: alignak.objects.service.Services
:param running: flag used in eval_cor_pattern function
:type running: bool
:return: None
### Response:
def create_business_rules(self, hosts, services, hostgroups, servicegroups,
macromodulations, timeperiods, running=False):
# pylint: disable=too-many-locals
"""Create business rules if necessary (cmd contains bp_rule)
:param hosts: Hosts object to look for objects
:type hosts: alignak.objects.host.Hosts
:param services: Services object to look for objects
:type services: alignak.objects.service.Services
:param running: flag used in eval_cor_pattern function
:type running: bool
:return: None
"""
cmdcall = getattr(self, 'check_command', None)
# If we do not have a command, we bailout
if cmdcall is None:
return
# we get our base command, like
# bp_rule!(host,svc & host, svc) -> bp_rule
cmd = cmdcall.call
elts = cmd.split('!')
base_cmd = elts[0]
# If it's bp_rule, we got a rule :)
if base_cmd == 'bp_rule':
self.got_business_rule = True
rule = ''
if len(elts) >= 2:
rule = '!'.join(elts[1:])
# Only (re-)evaluate the business rule if it has never been
# evaluated before, or it contains a macro.
if re.match(r"\$[\w\d_-]+\$", rule) or self.business_rule is None:
macroresolver = MacroResolver()
data = self.get_data_for_checks(hosts)
rule = macroresolver.resolve_simple_macros_in_string(rule, data,
macromodulations,
timeperiods)
prev = getattr(self, "processed_business_rule", "")
if rule == prev:
# Business rule did not changed (no macro was modulated)
return
fact = DependencyNodeFactory(self)
node = fact.eval_cor_pattern(rule, hosts, services,
hostgroups, servicegroups, running)
self.processed_business_rule = rule
self.business_rule = node |
def purge(gandi, email, background, force, alias):
"""Purge a mailbox."""
login, domain = email
if alias:
if not force:
proceed = click.confirm('Are you sure to purge all aliases for '
'mailbox %s@%s ?' % (login, domain))
if not proceed:
return
result = gandi.mail.set_alias(domain, login, [])
else:
if not force:
proceed = click.confirm('Are you sure to purge mailbox %s@%s ?'
% (login, domain))
if not proceed:
return
result = gandi.mail.purge(domain, login, background)
return result | Purge a mailbox. | Below is the the instruction that describes the task:
### Input:
Purge a mailbox.
### Response:
def purge(gandi, email, background, force, alias):
"""Purge a mailbox."""
login, domain = email
if alias:
if not force:
proceed = click.confirm('Are you sure to purge all aliases for '
'mailbox %s@%s ?' % (login, domain))
if not proceed:
return
result = gandi.mail.set_alias(domain, login, [])
else:
if not force:
proceed = click.confirm('Are you sure to purge mailbox %s@%s ?'
% (login, domain))
if not proceed:
return
result = gandi.mail.purge(domain, login, background)
return result |
def fetch(self, remote, branch, local_branch = None, force=False):
'''Pull a repository
:param remote: git-remote instance
:param branch: name of the branch to pull
'''
pb = ProgressBar()
pb.setup(self.name)
if local_branch:
branch = ':'.join([branch, local_branch])
remote.fetch(branch, update_head_ok=True, force=force, progress=pb)
print() | Pull a repository
:param remote: git-remote instance
:param branch: name of the branch to pull | Below is the the instruction that describes the task:
### Input:
Pull a repository
:param remote: git-remote instance
:param branch: name of the branch to pull
### Response:
def fetch(self, remote, branch, local_branch = None, force=False):
'''Pull a repository
:param remote: git-remote instance
:param branch: name of the branch to pull
'''
pb = ProgressBar()
pb.setup(self.name)
if local_branch:
branch = ':'.join([branch, local_branch])
remote.fetch(branch, update_head_ok=True, force=force, progress=pb)
print() |
def set_current_thumbnail(self, thumbnail):
"""Set the currently selected thumbnail."""
self.current_thumbnail = thumbnail
self.figure_viewer.load_figure(
thumbnail.canvas.fig, thumbnail.canvas.fmt)
for thumbnail in self._thumbnails:
thumbnail.highlight_canvas(thumbnail == self.current_thumbnail) | Set the currently selected thumbnail. | Below is the the instruction that describes the task:
### Input:
Set the currently selected thumbnail.
### Response:
def set_current_thumbnail(self, thumbnail):
"""Set the currently selected thumbnail."""
self.current_thumbnail = thumbnail
self.figure_viewer.load_figure(
thumbnail.canvas.fig, thumbnail.canvas.fmt)
for thumbnail in self._thumbnails:
thumbnail.highlight_canvas(thumbnail == self.current_thumbnail) |
def dealias_image(alias):
'''Remove an image alias.'''
with Session() as session:
try:
result = session.Image.dealiasImage(alias)
except Exception as e:
print_error(e)
sys.exit(1)
if result['ok']:
print("alias {0} removed.".format(alias))
else:
print(result['msg']) | Remove an image alias. | Below is the the instruction that describes the task:
### Input:
Remove an image alias.
### Response:
def dealias_image(alias):
'''Remove an image alias.'''
with Session() as session:
try:
result = session.Image.dealiasImage(alias)
except Exception as e:
print_error(e)
sys.exit(1)
if result['ok']:
print("alias {0} removed.".format(alias))
else:
print(result['msg']) |
def wrap(cls, private_key, algorithm):
"""
Wraps a private key in a PrivateKeyInfo structure
:param private_key:
A byte string or Asn1Value object of the private key
:param algorithm:
A unicode string of "rsa", "dsa" or "ec"
:return:
A PrivateKeyInfo object
"""
if not isinstance(private_key, byte_cls) and not isinstance(private_key, Asn1Value):
raise TypeError(unwrap(
'''
private_key must be a byte string or Asn1Value, not %s
''',
type_name(private_key)
))
if algorithm == 'rsa':
if not isinstance(private_key, RSAPrivateKey):
private_key = RSAPrivateKey.load(private_key)
params = Null()
elif algorithm == 'dsa':
if not isinstance(private_key, DSAPrivateKey):
private_key = DSAPrivateKey.load(private_key)
params = DSAParams()
params['p'] = private_key['p']
params['q'] = private_key['q']
params['g'] = private_key['g']
public_key = private_key['public_key']
private_key = private_key['private_key']
elif algorithm == 'ec':
if not isinstance(private_key, ECPrivateKey):
private_key = ECPrivateKey.load(private_key)
else:
private_key = private_key.copy()
params = private_key['parameters']
del private_key['parameters']
else:
raise ValueError(unwrap(
'''
algorithm must be one of "rsa", "dsa", "ec", not %s
''',
repr(algorithm)
))
private_key_algo = PrivateKeyAlgorithm()
private_key_algo['algorithm'] = PrivateKeyAlgorithmId(algorithm)
private_key_algo['parameters'] = params
container = cls()
container._algorithm = algorithm
container['version'] = Integer(0)
container['private_key_algorithm'] = private_key_algo
container['private_key'] = private_key
# Here we save the DSA public key if possible since it is not contained
# within the PKCS#8 structure for a DSA key
if algorithm == 'dsa':
container._public_key = public_key
return container | Wraps a private key in a PrivateKeyInfo structure
:param private_key:
A byte string or Asn1Value object of the private key
:param algorithm:
A unicode string of "rsa", "dsa" or "ec"
:return:
A PrivateKeyInfo object | Below is the the instruction that describes the task:
### Input:
Wraps a private key in a PrivateKeyInfo structure
:param private_key:
A byte string or Asn1Value object of the private key
:param algorithm:
A unicode string of "rsa", "dsa" or "ec"
:return:
A PrivateKeyInfo object
### Response:
def wrap(cls, private_key, algorithm):
"""
Wraps a private key in a PrivateKeyInfo structure
:param private_key:
A byte string or Asn1Value object of the private key
:param algorithm:
A unicode string of "rsa", "dsa" or "ec"
:return:
A PrivateKeyInfo object
"""
if not isinstance(private_key, byte_cls) and not isinstance(private_key, Asn1Value):
raise TypeError(unwrap(
'''
private_key must be a byte string or Asn1Value, not %s
''',
type_name(private_key)
))
if algorithm == 'rsa':
if not isinstance(private_key, RSAPrivateKey):
private_key = RSAPrivateKey.load(private_key)
params = Null()
elif algorithm == 'dsa':
if not isinstance(private_key, DSAPrivateKey):
private_key = DSAPrivateKey.load(private_key)
params = DSAParams()
params['p'] = private_key['p']
params['q'] = private_key['q']
params['g'] = private_key['g']
public_key = private_key['public_key']
private_key = private_key['private_key']
elif algorithm == 'ec':
if not isinstance(private_key, ECPrivateKey):
private_key = ECPrivateKey.load(private_key)
else:
private_key = private_key.copy()
params = private_key['parameters']
del private_key['parameters']
else:
raise ValueError(unwrap(
'''
algorithm must be one of "rsa", "dsa", "ec", not %s
''',
repr(algorithm)
))
private_key_algo = PrivateKeyAlgorithm()
private_key_algo['algorithm'] = PrivateKeyAlgorithmId(algorithm)
private_key_algo['parameters'] = params
container = cls()
container._algorithm = algorithm
container['version'] = Integer(0)
container['private_key_algorithm'] = private_key_algo
container['private_key'] = private_key
# Here we save the DSA public key if possible since it is not contained
# within the PKCS#8 structure for a DSA key
if algorithm == 'dsa':
container._public_key = public_key
return container |
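The surrounding structures mirror asn1crypto's keys module; assuming that is where PrivateKeyInfo lives, a hedged sketch of wrapping a traditional (PKCS#1) RSA key into PKCS#8:

# assuming: from asn1crypto.keys import PrivateKeyInfo

with open('rsa_key.der', 'rb') as handle:      # placeholder path to a DER-encoded RSAPrivateKey
    rsa_der = handle.read()

pkcs8 = PrivateKeyInfo.wrap(rsa_der, 'rsa')    # accepts bytes or an already-parsed RSAPrivateKey
pkcs8_der = pkcs8.dump()                       # serialize the PKCS#8 structure
print(pkcs8['private_key_algorithm']['algorithm'].native)   # 'rsa'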
def find(cls, id='', slug=None):
"""
Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug
as well as ID.
Examples::
collection_1234 = Collection.find(1234)
my_collection = Collection.find(slug="example/my-collection")
"""
if not id and not slug:
return None
try:
return cls.where(id=id, slug=slug).next()
except StopIteration:
raise PanoptesAPIException(
"Could not find collection with slug='{}'".format(slug)
) | Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug
as well as ID.
Examples::
collection_1234 = Collection.find(1234)
my_collection = Collection.find(slug="example/my-collection") | Below is the the instruction that describes the task:
### Input:
Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug
as well as ID.
Examples::
collection_1234 = Collection.find(1234)
my_collection = Collection.find(slug="example/my-collection")
### Response:
def find(cls, id='', slug=None):
"""
Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug
as well as ID.
Examples::
collection_1234 = Collection.find(1234)
my_collection = Collection.find(slug="example/my-collection")
"""
if not id and not slug:
return None
try:
return cls.where(id=id, slug=slug).next()
except StopIteration:
raise PanoptesAPIException(
"Could not find collection with slug='{}'".format(slug)
) |
def iresolve(self, *keys):
'''
Iterates over resolved instances for given provider keys.
:param keys: Provider keys
:type keys: tuple
:return: Iterator of resolved instances
:rtype: generator
'''
for key in keys:
missing = self.get_missing_deps(key)
if missing:
raise UnresolvableError("Missing dependencies for %s: %s" % (key, missing))
provider = self._providers.get(key)
if not provider:
raise UnresolvableError("Provider does not exist for %s" % key)
yield provider() | Iterates over resolved instances for given provider keys.
:param keys: Provider keys
:type keys: tuple
:return: Iterator of resolved instances
:rtype: generator | Below is the the instruction that describes the task:
### Input:
Iterates over resolved instances for given provider keys.
:param keys: Provider keys
:type keys: tuple
:return: Iterator of resolved instances
:rtype: generator
### Response:
def iresolve(self, *keys):
'''
Iterates over resolved instances for given provider keys.
:param keys: Provider keys
:type keys: tuple
:return: Iterator of resolved instances
:rtype: generator
'''
for key in keys:
missing = self.get_missing_deps(key)
if missing:
raise UnresolvableError("Missing dependencies for %s: %s" % (key, missing))
provider = self._providers.get(key)
if not provider:
raise UnresolvableError("Provider does not exist for %s" % key)
yield provider() |