def getLocation(self):
"""
Return the latitude and longitude of the picture.
Returns None if no location is given for this picture.
"""
method = 'flickr.photos.geo.getLocation'
try:
data = _doget(method, photo_id=self.id)
except FlickrError: # some other error might have occurred too
return None
loc = data.rsp.photo.location
return [loc.latitude, loc.longitude]
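A minimal usage sketch for the method above. The `flickr.Photo` constructor and the photo id are illustrative assumptions, not taken from the source.

import flickr  # assumed module exposing the Photo class that defines getLocation()

photo = flickr.Photo(id='1234567890')  # hypothetical photo id
location = photo.getLocation()
if location is None:
    print('No geodata recorded for this photo.')
else:
    latitude, longitude = location
    print('Photo taken at %s, %s' % (latitude, longitude))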
def from_numpy_vectors(cls, linear, quadratic, offset, vartype, variable_order=None):
"""Create a binary quadratic model from vectors.
Args:
linear (array_like):
A 1D array-like iterable of linear biases.
quadratic (tuple[array_like, array_like, array_like]):
A 3-tuple of 1D array_like vectors of the form (row, col, bias).
offset (numeric, optional):
Constant offset for the binary quadratic model.
vartype (:class:`.Vartype`/str/set):
Variable type for the binary quadratic model. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
variable_order (iterable, optional):
If provided, labels the variables; otherwise, indices are used.
Returns:
:obj:`.BinaryQuadraticModel`
Examples:
>>> import dimod
>>> import numpy as np
...
>>> linear_vector = np.asarray([-1, 1])
>>> quadratic_vectors = (np.asarray([0]), np.asarray([1]), np.asarray([-1.0]))
>>> bqm = dimod.BinaryQuadraticModel.from_numpy_vectors(linear_vector, quadratic_vectors, 0.0, dimod.SPIN)
>>> print(bqm.quadratic)
{(0, 1): -1.0}
"""
try:
heads, tails, values = quadratic
except ValueError:
raise ValueError("quadratic should be a 3-tuple")
if not len(heads) == len(tails) == len(values):
raise ValueError("row, col, and bias should be of equal length")
if variable_order is None:
variable_order = list(range(len(linear)))
linear = {v: float(bias) for v, bias in zip(variable_order, linear)}
quadratic = {(variable_order[u], variable_order[v]): float(bias)
for u, v, bias in zip(heads, tails, values)}
return cls(linear, quadratic, offset, vartype)
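As a complement to the doctest above, a hedged sketch of the same call with `variable_order` supplying labels; the labels 'a' and 'b' are illustrative assumptions.

import numpy as np
import dimod

linear = np.asarray([-1.0, 1.0])
quadratic = (np.asarray([0]), np.asarray([1]), np.asarray([-1.0]))  # (row, col, bias)
# variable_order relabels index 0 -> 'a' and index 1 -> 'b'
bqm = dimod.BinaryQuadraticModel.from_numpy_vectors(
    linear, quadratic, 0.0, dimod.SPIN, variable_order=['a', 'b'])
print(bqm.quadratic)  # expected: {('a', 'b'): -1.0}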
def mask(self):
"""
Returns the mask associated with this layer.
:return: :py:class:`~psd_tools.api.mask.Mask` or `None`
"""
if not hasattr(self, "_mask"):
self._mask = Mask(self) if self.has_mask() else None
return self._mask
def create_node(self, network, participant):
"""Make a new node for participants."""
if network.role == "practice" or network.role == "catch":
return RogersAgentFounder(network=network, participant=participant)
elif network.size(type=Agent) < network.generation_size:
return RogersAgentFounder(network=network, participant=participant)
else:
return RogersAgent(network=network, participant=participant)
def select_site_view(self, request, form_url=''):
"""
Display a choice form for selecting the site to which the settings will be added.
"""
if not self.has_add_permission(request):
raise PermissionDenied
extra_qs = ''
if request.META['QUERY_STRING']:
extra_qs = '&' + request.META['QUERY_STRING']
site_choices = self.get_site_choices()
if len(site_choices) == 1:
return HttpResponseRedirect('?site_id={0}{1}'.format(site_choices[0][0], extra_qs))
# Create form
form = self.select_site_form(
data=request.POST if request.method == 'POST' else None,
initial={'site': site_choices[0][0]}
)
form.fields['site'].choices = site_choices
if form.is_valid():
return HttpResponseRedirect(
'?site_id={0}{1}'.format(form.cleaned_data['site'], extra_qs))
# Wrap in all admin layout
fieldsets = ((None, {'fields': ('site',)}),)
adminForm = AdminForm(form, fieldsets, {}, model_admin=self)
media = self.media + adminForm.media
context = {
'title': _('Add %s') % force_text(self.opts.verbose_name),
'adminform': adminForm,
'is_popup': '_popup' in request.GET,
'media': mark_safe(media),
'errors': AdminErrorList(form, ()),
'app_label': self.opts.app_label,
}
return self.render_select_site_form(request, context, form_url)
def get_json_files(p):
"""
Scan the provided policy directory for all JSON policy files.
"""
f = [os.path.join(p, x) for x in os.listdir(p) if x.endswith(".json")]
return sorted(f)
def failover_to_replicant(self, volume_id, replicant_id, immediate=False):
"""Failover to a volume replicant.
:param integer volume_id: The id of the volume
:param integer replicant_id: ID of replicant to failover to
:param boolean immediate: Flag indicating if failover is immediate
:return: Whether the failover was successful
"""
return self.client.call('Network_Storage', 'failoverToReplicant',
replicant_id, immediate, id=volume_id)
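A hedged sketch of reaching this helper through a SoftLayer storage manager; the manager class name and the numeric ids are assumptions for illustration.

import SoftLayer

client = SoftLayer.create_client_from_env()    # credentials taken from env/config files
block = SoftLayer.BlockStorageManager(client)  # assumed manager exposing the method above
ok = block.failover_to_replicant(volume_id=12345, replicant_id=67890, immediate=False)
print('Failover succeeded' if ok else 'Failover failed')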
def get_argument_role(self):
"""
Helper function to get request argument.
Raises exception if argument is missing.
Returns the role argument.
"""
try:
return self.get_argument(constants.PARAM_ROLE, default=None)
except tornado.web.MissingArgumentError as e:
raise Exception(e.log_message)
def get_clumpp_table(self, kvalues, max_var_multiple=0, quiet=False):
"""
Returns a dictionary of results tables for making structure barplots.
This calls the same functions used in get_evanno_table() to call
CLUMPP to permute replicates.
Parameters:
-----------
kvalues : list or int
A kvalue or list of kvalues to run CLUMPP on and return a
results table.
max_var_multiple: int
A multiplier value to use as a filter for convergence of runs.
Default=0=no filtering. As an example, if 10 replicates
were run then the variance of the run with the minimum variance is
used as a benchmark. If other runs have a variance that is N times
greater then that run will be excluded. Remember, if replicate runs
sampled different distributions of SNPs then it is not unexpected that
they will have very different variances. However, you may still want
to exclude runs with very high variance since they likely have
not converged.
Returns:
--------
table : dict or pd.DataFrame
A dictionary of dataframes with admixture proportions.
"""
## do not allow bad vals
if max_var_multiple:
if max_var_multiple < 1:
raise ValueError('max_var_multiple must be >= 1')
if isinstance(kvalues, int):
return _get_clumpp_table(self, kvalues, max_var_multiple, quiet)
else:
tabledict = {}
for kpop in kvalues:
table = _get_clumpp_table(self, kpop, max_var_multiple, quiet)
tabledict[kpop] = table
return tabledict
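A hedged usage sketch; `struct` stands in for an ipyrad.analysis structure object whose replicate STRUCTURE runs have already finished (an assumption not shown above).

# a single K returns one DataFrame; a list of Ks returns a dict keyed by K
table_k3 = struct.get_clumpp_table(3)
tables = struct.get_clumpp_table([2, 3, 4], max_var_multiple=10)
print(tables[3].head())  # admixture proportions for K=3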
def inspiral_range(psd, snr=8, mass1=1.4, mass2=1.4, fmin=None, fmax=None,
horizon=False):
"""Calculate the inspiral sensitive distance from a GW strain PSD
The method returns the distance (in megaparsecs) to which a compact
binary inspiral with the given component masses would be detectable
given the instrumental PSD. The calculation is as defined in:
https://dcc.ligo.org/LIGO-T030276/public
Parameters
----------
psd : `~gwpy.frequencyseries.FrequencySeries`
the instrumental power-spectral-density data
snr : `float`, optional
the signal-to-noise ratio for which to calculate range,
default: `8`
mass1 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the first binary
component, default: `1.4`
mass2 : `float`, `~astropy.units.Quantity`, optional
the mass (`float` assumed in solar masses) of the second binary
component, default: `1.4`
fmin : `float`, optional
the lower frequency cut-off of the integral, default: `psd.df`
fmax : `float`, optional
the maximum frequency limit of the integral, defaults to
innermost stable circular orbit (ISCO) frequency
horizon : `bool`, optional
if `True`, return the maximal 'horizon' sensitive distance, otherwise
return the angle-averaged range, default: `False`
Returns
-------
range : `~astropy.units.Quantity`
the calculated inspiral range [Mpc]
Examples
--------
Grab some data for LIGO-Hanford around GW150914 and generate a PSD
>>> from gwpy.timeseries import TimeSeries
>>> hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> hoff = hoft.psd(fftlength=4)
Now we can calculate the :func:`inspiral_range`:
>>> from gwpy.astro import inspiral_range
>>> r = inspiral_range(hoff, fmin=30)
>>> print(r)
70.4612102889 Mpc
"""
mass1 = units.Quantity(mass1, 'solMass').to('kg')
mass2 = units.Quantity(mass2, 'solMass').to('kg')
mtotal = mass1 + mass2
# compute ISCO
fisco = (constants.c ** 3 / (constants.G * 6**1.5 * pi * mtotal)).to('Hz')
# format frequency limits
fmax = units.Quantity(fmax or fisco, 'Hz')
if fmax > fisco:
warnings.warn("Upper frequency bound greater than %s-%s ISCO "
"frequency of %s, using ISCO" % (mass1, mass2, fisco))
fmax = fisco
if fmin is None:
fmin = psd.df # avoid using 0 as lower limit
fmin = units.Quantity(fmin, 'Hz')
# integrate
f = psd.frequencies.to('Hz')
condition = (f >= fmin) & (f < fmax)
integrand = inspiral_range_psd(psd[condition], snr=snr, mass1=mass1,
mass2=mass2, horizon=horizon)
result = units.Quantity(
integrate.trapz(integrand.value, f.value[condition]),
unit=integrand.unit * units.Hertz)
return (result ** (1/2.)).to('Mpc')
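A hedged variation on the docstring example above, reusing its open-data segment but changing the component masses and requesting the horizon distance; the 10-solar-mass values are illustrative.

from gwpy.timeseries import TimeSeries
from gwpy.astro import inspiral_range

hoft = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
psd = hoft.psd(fftlength=4)
# angle-averaged range for a 10+10 solar-mass binary
print(inspiral_range(psd, fmin=30, mass1=10, mass2=10))
# optimally oriented 'horizon' distance for the same system
print(inspiral_range(psd, fmin=30, mass1=10, mass2=10, horizon=True))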
def create_namespaced_cron_job(self, namespace, body, **kwargs):
"""
create a CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_cron_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V2alpha1CronJob body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_cron_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_cron_job_with_http_info(namespace, body, **kwargs)
return data
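A hedged sketch of calling this method through the generated Kubernetes client; the BatchV2alpha1Api class name, the kubeconfig loading, and the partially built body are assumptions based on the client's conventions (the batch/v2alpha1 CronJob API has since been removed upstream).

from kubernetes import client, config

config.load_kube_config()        # assumed kubeconfig-based authentication
api = client.BatchV2alpha1Api()  # assumed API wrapper exposing the method above

body = client.V2alpha1CronJob(
    metadata=client.V1ObjectMeta(name='hello'),
    spec=cron_spec)              # cron_spec: a V2alpha1CronJobSpec built elsewhere (assumed)

created = api.create_namespaced_cron_job('default', body)                 # synchronous call
thread = api.create_namespaced_cron_job('default', body, async_req=True)  # returns a thread
print(thread.get().metadata.name)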
def topDownCompute(self, encoded):
"""[ScalarEncoder class method override]"""
#Decode to delta scalar
if self._prevAbsolute is None or self._prevDelta is None:
return [EncoderResult(value=0, scalar=0,
encoding=numpy.zeros(self.n))]
ret = self._adaptiveScalarEnc.topDownCompute(encoded)
if self._prevAbsolute is not None:
ret = [EncoderResult(value=ret[0].value+self._prevAbsolute,
scalar=ret[0].scalar+self._prevAbsolute,
encoding=ret[0].encoding)]
# ret[0].value+=self._prevAbsolute
# ret[0].scalar+=self._prevAbsolute
return ret
def log_task(
task, logger=logging, level='info', propagate_fail=True, uuid=None
):
"""
Parameterized decorator to wrap a function in a log task
Example:
>>> @log_task('mytask')
... def do_something():
... pass
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
with LogTask(
task,
logger=logger,
level=level,
propagate_fail=propagate_fail,
uuid=uuid
):
return func(*args, **kwargs)
return wrapper
return decorator
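A hedged sketch exercising the optional parameters of the decorator above; the logger configuration is an assumption.

import logging

logging.basicConfig(level=logging.DEBUG)
deploy_logger = logging.getLogger('deploy')

@log_task('provision hosts', logger=deploy_logger, level='debug', propagate_fail=False)
def provision():
    # the body runs inside the LogTask context set up by the decorator
    return 'done'

provision()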
def rightAt(self, offset=0):
""" Returns point in the center of the region's right side (offset to the right
by ``offset``) """
return Location(self.getX() + self.getW() + offset, self.getY() + (self.getH() / 2))
def execute_ccm_remotely(remote_options, ccm_args):
"""
Execute CCM operation(s) remotely
:return A tuple defining the execution of the command
* output - The output of the execution if the output was not displayed
* exit_status - The exit status of remotely executed script
:raises Exception if invalid options are passed for `--dse-credentials`, `--ssl`, or
`--node-ssl` when initiating a remote execution; also if
an error occurred during the SSH connection
"""
if not PARAMIKO_IS_AVAILABLE:
logging.warn("Paramiko is not Availble: Skipping remote execution of CCM command")
return None, None
# Create the SSH client
ssh_client = SSHClient(remote_options.ssh_host, remote_options.ssh_port,
remote_options.ssh_username, remote_options.ssh_password,
remote_options.ssh_private_key)
# Handle CCM arguments that require SFTP
for index, argument in enumerate(ccm_args):
# Determine if DSE credentials argument is being used
if "--dse-credentials" in argument:
# Get the filename being used for the DSE credentials
tokens = argument.split("=")
credentials_path = os.path.join(os.path.expanduser("~"), ".ccm", ".dse.ini")
if len(tokens) == 2:
credentials_path = tokens[1]
# Ensure the credential file exists locally and copy to remote host
if not os.path.isfile(credentials_path):
raise Exception("DSE Credentials File Does not Exist: %s"
% credentials_path)
ssh_client.put(credentials_path, ssh_client.ccm_config_dir)
# Update the DSE credentials argument
ccm_args[index] = "--dse-credentials"
# Determine if SSL or node SSL path argument is being used
if "--ssl" in argument or "--node-ssl" in argument:
# Get the directory being used for the path
tokens = argument.split("=")
if len(tokens) != 2:
raise Exception("Path is not Specified: %s" % argument)
ssl_path = tokens[1]
# Ensure the path exists locally and copy to remote host
if not os.path.isdir(ssl_path):
raise Exception("Path Does not Exist: %s" % ssl_path)
remote_ssl_path = ssh_client.temp + os.path.basename(ssl_path)
ssh_client.put(ssl_path, remote_ssl_path)
# Update the argument
ccm_args[index] = tokens[0] + "=" + remote_ssl_path
# Execute the CCM request, return output and exit status
return ssh_client.execute_ccm_command(ccm_args)
def save(self, *args, **kwargs):
"""
Overrides the save method
"""
self.slug = self.create_slug()
super(Slugable, self).save(*args, **kwargs)
def update_endtime(jid, time):
'''
Update (or store) the end time for a given job
Endtime is stored as a plain text string
'''
jid_dir = salt.utils.jid.jid_dir(jid, _job_dir(), __opts__['hash_type'])
try:
if not os.path.exists(jid_dir):
os.makedirs(jid_dir)
with salt.utils.files.fopen(os.path.join(jid_dir, ENDTIME), 'w') as etfile:
etfile.write(salt.utils.stringutils.to_str(time))
except IOError as exc:
log.warning('Could not write job invocation cache file: %s', exc)
def create_destination_id(client, container, name):
# type: (azure.storage.StorageClient, str, str) -> str
"""Create a unique destination id
:param azure.storage.StorageClient client: storage client
:param str container: container name
:param str name: entity name
:rtype: str
:return: unique id for the destination
"""
path = str(pathlib.PurePath(name))
return ';'.join((client.primary_endpoint, container, path))
def run(self):
"""Processing the pipeline."""
self.logger.info("Running with Python %s", sys.version.replace("\n", ""))
self.logger.info("Running on platform %s", platform.platform())
self.logger.info("Current cpu count is %d", multiprocessing.cpu_count())
configuration = self.load_configuration()
paths = [os.path.abspath(path) for path in Adapter(self.options).path]
supported_extension = [
ext.strip() for entry in configuration for ext in Adapter(entry).extension.split()]
for path, path_and_filename, extension in Application.walk_files_for(paths, supported_extension):
entry = Select(*configuration) \
.where(lambda entry: extension in Adapter(entry).extension.split()) \
.transform(lambda entry: Adapter(entry)) \
.build()[0]
# parsing file with regex to get loc and com values
# 100 lines of code (total) with 50 lines of comments means: loc=50, com=50
# the ratio would be then: 1.0
loc, com = self.analyse(path_and_filename, entry.regex)
ratio = float(com) / float(loc) if loc > 0 and com < loc else 1.0
self.results.append({
'type': entry.type,
'file': path_and_filename.replace(path + '/', ''),
'loc': loc,
'com': com,
'ratio': "%.2f" % ratio
})
# for the table we are mainly interested in ratio below defined threshold
# (except you want to see all of your code: --show-all)
ppresults = Select(*self.results).where(
lambda entry: float(Adapter(entry).ratio) < Adapter(self.options).threshold or
Adapter(self.options).show_all).build()
# print out results in table format
pprint(ppresults, keys=['ratio', 'loc', 'com', 'file', 'type'])
if Adapter(self.options).average:
all_ratio = Select(*self.results).transform(lambda entry: float(Adapter(entry).ratio)).build()
avg_ratio = float(sum(all_ratio)) / float(len(all_ratio)) if len(all_ratio) > 0 else 1.0
self.logger.info('average ratio is %.2f for %d files', avg_ratio, len(all_ratio))
return avg_ratio >= Adapter(self.options).threshold
# providing results (mainly for unittesting)
return len(Select(*self.results).where(
lambda entry: float(Adapter(entry).ratio) < Adapter(self.options).threshold).build()) == 0
def save_item(self, item_form, *args, **kwargs):
"""Pass through to provider ItemAdminSession.update_item"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
if item_form.is_for_update():
return self.update_item(item_form, *args, **kwargs)
else:
return self.create_item(item_form, *args, **kwargs)
def send(self, data, opcode=websocket.ABNF.OPCODE_TEXT):
"""
Send message to server.
data: message to send. If you set opcode to OPCODE_TEXT,
data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT.
"""
self.ws_client.send(data, opcode)
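A hedged sketch of sending text and binary frames with the method above; `client` stands in for an instance of the wrapper class, which is an assumption.

import json
import websocket

client.send(json.dumps({'action': 'subscribe', 'channel': 'ticker'}))  # text frame (default opcode)
client.send(b'\x00\x01\x02', opcode=websocket.ABNF.OPCODE_BINARY)      # binary frame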
def _update_dPrxy(self):
"""Update `dPrxy`, accounting for dependence of `Prxy` on `omega2`."""
super(ExpCM_empirical_phi_divpressure, self)._update_dPrxy()
if 'omega2' in self.freeparams:
with scipy.errstate(divide='raise', under='raise', over='raise',
invalid='ignore'):
scipy.copyto(self.dPrxy['omega2'], -self.ln_piAx_piAy_beta
* self.Qxy * self.omega /
(1 - self.piAx_piAy_beta), where=CODON_NONSYN)
scipy.copyto(self.dPrxy['omega2'], self.Qxy * self.omega,
where=scipy.logical_and(CODON_NONSYN, scipy.fabs(1 -
self.piAx_piAy_beta) < ALMOST_ZERO))
for r in range(self.nsites):
self.dPrxy['omega2'][r] *= self.deltar[r]
_fill_diagonals(self.dPrxy['omega2'], self._diag_indices)
def intersectingPoint(self, p):
"""
Given a point, get the intervals in the tree that it intersects.
:param p: intersection point
:return: the list of intersected intervals
"""
# perfect match
if p == self.data.mid:
return self.data.ends
if p > self.data.mid:
# we know all intervals in self.data begin before p (if they began after
# p, they would have not included mid) we just need to find those that
# end after p
endAfterP = [r for r in self.data.ends
if (r.end >= p and not self.openEnded) or
(r.end > p and self.openEnded)]
if self.right is not None:
endAfterP.extend(self.right.intersectingPoint(p))
return endAfterP
if p < self.data.mid:
# we know all intervals in self.data end after p (if they ended before p,
# they would have not included mid) we just need to find those that start
# before p
startBeforeP = [r for r in self.data.starts if r.start <= p]
if self.left is not None:
startBeforeP.extend(self.left.intersectingPoint(p))
return startBeforeP
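A hedged usage sketch; `tree` is assumed to be the root node of an interval tree built elsewhere from objects carrying `start` and `end` attributes.

hits = tree.intersectingPoint(1500)
for interval in hits:
    print(interval.start, interval.end)  # every interval spanning position 1500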
def check(self, data):
"""returns True if any match any regexp"""
if isinstance(data, Iterable):
data = "".join(str(x) for x in data)
try:
data = str(data)
except UnicodeDecodeError:
return False
return bool(data and self.__regexp.match(data))
def get_url(self, version=None):
"""
Return the URL of the bundled file.
"""
if self.fixed_bundle_url:
return self.fixed_bundle_url
return '%s.%s.%s' % (os.path.join(self.bundle_url_root, self.bundle_filename), version or self.get_version(), self.bundle_type)
def scroll(self, clicks):
"""Zoom using a mouse scroll wheel motion.
Parameters
----------
clicks : int
The number of clicks. Positive numbers indicate forward wheel
movement.
"""
target = self._target
ratio = 0.90
mult = 1.0
if clicks > 0:
mult = ratio**clicks
elif clicks < 0:
mult = (1.0 / ratio)**abs(clicks)
z_axis = self._n_pose[:3, 2].flatten()
eye = self._n_pose[:3, 3].flatten()
radius = np.linalg.norm(eye - target)
translation = (mult * radius - radius) * z_axis
t_tf = np.eye(4)
t_tf[:3, 3] = translation
self._n_pose = t_tf.dot(self._n_pose)
z_axis = self._pose[:3, 2].flatten()
eye = self._pose[:3, 3].flatten()
radius = np.linalg.norm(eye - target)
translation = (mult * radius - radius) * z_axis
t_tf = np.eye(4)
t_tf[:3, 3] = translation
self._pose = t_tf.dot(self._pose)
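A small worked example of the zoom factor computed above (numbers are illustrative): each forward click scales the eye-to-target radius by 0.9 and each backward click by 1/0.9.

ratio = 0.90
print(ratio ** 3)          # 3 forward clicks  -> ~0.729 x radius (zoom in)
print((1.0 / ratio) ** 3)  # 3 backward clicks -> ~1.372 x radius (zoom out)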
def _update_targets(vesseldicts, environment_dict):
"""
<Purpose>
Connects to the nodes in the vesseldicts and adds them to the list
of valid targets.
<Arguments>
vesseldicts:
A list of vesseldicts obtained through
SeattleClearinghouseClient calls.
<Side Effects>
All valid targets that the user can access on the specified nodes
are added to the list of targets.
<Exceptions>
None
<Returns>
None
"""
# Compile a list of the nodes that we need to check
nodelist = []
for vesseldict in vesseldicts:
nodeip_port = vesseldict['node_ip']+':'+str(vesseldict['node_port'])
if not nodeip_port in nodelist:
nodelist.append(nodeip_port)
# we'll output a message about the new keys later...
newidlist = []
faillist = []
# Clear the list so that the user doesn't target vessels acquired from
# previous requests when targeting this group
seash_global_variables.targets['acquired'] = []
print nodelist
# currently, if I browse more than once, I look up everything again...
retdict = seash_helper.contact_targets(
nodelist,
seash_helper.browse_target,
environment_dict['currentkeyname'],
'acquired')
# parse the output so we can print out something intelligible
for nodename in retdict:
if retdict[nodename][0]:
newidlist = newidlist + retdict[nodename][1]
else:
faillist.append(nodename)
seash_helper.print_vessel_errors(retdict)
if len(newidlist) == 0:
print "Could not add any new targets."
else:
print "Added targets: "+", ".join(newidlist)
if len(seash_global_variables.targets['acquired']) > 0:
num_targets = str(len(seash_global_variables.targets['acquired']))
print "Added group 'acquired' with "+num_targets+" targets" | <Purpose>
Connects to the nodes in the vesseldicts and adds them to the list
of valid targets.
<Arguments>
vesseldicts:
A list of vesseldicts obtained through
SeattleClearinghouseClient calls.
<Side Effects>
All valid targets that the user can access on the specified nodes
are added to the list of targets.
<Exceptions>
None
<Returns>
None | Below is the the instruction that describes the task:
### Input:
<Purpose>
Connects to the nodes in the vesseldicts and adds them to the list
of valid targets.
<Arguments>
vesseldicts:
A list of vesseldicts obtained through
SeattleClearinghouseClient calls.
<Side Effects>
All valid targets that the user can access on the specified nodes
are added to the list of targets.
<Exceptions>
None
<Returns>
None
### Response:
def _update_targets(vesseldicts, environment_dict):
"""
<Purpose>
Connects to the nodes in the vesseldicts and adds them to the list
of valid targets.
<Arguments>
vesseldicts:
A list of vesseldicts obtained through
SeattleClearinghouseClient calls.
<Side Effects>
All valid targets that the user can access on the specified nodes
are added to the list of targets.
<Exceptions>
None
<Returns>
None
"""
# Compile a list of the nodes that we need to check
nodelist = []
for vesseldict in vesseldicts:
nodeip_port = vesseldict['node_ip']+':'+str(vesseldict['node_port'])
if not nodeip_port in nodelist:
nodelist.append(nodeip_port)
# we'll output a message about the new keys later...
newidlist = []
faillist = []
# Clear the list so that the user doesn't target vessels acquired from
# previous requests when targeting this group
seash_global_variables.targets['acquired'] = []
print nodelist
# currently, if I browse more than once, I look up everything again...
retdict = seash_helper.contact_targets(
nodelist,
seash_helper.browse_target,
environment_dict['currentkeyname'],
'acquired')
# parse the output so we can print out something intelligible
for nodename in retdict:
if retdict[nodename][0]:
newidlist = newidlist + retdict[nodename][1]
else:
faillist.append(nodename)
seash_helper.print_vessel_errors(retdict)
if len(newidlist) == 0:
print "Could not add any new targets."
else:
print "Added targets: "+", ".join(newidlist)
if len(seash_global_variables.targets['acquired']) > 0:
num_targets = str(len(seash_global_variables.targets['acquired']))
print "Added group 'acquired' with "+num_targets+" targets" |
def trim(self):
"""
Trims data based on propensity score to create a subsample with
better covariate balance.
The default cutoff value is set to 0.1. To set a custom cutoff
value, modify the object attribute named cutoff directly.
This method should only be executed after the propensity score
has been estimated.
"""
if 0 < self.cutoff <= 0.5:
pscore = self.raw_data['pscore']
keep = (pscore >= self.cutoff) & (pscore <= 1-self.cutoff)
Y_trimmed = self.raw_data['Y'][keep]
D_trimmed = self.raw_data['D'][keep]
X_trimmed = self.raw_data['X'][keep]
self.raw_data = Data(Y_trimmed, D_trimmed, X_trimmed)
self.raw_data._dict['pscore'] = pscore[keep]
self.summary_stats = Summary(self.raw_data)
self.strata = None
self.estimates = Estimators()
elif self.cutoff == 0:
pass
else:
raise ValueError('Invalid cutoff.') | Trims data based on propensity score to create a subsample with
better covariate balance.
The default cutoff value is set to 0.1. To set a custom cutoff
value, modify the object attribute named cutoff directly.
This method should only be executed after the propensity score
has been estimated. | Below is the the instruction that describes the task:
### Input:
Trims data based on propensity score to create a subsample with
better covariate balance.
The default cutoff value is set to 0.1. To set a custom cutoff
value, modify the object attribute named cutoff directly.
This method should only be executed after the propensity score
has been estimated.
### Response:
def trim(self):
"""
Trims data based on propensity score to create a subsample with
better covariate balance.
The default cutoff value is set to 0.1. To set a custom cutoff
value, modify the object attribute named cutoff directly.
This method should only be executed after the propensity score
has been estimated.
"""
if 0 < self.cutoff <= 0.5:
pscore = self.raw_data['pscore']
keep = (pscore >= self.cutoff) & (pscore <= 1-self.cutoff)
Y_trimmed = self.raw_data['Y'][keep]
D_trimmed = self.raw_data['D'][keep]
X_trimmed = self.raw_data['X'][keep]
self.raw_data = Data(Y_trimmed, D_trimmed, X_trimmed)
self.raw_data._dict['pscore'] = pscore[keep]
self.summary_stats = Summary(self.raw_data)
self.strata = None
self.estimates = Estimators()
elif self.cutoff == 0:
pass
else:
raise ValueError('Invalid cutoff.') |
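A minimal usage sketch for the trimming step, assuming the method belongs to a causal-model object (called cm below) that also exposes an est_propensity() step, raw_data, and the cutoff attribute; the constructor name and data shapes are illustrative assumptions, not taken from the source.
import numpy as np
Y = np.random.normal(size=500)             # outcomes (assumed shape)
D = np.random.binomial(1, 0.4, size=500)   # binary treatment indicator
X = np.random.normal(size=(500, 3))        # covariates
cm = CausalModel(Y, D, X)                  # hypothetical constructor for the class owning trim()
cm.est_propensity()                        # the propensity score must be estimated before trimming
cm.cutoff = 0.05                           # custom cutoff (default shown above is 0.1)
cm.trim()                                  # drops units with pscore outside [0.05, 0.95]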
def count_hom_ref(self, axis=None):
"""Count homozygous reference genotypes.
Parameters
----------
axis : int, optional
Axis over which to count, or None to perform overall count.
"""
b = self.is_hom_ref()
return np.sum(b, axis=axis) | Count homozygous reference genotypes.
Parameters
----------
axis : int, optional
Axis over which to count, or None to perform overall count. | Below is the the instruction that describes the task:
### Input:
Count homozygous reference genotypes.
Parameters
----------
axis : int, optional
Axis over which to count, or None to perform overall count.
### Response:
def count_hom_ref(self, axis=None):
"""Count homozygous reference genotypes.
Parameters
----------
axis : int, optional
Axis over which to count, or None to perform overall count.
"""
b = self.is_hom_ref()
return np.sum(b, axis=axis) |
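A short hedged example, assuming the method lives on a genotype-array container in the style of scikit-allel's GenotypeArray (an assumption, not confirmed by this snippet alone); the data below is made up.
import allel
g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 0], [1, 1]]])   # 2 variants x 2 samples
print(g.count_hom_ref())                      # overall count of hom-ref calls -> 2
print(g.count_hom_ref(axis=0))                # per-sample counts -> [2 0]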
def get_device_name(self):
"""
return the device name
:return: str
"""
command = const.CMD_OPTIONS_RRQ
command_string = b'~DeviceName\x00'
response_size = 1024
cmd_response = self.__send_command(command, command_string, response_size)
if cmd_response.get('status'):
device = self.__data.split(b'=', 1)[-1].split(b'\x00')[0]
return device.decode()
else:
return "" | return the device name
:return: str | Below is the the instruction that describes the task:
### Input:
return the device name
:return: str
### Response:
def get_device_name(self):
"""
return the device name
:return: str
"""
command = const.CMD_OPTIONS_RRQ
command_string = b'~DeviceName\x00'
response_size = 1024
cmd_response = self.__send_command(command, command_string, response_size)
if cmd_response.get('status'):
device = self.__data.split(b'=', 1)[-1].split(b'\x00')[0]
return device.decode()
else:
return "" |
def open_channel(self):
"""
Open a new channel on this connection.
This method is a :ref:`coroutine <coroutine>`.
:return: The new :class:`Channel` object.
"""
if self._closing:
raise ConnectionClosed("Closed by application")
if self.closed.done():
raise self.closed.exception()
channel = yield from self.channel_factory.open()
return channel | Open a new channel on this connection.
This method is a :ref:`coroutine <coroutine>`.
:return: The new :class:`Channel` object. | Below is the the instruction that describes the task:
### Input:
Open a new channel on this connection.
This method is a :ref:`coroutine <coroutine>`.
:return: The new :class:`Channel` object.
### Response:
def open_channel(self):
"""
Open a new channel on this connection.
This method is a :ref:`coroutine <coroutine>`.
:return: The new :class:`Channel` object.
"""
if self._closing:
raise ConnectionClosed("Closed by application")
if self.closed.done():
raise self.closed.exception()
channel = yield from self.channel_factory.open()
return channel |
def checkTikaServer(scheme="http", serverHost=ServerHost, port=Port, tikaServerJar=TikaServerJar, classpath=None, config_path=None):
'''
Check that tika-server is running. If not, download JAR file and start it up.
:param scheme: e.g. http or https
:param serverHost:
:param port:
:param tikaServerJar:
:param classpath:
:return:
'''
if classpath is None:
classpath = TikaServerClasspath
if port is None:
port = '443' if scheme == 'https' else '80'
urlp = urlparse(tikaServerJar)
serverEndpoint = '%s://%s:%s' % (scheme, serverHost, port)
jarPath = os.path.join(TikaJarPath, 'tika-server.jar')
if 'localhost' in serverEndpoint or '127.0.0.1' in serverEndpoint:
alreadyRunning = checkPortIsOpen(serverHost, port)
if not alreadyRunning:
if not os.path.isfile(jarPath) and urlp.scheme != '':
getRemoteJar(tikaServerJar, jarPath)
if not checkJarSig(tikaServerJar, jarPath):
os.remove(jarPath)
tikaServerJar = getRemoteJar(tikaServerJar, jarPath)
status = startServer(jarPath, TikaJava, serverHost, port, classpath, config_path)
if not status:
log.error("Failed to receive startup confirmation from startServer.")
raise RuntimeError("Unable to start Tika server.")
return serverEndpoint | Check that tika-server is running. If not, download JAR file and start it up.
:param scheme: e.g. http or https
:param serverHost:
:param port:
:param tikaServerJar:
:param classpath:
:return: | Below is the the instruction that describes the task:
### Input:
Check that tika-server is running. If not, download JAR file and start it up.
:param scheme: e.g. http or https
:param serverHost:
:param port:
:param tikaServerJar:
:param classpath:
:return:
### Response:
def checkTikaServer(scheme="http", serverHost=ServerHost, port=Port, tikaServerJar=TikaServerJar, classpath=None, config_path=None):
'''
Check that tika-server is running. If not, download JAR file and start it up.
:param scheme: e.g. http or https
:param serverHost:
:param port:
:param tikaServerJar:
:param classpath:
:return:
'''
if classpath is None:
classpath = TikaServerClasspath
if port is None:
port = '443' if scheme == 'https' else '80'
urlp = urlparse(tikaServerJar)
serverEndpoint = '%s://%s:%s' % (scheme, serverHost, port)
jarPath = os.path.join(TikaJarPath, 'tika-server.jar')
if 'localhost' in serverEndpoint or '127.0.0.1' in serverEndpoint:
alreadyRunning = checkPortIsOpen(serverHost, port)
if not alreadyRunning:
if not os.path.isfile(jarPath) and urlp.scheme != '':
getRemoteJar(tikaServerJar, jarPath)
if not checkJarSig(tikaServerJar, jarPath):
os.remove(jarPath)
tikaServerJar = getRemoteJar(tikaServerJar, jarPath)
status = startServer(jarPath, TikaJava, serverHost, port, classpath, config_path)
if not status:
log.error("Failed to receive startup confirmation from startServer.")
raise RuntimeError("Unable to start Tika server.")
return serverEndpoint |
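A hedged usage sketch: calling the helper before issuing parse requests, assuming it is importable from the surrounding module and a local JVM is available; the host and port values are placeholders.
endpoint = checkTikaServer(scheme="http", serverHost="localhost", port="9998")
print("Tika server endpoint:", endpoint)      # e.g. http://localhost:9998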
def quality(self, tests, alias=None):
"""
Run a series of tests and return the corresponding results.
Args:
tests (list): a list of functions.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
Returns:
list. The results. Stick to booleans (True = pass) or ints.
"""
        # Gather the tests.

# First, anything called 'all', 'All', or 'ALL'.
# Second, anything with the name of the curve we're in now.
# Third, anything that the alias list has for this curve.
# (This requires a reverse look-up so it's a bit messy.)
this_tests =\
tests.get('each', [])+tests.get('Each', [])+tests.get('EACH', [])\
+ tests.get(self.mnemonic, [])\
+ utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)])
this_tests = filter(None, this_tests)
# If we explicitly set zero tests for a particular key, then this
# overrides the 'all' and 'alias' tests.
if not tests.get(self.mnemonic, 1):
this_tests = []
return {test.__name__: test(self) for test in this_tests} | Run a series of tests and return the corresponding results.
Args:
tests (list): a list of functions.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
Returns:
list. The results. Stick to booleans (True = pass) or ints. | Below is the the instruction that describes the task:
### Input:
Run a series of tests and return the corresponding results.
Args:
tests (list): a list of functions.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
Returns:
list. The results. Stick to booleans (True = pass) or ints.
### Response:
def quality(self, tests, alias=None):
"""
Run a series of tests and return the corresponding results.
Args:
tests (list): a list of functions.
alias (dict): a dictionary mapping mnemonics to lists of mnemonics.
Returns:
list. The results. Stick to booleans (True = pass) or ints.
"""
        # Gather the tests.
# First, anything called 'all', 'All', or 'ALL'.
# Second, anything with the name of the curve we're in now.
# Third, anything that the alias list has for this curve.
# (This requires a reverse look-up so it's a bit messy.)
this_tests =\
tests.get('each', [])+tests.get('Each', [])+tests.get('EACH', [])\
+ tests.get(self.mnemonic, [])\
+ utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)])
this_tests = filter(None, this_tests)
# If we explicitly set zero tests for a particular key, then this
# overrides the 'all' and 'alias' tests.
if not tests.get(self.mnemonic, 1):
this_tests = []
return {test.__name__: test(self) for test in this_tests} |
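A sketch of the shape the tests argument is expected to take, based on how the method reads it (a dict keyed by 'each', a mnemonic, or an alias, with lists of callables as values); the test functions, mnemonics, and the existing curve instance are illustrative assumptions.
import numpy as np
def no_nans(curve):
    return not np.isnan(np.asarray(curve)).any()   # hypothetical test: curve assumed array-like
def all_positive(curve):
    return bool((np.asarray(curve) > 0).all())     # hypothetical test
tests = {
    'each': [no_nans],        # run against every curve
    'GR': [all_positive],     # run only against the GR mnemonic (or its aliases)
}
alias = {'GR': ['GR', 'GAM', 'GRC']}
results = curve.quality(tests, alias=alias)        # `curve` is an existing curve object
print(results)                                     # e.g. {'no_nans': True, 'all_positive': False}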
def discovery_redis(self):
"""
Installs the Redis discovery bundles and instantiates components
"""
# Install the bundle
self.context.install_bundle("pelix.remote.discovery.redis").start()
with use_waiting_list(self.context) as ipopo:
# Instantiate the discovery
ipopo.add(
rs.FACTORY_DISCOVERY_REDIS,
"pelix-discovery-redis",
{
"application.id": "sample.rs",
"redis.host": self.arguments.redis_host,
"redis.port": self.arguments.redis_port,
},
) | Installs the Redis discovery bundles and instantiates components | Below is the the instruction that describes the task:
### Input:
Installs the Redis discovery bundles and instantiates components
### Response:
def discovery_redis(self):
"""
Installs the Redis discovery bundles and instantiates components
"""
# Install the bundle
self.context.install_bundle("pelix.remote.discovery.redis").start()
with use_waiting_list(self.context) as ipopo:
# Instantiate the discovery
ipopo.add(
rs.FACTORY_DISCOVERY_REDIS,
"pelix-discovery-redis",
{
"application.id": "sample.rs",
"redis.host": self.arguments.redis_host,
"redis.port": self.arguments.redis_port,
},
) |
def associate_route_table(self, route_table_id, subnet_id):
"""
Associates a route table with a specific subnet.
:type route_table_id: str
:param route_table_id: The ID of the route table to associate.
:type subnet_id: str
:param subnet_id: The ID of the subnet to associate with.
:rtype: str
:return: The ID of the association created
"""
params = {
'RouteTableId': route_table_id,
'SubnetId': subnet_id
}
result = self.get_object('AssociateRouteTable', params, ResultSet)
return result.associationId | Associates a route table with a specific subnet.
:type route_table_id: str
:param route_table_id: The ID of the route table to associate.
:type subnet_id: str
:param subnet_id: The ID of the subnet to associate with.
:rtype: str
:return: The ID of the association created | Below is the the instruction that describes the task:
### Input:
Associates a route table with a specific subnet.
:type route_table_id: str
:param route_table_id: The ID of the route table to associate.
:type subnet_id: str
:param subnet_id: The ID of the subnet to associate with.
:rtype: str
:return: The ID of the association created
### Response:
def associate_route_table(self, route_table_id, subnet_id):
"""
Associates a route table with a specific subnet.
:type route_table_id: str
:param route_table_id: The ID of the route table to associate.
:type subnet_id: str
:param subnet_id: The ID of the subnet to associate with.
:rtype: str
:return: The ID of the association created
"""
params = {
'RouteTableId': route_table_id,
'SubnetId': subnet_id
}
result = self.get_object('AssociateRouteTable', params, ResultSet)
return result.associationId |
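A hedged call sketch; vpc_conn stands in for an existing VPC connection object exposing this method, and the resource IDs are placeholders.
assoc_id = vpc_conn.associate_route_table('rtb-0123abcd', 'subnet-4567efab')
print("association id:", assoc_id)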
def _init_values(self, context=None):
"""Retrieve field values from the server.
        May be used to restore the original values for the purpose of cancelling
all changes made.
"""
if context is None:
context = self.env.context
# Get basic fields (no relational ones)
basic_fields = []
for field_name in self._columns:
field = self._columns[field_name]
if not getattr(field, 'relation', False):
basic_fields.append(field_name)
# Fetch values from the server
if self.ids:
rows = self.__class__.read(
self.ids, basic_fields, context=context, load='_classic_write')
ids_fetched = set()
for row in rows:
ids_fetched.add(row['id'])
for field_name in row:
if field_name == 'id':
continue
self._values[field_name][row['id']] = row[field_name]
ids_in_error = set(self.ids) - ids_fetched
if ids_in_error:
raise ValueError(
"There is no '{model}' record with IDs {ids}.".format(
model=self._name, ids=list(ids_in_error)))
# No ID: fields filled with default values
else:
default_get = self.__class__.default_get(
list(self._columns), context=context)
for field_name in self._columns:
self._values[field_name][None] = default_get.get(
field_name, False) | Retrieve field values from the server.
        May be used to restore the original values for the purpose of cancelling
all changes made. | Below is the the instruction that describes the task:
### Input:
Retrieve field values from the server.
        May be used to restore the original values for the purpose of cancelling
all changes made.
### Response:
def _init_values(self, context=None):
"""Retrieve field values from the server.
        May be used to restore the original values for the purpose of cancelling
all changes made.
"""
if context is None:
context = self.env.context
# Get basic fields (no relational ones)
basic_fields = []
for field_name in self._columns:
field = self._columns[field_name]
if not getattr(field, 'relation', False):
basic_fields.append(field_name)
# Fetch values from the server
if self.ids:
rows = self.__class__.read(
self.ids, basic_fields, context=context, load='_classic_write')
ids_fetched = set()
for row in rows:
ids_fetched.add(row['id'])
for field_name in row:
if field_name == 'id':
continue
self._values[field_name][row['id']] = row[field_name]
ids_in_error = set(self.ids) - ids_fetched
if ids_in_error:
raise ValueError(
"There is no '{model}' record with IDs {ids}.".format(
model=self._name, ids=list(ids_in_error)))
# No ID: fields filled with default values
else:
default_get = self.__class__.default_get(
list(self._columns), context=context)
for field_name in self._columns:
self._values[field_name][None] = default_get.get(
field_name, False) |
def disambiguate_fname(files_path_list, filename):
"""Get tab title without ambiguation."""
fname = os.path.basename(filename)
same_name_files = get_same_name_files(files_path_list, fname)
if len(same_name_files) > 1:
compare_path = shortest_path(same_name_files)
if compare_path == filename:
same_name_files.remove(path_components(filename))
compare_path = shortest_path(same_name_files)
diff_path = differentiate_prefix(path_components(filename),
path_components(compare_path))
diff_path_length = len(diff_path)
path_component = path_components(diff_path)
if (diff_path_length > 20 and len(path_component) > 2):
if path_component[0] != '/' and path_component[0] != '':
path_component = [path_component[0], '...',
path_component[-1]]
else:
path_component = [path_component[2], '...',
path_component[-1]]
diff_path = os.path.join(*path_component)
fname = fname + " - " + diff_path
return fname | Get tab title without ambiguation. | Below is the the instruction that describes the task:
### Input:
Get tab title without ambiguation.
### Response:
def disambiguate_fname(files_path_list, filename):
"""Get tab title without ambiguation."""
fname = os.path.basename(filename)
same_name_files = get_same_name_files(files_path_list, fname)
if len(same_name_files) > 1:
compare_path = shortest_path(same_name_files)
if compare_path == filename:
same_name_files.remove(path_components(filename))
compare_path = shortest_path(same_name_files)
diff_path = differentiate_prefix(path_components(filename),
path_components(compare_path))
diff_path_length = len(diff_path)
path_component = path_components(diff_path)
if (diff_path_length > 20 and len(path_component) > 2):
if path_component[0] != '/' and path_component[0] != '':
path_component = [path_component[0], '...',
path_component[-1]]
else:
path_component = [path_component[2], '...',
path_component[-1]]
diff_path = os.path.join(*path_component)
fname = fname + " - " + diff_path
return fname |
def dial(self, target):
'''
connects to a node
:param url: string (optional) - resource in which to connect.
if not provided, will use default for the stage
:returns: provider, error
'''
if not target:
return None, "target network must be specified with -t or --target"
url = get_url(self.config, target)
try:
if url.startswith('ws'):
self.w3 = Web3(WebsocketProvider(url))
elif url.startswith('http'):
self.w3 = Web3(HTTPProvider(url))
elif url.endswith('ipc'):
if url == 'ipc':
url = None
self.w3 = Web3(Web3.IPCProvider(url))
else:
return None, "Invalid Provider URL: {}".format(url)
except Exception as e:
return None, e
return self.w3, None | connects to a node
:param url: string (optional) - resource in which to connect.
if not provided, will use default for the stage
:returns: provider, error | Below is the the instruction that describes the task:
### Input:
connects to a node
:param url: string (optional) - resource in which to connect.
if not provided, will use default for the stage
:returns: provider, error
### Response:
def dial(self, target):
'''
connects to a node
:param url: string (optional) - resource in which to connect.
if not provided, will use default for the stage
:returns: provider, error
'''
if not target:
return None, "target network must be specified with -t or --target"
url = get_url(self.config, target)
try:
if url.startswith('ws'):
self.w3 = Web3(WebsocketProvider(url))
elif url.startswith('http'):
self.w3 = Web3(HTTPProvider(url))
elif url.endswith('ipc'):
if url == 'ipc':
url = None
self.w3 = Web3(Web3.IPCProvider(url))
else:
return None, "Invalid Provider URL: {}".format(url)
except Exception as e:
return None, e
return self.w3, None |
def _make_server(self):
"""Constructs the TensorBoard WSGI app and instantiates the server."""
app = application.standard_tensorboard_wsgi(self.flags,
self.plugin_loaders,
self.assets_zip_provider)
return self.server_class(app, self.flags) | Constructs the TensorBoard WSGI app and instantiates the server. | Below is the the instruction that describes the task:
### Input:
Constructs the TensorBoard WSGI app and instantiates the server.
### Response:
def _make_server(self):
"""Constructs the TensorBoard WSGI app and instantiates the server."""
app = application.standard_tensorboard_wsgi(self.flags,
self.plugin_loaders,
self.assets_zip_provider)
return self.server_class(app, self.flags) |
def threshold(self, value, inclusive=False):
"""Return True if > than treshold value (or >= threshold value if
inclusive=True).
"""
if inclusive:
def function(x, y):
return True if x >= y else False
else:
def function(x, y):
return True if x > y else False
        return self.operation(value, function) | Return True if > threshold value (or >= threshold value if
inclusive=True). | Below is the the instruction that describes the task:
### Input:
Return True if > threshold value (or >= threshold value if
inclusive=True).
### Response:
def threshold(self, value, inclusive=False):
"""Return True if > than treshold value (or >= threshold value if
inclusive=True).
"""
if inclusive:
def function(x, y):
return True if x >= y else False
else:
def function(x, y):
return True if x > y else False
return self.operation(value, function) |
def sapm_aoi_loss(aoi, module, upper=None):
"""
Calculates the SAPM angle of incidence loss coefficient, F2.
Parameters
----------
aoi : numeric
Angle of incidence in degrees. Negative input angles will return
zeros.
module : dict-like
A dict, Series, or DataFrame defining the SAPM performance
parameters. See the :py:func:`sapm` notes section for more
details.
upper : None or float, default None
Upper limit on the results.
Returns
-------
F2 : numeric
The SAPM angle of incidence loss coefficient.
Notes
-----
The SAPM traditionally does not define an upper limit on the AOI
loss function and values slightly exceeding 1 may exist for moderate
angles of incidence (15-40 degrees). However, users may consider
imposing an upper limit of 1.
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance
Model", SAND Report 3535, Sandia National Laboratories, Albuquerque,
NM.
[2] B.H. King et al, "Procedure to Determine Coefficients for the
Sandia Array Performance Model (SAPM)," SAND2016-5284, Sandia
National Laboratories (2016).
[3] B.H. King et al, "Recent Advancements in Outdoor Measurement
Techniques for Angle of Incidence Effects," 42nd IEEE PVSC (2015).
DOI: 10.1109/PVSC.2015.7355849
"""
aoi_coeff = [module['B5'], module['B4'], module['B3'], module['B2'],
module['B1'], module['B0']]
aoi_loss = np.polyval(aoi_coeff, aoi)
aoi_loss = np.clip(aoi_loss, 0, upper)
# nan tolerant masking
aoi_lt_0 = np.full_like(aoi, False, dtype='bool')
np.less(aoi, 0, where=~np.isnan(aoi), out=aoi_lt_0)
aoi_loss = np.where(aoi_lt_0, 0, aoi_loss)
if isinstance(aoi, pd.Series):
aoi_loss = pd.Series(aoi_loss, aoi.index)
return aoi_loss | Calculates the SAPM angle of incidence loss coefficient, F2.
Parameters
----------
aoi : numeric
Angle of incidence in degrees. Negative input angles will return
zeros.
module : dict-like
A dict, Series, or DataFrame defining the SAPM performance
parameters. See the :py:func:`sapm` notes section for more
details.
upper : None or float, default None
Upper limit on the results.
Returns
-------
F2 : numeric
The SAPM angle of incidence loss coefficient.
Notes
-----
The SAPM traditionally does not define an upper limit on the AOI
loss function and values slightly exceeding 1 may exist for moderate
angles of incidence (15-40 degrees). However, users may consider
imposing an upper limit of 1.
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance
Model", SAND Report 3535, Sandia National Laboratories, Albuquerque,
NM.
[2] B.H. King et al, "Procedure to Determine Coefficients for the
Sandia Array Performance Model (SAPM)," SAND2016-5284, Sandia
National Laboratories (2016).
[3] B.H. King et al, "Recent Advancements in Outdoor Measurement
Techniques for Angle of Incidence Effects," 42nd IEEE PVSC (2015).
DOI: 10.1109/PVSC.2015.7355849 | Below is the the instruction that describes the task:
### Input:
Calculates the SAPM angle of incidence loss coefficient, F2.
Parameters
----------
aoi : numeric
Angle of incidence in degrees. Negative input angles will return
zeros.
module : dict-like
A dict, Series, or DataFrame defining the SAPM performance
parameters. See the :py:func:`sapm` notes section for more
details.
upper : None or float, default None
Upper limit on the results.
Returns
-------
F2 : numeric
The SAPM angle of incidence loss coefficient.
Notes
-----
The SAPM traditionally does not define an upper limit on the AOI
loss function and values slightly exceeding 1 may exist for moderate
angles of incidence (15-40 degrees). However, users may consider
imposing an upper limit of 1.
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance
Model", SAND Report 3535, Sandia National Laboratories, Albuquerque,
NM.
[2] B.H. King et al, "Procedure to Determine Coefficients for the
Sandia Array Performance Model (SAPM)," SAND2016-5284, Sandia
National Laboratories (2016).
[3] B.H. King et al, "Recent Advancements in Outdoor Measurement
Techniques for Angle of Incidence Effects," 42nd IEEE PVSC (2015).
DOI: 10.1109/PVSC.2015.7355849
### Response:
def sapm_aoi_loss(aoi, module, upper=None):
"""
Calculates the SAPM angle of incidence loss coefficient, F2.
Parameters
----------
aoi : numeric
Angle of incidence in degrees. Negative input angles will return
zeros.
module : dict-like
A dict, Series, or DataFrame defining the SAPM performance
parameters. See the :py:func:`sapm` notes section for more
details.
upper : None or float, default None
Upper limit on the results.
Returns
-------
F2 : numeric
The SAPM angle of incidence loss coefficient.
Notes
-----
The SAPM traditionally does not define an upper limit on the AOI
loss function and values slightly exceeding 1 may exist for moderate
angles of incidence (15-40 degrees). However, users may consider
imposing an upper limit of 1.
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance
Model", SAND Report 3535, Sandia National Laboratories, Albuquerque,
NM.
[2] B.H. King et al, "Procedure to Determine Coefficients for the
Sandia Array Performance Model (SAPM)," SAND2016-5284, Sandia
National Laboratories (2016).
[3] B.H. King et al, "Recent Advancements in Outdoor Measurement
Techniques for Angle of Incidence Effects," 42nd IEEE PVSC (2015).
DOI: 10.1109/PVSC.2015.7355849
"""
aoi_coeff = [module['B5'], module['B4'], module['B3'], module['B2'],
module['B1'], module['B0']]
aoi_loss = np.polyval(aoi_coeff, aoi)
aoi_loss = np.clip(aoi_loss, 0, upper)
# nan tolerant masking
aoi_lt_0 = np.full_like(aoi, False, dtype='bool')
np.less(aoi, 0, where=~np.isnan(aoi), out=aoi_lt_0)
aoi_loss = np.where(aoi_lt_0, 0, aoi_loss)
if isinstance(aoi, pd.Series):
aoi_loss = pd.Series(aoi_loss, aoi.index)
return aoi_loss |
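A worked call with made-up B0-B5 coefficients (real values come from the module database referenced above), showing that negative angles return 0 and that upper caps the result.
import numpy as np
import pandas as pd
module = {'B0': 1.0, 'B1': -2.438e-3, 'B2': 3.103e-4,
          'B3': -1.246e-5, 'B4': 2.112e-7, 'B5': -1.359e-9}   # illustrative coefficients only
aoi = pd.Series([-10.0, 0.0, 30.0, 60.0, 85.0])
f2 = sapm_aoi_loss(aoi, module, upper=1.0)   # Series indexed like aoi; the -10 deg entry maps to 0
print(f2)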
def span_case(self, i, case):
"""Uppercase or lowercase the next range of characters until end marker is found."""
# A new \L, \C or \E should pop the last in the stack.
if self.span_stack:
self.span_stack.pop()
if self.single_stack:
self.single_stack.pop()
self.span_stack.append(case)
count = len(self.span_stack)
self.end_found = False
try:
while not self.end_found:
t = next(i)
if self.use_format and t in _CURLY_BRACKETS:
self.handle_format(t, i)
elif t == '\\':
try:
t = next(i)
self.reference(t, i)
except StopIteration:
self.result.append(t)
raise
else:
self.result.append(self.convert_case(t, case))
if self.end_found or count > len(self.span_stack):
self.end_found = False
break
except StopIteration:
pass
if count == len(self.span_stack):
self.span_stack.pop() | Uppercase or lowercase the next range of characters until end marker is found. | Below is the the instruction that describes the task:
### Input:
Uppercase or lowercase the next range of characters until end marker is found.
### Response:
def span_case(self, i, case):
"""Uppercase or lowercase the next range of characters until end marker is found."""
# A new \L, \C or \E should pop the last in the stack.
if self.span_stack:
self.span_stack.pop()
if self.single_stack:
self.single_stack.pop()
self.span_stack.append(case)
count = len(self.span_stack)
self.end_found = False
try:
while not self.end_found:
t = next(i)
if self.use_format and t in _CURLY_BRACKETS:
self.handle_format(t, i)
elif t == '\\':
try:
t = next(i)
self.reference(t, i)
except StopIteration:
self.result.append(t)
raise
else:
self.result.append(self.convert_case(t, case))
if self.end_found or count > len(self.span_stack):
self.end_found = False
break
except StopIteration:
pass
if count == len(self.span_stack):
self.span_stack.pop() |
def fetch_max(self, cluster, metric, topology, component, instance, timerange, environ=None):
'''
:param cluster:
:param metric:
:param topology:
:param component:
:param instance:
:param timerange:
:param environ:
:return:
'''
components = [component] if component != "*" else (yield get_comps(cluster, environ, topology))
result = {}
futures = []
for comp in components:
query = self.get_query(metric, comp, instance)
max_query = "MAX(%s)" % query
future = get_metrics(cluster, environ, topology, timerange, max_query)
futures.append(future)
results = yield futures
data = self.compute_max(results)
result = self.get_metric_response(timerange, data, True)
raise tornado.gen.Return(result) | :param cluster:
:param metric:
:param topology:
:param component:
:param instance:
:param timerange:
:param environ:
:return: | Below is the the instruction that describes the task:
### Input:
:param cluster:
:param metric:
:param topology:
:param component:
:param instance:
:param timerange:
:param environ:
:return:
### Response:
def fetch_max(self, cluster, metric, topology, component, instance, timerange, environ=None):
'''
:param cluster:
:param metric:
:param topology:
:param component:
:param instance:
:param timerange:
:param environ:
:return:
'''
components = [component] if component != "*" else (yield get_comps(cluster, environ, topology))
result = {}
futures = []
for comp in components:
query = self.get_query(metric, comp, instance)
max_query = "MAX(%s)" % query
future = get_metrics(cluster, environ, topology, timerange, max_query)
futures.append(future)
results = yield futures
data = self.compute_max(results)
result = self.get_metric_response(timerange, data, True)
raise tornado.gen.Return(result) |
def get_page_name(id):
"""Return name of a page based on passed page id.
Parameters:
- id: id of a Confluence page.
"""
data = _json.loads(_api.rest("/" + str(id) + "?expand=body.storage"))
return data["title"] | Return name of a page based on passed page id.
Parameters:
- id: id of a Confluence page. | Below is the the instruction that describes the task:
### Input:
Return name of a page based on passed page id.
Parameters:
- id: id of a Confluence page.
### Response:
def get_page_name(id):
"""Return name of a page based on passed page id.
Parameters:
- id: id of a Confluence page.
"""
data = _json.loads(_api.rest("/" + str(id) + "?expand=body.storage"))
return data["title"] |
def mtf_bitransformer_tiny():
"""Small encoder-decoder model for testing."""
hparams = mtf_bitransformer_base()
hparams.batch_size = 2
hparams.mesh_shape = ""
hparams.d_model = 128
hparams.encoder_layers = ["self_att", "drd"] * 2
hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 2
hparams.num_heads = 4
hparams.d_ff = 512
return hparams | Small encoder-decoder model for testing. | Below is the the instruction that describes the task:
### Input:
Small encoder-decoder model for testing.
### Response:
def mtf_bitransformer_tiny():
"""Small encoder-decoder model for testing."""
hparams = mtf_bitransformer_base()
hparams.batch_size = 2
hparams.mesh_shape = ""
hparams.d_model = 128
hparams.encoder_layers = ["self_att", "drd"] * 2
hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 2
hparams.num_heads = 4
hparams.d_ff = 512
return hparams |
async def _registration_completed(self, message):
""" We're connected and registered. Receive proper nickname and emit fake NICK message. """
if not self.registered:
# Re-enable throttling.
self.registered = True
self.connection.throttle = True
target = message.params[0]
fakemsg = self._create_message('NICK', target, source=self.nickname)
await self.on_raw_nick(fakemsg) | We're connected and registered. Receive proper nickname and emit fake NICK message. | Below is the the instruction that describes the task:
### Input:
We're connected and registered. Receive proper nickname and emit fake NICK message.
### Response:
async def _registration_completed(self, message):
""" We're connected and registered. Receive proper nickname and emit fake NICK message. """
if not self.registered:
# Re-enable throttling.
self.registered = True
self.connection.throttle = True
target = message.params[0]
fakemsg = self._create_message('NICK', target, source=self.nickname)
await self.on_raw_nick(fakemsg) |
def get_flag_variables(ds):
'''
Returns a list of variables that are defined as flag variables
:param netCDF4.Dataset ds: An open netCDF4 Dataset
'''
flag_variables = []
for name, ncvar in ds.variables.items():
standard_name = getattr(ncvar, 'standard_name', None)
if isinstance(standard_name, basestring) and 'status_flag' in standard_name:
flag_variables.append(name)
elif hasattr(ncvar, 'flag_meanings'):
flag_variables.append(name)
return flag_variables | Returns a list of variables that are defined as flag variables
:param netCDF4.Dataset ds: An open netCDF4 Dataset | Below is the the instruction that describes the task:
### Input:
Returns a list of variables that are defined as flag variables
:param netCDF4.Dataset ds: An open netCDF4 Dataset
### Response:
def get_flag_variables(ds):
'''
Returns a list of variables that are defined as flag variables
:param netCDF4.Dataset ds: An open netCDF4 Dataset
'''
flag_variables = []
for name, ncvar in ds.variables.items():
standard_name = getattr(ncvar, 'standard_name', None)
if isinstance(standard_name, basestring) and 'status_flag' in standard_name:
flag_variables.append(name)
elif hasattr(ncvar, 'flag_meanings'):
flag_variables.append(name)
return flag_variables |
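A hedged usage sketch; 'sample.nc' is a placeholder path for any CF-style netCDF file, not a file from the source.
from netCDF4 import Dataset
with Dataset('sample.nc', 'r') as ds:
    flags = get_flag_variables(ds)
    print("flag variables:", flags)   # names whose standard_name or flag_meanings mark them as flags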
def clean_file(c_source, virtualenv_dirname):
"""Strip trailing whitespace and clean up "local" names in C source.
These source files are autogenerated from the ``cython`` CLI.
Args:
c_source (str): Path to a ``.c`` source file.
virtualenv_dirname (str): The name of the ``virtualenv``
directory where Cython is installed (this is part of a
relative path ``.nox/{NAME}/lib/...``).
"""
with open(c_source, "r") as file_obj:
contents = file_obj.read().rstrip()
# Replace the path to the Cython include files.
py_version = "python{}.{}".format(*sys.version_info[:2])
lib_path = os.path.join(
".nox", virtualenv_dirname, "lib", py_version, "site-packages", ""
)
contents = contents.replace(lib_path, "")
# Write the files back, but strip all trailing whitespace.
lines = contents.split("\n")
with open(c_source, "w") as file_obj:
for line in lines:
file_obj.write(line.rstrip() + "\n") | Strip trailing whitespace and clean up "local" names in C source.
These source files are autogenerated from the ``cython`` CLI.
Args:
c_source (str): Path to a ``.c`` source file.
virtualenv_dirname (str): The name of the ``virtualenv``
directory where Cython is installed (this is part of a
relative path ``.nox/{NAME}/lib/...``). | Below is the the instruction that describes the task:
### Input:
Strip trailing whitespace and clean up "local" names in C source.
These source files are autogenerated from the ``cython`` CLI.
Args:
c_source (str): Path to a ``.c`` source file.
virtualenv_dirname (str): The name of the ``virtualenv``
directory where Cython is installed (this is part of a
relative path ``.nox/{NAME}/lib/...``).
### Response:
def clean_file(c_source, virtualenv_dirname):
"""Strip trailing whitespace and clean up "local" names in C source.
These source files are autogenerated from the ``cython`` CLI.
Args:
c_source (str): Path to a ``.c`` source file.
virtualenv_dirname (str): The name of the ``virtualenv``
directory where Cython is installed (this is part of a
relative path ``.nox/{NAME}/lib/...``).
"""
with open(c_source, "r") as file_obj:
contents = file_obj.read().rstrip()
# Replace the path to the Cython include files.
py_version = "python{}.{}".format(*sys.version_info[:2])
lib_path = os.path.join(
".nox", virtualenv_dirname, "lib", py_version, "site-packages", ""
)
contents = contents.replace(lib_path, "")
# Write the files back, but strip all trailing whitespace.
lines = contents.split("\n")
with open(c_source, "w") as file_obj:
for line in lines:
file_obj.write(line.rstrip() + "\n") |
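A hedged call sketch; the generated .c path and the nox virtualenv directory name are placeholders to adapt.
import os
clean_file(os.path.join("src", "wrapper.c"), "cython-build")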
def __set_buffer_watch(self, pid, address, size, action, bOneShot):
"""
Used by L{watch_buffer} and L{stalk_buffer}.
@type pid: int
@param pid: Process global ID.
@type address: int
@param address: Memory address of buffer to watch.
@type size: int
@param size: Size in bytes of buffer to watch.
@type action: function
@param action: (Optional) Action callback function.
See L{define_page_breakpoint} for more details.
@type bOneShot: bool
@param bOneShot:
C{True} to set a one-shot breakpoint,
C{False} to set a normal breakpoint.
"""
# Check the size isn't zero or negative.
if size < 1:
raise ValueError("Bad size for buffer watch: %r" % size)
# Create the buffer watch identifier.
bw = BufferWatch(pid, address, address + size, action, bOneShot)
# Get the base address and size in pages required for this buffer.
base = MemoryAddresses.align_address_to_page_start(address)
limit = MemoryAddresses.align_address_to_page_end(address + size)
pages = MemoryAddresses.get_buffer_size_in_pages(address, size)
try:
# For each page:
# + if a page breakpoint exists reuse it
# + if it doesn't exist define it
bset = set() # all breakpoints used
nset = set() # newly defined breakpoints
cset = set() # condition objects
page_addr = base
pageSize = MemoryAddresses.pageSize
while page_addr < limit:
# If a breakpoints exists, reuse it.
if self.has_page_breakpoint(pid, page_addr):
bp = self.get_page_breakpoint(pid, page_addr)
if bp not in bset:
condition = bp.get_condition()
if not condition in cset:
if not isinstance(condition,_BufferWatchCondition):
# this shouldn't happen unless you tinkered
# with it or defined your own page breakpoints
# manually.
msg = "Can't watch buffer at page %s"
msg = msg % HexDump.address(page_addr)
raise RuntimeError(msg)
cset.add(condition)
bset.add(bp)
# If it doesn't, define it.
else:
condition = _BufferWatchCondition()
bp = self.define_page_breakpoint(pid, page_addr, 1,
condition = condition)
bset.add(bp)
nset.add(bp)
cset.add(condition)
# Next page.
page_addr = page_addr + pageSize
# For each breakpoint, enable it if needed.
aProcess = self.system.get_process(pid)
for bp in bset:
if bp.is_disabled() or bp.is_one_shot():
bp.enable(aProcess, None)
# On error...
except:
# Erase the newly defined breakpoints.
for bp in nset:
try:
self.erase_page_breakpoint(pid, bp.get_address())
except:
pass
# Pass the exception to the caller
raise
# For each condition object, add the new buffer.
for condition in cset:
condition.add(bw) | Used by L{watch_buffer} and L{stalk_buffer}.
@type pid: int
@param pid: Process global ID.
@type address: int
@param address: Memory address of buffer to watch.
@type size: int
@param size: Size in bytes of buffer to watch.
@type action: function
@param action: (Optional) Action callback function.
See L{define_page_breakpoint} for more details.
@type bOneShot: bool
@param bOneShot:
C{True} to set a one-shot breakpoint,
C{False} to set a normal breakpoint. | Below is the the instruction that describes the task:
### Input:
Used by L{watch_buffer} and L{stalk_buffer}.
@type pid: int
@param pid: Process global ID.
@type address: int
@param address: Memory address of buffer to watch.
@type size: int
@param size: Size in bytes of buffer to watch.
@type action: function
@param action: (Optional) Action callback function.
See L{define_page_breakpoint} for more details.
@type bOneShot: bool
@param bOneShot:
C{True} to set a one-shot breakpoint,
C{False} to set a normal breakpoint.
### Response:
def __set_buffer_watch(self, pid, address, size, action, bOneShot):
"""
Used by L{watch_buffer} and L{stalk_buffer}.
@type pid: int
@param pid: Process global ID.
@type address: int
@param address: Memory address of buffer to watch.
@type size: int
@param size: Size in bytes of buffer to watch.
@type action: function
@param action: (Optional) Action callback function.
See L{define_page_breakpoint} for more details.
@type bOneShot: bool
@param bOneShot:
C{True} to set a one-shot breakpoint,
C{False} to set a normal breakpoint.
"""
# Check the size isn't zero or negative.
if size < 1:
raise ValueError("Bad size for buffer watch: %r" % size)
# Create the buffer watch identifier.
bw = BufferWatch(pid, address, address + size, action, bOneShot)
# Get the base address and size in pages required for this buffer.
base = MemoryAddresses.align_address_to_page_start(address)
limit = MemoryAddresses.align_address_to_page_end(address + size)
pages = MemoryAddresses.get_buffer_size_in_pages(address, size)
try:
# For each page:
# + if a page breakpoint exists reuse it
# + if it doesn't exist define it
bset = set() # all breakpoints used
nset = set() # newly defined breakpoints
cset = set() # condition objects
page_addr = base
pageSize = MemoryAddresses.pageSize
while page_addr < limit:
# If a breakpoints exists, reuse it.
if self.has_page_breakpoint(pid, page_addr):
bp = self.get_page_breakpoint(pid, page_addr)
if bp not in bset:
condition = bp.get_condition()
if not condition in cset:
if not isinstance(condition,_BufferWatchCondition):
# this shouldn't happen unless you tinkered
# with it or defined your own page breakpoints
# manually.
msg = "Can't watch buffer at page %s"
msg = msg % HexDump.address(page_addr)
raise RuntimeError(msg)
cset.add(condition)
bset.add(bp)
# If it doesn't, define it.
else:
condition = _BufferWatchCondition()
bp = self.define_page_breakpoint(pid, page_addr, 1,
condition = condition)
bset.add(bp)
nset.add(bp)
cset.add(condition)
# Next page.
page_addr = page_addr + pageSize
# For each breakpoint, enable it if needed.
aProcess = self.system.get_process(pid)
for bp in bset:
if bp.is_disabled() or bp.is_one_shot():
bp.enable(aProcess, None)
# On error...
except:
# Erase the newly defined breakpoints.
for bp in nset:
try:
self.erase_page_breakpoint(pid, bp.get_address())
except:
pass
# Pass the exception to the caller
raise
# For each condition object, add the new buffer.
for condition in cset:
condition.add(bw) |
def LL(n):
"""constructs the LL context"""
if (n<=0):return Context('0')
else:
LL1=LL(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
r2 = LL1 - LL1 - LL1
return r1 + r2 | constructs the LL context | Below is the the instruction that describes the task:
### Input:
constructs the LL context
### Response:
def LL(n):
"""constructs the LL context"""
if (n<=0):return Context('0')
else:
LL1=LL(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
r2 = LL1 - LL1 - LL1
return r1 + r2 |
def merge_wgts(em_sz, wgts, itos_pre, itos_new):
"""
:meth: `merge_wgts` insert pretrained weights and vocab into a new set of weights and vocab;
use average if vocab not in pretrained vocab
:param int em_sz: embedding size
:param wgts: torch model weights
:param list itos_pre: pretrained list of vocab
:param list itos_new: list of new vocab
:return: merged torch model weights
"""
vocab_size = len(itos_new)
enc_wgts = wgts["0.encoder.weight"].numpy()
# Average weight of encoding
row_m = enc_wgts.mean(0)
stoi_pre = collections.defaultdict(
lambda: -1, {v: k for k, v in enumerate(itos_pre)}
)
# New embedding based on classification dataset
new_w = np.zeros((vocab_size, em_sz), dtype=np.float32)
for i, w in enumerate(itos_new):
r = stoi_pre[w]
        # Use pretrained embedding if present; else use the average
new_w[i] = enc_wgts[r] if r >= 0 else row_m
wgts["0.encoder.weight"] = torch.tensor(new_w)
wgts["0.encoder_dp.emb.weight"] = torch.tensor(np.copy(new_w))
wgts["1.decoder.weight"] = torch.tensor(np.copy(new_w))
return wgts | :meth: `merge_wgts` insert pretrained weights and vocab into a new set of weights and vocab;
use average if vocab not in pretrained vocab
:param int em_sz: embedding size
:param wgts: torch model weights
:param list itos_pre: pretrained list of vocab
:param list itos_new: list of new vocab
:return: merged torch model weights | Below is the the instruction that describes the task:
### Input:
:meth: `merge_wgts` insert pretrained weights and vocab into a new set of weights and vocab;
use average if vocab not in pretrained vocab
:param int em_sz: embedding size
:param wgts: torch model weights
:param list itos_pre: pretrained list of vocab
:param list itos_new: list of new vocab
:return: merged torch model weights
### Response:
def merge_wgts(em_sz, wgts, itos_pre, itos_new):
"""
:meth: `merge_wgts` insert pretrained weights and vocab into a new set of weights and vocab;
use average if vocab not in pretrained vocab
:param int em_sz: embedding size
:param wgts: torch model weights
:param list itos_pre: pretrained list of vocab
:param list itos_new: list of new vocab
:return: merged torch model weights
"""
vocab_size = len(itos_new)
enc_wgts = wgts["0.encoder.weight"].numpy()
# Average weight of encoding
row_m = enc_wgts.mean(0)
stoi_pre = collections.defaultdict(
lambda: -1, {v: k for k, v in enumerate(itos_pre)}
)
# New embedding based on classification dataset
new_w = np.zeros((vocab_size, em_sz), dtype=np.float32)
for i, w in enumerate(itos_new):
r = stoi_pre[w]
        # Use pretrained embedding if present; else use the average
new_w[i] = enc_wgts[r] if r >= 0 else row_m
wgts["0.encoder.weight"] = torch.tensor(new_w)
wgts["0.encoder_dp.emb.weight"] = torch.tensor(np.copy(new_w))
wgts["1.decoder.weight"] = torch.tensor(np.copy(new_w))
return wgts |
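A tiny end-to-end sketch: a 2-token pretrained vocab merged into a 3-token target vocab, with the unseen token receiving the mean embedding; only the weight key the function actually reads is populated here, and the function's own module-level imports (numpy, collections, torch) are assumed to be in place.
import torch
em_sz = 4
itos_pre = ['the', 'cat']                    # pretrained vocab
itos_new = ['the', 'dog', 'cat']             # target vocab ('dog' is unseen)
wgts = {"0.encoder.weight": torch.randn(len(itos_pre), em_sz)}
merged = merge_wgts(em_sz, wgts, itos_pre, itos_new)
print(merged["0.encoder.weight"].shape)      # torch.Size([3, 4]); the 'dog' row is the column-wise mean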
def _resetWidgets(self):
"""Resets all widgets of this dialog to its inital state.
"""
self._filenameLineEdit.setText('')
self._encodingComboBox.setCurrentIndex(0)
self._delimiterBox.reset()
self._headerCheckBox.setChecked(False)
self._statusBar.showMessage('')
self._previewTableView.setModel(None)
        self._datatypeTableView.setModel(None) | Resets all widgets of this dialog to its initial state. | Below is the the instruction that describes the task:
### Input:
Resets all widgets of this dialog to its initial state.
### Response:
def _resetWidgets(self):
"""Resets all widgets of this dialog to its inital state.
"""
self._filenameLineEdit.setText('')
self._encodingComboBox.setCurrentIndex(0)
self._delimiterBox.reset()
self._headerCheckBox.setChecked(False)
self._statusBar.showMessage('')
self._previewTableView.setModel(None)
self._datatypeTableView.setModel(None) |
def register(self, bug: Bug) -> None:
"""
Dynamically registers a given bug with the server. Note that the
registration will not persist beyond the lifetime of the server.
(I.e., when the server is closed, the bug will be deregistered.)
Raises:
BugAlreadyExists: if there is already a bug registered on the
server under the same name as this bug.
"""
path = "bugs/{}".format(bug.name)
payload = bug.to_dict()
r = self.__api.put(path, json=payload)
if r.status_code != 204:
self.__api.handle_erroneous_response(r) | Dynamically registers a given bug with the server. Note that the
registration will not persist beyond the lifetime of the server.
(I.e., when the server is closed, the bug will be deregistered.)
Raises:
BugAlreadyExists: if there is already a bug registered on the
server under the same name as this bug. | Below is the the instruction that describes the task:
### Input:
Dynamically registers a given bug with the server. Note that the
registration will not persist beyond the lifetime of the server.
(I.e., when the server is closed, the bug will be deregistered.)
Raises:
BugAlreadyExists: if there is already a bug registered on the
server under the same name as this bug.
### Response:
def register(self, bug: Bug) -> None:
"""
Dynamically registers a given bug with the server. Note that the
registration will not persist beyond the lifetime of the server.
(I.e., when the server is closed, the bug will be deregistered.)
Raises:
BugAlreadyExists: if there is already a bug registered on the
server under the same name as this bug.
"""
path = "bugs/{}".format(bug.name)
payload = bug.to_dict()
r = self.__api.put(path, json=payload)
if r.status_code != 204:
self.__api.handle_erroneous_response(r) |
def compose_telegram(body):
""" Compose a SCS message
body: list containing the body of the message.
returns: full telegram expressed (bytes instance)
"""
msg = [b"A8"] + body + [checksum_bytes(body)] + [b"A3"]
return str.encode("".join([x.decode() for x in msg])) | Compose a SCS message
body: list containing the body of the message.
returns: full telegram expressed (bytes instance) | Below is the the instruction that describes the task:
### Input:
Compose a SCS message
body: list containing the body of the message.
returns: full telegram expressed (bytes instance)
### Response:
def compose_telegram(body):
""" Compose a SCS message
body: list containing the body of the message.
returns: full telegram expressed (bytes instance)
"""
msg = [b"A8"] + body + [checksum_bytes(body)] + [b"A3"]
return str.encode("".join([x.decode() for x in msg])) |
def dump_json_file(json_data, pwd_dir_path, dump_file_name):
""" dump json data to file
"""
class PythonObjectEncoder(json.JSONEncoder):
def default(self, obj):
try:
return super().default(self, obj)
except TypeError:
return str(obj)
logs_dir_path = os.path.join(pwd_dir_path, "logs")
if not os.path.isdir(logs_dir_path):
os.makedirs(logs_dir_path)
dump_file_path = os.path.join(logs_dir_path, dump_file_name)
try:
with io.open(dump_file_path, 'w', encoding='utf-8') as outfile:
if is_py2:
outfile.write(
unicode(json.dumps(
json_data,
indent=4,
separators=(',', ':'),
ensure_ascii=False,
cls=PythonObjectEncoder
))
)
else:
json.dump(
json_data,
outfile,
indent=4,
separators=(',', ':'),
ensure_ascii=False,
cls=PythonObjectEncoder
)
msg = "dump file: {}".format(dump_file_path)
logger.color_print(msg, "BLUE")
except TypeError as ex:
msg = "Failed to dump json file: {}\nReason: {}".format(dump_file_path, ex)
logger.color_print(msg, "RED") | dump json data to file | Below is the the instruction that describes the task:
### Input:
dump json data to file
### Response:
def dump_json_file(json_data, pwd_dir_path, dump_file_name):
""" dump json data to file
"""
class PythonObjectEncoder(json.JSONEncoder):
def default(self, obj):
try:
return super().default(self, obj)
except TypeError:
return str(obj)
logs_dir_path = os.path.join(pwd_dir_path, "logs")
if not os.path.isdir(logs_dir_path):
os.makedirs(logs_dir_path)
dump_file_path = os.path.join(logs_dir_path, dump_file_name)
try:
with io.open(dump_file_path, 'w', encoding='utf-8') as outfile:
if is_py2:
outfile.write(
unicode(json.dumps(
json_data,
indent=4,
separators=(',', ':'),
ensure_ascii=False,
cls=PythonObjectEncoder
))
)
else:
json.dump(
json_data,
outfile,
indent=4,
separators=(',', ':'),
ensure_ascii=False,
cls=PythonObjectEncoder
)
msg = "dump file: {}".format(dump_file_path)
logger.color_print(msg, "BLUE")
except TypeError as ex:
msg = "Failed to dump json file: {}\nReason: {}".format(dump_file_path, ex)
logger.color_print(msg, "RED") |
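A hedged call sketch; it writes logs/demo_summary.json under the current directory and assumes the module-level logger and is_py2 used by the function are available.
import os
dump_json_file({"status": "ok", "count": 3}, os.getcwd(), "demo_summary.json")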
def search_datasets(self):
"""
Returns an iterator over the Datasets on the server.
:return: An iterator over the :class:`ga4gh.protocol.Dataset`
objects on the server.
"""
request = protocol.SearchDatasetsRequest()
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "datasets", protocol.SearchDatasetsResponse) | Returns an iterator over the Datasets on the server.
:return: An iterator over the :class:`ga4gh.protocol.Dataset`
objects on the server. | Below is the the instruction that describes the task:
### Input:
Returns an iterator over the Datasets on the server.
:return: An iterator over the :class:`ga4gh.protocol.Dataset`
objects on the server.
### Response:
def search_datasets(self):
"""
Returns an iterator over the Datasets on the server.
:return: An iterator over the :class:`ga4gh.protocol.Dataset`
objects on the server.
"""
request = protocol.SearchDatasetsRequest()
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "datasets", protocol.SearchDatasetsResponse) |
def ensure_specifier_exists(db_spec):
"""Make sure a DB specifier exists, creating it if necessary."""
local_match = LOCAL_RE.match(db_spec)
remote_match = REMOTE_RE.match(db_spec)
plain_match = PLAIN_RE.match(db_spec)
if local_match:
db_name = local_match.groupdict().get('database')
server = shortcuts.get_server()
if db_name not in server:
server.create(db_name)
return True
elif remote_match:
hostname, portnum, database = map(remote_match.groupdict().get,
('hostname', 'portnum', 'database'))
server = shortcuts.get_server(
server_url=('http://%s:%s' % (hostname, portnum)))
if database not in server:
server.create(database)
return True
elif plain_match:
db_name = plain_match.groupdict().get('database')
server = shortcuts.get_server()
if db_name not in server:
server.create(db_name)
return True
return False | Make sure a DB specifier exists, creating it if necessary. | Below is the the instruction that describes the task:
### Input:
Make sure a DB specifier exists, creating it if necessary.
### Response:
def ensure_specifier_exists(db_spec):
"""Make sure a DB specifier exists, creating it if necessary."""
local_match = LOCAL_RE.match(db_spec)
remote_match = REMOTE_RE.match(db_spec)
plain_match = PLAIN_RE.match(db_spec)
if local_match:
db_name = local_match.groupdict().get('database')
server = shortcuts.get_server()
if db_name not in server:
server.create(db_name)
return True
elif remote_match:
hostname, portnum, database = map(remote_match.groupdict().get,
('hostname', 'portnum', 'database'))
server = shortcuts.get_server(
server_url=('http://%s:%s' % (hostname, portnum)))
if database not in server:
server.create(database)
return True
elif plain_match:
db_name = plain_match.groupdict().get('database')
server = shortcuts.get_server()
if db_name not in server:
server.create(db_name)
return True
return False |
def RecursiveMultiListChildren(self, urns, limit=None, age=NEWEST_TIME):
"""Recursively lists bunch of directories.
Args:
urns: List of urns to list children.
limit: Max number of children to list (NOTE: this is per urn).
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Yields:
(subject<->children urns) tuples. RecursiveMultiListChildren will fetch
children lists for initial set of urns and then will fetch children's
children, etc.
For example, for the following objects structure:
a->
b -> c
-> d
RecursiveMultiListChildren(['a']) will return:
[('a', ['b']), ('b', ['c', 'd'])]
"""
checked_urns = set()
urns_to_check = urns
while True:
found_children = []
for subject, values in self.MultiListChildren(
urns_to_check, limit=limit, age=age):
found_children.extend(values)
yield subject, values
checked_urns.update(urns_to_check)
urns_to_check = set(found_children) - checked_urns
if not urns_to_check:
        break | Recursively lists a bunch of directories.
Args:
urns: List of urns to list children.
limit: Max number of children to list (NOTE: this is per urn).
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Yields:
(subject<->children urns) tuples. RecursiveMultiListChildren will fetch
children lists for initial set of urns and then will fetch children's
children, etc.
For example, for the following objects structure:
a->
b -> c
-> d
RecursiveMultiListChildren(['a']) will return:
[('a', ['b']), ('b', ['c', 'd'])] | Below is the the instruction that describes the task:
### Input:
Recursively lists a bunch of directories.
Args:
urns: List of urns to list children.
limit: Max number of children to list (NOTE: this is per urn).
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Yields:
(subject<->children urns) tuples. RecursiveMultiListChildren will fetch
children lists for initial set of urns and then will fetch children's
children, etc.
For example, for the following objects structure:
a->
b -> c
-> d
RecursiveMultiListChildren(['a']) will return:
[('a', ['b']), ('b', ['c', 'd'])]
### Response:
def RecursiveMultiListChildren(self, urns, limit=None, age=NEWEST_TIME):
"""Recursively lists bunch of directories.
Args:
urns: List of urns to list children.
limit: Max number of children to list (NOTE: this is per urn).
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Yields:
(subject<->children urns) tuples. RecursiveMultiListChildren will fetch
children lists for initial set of urns and then will fetch children's
children, etc.
For example, for the following objects structure:
a->
b -> c
-> d
RecursiveMultiListChildren(['a']) will return:
[('a', ['b']), ('b', ['c', 'd'])]
"""
checked_urns = set()
urns_to_check = urns
while True:
found_children = []
for subject, values in self.MultiListChildren(
urns_to_check, limit=limit, age=age):
found_children.extend(values)
yield subject, values
checked_urns.update(urns_to_check)
urns_to_check = set(found_children) - checked_urns
if not urns_to_check:
break |
def gather_positions(tree):
"""Makes a list of positions and position commands from the tree"""
pos = {'data-x': 'r0',
'data-y': 'r0',
'data-z': 'r0',
'data-rotate-x': 'r0',
'data-rotate-y': 'r0',
'data-rotate-z': 'r0',
'data-scale': 'r0',
'is_path': False
}
steps = 0
default_movement = True
for step in tree.findall('step'):
steps += 1
for key in POSITION_ATTRIBS:
value = step.get(key)
if value is not None:
# We have a new value
default_movement = False # No longer use the default movement
pos[key] = value
elif pos[key] and not pos[key].startswith('r'):
# The old value was absolute and no new value, so stop
pos[key] = 'r0'
# We had no new value, and the old value was a relative
# movement, so we just keep moving.
if steps == 1 and pos['data-scale'] == 'r0':
# No scale given for first slide, it needs to start at 1
pos['data-scale'] = '1'
if default_movement and steps != 1:
# No positioning has been given, use default:
pos['data-x'] = 'r%s' % DEFAULT_MOVEMENT
if 'data-rotate' in step.attrib:
# data-rotate is an alias for data-rotate-z
pos['data-rotate-z'] = step.get('data-rotate')
del step.attrib['data-rotate']
if 'hovercraft-path' in step.attrib:
# Path given x and y will be calculated from the path
default_movement = False # No longer use the default movement
pos['is_path'] = True
# Add the path spec
pos['path'] = step.attrib['hovercraft-path']
yield pos.copy()
# And get rid of it for the next step
del pos['path']
else:
if 'data-x' in step.attrib or 'data-y' in step.attrib:
# No longer using a path
pos['is_path'] = False
yield pos.copy() | Makes a list of positions and position commands from the tree | Below is the the instruction that describes the task:
### Input:
Makes a list of positions and position commands from the tree
### Response:
def gather_positions(tree):
"""Makes a list of positions and position commands from the tree"""
pos = {'data-x': 'r0',
'data-y': 'r0',
'data-z': 'r0',
'data-rotate-x': 'r0',
'data-rotate-y': 'r0',
'data-rotate-z': 'r0',
'data-scale': 'r0',
'is_path': False
}
steps = 0
default_movement = True
for step in tree.findall('step'):
steps += 1
for key in POSITION_ATTRIBS:
value = step.get(key)
if value is not None:
# We have a new value
default_movement = False # No longer use the default movement
pos[key] = value
elif pos[key] and not pos[key].startswith('r'):
# The old value was absolute and no new value, so stop
pos[key] = 'r0'
# We had no new value, and the old value was a relative
# movement, so we just keep moving.
if steps == 1 and pos['data-scale'] == 'r0':
# No scale given for first slide, it needs to start at 1
pos['data-scale'] = '1'
if default_movement and steps != 1:
# No positioning has been given, use default:
pos['data-x'] = 'r%s' % DEFAULT_MOVEMENT
if 'data-rotate' in step.attrib:
# data-rotate is an alias for data-rotate-z
pos['data-rotate-z'] = step.get('data-rotate')
del step.attrib['data-rotate']
if 'hovercraft-path' in step.attrib:
# Path given x and y will be calculated from the path
default_movement = False # No longer use the default movement
pos['is_path'] = True
# Add the path spec
pos['path'] = step.attrib['hovercraft-path']
yield pos.copy()
# And get rid of it for the next step
del pos['path']
else:
if 'data-x' in step.attrib or 'data-y' in step.attrib:
# No longer using a path
pos['is_path'] = False
yield pos.copy() |
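A small driving sketch for the generator above (assumes hovercraft's module-level POSITION_ATTRIBS and DEFAULT_MOVEMENT constants are in scope; the XML snippet is invented):
    from lxml import etree
    tree = etree.fromstring(
        '<document><step data-x="0" data-y="0"/><step data-scale="3"/><step/></document>')
    for pos in gather_positions(tree):
        print(pos['data-x'], pos['data-y'], pos['data-scale'], pos['is_path'])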
def findclass(self, name):
"""Find a vgroup given its class name, returning its reference
number if found.
Args::
name class name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind
"""
refnum = _C.Vfindclass(self._hdf_inst._id, name)
if not refnum:
raise HDF4Error("vgroup not found")
return refnum | Find a vgroup given its class name, returning its reference
number if found.
Args::
name class name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind | Below is the the instruction that describes the task:
### Input:
Find a vgroup given its class name, returning its reference
number if found.
Args::
name class name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind
### Response:
def findclass(self, name):
"""Find a vgroup given its class name, returning its reference
number if found.
Args::
name class name of the vgroup to find
Returns::
vgroup reference number
An exception is raised if the vgroup is not found.
C library equivalent: Vfind
"""
refnum = _C.Vfindclass(self._hdf_inst._id, name)
if not refnum:
raise HDF4Error("vgroup not found")
return refnum |
def select_by_index(self, index):
"""Select the option at the given index. This is done by examing the "index" attribute of an
element, and not merely by counting.
:Args:
- index - The option at this index will be selected
throws NoSuchElementException If there is no option with specified index in SELECT
"""
match = str(index)
for opt in self.options:
if opt.get_attribute("index") == match:
self._setSelected(opt)
return
        raise NoSuchElementException("Could not locate element with index %d" % index) | Select the option at the given index. This is done by examining the "index" attribute of an
element, and not merely by counting.
:Args:
- index - The option at this index will be selected
throws NoSuchElementException If there is no option with specified index in SELECT | Below is the the instruction that describes the task:
### Input:
Select the option at the given index. This is done by examining the "index" attribute of an
element, and not merely by counting.
:Args:
- index - The option at this index will be selected
throws NoSuchElementException If there is no option with specified index in SELECT
### Response:
def select_by_index(self, index):
"""Select the option at the given index. This is done by examing the "index" attribute of an
element, and not merely by counting.
:Args:
- index - The option at this index will be selected
throws NoSuchElementException If there is no option with specified index in SELECT
"""
match = str(index)
for opt in self.options:
if opt.get_attribute("index") == match:
self._setSelected(opt)
return
raise NoSuchElementException("Could not locate element with index %d" % index) |
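For context, a typical Selenium call site looks roughly like this (the page URL and element id are invented):
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import Select
    driver = webdriver.Firefox()
    driver.get("https://example.com/signup")                 # hypothetical form page
    colour = Select(driver.find_element(By.ID, "colour"))    # element id is an assumption
    colour.select_by_index(2)    # picks the <option> whose index attribute is "2"
    driver.quit()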
def path_is_empty(p: tcod.path.AStar) -> bool:
"""Return True if a path is empty.
Args:
p (AStar): An AStar instance.
Returns:
bool: True if a path is empty. Otherwise False.
"""
return bool(lib.TCOD_path_is_empty(p._path_c)) | Return True if a path is empty.
Args:
p (AStar): An AStar instance.
Returns:
bool: True if a path is empty. Otherwise False. | Below is the the instruction that describes the task:
### Input:
Return True if a path is empty.
Args:
p (AStar): An AStar instance.
Returns:
bool: True if a path is empty. Otherwise False.
### Response:
def path_is_empty(p: tcod.path.AStar) -> bool:
"""Return True if a path is empty.
Args:
p (AStar): An AStar instance.
Returns:
bool: True if a path is empty. Otherwise False.
"""
return bool(lib.TCOD_path_is_empty(p._path_c)) |
def dt_weekofyear(x):
"""Returns the week ordinal of the year.
:returns: an expression containing the week ordinal of the year, extracted from a datetime column.
Example:
>>> import vaex
>>> import numpy as np
>>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)
>>> df = vaex.from_arrays(date=date)
>>> df
# date
0 2009-10-12 03:31:00
1 2016-02-11 10:17:34
2 2015-11-12 11:34:22
>>> df.date.dt.weekofyear
Expression = dt_weekofyear(date)
Length: 3 dtype: int64 (expression)
-----------------------------------
0 42
1 6
2 46
"""
import pandas as pd
return pd.Series(x).dt.weekofyear.values | Returns the week ordinal of the year.
:returns: an expression containing the week ordinal of the year, extracted from a datetime column.
Example:
>>> import vaex
>>> import numpy as np
>>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)
>>> df = vaex.from_arrays(date=date)
>>> df
# date
0 2009-10-12 03:31:00
1 2016-02-11 10:17:34
2 2015-11-12 11:34:22
>>> df.date.dt.weekofyear
Expression = dt_weekofyear(date)
Length: 3 dtype: int64 (expression)
-----------------------------------
0 42
1 6
2 46 | Below is the the instruction that describes the task:
### Input:
Returns the week ordinal of the year.
:returns: an expression containing the week ordinal of the year, extracted from a datetime column.
Example:
>>> import vaex
>>> import numpy as np
>>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)
>>> df = vaex.from_arrays(date=date)
>>> df
# date
0 2009-10-12 03:31:00
1 2016-02-11 10:17:34
2 2015-11-12 11:34:22
>>> df.date.dt.weekofyear
Expression = dt_weekofyear(date)
Length: 3 dtype: int64 (expression)
-----------------------------------
0 42
1 6
2 46
### Response:
def dt_weekofyear(x):
"""Returns the week ordinal of the year.
:returns: an expression containing the week ordinal of the year, extracted from a datetime column.
Example:
>>> import vaex
>>> import numpy as np
>>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)
>>> df = vaex.from_arrays(date=date)
>>> df
# date
0 2009-10-12 03:31:00
1 2016-02-11 10:17:34
2 2015-11-12 11:34:22
>>> df.date.dt.weekofyear
Expression = dt_weekofyear(date)
Length: 3 dtype: int64 (expression)
-----------------------------------
0 42
1 6
2 46
"""
import pandas as pd
return pd.Series(x).dt.weekofyear.values |
def tuple_arg(fn):
"""
    Turns fun(1,2) into fun((1,), (2,))
f(1,2,3) => f((1,), (2,), (3,))
:param fn:
:return:
"""
@wraps(fn)
def wrapped(*args, **kwargs):
args = map(tuplefy, args)
return fn(*args, **kwargs)
    return wrapped | Turns fun(1,2) into fun((1,), (2,))
f(1,2,3) => f((1,), (2,), (3,))
:param fn:
:return: | Below is the the instruction that describes the task:
### Input:
Turns fun(1,2) into fun((1,), (2,))
f(1,2,3) => f((1,), (2,), (3,))
:param fn:
:return:
### Response:
def tuple_arg(fn):
"""
    Turns fun(1,2) into fun((1,), (2,))
f(1,2,3) => f((1,), (2,), (3,))
:param fn:
:return:
"""
@wraps(fn)
def wrapped(*args, **kwargs):
args = map(tuplefy, args)
return fn(*args, **kwargs)
return wrapped |
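Usage sketch for the decorator (assumes tuple_arg and its tuplefy helper are importable from the original module):
    @tuple_arg
    def echo(*args):
        return args
    print(echo(1, 2, 3))    # -> ((1,), (2,), (3,))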
def decode_abi(self, types: Iterable[TypeStr], data: Decodable) -> Tuple[Any, ...]:
"""
Decodes the binary value ``data`` as a sequence of values of the ABI types
in ``types`` via the head-tail mechanism into a tuple of equivalent python
values.
:param types: An iterable of string representations of the ABI types that
will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
:param data: The binary value to be decoded.
:returns: A tuple of equivalent python values for the ABI values
represented in ``data``.
"""
if not is_bytes(data):
raise TypeError("The `data` value must be of bytes type. Got {0}".format(type(data)))
decoders = [
self._registry.get_decoder(type_str)
for type_str in types
]
decoder = TupleDecoder(decoders=decoders)
stream = ContextFramesBytesIO(data)
return decoder(stream) | Decodes the binary value ``data`` as a sequence of values of the ABI types
in ``types`` via the head-tail mechanism into a tuple of equivalent python
values.
:param types: An iterable of string representations of the ABI types that
will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
:param data: The binary value to be decoded.
:returns: A tuple of equivalent python values for the ABI values
represented in ``data``. | Below is the the instruction that describes the task:
### Input:
Decodes the binary value ``data`` as a sequence of values of the ABI types
in ``types`` via the head-tail mechanism into a tuple of equivalent python
values.
:param types: An iterable of string representations of the ABI types that
will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
:param data: The binary value to be decoded.
:returns: A tuple of equivalent python values for the ABI values
represented in ``data``.
### Response:
def decode_abi(self, types: Iterable[TypeStr], data: Decodable) -> Tuple[Any, ...]:
"""
Decodes the binary value ``data`` as a sequence of values of the ABI types
in ``types`` via the head-tail mechanism into a tuple of equivalent python
values.
:param types: An iterable of string representations of the ABI types that
will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
:param data: The binary value to be decoded.
:returns: A tuple of equivalent python values for the ABI values
represented in ``data``.
"""
if not is_bytes(data):
raise TypeError("The `data` value must be of bytes type. Got {0}".format(type(data)))
decoders = [
self._registry.get_decoder(type_str)
for type_str in types
]
decoder = TupleDecoder(decoders=decoders)
stream = ContextFramesBytesIO(data)
return decoder(stream) |
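For orientation, older eth-abi releases expose an equivalent module-level helper; a minimal sketch (the payload below encodes a uint256 of 1 followed by a bool False):
    from eth_abi import decode_abi    # module-level twin of the method above (eth-abi < 4)
    data = bytes.fromhex(
        "0000000000000000000000000000000000000000000000000000000000000001"
        "0000000000000000000000000000000000000000000000000000000000000000")
    print(decode_abi(['uint256', 'bool'], data))    # (1, False)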
def get_query_tokens(query):
"""
:type query str
:rtype: list[sqlparse.sql.Token]
"""
query = preprocess_query(query)
parsed = sqlparse.parse(query)
# handle empty queries (#12)
if not parsed:
return []
tokens = TokenList(parsed[0].tokens).flatten()
# print([(token.value, token.ttype) for token in tokens])
return [token for token in tokens if token.ttype is not Whitespace] | :type query str
:rtype: list[sqlparse.sql.Token] | Below is the the instruction that describes the task:
### Input:
:type query str
:rtype: list[sqlparse.sql.Token]
### Response:
def get_query_tokens(query):
"""
:type query str
:rtype: list[sqlparse.sql.Token]
"""
query = preprocess_query(query)
parsed = sqlparse.parse(query)
# handle empty queries (#12)
if not parsed:
return []
tokens = TokenList(parsed[0].tokens).flatten()
# print([(token.value, token.ttype) for token in tokens])
return [token for token in tokens if token.ttype is not Whitespace] |
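A quick call sketch (assumes sqlparse is installed and the module's preprocess_query helper is available):
    tokens = get_query_tokens("SELECT id, name FROM users WHERE id = 1")
    print([t.value for t in tokens])
    # roughly: ['SELECT', 'id', ',', 'name', 'FROM', 'users', 'WHERE', 'id', '=', '1']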
def cmdloop(self, intro=None):
''' Override the command loop to handle Ctrl-C. '''
self.preloop()
# Set up completion with readline.
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey + ': complete')
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
if sys.version_info[0] == 2:
line = raw_input(self.prompt)
else:
line = input(self.prompt)
except EOFError:
line = 'EOF'
except KeyboardInterrupt:
line = 'ctrlc'
else:
self.stdout.write(self.prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line.rstrip('\r\n')
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass | Override the command loop to handle Ctrl-C. | Below is the the instruction that describes the task:
### Input:
Override the command loop to handle Ctrl-C.
### Response:
def cmdloop(self, intro=None):
''' Override the command loop to handle Ctrl-C. '''
self.preloop()
# Set up completion with readline.
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey + ': complete')
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
if sys.version_info[0] == 2:
line = raw_input(self.prompt)
else:
line = input(self.prompt)
except EOFError:
line = 'EOF'
except KeyboardInterrupt:
line = 'ctrlc'
else:
self.stdout.write(self.prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line.rstrip('\r\n')
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass |
def _logger_api(self):
"""Add API logging handler."""
from .tcex_logger import TcExLogHandler, TcExLogFormatter
api = TcExLogHandler(self.session)
api.set_name('api')
api.setLevel(logging.DEBUG)
api.setFormatter(TcExLogFormatter())
self.log.addHandler(api) | Add API logging handler. | Below is the the instruction that describes the task:
### Input:
Add API logging handler.
### Response:
def _logger_api(self):
"""Add API logging handler."""
from .tcex_logger import TcExLogHandler, TcExLogFormatter
api = TcExLogHandler(self.session)
api.set_name('api')
api.setLevel(logging.DEBUG)
api.setFormatter(TcExLogFormatter())
self.log.addHandler(api) |
def desc(self):
"""Get a short description of the device."""
return '{0} (ID: {1}) - {2} - {3}'.format(
self.name, self.device_id, self.type, self.status) | Get a short description of the device. | Below is the the instruction that describes the task:
### Input:
Get a short description of the device.
### Response:
def desc(self):
"""Get a short description of the device."""
return '{0} (ID: {1}) - {2} - {3}'.format(
self.name, self.device_id, self.type, self.status) |
def retry_connect(self):
"""Will be called when new channels in the token network are detected.
If the minimum number of channels was not yet established, it will try
to open new channels.
If the connection manager has no funds, this is a noop.
"""
with self.lock:
if self._funds_remaining > 0 and not self._leaving_state:
self._open_channels() | Will be called when new channels in the token network are detected.
If the minimum number of channels was not yet established, it will try
to open new channels.
If the connection manager has no funds, this is a noop. | Below is the the instruction that describes the task:
### Input:
Will be called when new channels in the token network are detected.
If the minimum number of channels was not yet established, it will try
to open new channels.
If the connection manager has no funds, this is a noop.
### Response:
def retry_connect(self):
"""Will be called when new channels in the token network are detected.
If the minimum number of channels was not yet established, it will try
to open new channels.
If the connection manager has no funds, this is a noop.
"""
with self.lock:
if self._funds_remaining > 0 and not self._leaving_state:
self._open_channels() |
def spacing_file(path):
"""
Perform paranoid text spacing from file.
"""
# TODO: read line by line
with open(os.path.abspath(path)) as f:
return spacing_text(f.read()) | Perform paranoid text spacing from file. | Below is the the instruction that describes the task:
### Input:
Perform paranoid text spacing from file.
### Response:
def spacing_file(path):
"""
Perform paranoid text spacing from file.
"""
# TODO: read line by line
with open(os.path.abspath(path)) as f:
return spacing_text(f.read()) |
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
PkgResourcesDeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve() | Require packages for this EntryPoint, then resolve it. | Below is the the instruction that describes the task:
### Input:
Require packages for this EntryPoint, then resolve it.
### Response:
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
PkgResourcesDeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve() |
def main():
"""Main script function"""
# Create simulation object, and start streaming SPEAD heaps
sender = PulsarSender()
# Parse command line arguments
args = parse_command_line()
# Initialise logging.
_log = _init_log(level=logging.DEBUG if args.verbose else logging.INFO)
# Load configuration.
_log.info('Loading config: %s', args.config_file.name)
_config = json.load(args.config_file)
if args.print_settings:
_log.debug('Settings:\n %s', json.dumps(_config, indent=4,
sort_keys=True))
sender.send(_config, _log, 1, 1) | Main script function | Below is the the instruction that describes the task:
### Input:
Main script function
### Response:
def main():
"""Main script function"""
# Create simulation object, and start streaming SPEAD heaps
sender = PulsarSender()
# Parse command line arguments
args = parse_command_line()
# Initialise logging.
_log = _init_log(level=logging.DEBUG if args.verbose else logging.INFO)
# Load configuration.
_log.info('Loading config: %s', args.config_file.name)
_config = json.load(args.config_file)
if args.print_settings:
_log.debug('Settings:\n %s', json.dumps(_config, indent=4,
sort_keys=True))
sender.send(_config, _log, 1, 1) |
def find_file(name, directory):
"""Searches up from a directory looking for a file"""
path_bits = directory.split(os.sep)
for i in range(0, len(path_bits) - 1):
check_path = path_bits[0:len(path_bits) - i]
check_file = "%s%s%s" % (os.sep.join(check_path), os.sep, name)
if os.path.exists(check_file):
return abspath(check_file)
return None | Searches up from a directory looking for a file | Below is the the instruction that describes the task:
### Input:
Searches up from a directory looking for a file
### Response:
def find_file(name, directory):
"""Searches up from a directory looking for a file"""
path_bits = directory.split(os.sep)
for i in range(0, len(path_bits) - 1):
check_path = path_bits[0:len(path_bits) - i]
check_file = "%s%s%s" % (os.sep.join(check_path), os.sep, name)
if os.path.exists(check_file):
return abspath(check_file)
return None |
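Usage sketch (abspath here is os.path.abspath, as imported by the original module; the file name is arbitrary):
    import os
    cfg = find_file("setup.cfg", os.getcwd())
    print(cfg if cfg else "no setup.cfg found above the current directory")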
def send_document(self, peer: Peer, document: str, reply: int=None, on_success: callable=None,
reply_markup: botapi.ReplyMarkup=None):
"""
Send document to peer.
:param peer: Peer to send message to.
:param document: File path to document to send.
:param reply: Message object or message_id to reply to.
:param on_success: Callback to call when call is complete.
:type reply: int or Message
"""
if isinstance(reply, Message):
reply = reply.id
document = botapi.InputFile('document', botapi.InputFileInfo(document, open(document, 'rb'),
get_mimetype(document)))
botapi.send_document(chat_id=peer.id, document=document, reply_to_message_id=reply, on_success=on_success,
reply_markup=reply_markup, **self.request_args).run() | Send document to peer.
:param peer: Peer to send message to.
:param document: File path to document to send.
:param reply: Message object or message_id to reply to.
:param on_success: Callback to call when call is complete.
:type reply: int or Message | Below is the the instruction that describes the task:
### Input:
Send document to peer.
:param peer: Peer to send message to.
:param document: File path to document to send.
:param reply: Message object or message_id to reply to.
:param on_success: Callback to call when call is complete.
:type reply: int or Message
### Response:
def send_document(self, peer: Peer, document: str, reply: int=None, on_success: callable=None,
reply_markup: botapi.ReplyMarkup=None):
"""
Send document to peer.
:param peer: Peer to send message to.
:param document: File path to document to send.
:param reply: Message object or message_id to reply to.
:param on_success: Callback to call when call is complete.
:type reply: int or Message
"""
if isinstance(reply, Message):
reply = reply.id
document = botapi.InputFile('document', botapi.InputFileInfo(document, open(document, 'rb'),
get_mimetype(document)))
botapi.send_document(chat_id=peer.id, document=document, reply_to_message_id=reply, on_success=on_success,
reply_markup=reply_markup, **self.request_args).run() |
def _twofilter_smoothing_ON2(self, t, ti, info, phi, lwinfo):
"""O(N^2) version of two-filter smoothing.
This method should not be called directly, see twofilter_smoothing.
"""
sp, sw = 0., 0.
upb = lwinfo.max() + self.wgt[t].lw.max()
if hasattr(self.model, 'upper_bound_trans'):
upb += self.model.upper_bound_trans(t + 1)
# Loop over n, to avoid having in memory a NxN matrix
for n in range(self.N):
omegan = np.exp(lwinfo + self.wgt[t].lw[n] - upb
+ self.model.logpt(t + 1, self.X[t][n],
info.hist.X[ti]))
sp += np.sum(omegan * phi(self.X[t][n], info.hist.X[ti]))
sw += np.sum(omegan)
return sp / sw | O(N^2) version of two-filter smoothing.
This method should not be called directly, see twofilter_smoothing. | Below is the the instruction that describes the task:
### Input:
O(N^2) version of two-filter smoothing.
This method should not be called directly, see twofilter_smoothing.
### Response:
def _twofilter_smoothing_ON2(self, t, ti, info, phi, lwinfo):
"""O(N^2) version of two-filter smoothing.
This method should not be called directly, see twofilter_smoothing.
"""
sp, sw = 0., 0.
upb = lwinfo.max() + self.wgt[t].lw.max()
if hasattr(self.model, 'upper_bound_trans'):
upb += self.model.upper_bound_trans(t + 1)
# Loop over n, to avoid having in memory a NxN matrix
for n in range(self.N):
omegan = np.exp(lwinfo + self.wgt[t].lw[n] - upb
+ self.model.logpt(t + 1, self.X[t][n],
info.hist.X[ti]))
sp += np.sum(omegan * phi(self.X[t][n], info.hist.X[ti]))
sw += np.sum(omegan)
return sp / sw |
def gates_close(gate0: Gate, gate1: Gate,
tolerance: float = TOLERANCE) -> bool:
"""Returns: True if gates are almost identical.
Closeness is measured with the gate angle.
"""
return vectors_close(gate0.vec, gate1.vec, tolerance) | Returns: True if gates are almost identical.
Closeness is measured with the gate angle. | Below is the the instruction that describes the task:
### Input:
Returns: True if gates are almost identical.
Closeness is measured with the gate angle.
### Response:
def gates_close(gate0: Gate, gate1: Gate,
tolerance: float = TOLERANCE) -> bool:
"""Returns: True if gates are almost identical.
Closeness is measured with the gate angle.
"""
return vectors_close(gate0.vec, gate1.vec, tolerance) |
def channelRelease(BaRange_presence=0, GroupChannelDescription_presence=0,
GroupCipherKeyNumber_presence=0, GprsResumption_presence=0,
BaListPref_presence=0):
"""CHANNEL RELEASE Section 9.1.7"""
a = TpPd(pd=0x6)
b = MessageType(mesType=0xD) # 00001101
c = RrCause()
packet = a / b / c
    if BaRange_presence == 1:
        d = BaRangeHdr(ieiBR=0x73, eightBitBR=0x0)
        packet = packet / d
    if GroupChannelDescription_presence == 1:
        e = GroupChannelDescriptionHdr(ieiGCD=0x74, eightBitGCD=0x0)
        packet = packet / e
    if GroupCipherKeyNumber_presence == 1:
        f = GroupCipherKeyNumber(ieiGCKN=0x8)
        packet = packet / f
    if GprsResumption_presence == 1:
        g = GprsResumptionHdr(ieiGR=0xC, eightBitGR=0x0)
        packet = packet / g
    if BaListPref_presence == 1:
        h = BaListPrefHdr(ieiBLP=0x75, eightBitBLP=0x0)
        packet = packet / h
return packet | CHANNEL RELEASE Section 9.1.7 | Below is the the instruction that describes the task:
### Input:
CHANNEL RELEASE Section 9.1.7
### Response:
def channelRelease(BaRange_presence=0, GroupChannelDescription_presence=0,
GroupCipherKeyNumber_presence=0, GprsResumption_presence=0,
BaListPref_presence=0):
"""CHANNEL RELEASE Section 9.1.7"""
a = TpPd(pd=0x6)
b = MessageType(mesType=0xD) # 00001101
c = RrCause()
packet = a / b / c
    if BaRange_presence == 1:
        d = BaRangeHdr(ieiBR=0x73, eightBitBR=0x0)
        packet = packet / d
    if GroupChannelDescription_presence == 1:
        e = GroupChannelDescriptionHdr(ieiGCD=0x74, eightBitGCD=0x0)
        packet = packet / e
    if GroupCipherKeyNumber_presence == 1:
        f = GroupCipherKeyNumber(ieiGCKN=0x8)
        packet = packet / f
    if GprsResumption_presence == 1:
        g = GprsResumptionHdr(ieiGR=0xC, eightBitGR=0x0)
        packet = packet / g
    if BaListPref_presence == 1:
        h = BaListPrefHdr(ieiBLP=0x75, eightBitBLP=0x0)
        packet = packet / h
return packet |
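Sketch of building a message with one optional IE enabled (assumes the surrounding scapy GSM layer classes such as TpPd and GprsResumptionHdr are importable):
    pkt = channelRelease(GprsResumption_presence=1)    # adds the GPRS resumption IE
    pkt.show()                                         # scapy layer dump of the assembled message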
def _add_childTnLst(self):
"""Add `./p:timing/p:tnLst/p:par/p:cTn/p:childTnLst` descendant.
Any existing `p:timing` child element is ruthlessly removed and
replaced.
"""
self.remove(self.get_or_add_timing())
timing = parse_xml(self._childTnLst_timing_xml())
self._insert_timing(timing)
return timing.xpath('./p:tnLst/p:par/p:cTn/p:childTnLst')[0] | Add `./p:timing/p:tnLst/p:par/p:cTn/p:childTnLst` descendant.
Any existing `p:timing` child element is ruthlessly removed and
replaced. | Below is the the instruction that describes the task:
### Input:
Add `./p:timing/p:tnLst/p:par/p:cTn/p:childTnLst` descendant.
Any existing `p:timing` child element is ruthlessly removed and
replaced.
### Response:
def _add_childTnLst(self):
"""Add `./p:timing/p:tnLst/p:par/p:cTn/p:childTnLst` descendant.
Any existing `p:timing` child element is ruthlessly removed and
replaced.
"""
self.remove(self.get_or_add_timing())
timing = parse_xml(self._childTnLst_timing_xml())
self._insert_timing(timing)
return timing.xpath('./p:tnLst/p:par/p:cTn/p:childTnLst')[0] |
def clean_params(self):
"""Retrieves the parameter OrderedDict without the context or self parameters.
Useful for inspecting signature.
"""
result = self.params.copy()
if self.cog is not None:
# first parameter is self
result.popitem(last=False)
try:
# first/second parameter is context
result.popitem(last=False)
except Exception:
raise ValueError('Missing context parameter') from None
return result | Retrieves the parameter OrderedDict without the context or self parameters.
Useful for inspecting signature. | Below is the the instruction that describes the task:
### Input:
Retrieves the parameter OrderedDict without the context or self parameters.
Useful for inspecting signature.
### Response:
def clean_params(self):
"""Retrieves the parameter OrderedDict without the context or self parameters.
Useful for inspecting signature.
"""
result = self.params.copy()
if self.cog is not None:
# first parameter is self
result.popitem(last=False)
try:
# first/second parameter is context
result.popitem(last=False)
except Exception:
raise ValueError('Missing context parameter') from None
return result |
def _get_full_paths(fastq_dir, config, config_file):
"""Retrieve full paths for directories in the case of relative locations.
"""
if fastq_dir:
fastq_dir = utils.add_full_path(fastq_dir)
config_dir = utils.add_full_path(os.path.dirname(config_file))
galaxy_config_file = utils.add_full_path(config.get("galaxy_config", "universe_wsgi.ini"),
config_dir)
return fastq_dir, os.path.dirname(galaxy_config_file), config_dir | Retrieve full paths for directories in the case of relative locations. | Below is the the instruction that describes the task:
### Input:
Retrieve full paths for directories in the case of relative locations.
### Response:
def _get_full_paths(fastq_dir, config, config_file):
"""Retrieve full paths for directories in the case of relative locations.
"""
if fastq_dir:
fastq_dir = utils.add_full_path(fastq_dir)
config_dir = utils.add_full_path(os.path.dirname(config_file))
galaxy_config_file = utils.add_full_path(config.get("galaxy_config", "universe_wsgi.ini"),
config_dir)
return fastq_dir, os.path.dirname(galaxy_config_file), config_dir |
def get_crypt_class(self):
"""
Get the Keyczar class to use.
The class can be customized with the ENCRYPTED_FIELD_MODE setting. By default,
this setting is DECRYPT_AND_ENCRYPT. Set this to ENCRYPT to disable decryption.
This is necessary if you are only providing public keys to Keyczar.
Returns:
keyczar.Encrypter if ENCRYPTED_FIELD_MODE is ENCRYPT.
keyczar.Crypter if ENCRYPTED_FIELD_MODE is DECRYPT_AND_ENCRYPT.
Override this method to customize the type of Keyczar class returned.
"""
crypt_type = getattr(settings, 'ENCRYPTED_FIELD_MODE', 'DECRYPT_AND_ENCRYPT')
if crypt_type == 'ENCRYPT':
crypt_class_name = 'Encrypter'
elif crypt_type == 'DECRYPT_AND_ENCRYPT':
crypt_class_name = 'Crypter'
else:
raise ImproperlyConfigured(
'ENCRYPTED_FIELD_MODE must be either DECRYPT_AND_ENCRYPT '
'or ENCRYPT, not %s.' % crypt_type)
return getattr(keyczar, crypt_class_name) | Get the Keyczar class to use.
The class can be customized with the ENCRYPTED_FIELD_MODE setting. By default,
this setting is DECRYPT_AND_ENCRYPT. Set this to ENCRYPT to disable decryption.
This is necessary if you are only providing public keys to Keyczar.
Returns:
keyczar.Encrypter if ENCRYPTED_FIELD_MODE is ENCRYPT.
keyczar.Crypter if ENCRYPTED_FIELD_MODE is DECRYPT_AND_ENCRYPT.
Override this method to customize the type of Keyczar class returned. | Below is the the instruction that describes the task:
### Input:
Get the Keyczar class to use.
The class can be customized with the ENCRYPTED_FIELD_MODE setting. By default,
this setting is DECRYPT_AND_ENCRYPT. Set this to ENCRYPT to disable decryption.
This is necessary if you are only providing public keys to Keyczar.
Returns:
keyczar.Encrypter if ENCRYPTED_FIELD_MODE is ENCRYPT.
keyczar.Crypter if ENCRYPTED_FIELD_MODE is DECRYPT_AND_ENCRYPT.
Override this method to customize the type of Keyczar class returned.
### Response:
def get_crypt_class(self):
"""
Get the Keyczar class to use.
The class can be customized with the ENCRYPTED_FIELD_MODE setting. By default,
this setting is DECRYPT_AND_ENCRYPT. Set this to ENCRYPT to disable decryption.
This is necessary if you are only providing public keys to Keyczar.
Returns:
keyczar.Encrypter if ENCRYPTED_FIELD_MODE is ENCRYPT.
keyczar.Crypter if ENCRYPTED_FIELD_MODE is DECRYPT_AND_ENCRYPT.
Override this method to customize the type of Keyczar class returned.
"""
crypt_type = getattr(settings, 'ENCRYPTED_FIELD_MODE', 'DECRYPT_AND_ENCRYPT')
if crypt_type == 'ENCRYPT':
crypt_class_name = 'Encrypter'
elif crypt_type == 'DECRYPT_AND_ENCRYPT':
crypt_class_name = 'Crypter'
else:
raise ImproperlyConfigured(
'ENCRYPTED_FIELD_MODE must be either DECRYPT_AND_ENCRYPT '
'or ENCRYPT, not %s.' % crypt_type)
return getattr(keyczar, crypt_class_name) |
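The setting it reads is ordinary Django configuration; a settings.py sketch:
    # settings.py -- public-key-only deployments can disable decryption
    ENCRYPTED_FIELD_MODE = 'ENCRYPT'    # get_crypt_class() then returns keyczar.Encrypter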
def movie(self, cycles, plotstyle='',movname='',fps=12,**kwargs):
from matplotlib import animation
'''
Make an interactive movie in the matplotlib window for a number of
different plot types:
Plot types
----------
'iso_abund' : abundance distribution a la se.iso_abund()
'abu_chart' : abundance chart a la se.abu_chart()
'plot' : plot any number of y_items against an x_item
Parameters
----------
cycles : list
Which cycles do you want to plot as movie frames?
plotstyle : string
What type of plot should the movie show? Currently supported is
'iso_abund', 'abu_chart' and 'plot'
movname : string, optional
Name of movie (+ extension, e.g. .mp4 or .avi) if the movie is
to be saved
The default is ''
args : list
            Arguments that should be passed to the plotting function. These are
the arguments of the respective methods that make the frames. See
the docstrings of those functions for details
'plot' Parameters
-----------------
'xlims' : tuple, optional
'ylims' : tuple, optional
'xlabel' : string, optional
'ylabel' : string, optional
'legend' : boolean, optional
The default is False
'loc' : string or integer, optional
Set the legend location if legend is True.
The default is 'best'
'interval' : frame interval in ms
FAQ:
----
If ffmpeg is not installed on OSX (and you don't want to wait for port to do it) check out
these binaries:
http://stackoverflow.com/questions/18833731/how-to-set-ffmpeg-for-matplotlib-in-mac-os-x
'''
modelself=self
supported_styles=['iso_abund','abu_chart','plot']
class mov(object):
def __init__(self,cyc,style,movname,fps,**kwargs):
self.fig = None
self.ax = None
self.ani = None
self.cyc = cyc
self.movname=movname
self.fps = fps
self.style=style
if self.style in supported_styles:
animateFunc=draw_frame
else:
raise IOError("this type of movie is not available yet! Sorry!")
if self.style=='plot':
self.y_ditems=kwargs['y_items']
self.data=kwargs['data']
self._init_animation(animateFunc)
def _init_animation(self, animateFunc):
if self.style=='plot':
fsize=14
params = {'axes.labelsize': fsize,
'text.fontsize': fsize,
'legend.fontsize': fsize*0.8,
'xtick.labelsize': fsize,
'ytick.labelsize': fsize,
'text.usetex': False,
'figure.facecolor': 'white',
'ytick.minor.pad': 8,
'ytick.major.pad': 8,
'xtick.minor.pad': 8,
'xtick.major.pad': 8,
'figure.subplot.bottom' : 0.15,
'lines.markersize': 8}
matplotlib.rcParams.update(params)
self.fig, self.ax = pl.subplots()
tmp=[]
for i in range(len(self.y_ditems)):
tmp.append(self.data[0][0])
tmp.append(self.data[0][i+1])
self.lines = self.ax.plot(*tmp)
if 'ylims' in kwargs:
pl.ylim(kwargs['ylims'])
if 'xlims' in kwargs:
pl.xlim(kwargs['xlims'])
if 'xlabel' in kwargs:
pl.xlabel(kwargs['xlabel'])
else:
pl.xlabel(kwargs['x_item'])
if 'ylabel' in kwargs:
pl.ylabel(kwargs['ylabel'])
else:
if type(y_items) is str:
lab=y_items
elif type(y_items) is list and len(y_items) == 1:
lab=y_items[0]
else:
lab=''
for el in kwargs['y_items']:
lab+=el+', '
lab=lab[:-2]
pl.ylabel(lab)
if 'legend' in kwargs and kwargs['legend']:
if 'loc' in kwargs:
pl.legend([line for line in self.lines], self.y_ditems,
loc=kwargs['loc']).draw_frame(False)
else:
pl.legend([line for line in self.lines], self.y_ditems,
loc='best').draw_frame(False)
self._animation(animateFunc)
def _animation(self, animateFunc):
if plotstyle=='plot' and 'interval' in kwargs:
self.ani = animation.FuncAnimation(self.fig, animateFunc, arange(0, len(self.cyc)), interval=kwargs['interval'], blit=False, fargs=[self])
elif plotstyle=='iso_abund':
self.fig, self.ax = pl.subplots()
ims=[]
for i in arange(0,len(self.cyc)):
im=draw_frame(i,self)
ims.append(im)
self.ani = animation.ArtistAnimation(self.fig,ims,interval=50,
blit=False)
self.fig.canvas.draw()
elif plotstyle=='abu_chart':
self.fig=pl.figure()
axx = 0.10
axy = 0.10
axw = 0.85
axh = 0.8
self.ax=pl.axes([axx,axy,axw,axh])
ims=[]
for i in arange(0,len(self.cyc)):
im=draw_frame(i,self)
# draw_frame here returns the patch for the abundance squares
# im[0] as well as the artists im[1], so that the colorbar
# can be plotted only once (on the first plot)
ims.append(im[1])
if i==0:
cb=pl.colorbar(im[0])
cb.set_label('log$_{10}$(X)',fontsize='x-large')
self.ani = animation.ArtistAnimation(self.fig,ims,interval=50,
blit=False)
self.fig.canvas.draw()
                if self.movname != '':
print('\n generating animation: '+self.movname)
self.ani.save(self.movname,fps=self.fps)
print('animation '+self.movname+' saved with '+str(self.fps)+' frames per second')
plotType=self._classTest()
if plotType=='se':
if plotstyle == 'iso_abund':
data = self.se.get(cycles,['iso_massf','mass'])
def draw_frame(i,self=None):
artists=modelself.iso_abund(self.cyc[i],stable=True,show=False,
data_provided=True,thedata=data[i],
verbose=False,drawfig=self.fig,drawax=self.ax,
mov=True,**kwargs)
return artists
if plotstyle == 'abu_chart':
data = self.se.get(cycles,['iso_massf','mass'])
def draw_frame(i,self=None):
artists=modelself.abu_chart(self.cyc[i],show=False,data_provided=True,
thedata=data[i],lbound=(-12, -6),drawfig=self.fig,
drawax=self.ax,mov=True,**kwargs)
return artists
if plotstyle=='plot':
if 'x_item' not in kwargs or 'y_items' not in kwargs:
raise IOError("Please specify both x_item and y_items")
x_item = kwargs['x_item']
y_items = kwargs['y_items']
tx, ty = type(x_item), type(y_items)
if tx is list and ty is list:
data=self.se.get(cycles,x_item+y_items)
elif tx is str and ty is list:
data=self.se.get(cycles,[x_item]+y_items)
elif tx is str and ty is str:
data=self.se.get(cycles,[x_item]+[y_items])
def draw_frame(i, self=None):
# pl.title("cycle: " + self.cyc[i])
for j in range(len(self.lines)):
if 'logy' in kwargs and kwargs['logy']:
self.lines[j].set_data(self.data[i][0],
np.log10(self.data[i][j+1]))
else:
self.lines[j].set_data(self.data[i][0],
self.data[i][j+1])
return self.lines
if plotstyle=='plot':
return mov(cycles,plotstyle,movname,fps,data=data,**kwargs).ani
else:
return mov(cycles,plotstyle,movname,fps).ani | Make an interactive movie in the matplotlib window for a number of
different plot types:
Plot types
----------
'iso_abund' : abundance distribution a la se.iso_abund()
'abu_chart' : abundance chart a la se.abu_chart()
'plot' : plot any number of y_items against an x_item
Parameters
----------
cycles : list
Which cycles do you want to plot as movie frames?
plotstyle : string
What type of plot should the movie show? Currently supported is
'iso_abund', 'abu_chart' and 'plot'
movname : string, optional
Name of movie (+ extension, e.g. .mp4 or .avi) if the movie is
to be saved
The default is ''
args : list
        Arguments that should be passed to the plotting function. These are
the arguments of the respective methods that make the frames. See
the docstrings of those functions for details
'plot' Parameters
-----------------
'xlims' : tuple, optional
'ylims' : tuple, optional
'xlabel' : string, optional
'ylabel' : string, optional
'legend' : boolean, optional
The default is False
'loc' : string or integer, optional
Set the legend location if legend is True.
The default is 'best'
'interval' : frame interval in ms
FAQ:
----
If ffmpeg is not installed on OSX (and you don't want to wait for port to do it) check out
these binaries:
http://stackoverflow.com/questions/18833731/how-to-set-ffmpeg-for-matplotlib-in-mac-os-x | Below is the the instruction that describes the task:
### Input:
Make an interactive movie in the matplotlib window for a number of
different plot types:
Plot types
----------
'iso_abund' : abundance distribution a la se.iso_abund()
'abu_chart' : abundance chart a la se.abu_chart()
'plot' : plot any number of y_items against an x_item
Parameters
----------
cycles : list
Which cycles do you want to plot as movie frames?
plotstyle : string
What type of plot should the movie show? Currently supported is
'iso_abund', 'abu_chart' and 'plot'
movname : string, optional
Name of movie (+ extension, e.g. .mp4 or .avi) if the movie is
to be saved
The default is ''
args : list
        Arguments that should be passed to the plotting function. These are
the arguments of the respective methods that make the frames. See
the docstrings of those functions for details
'plot' Parameters
-----------------
'xlims' : tuple, optional
'ylims' : tuple, optional
'xlabel' : string, optional
'ylabel' : string, optional
'legend' : boolean, optional
The default is False
'loc' : string or integer, optional
Set the legend location if legend is True.
The default is 'best'
'interval' : frame interval in ms
FAQ:
----
If ffmpeg is not installed on OSX (and you don't want to wait for port to do it) check out
these binaries:
http://stackoverflow.com/questions/18833731/how-to-set-ffmpeg-for-matplotlib-in-mac-os-x
### Response:
def movie(self, cycles, plotstyle='',movname='',fps=12,**kwargs):
from matplotlib import animation
'''
Make an interactive movie in the matplotlib window for a number of
different plot types:
Plot types
----------
'iso_abund' : abundance distribution a la se.iso_abund()
'abu_chart' : abundance chart a la se.abu_chart()
'plot' : plot any number of y_items against an x_item
Parameters
----------
cycles : list
Which cycles do you want to plot as movie frames?
plotstyle : string
What type of plot should the movie show? Currently supported is
'iso_abund', 'abu_chart' and 'plot'
movname : string, optional
Name of movie (+ extension, e.g. .mp4 or .avi) if the movie is
to be saved
The default is ''
args : list
            Arguments that should be passed to the plotting function. These are
the arguments of the respective methods that make the frames. See
the docstrings of those functions for details
'plot' Parameters
-----------------
'xlims' : tuple, optional
'ylims' : tuple, optional
'xlabel' : string, optional
'ylabel' : string, optional
'legend' : boolean, optional
The default is False
'loc' : string or integer, optional
Set the legend location if legend is True.
The default is 'best'
'interval' : frame interval in ms
FAQ:
----
If ffmpeg is not installed on OSX (and you don't want to wait for port to do it) check out
these binaries:
http://stackoverflow.com/questions/18833731/how-to-set-ffmpeg-for-matplotlib-in-mac-os-x
'''
modelself=self
supported_styles=['iso_abund','abu_chart','plot']
class mov(object):
def __init__(self,cyc,style,movname,fps,**kwargs):
self.fig = None
self.ax = None
self.ani = None
self.cyc = cyc
self.movname=movname
self.fps = fps
self.style=style
if self.style in supported_styles:
animateFunc=draw_frame
else:
raise IOError("this type of movie is not available yet! Sorry!")
if self.style=='plot':
self.y_ditems=kwargs['y_items']
self.data=kwargs['data']
self._init_animation(animateFunc)
def _init_animation(self, animateFunc):
if self.style=='plot':
fsize=14
params = {'axes.labelsize': fsize,
'text.fontsize': fsize,
'legend.fontsize': fsize*0.8,
'xtick.labelsize': fsize,
'ytick.labelsize': fsize,
'text.usetex': False,
'figure.facecolor': 'white',
'ytick.minor.pad': 8,
'ytick.major.pad': 8,
'xtick.minor.pad': 8,
'xtick.major.pad': 8,
'figure.subplot.bottom' : 0.15,
'lines.markersize': 8}
matplotlib.rcParams.update(params)
self.fig, self.ax = pl.subplots()
tmp=[]
for i in range(len(self.y_ditems)):
tmp.append(self.data[0][0])
tmp.append(self.data[0][i+1])
self.lines = self.ax.plot(*tmp)
if 'ylims' in kwargs:
pl.ylim(kwargs['ylims'])
if 'xlims' in kwargs:
pl.xlim(kwargs['xlims'])
if 'xlabel' in kwargs:
pl.xlabel(kwargs['xlabel'])
else:
pl.xlabel(kwargs['x_item'])
if 'ylabel' in kwargs:
pl.ylabel(kwargs['ylabel'])
else:
if type(y_items) is str:
lab=y_items
elif type(y_items) is list and len(y_items) == 1:
lab=y_items[0]
else:
lab=''
for el in kwargs['y_items']:
lab+=el+', '
lab=lab[:-2]
pl.ylabel(lab)
if 'legend' in kwargs and kwargs['legend']:
if 'loc' in kwargs:
pl.legend([line for line in self.lines], self.y_ditems,
loc=kwargs['loc']).draw_frame(False)
else:
pl.legend([line for line in self.lines], self.y_ditems,
loc='best').draw_frame(False)
self._animation(animateFunc)
def _animation(self, animateFunc):
if plotstyle=='plot' and 'interval' in kwargs:
self.ani = animation.FuncAnimation(self.fig, animateFunc, arange(0, len(self.cyc)), interval=kwargs['interval'], blit=False, fargs=[self])
elif plotstyle=='iso_abund':
self.fig, self.ax = pl.subplots()
ims=[]
for i in arange(0,len(self.cyc)):
im=draw_frame(i,self)
ims.append(im)
self.ani = animation.ArtistAnimation(self.fig,ims,interval=50,
blit=False)
self.fig.canvas.draw()
elif plotstyle=='abu_chart':
self.fig=pl.figure()
axx = 0.10
axy = 0.10
axw = 0.85
axh = 0.8
self.ax=pl.axes([axx,axy,axw,axh])
ims=[]
for i in arange(0,len(self.cyc)):
im=draw_frame(i,self)
# draw_frame here returns the patch for the abundance squares
# im[0] as well as the artists im[1], so that the colorbar
# can be plotted only once (on the first plot)
ims.append(im[1])
if i==0:
cb=pl.colorbar(im[0])
cb.set_label('log$_{10}$(X)',fontsize='x-large')
self.ani = animation.ArtistAnimation(self.fig,ims,interval=50,
blit=False)
self.fig.canvas.draw()
                if self.movname != '':
print('\n generating animation: '+self.movname)
self.ani.save(self.movname,fps=self.fps)
print('animation '+self.movname+' saved with '+str(self.fps)+' frames per second')
plotType=self._classTest()
if plotType=='se':
if plotstyle == 'iso_abund':
data = self.se.get(cycles,['iso_massf','mass'])
def draw_frame(i,self=None):
artists=modelself.iso_abund(self.cyc[i],stable=True,show=False,
data_provided=True,thedata=data[i],
verbose=False,drawfig=self.fig,drawax=self.ax,
mov=True,**kwargs)
return artists
if plotstyle == 'abu_chart':
data = self.se.get(cycles,['iso_massf','mass'])
def draw_frame(i,self=None):
artists=modelself.abu_chart(self.cyc[i],show=False,data_provided=True,
thedata=data[i],lbound=(-12, -6),drawfig=self.fig,
drawax=self.ax,mov=True,**kwargs)
return artists
if plotstyle=='plot':
if 'x_item' not in kwargs or 'y_items' not in kwargs:
raise IOError("Please specify both x_item and y_items")
x_item = kwargs['x_item']
y_items = kwargs['y_items']
tx, ty = type(x_item), type(y_items)
if tx is list and ty is list:
data=self.se.get(cycles,x_item+y_items)
elif tx is str and ty is list:
data=self.se.get(cycles,[x_item]+y_items)
elif tx is str and ty is str:
data=self.se.get(cycles,[x_item]+[y_items])
def draw_frame(i, self=None):
# pl.title("cycle: " + self.cyc[i])
for j in range(len(self.lines)):
if 'logy' in kwargs and kwargs['logy']:
self.lines[j].set_data(self.data[i][0],
np.log10(self.data[i][j+1]))
else:
self.lines[j].set_data(self.data[i][0],
self.data[i][j+1])
return self.lines
if plotstyle=='plot':
return mov(cycles,plotstyle,movname,fps,data=data,**kwargs).ani
else:
return mov(cycles,plotstyle,movname,fps).ani |
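An illustrative call for the 'plot' style (the se instance `pt`, the column names, the cycle list and the file name are all made up):
    cycles = list(range(1000, 2000, 100))
    ani = pt.movie(cycles, plotstyle='plot', x_item='mass', y_items=['temperature'],
                   legend=True, logy=True, movname='profile.mp4', fps=10)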
def P1(self, value):
"""Set private ``_P1`` and reset ``_block_matcher``."""
if value < self.P2:
self._P1 = value
else:
raise InvalidFirstDisparityChangePenaltyError("P1 must be less "
"than P2.")
self._replace_bm() | Set private ``_P1`` and reset ``_block_matcher``. | Below is the the instruction that describes the task:
### Input:
Set private ``_P1`` and reset ``_block_matcher``.
### Response:
def P1(self, value):
"""Set private ``_P1`` and reset ``_block_matcher``."""
if value < self.P2:
self._P1 = value
else:
raise InvalidFirstDisparityChangePenaltyError("P1 must be less "
"than P2.")
self._replace_bm() |
def reset(self):
"""
Clears all entries.
:return: None
"""
for i in range(len(self.values)):
self.values[i].delete(0, tk.END)
if self.defaults[i] is not None:
self.values[i].insert(0, self.defaults[i]) | Clears all entries.
:return: None | Below is the the instruction that describes the task:
### Input:
Clears all entries.
:return: None
### Response:
def reset(self):
"""
Clears all entries.
:return: None
"""
for i in range(len(self.values)):
self.values[i].delete(0, tk.END)
if self.defaults[i] is not None:
self.values[i].insert(0, self.defaults[i]) |
def _get_subnets_table(subnets):
"""Yields a formatted table to print subnet details.
:param List[dict] subnets: List of subnets.
:return Table: Formatted for subnet output.
"""
table = formatting.Table(['id',
'network identifier',
'cidr',
'note'])
for subnet in subnets:
table.add_row([subnet.get('id', ''),
subnet.get('networkIdentifier', ''),
subnet.get('cidr', ''),
subnet.get('note', '')])
return table | Yields a formatted table to print subnet details.
:param List[dict] subnets: List of subnets.
:return Table: Formatted for subnet output. | Below is the the instruction that describes the task:
### Input:
Yields a formatted table to print subnet details.
:param List[dict] subnets: List of subnets.
:return Table: Formatted for subnet output.
### Response:
def _get_subnets_table(subnets):
"""Yields a formatted table to print subnet details.
:param List[dict] subnets: List of subnets.
:return Table: Formatted for subnet output.
"""
table = formatting.Table(['id',
'network identifier',
'cidr',
'note'])
for subnet in subnets:
table.add_row([subnet.get('id', ''),
subnet.get('networkIdentifier', ''),
subnet.get('cidr', ''),
subnet.get('note', '')])
return table |
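A minimal sketch of feeding it data (formatting comes from SoftLayer's CLI helpers; the subnet dicts mirror the keys read above):
    subnets = [{'id': 42, 'networkIdentifier': '10.0.0.0', 'cidr': 26, 'note': 'backend'},
               {'id': 43, 'networkIdentifier': '10.0.1.0', 'cidr': 28}]
    print(_get_subnets_table(subnets))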
def print_results(results):
"""Print `results` (the results of validation) to stdout.
Args:
results: A list of FileValidationResults or ObjectValidationResults
instances.
"""
if not isinstance(results, list):
results = [results]
for r in results:
try:
r.log()
except AttributeError:
raise ValueError('Argument to print_results() must be a list of '
'FileValidationResults or ObjectValidationResults.') | Print `results` (the results of validation) to stdout.
Args:
results: A list of FileValidationResults or ObjectValidationResults
instances. | Below is the the instruction that describes the task:
### Input:
Print `results` (the results of validation) to stdout.
Args:
results: A list of FileValidationResults or ObjectValidationResults
instances.
### Response:
def print_results(results):
"""Print `results` (the results of validation) to stdout.
Args:
results: A list of FileValidationResults or ObjectValidationResults
instances.
"""
if not isinstance(results, list):
results = [results]
for r in results:
try:
r.log()
except AttributeError:
raise ValueError('Argument to print_results() must be a list of '
'FileValidationResults or ObjectValidationResults.') |
def coerceType(self, ftype, value):
"""Returns unicode(value) after trying to coerce it into the SOLR field type.
@param ftype(string) The SOLR field type for the value
@param value(any) The value that is to be represented as Unicode text.
"""
if value is None:
return None
if ftype == 'string':
return str(value)
elif ftype == 'text':
return str(value)
elif ftype == 'int':
try:
v = int(value)
return str(v)
except Exception:
return None
elif ftype == 'float':
try:
v = float(value)
return str(v)
except Exception:
return None
elif ftype == 'date':
try:
v = datetime.datetime.strptime(value, '%b %d %Y %I:%M%p')
return v.isoformat()
except Exception:
return None
return str(value) | Returns unicode(value) after trying to coerce it into the SOLR field type.
@param ftype(string) The SOLR field type for the value
@param value(any) The value that is to be represented as Unicode text. | Below is the the instruction that describes the task:
### Input:
Returns unicode(value) after trying to coerce it into the SOLR field type.
@param ftype(string) The SOLR field type for the value
@param value(any) The value that is to be represented as Unicode text.
### Response:
def coerceType(self, ftype, value):
"""Returns unicode(value) after trying to coerce it into the SOLR field type.
@param ftype(string) The SOLR field type for the value
@param value(any) The value that is to be represented as Unicode text.
"""
if value is None:
return None
if ftype == 'string':
return str(value)
elif ftype == 'text':
return str(value)
elif ftype == 'int':
try:
v = int(value)
return str(v)
except Exception:
return None
elif ftype == 'float':
try:
v = float(value)
return str(v)
except Exception:
return None
elif ftype == 'date':
try:
v = datetime.datetime.strptime(value, '%b %d %Y %I:%M%p')
return v.isoformat()
except Exception:
return None
return str(value) |
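Illustrative calls, assuming `client` is an instance of the class that defines coerceType; the input values are made up.

# client is assumed to be an instance of the class defining coerceType.
client.coerceType('int', '42')                    # -> '42'
client.coerceType('int', 'forty-two')             # -> None (coercion failed)
client.coerceType('float', '3.5')                 # -> '3.5'
client.coerceType('string', 42)                   # -> '42'
client.coerceType('date', 'Jun 01 2017 1:33PM')   # -> '2017-06-01T13:33:00'
client.coerceType('text', None)                   # -> None (None passes through)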
def icanhaz(parser, token):
"""
Finds the ICanHaz template for the given name and renders it surrounded by
the requisite ICanHaz <script> tags.
"""
bits = token.contents.split()
if len(bits) not in [2, 3]:
raise template.TemplateSyntaxError(
"'icanhaz' tag takes one argument: the name/id of the template")
return ICanHazNode(bits[1]) | Finds the ICanHaz template for the given name and renders it surrounded by
the requisite ICanHaz <script> tags. | Below is the instruction that describes the task:
### Input:
Finds the ICanHaz template for the given name and renders it surrounded by
the requisite ICanHaz <script> tags.
### Response:
def icanhaz(parser, token):
"""
Finds the ICanHaz template for the given name and renders it surrounded by
the requisite ICanHaz <script> tags.
"""
bits = token.contents.split()
if len(bits) not in [2, 3]:
raise template.TemplateSyntaxError(
"'icanhaz' tag takes one argument: the name/id of the template")
return ICanHazNode(bits[1]) |
def strip_cdata(text):
"""Removes all CDATA blocks from `text` if it contains them.
Note:
If the text contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed.
"""
if not is_cdata(text):
return text
xml = "<e>{0}</e>".format(text)
node = etree.fromstring(xml)
return node.text | Removes all CDATA blocks from `text` if it contains them.
Note:
If the text contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed. | Below is the instruction that describes the task:
### Input:
Removes all CDATA blocks from `text` if it contains them.
Note:
If the text contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed.
### Response:
def strip_cdata(text):
"""Removes all CDATA blocks from `text` if it contains them.
Note:
If the text contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed.
"""
if not is_cdata(text):
return text
xml = "<e>{0}</e>".format(text)
node = etree.fromstring(xml)
return node.text |
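A short illustration, assuming lxml's etree and the module's is_cdata helper are importable alongside strip_cdata.

strip_cdata("<![CDATA[<script>alert('hi')</script>]]>")
# -> "<script>alert('hi')</script>"
strip_cdata("plain text, no CDATA")
# -> "plain text, no CDATA"   (returned unchanged because is_cdata() is False)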
async def uint(self, elem, elem_type, params=None):
"""
Integer types
:param elem:
:param elem_type:
:param params:
:return:
"""
if self.writing:
return IntegerModel(elem, elem_type.WIDTH) if self.modelize else elem
else:
return elem.val if isinstance(elem, IModel) else elem | Integer types
:param elem:
:param elem_type:
:param params:
:return: | Below is the instruction that describes the task:
### Input:
Integer types
:param elem:
:param elem_type:
:param params:
:return:
### Response:
async def uint(self, elem, elem_type, params=None):
"""
Integer types
:param elem:
:param elem_type:
:param params:
:return:
"""
if self.writing:
return IntegerModel(elem, elem_type.WIDTH) if self.modelize else elem
else:
return elem.val if isinstance(elem, IModel) else elem |
def filter(self, versions, key=lambda x: x):
"""Filter all of the versions in an iterable that match this version range
Args:
versions (iterable): An iterable of SemanticVersion objects
Returns:
list: A list of the SemanticVersion objects that matched this range
"""
return [x for x in versions if self.check(key(x))] | Filter all of the versions in an iterable that match this version range
Args:
versions (iterable): An iterable of SemanticVersion objects
Returns:
list: A list of the SemanticVersion objects that matched this range | Below is the instruction that describes the task:
### Input:
Filter all of the versions in an iterable that match this version range
Args:
versions (iterable): An iterable of SemanticVersion objects
Returns:
list: A list of the SemanticVersion objects that matched this range
### Response:
def filter(self, versions, key=lambda x: x):
"""Filter all of the versions in an iterable that match this version range
Args:
versions (iterable): An iterable of SemanticVersion objects
Returns:
list: A list of the SemanticVersion objects that matched this range
"""
return [x for x in versions if self.check(key(x))] |
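A self-contained toy illustration of the key= parameter; VersionRangeLike and Release are hypothetical stand-ins for the real classes, and only check() and .version are exercised.

class VersionRangeLike:
    def check(self, version):
        return version.startswith("1.")           # pretend "matches the range"
    def filter(self, versions, key=lambda x: x):  # same body as the method above
        return [x for x in versions if self.check(key(x))]

class Release:
    def __init__(self, version):
        self.version = version

rng = VersionRangeLike()
rng.filter(["1.0.0", "1.2.0", "2.0.0"])                                    # -> ['1.0.0', '1.2.0']
rng.filter([Release("1.0.0"), Release("2.0.0")], key=lambda r: r.version)  # -> [the 1.0.0 Release]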
def deserialize(cls, value, trusted=False, strict=False,
assert_valid=False, **kwargs):
"""Create a Singleton instance from a serialized dictionary.
This behaves identically to HasProperties.deserialize, except if
the singleton is already found in the singleton registry the existing
value is used.
.. note::
If property values differ from the existing singleton and
the input dictionary, the new values from the input dictionary
will be ignored
"""
if not isinstance(value, dict):
raise ValueError('HasProperties must deserialize from dictionary')
identifier = value.pop('_singleton_id', value.get('name'))
if identifier is None:
raise ValueError('Singleton classes must contain identifying name')
if identifier in cls._SINGLETONS:
return cls._SINGLETONS[identifier]
value = value.copy()
name = value.get('name', None)
value.update({'name': identifier})
newinst = super(Singleton, cls).deserialize(
value,
trusted=trusted,
strict=strict,
assert_valid=assert_valid,
**kwargs
)
if name:
newinst.name = name
return newinst | Create a Singleton instance from a serialized dictionary.
This behaves identically to HasProperties.deserialize, except if
the singleton is already found in the singleton registry the existing
value is used.
.. note::
If property values differ from the existing singleton and
the input dictionary, the new values from the input dictionary
will be ignored | Below is the instruction that describes the task:
### Input:
Create a Singleton instance from a serialized dictionary.
This behaves identically to HasProperties.deserialize, except if
the singleton is already found in the singleton registry the existing
value is used.
.. note::
If property values differ from the existing singleton and
the input dictionary, the new values from the input dictionary
will be ignored
### Response:
def deserialize(cls, value, trusted=False, strict=False,
assert_valid=False, **kwargs):
"""Create a Singleton instance from a serialized dictionary.
This behaves identically to HasProperties.deserialize, except if
the singleton is already found in the singleton registry the existing
value is used.
.. note::
If property values differ from the existing singleton and
the input dictionary, the new values from the input dictionary
will be ignored
"""
if not isinstance(value, dict):
raise ValueError('HasProperties must deserialize from dictionary')
identifier = value.pop('_singleton_id', value.get('name'))
if identifier is None:
raise ValueError('Singleton classes must contain identifying name')
if identifier in cls._SINGLETONS:
return cls._SINGLETONS[identifier]
value = value.copy()
name = value.get('name', None)
value.update({'name': identifier})
newinst = super(Singleton, cls).deserialize(
value,
trusted=trusted,
strict=strict,
assert_valid=assert_valid,
**kwargs
)
if name:
newinst.name = name
return newinst |
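An illustrative sketch of the registry short-circuit; AuditLog is a hypothetical Singleton subclass, and the properties-package wiring is omitted.

first = AuditLog.deserialize({'name': 'primary'})
second = AuditLog.deserialize({'name': 'primary', 'extra_prop': 5})
assert first is second   # the second call returns the registered instance;
                         # 'extra_prop' from the new dictionary is ignored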
def postprocess(self):
"""
Postprocessing includes renaming and gzipping where necessary.
"""
# Add suffix to all sub_dir/{items}
for path in self.neb_dirs:
for f in VASP_NEB_OUTPUT_SUB_FILES:
f = os.path.join(path, f)
if os.path.exists(f):
if self.final and self.suffix != "":
shutil.move(f, "{}{}".format(f, self.suffix))
elif self.suffix != "":
shutil.copy(f, "{}{}".format(f, self.suffix))
# Add suffix to all output files
for f in VASP_NEB_OUTPUT_FILES + [self.output_file]:
if os.path.exists(f):
if self.final and self.suffix != "":
shutil.move(f, "{}{}".format(f, self.suffix))
elif self.suffix != "":
shutil.copy(f, "{}{}".format(f, self.suffix)) | Postprocessing includes renaming and gzipping where necessary. | Below is the instruction that describes the task:
### Input:
Postprocessing includes renaming and gzipping where necessary.
### Response:
def postprocess(self):
"""
Postprocessing includes renaming and gzipping where necessary.
"""
# Add suffix to all sub_dir/{items}
for path in self.neb_dirs:
for f in VASP_NEB_OUTPUT_SUB_FILES:
f = os.path.join(path, f)
if os.path.exists(f):
if self.final and self.suffix != "":
shutil.move(f, "{}{}".format(f, self.suffix))
elif self.suffix != "":
shutil.copy(f, "{}{}".format(f, self.suffix))
# Add suffix to all output files
for f in VASP_NEB_OUTPUT_FILES + [self.output_file]:
if os.path.exists(f):
if self.final and self.suffix != "":
shutil.move(f, "{}{}".format(f, self.suffix))
elif self.suffix != "":
shutil.copy(f, "{}{}".format(f, self.suffix)) |
def recover_devices(cls):
"""Track devices.
Creates global dict to track device names across driver invocations
and populates based on current devices configured on the system.
"""
if "_devices" in globals():
return
global _devices
confs_dir = os.path.abspath(os.path.normpath(cfg.CONF.dhcp_confs))
for netid in os.listdir(confs_dir):
conf_dir = os.path.join(confs_dir, netid)
intf_filename = os.path.join(conf_dir, 'interface')
try:
with open(intf_filename, 'r') as f:
ifname = f.read()
_devices[netid] = ifname
except IOError:
LOG.error('Unable to read interface file: %s',
intf_filename)
LOG.debug("Recovered device %s for network %s'",
ifname, netid) | Track devices.
Creates global dict to track device names across driver invocations
and populates based on current devices configured on the system. | Below is the instruction that describes the task:
### Input:
Track devices.
Creates global dict to track device names across driver invocations
and populates based on current devices configured on the system.
### Response:
def recover_devices(cls):
"""Track devices.
Creates global dict to track device names across driver invocations
and populates based on current devices configured on the system.
"""
if "_devices" in globals():
return
global _devices
confs_dir = os.path.abspath(os.path.normpath(cfg.CONF.dhcp_confs))
for netid in os.listdir(confs_dir):
conf_dir = os.path.join(confs_dir, netid)
intf_filename = os.path.join(conf_dir, 'interface')
try:
with open(intf_filename, 'r') as f:
ifname = f.read()
_devices[netid] = ifname
except IOError:
LOG.error('Unable to read interface file: %s',
intf_filename)
LOG.debug("Recovered device %s for network %s'",
ifname, netid) |
def _parse_hosts_contents(self, hosts_contents):
"""
Parse the inventory contents. This returns a list of sections found in
the inventory, which can then be used to figure out which hosts belong
to which groups and such. Each section has a name, a type ('hosts',
'children', 'vars') and a list of entries for that section. Entries
consist of a hostname and the variables. For 'vars' sections, the
hostname is None.
For example:
[production:children]
frontend purpose="web"
db purpose="db"
Returns:
{
'name': 'production',
'type': 'children',
'entries': [
{'name': 'frontend', 'hostvars': {'purpose': 'web'}},
{'name': 'db', 'hostvars': {'purpose': 'db'}},
]
}
"""
sections = []
cur_section = {
'type': 'hosts',
'name': None,
'entries': []
}
for line in hosts_contents:
line = line.strip()
if line.startswith('#') or not line:
continue
elif line.startswith('['):
sections.append(cur_section)
section_type, name = self._parse_line_section(line)
cur_section = {
'type': section_type,
'name': name,
'entries': []
}
else:
name, vars = self._parse_line_entry(line, cur_section['type'])
entry = {
'name': name,
'hostvars': vars
}
cur_section['entries'].append(entry)
sections.append(cur_section)
return sections | Parse the inventory contents. This returns a list of sections found in
the inventory, which can then be used to figure out which hosts belong
to which groups and such. Each section has a name, a type ('hosts',
'children', 'vars') and a list of entries for that section. Entries
consist of a hostname and the variables. For 'vars' sections, the
hostname is None.
For example:
[production:children]
frontend purpose="web"
db purpose="db"
Returns:
{
'name': 'production',
'type': 'children',
'entries': [
{'name': 'frontend', 'hostvars': {'purpose': 'web'}},
{'name': 'db', 'hostvars': {'purpose': 'db'}},
]
} | Below is the instruction that describes the task:
### Input:
Parse the inventory contents. This returns a list of sections found in
the inventory, which can then be used to figure out which hosts belong
to which groups and such. Each section has a name, a type ('hosts',
'children', 'vars') and a list of entries for that section. Entries
consist of a hostname and the variables. For 'vars' sections, the
hostname is None.
For example:
[production:children]
frontend purpose="web"
db purpose="db"
Returns:
{
'name': 'production',
'type': 'children',
'entries': [
{'name': 'frontend', 'hostvars': {'purpose': 'web'}},
{'name': 'db', 'hostvars': {'purpose': 'db'}},
]
}
### Response:
def _parse_hosts_contents(self, hosts_contents):
"""
Parse the inventory contents. This returns a list of sections found in
the inventory, which can then be used to figure out which hosts belong
to which groups and such. Each section has a name, a type ('hosts',
'children', 'vars') and a list of entries for that section. Entries
consist of a hostname and the variables. For 'vars' sections, the
hostname is None.
For example:
[production:children]
frontend purpose="web"
db purpose="db"
Returns:
{
'name': 'production',
'type': 'children',
'entries': [
{'name': 'frontend', 'hostvars': {'purpose': 'web'}},
{'name': 'db', 'hostvars': {'purpose': 'db'}},
]
}
"""
sections = []
cur_section = {
'type': 'hosts',
'name': None,
'entries': []
}
for line in hosts_contents:
line = line.strip()
if line.startswith('#') or not line:
continue
elif line.startswith('['):
sections.append(cur_section)
section_type, name = self._parse_line_section(line)
cur_section = {
'type': section_type,
'name': name,
'entries': []
}
else:
name, vars = self._parse_line_entry(line, cur_section['type'])
entry = {
'name': name,
'hostvars': vars
}
cur_section['entries'].append(entry)
sections.append(cur_section)
return sections |
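An illustrative call; inv is assumed to be an instance of the inventory parser class, and the inventory lines are made up.

lines = [
    "db1.example.com ansible_ssh_host=10.0.0.12",
    "[frontend]",
    "web1.example.com",
    "[production:children]",
    'frontend purpose="web"',
    "[production:vars]",
    "ntp_server=ntp.example.com",
]
sections = inv._parse_hosts_contents(lines)
# sections[0] -> implicit top-level 'hosts' section holding db1.example.com
# sections[1] -> {'type': 'hosts',    'name': 'frontend',   ...}
# sections[2] -> {'type': 'children', 'name': 'production', ...}
# sections[3] -> {'type': 'vars',     'name': 'production', ...}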
def out_format(data, out='nested', opts=None, **kwargs):
'''
Return the formatted outputter string for the Python object.
data
The JSON serializable object.
out: ``nested``
The name of the output to use to transform the data. Default: ``nested``.
opts
Dictionary of configuration options. Default: ``__opts__``.
kwargs
Arguments to send to the outputter module.
CLI Example:
.. code-block:: bash
salt '*' out.out_format "{'key': 'value'}"
'''
if not opts:
opts = __opts__
return salt.output.out_format(data, out, opts=opts, **kwargs) | Return the formatted outputter string for the Python object.
data
The JSON serializable object.
out: ``nested``
The name of the output to use to transform the data. Default: ``nested``.
opts
Dictionary of configuration options. Default: ``__opts__``.
kwargs
Arguments to send to the outputter module.
CLI Example:
.. code-block:: bash
salt '*' out.out_format "{'key': 'value'}" | Below is the instruction that describes the task:
### Input:
Return the formatted outputter string for the Python object.
data
The JSON serializable object.
out: ``nested``
The name of the output to use to transform the data. Default: ``nested``.
opts
Dictionary of configuration options. Default: ``__opts__``.
kwargs
Arguments to send to the outputter module.
CLI Example:
.. code-block:: bash
salt '*' out.out_format "{'key': 'value'}"
### Response:
def out_format(data, out='nested', opts=None, **kwargs):
'''
Return the formatted outputter string for the Python object.
data
The JSON serializable object.
out: ``nested``
The name of the output to use to transform the data. Default: ``nested``.
opts
Dictionary of configuration options. Default: ``__opts__``.
kwargs
Arguments to send to the outputter module.
CLI Example:
.. code-block:: bash
salt '*' out.out_format "{'key': 'value'}"
'''
if not opts:
opts = __opts__
return salt.output.out_format(data, out, opts=opts, **kwargs) |
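A hedged sketch of calling the same function from Python rather than the Salt CLI; the minion config path is illustrative, and out_format above is assumed to be in scope.

import salt.config

opts = salt.config.minion_config('/etc/salt/minion')   # path is illustrative
print(out_format({'key': 'value'}, out='json', opts=opts))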
def execute(self, sensor_graph, scope_stack):
"""Execute this statement on the sensor_graph given the current scope tree.
This adds a single node to the sensor graph with subtract as the function
so that the current scope's trigger stream has the subtract_stream's value
subtracted from it.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
"""
if self.subtract_stream.stream_type != DataStream.ConstantType:
raise SensorGraphSemanticError("You can only subtract a constant value currently", stream=self.subtract_stream)
parent = scope_stack[-1]
alloc = parent.allocator
trigger_stream, trigger_cond = parent.trigger_chain()
sensor_graph.add_node(u"({} always && {} {}) => {} using {}".format(self.subtract_stream, trigger_stream, trigger_cond, self.stream, 'subtract_afromb'))
value = self.default
if value is None:
value = 0
if self.default is not None and self.subtract_stream in sensor_graph.constant_database:
raise SensorGraphSemanticError("Attempted to set the same constant stream twice", stream=self.subtract_stream, new_value=self.default)
elif self.default is None and self.subtract_stream in sensor_graph.constant_database:
return
sensor_graph.add_constant(self.subtract_stream, value) | Execute this statement on the sensor_graph given the current scope tree.
This adds a single node to the sensor graph with subtract as the function
so that the current scope's trigger stream has the subtract_stream's value
subtracted from it.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources. | Below is the instruction that describes the task:
### Input:
Execute this statement on the sensor_graph given the current scope tree.
This adds a single node to the sensor graph with subtract as the function
so that the current scope's trigger stream has the subtract_stream's value
subtracted from it.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
### Response:
def execute(self, sensor_graph, scope_stack):
"""Execute this statement on the sensor_graph given the current scope tree.
This adds a single node to the sensor graph with subtract as the function
so that the current scope's trigger stream has the subtract_stream's value
subtracted from it.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources.
"""
if self.subtract_stream.stream_type != DataStream.ConstantType:
raise SensorGraphSemanticError("You can only subtract a constant value currently", stream=self.subtract_stream)
parent = scope_stack[-1]
alloc = parent.allocator
trigger_stream, trigger_cond = parent.trigger_chain()
sensor_graph.add_node(u"({} always && {} {}) => {} using {}".format(self.subtract_stream, trigger_stream, trigger_cond, self.stream, 'subtract_afromb'))
value = self.default
if value is None:
value = 0
if self.default is not None and self.subtract_stream in sensor_graph.constant_database:
raise SensorGraphSemanticError("Attempted to set the same constant stream twice", stream=self.subtract_stream, new_value=self.default)
elif self.default is None and self.subtract_stream in sensor_graph.constant_database:
return
sensor_graph.add_constant(self.subtract_stream, value) |
def authenticate(self, *args, **kwargs):
'''
Authenticate the user against LDAP
'''
# Get config
username = kwargs.get("username", None)
password = kwargs.get("password", None)
# Check user in Active Directory (authorization == None if can not connect to Active Directory Server)
authorization = self.ldap_link(username, password, mode='LOGIN')
if authorization:
# The user was validated in Active Directory
user = self.get_or_create_user(username, password)
# Get or get_create_user will revalidate the new user
if user:
# If the user has been properly validated
user.is_active = True
user.save()
else:
# Locate user in our system
user = User.objects.filter(username=username).first()
if user and not user.is_staff:
# If access was denied
if authorization is False or getattr(settings, "AD_LOCK_UNAUTHORIZED", False):
# Deactivate the user
user.is_active = False
user.save()
# No access and no user here
user = None
# Return the final decision
return user | Authenticate the user against LDAP | Below is the instruction that describes the task:
### Input:
Authenticate the user against LDAP
### Response:
def authenticate(self, *args, **kwargs):
'''
Authenticate the user against LDAP
'''
# Get config
username = kwargs.get("username", None)
password = kwargs.get("password", None)
# Check user in Active Directory (authorization == None if can not connect to Active Directory Server)
authorization = self.ldap_link(username, password, mode='LOGIN')
if authorization:
# The user was validated in Active Directory
user = self.get_or_create_user(username, password)
# Get or get_create_user will revalidate the new user
if user:
# If the user has been properly validated
user.is_active = True
user.save()
else:
# Locate user in our system
user = User.objects.filter(username=username).first()
if user and not user.is_staff:
# If access was denied
if authorization is False or getattr(settings, "AD_LOCK_UNAUTHORIZED", False):
# Deactivate the user
user.is_active = False
user.save()
# No access and no user here
user = None
# Return the final decision
return user |
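A hedged wiring sketch for Django; the backend's dotted path is a placeholder, while authenticate() is the standard django.contrib.auth entry point that ends up calling the method above.

# settings.py
AUTHENTICATION_BACKENDS = [
    'myproject.auth_backends.ActiveDirectoryBackend',   # hypothetical dotted path
    'django.contrib.auth.backends.ModelBackend',
]

# in a view
from django.contrib.auth import authenticate
user = authenticate(username='jdoe', password='secret')
if user is None:
    pass  # rejected by AD, or deactivated when AD_LOCK_UNAUTHORIZED is set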
def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs):
"""Start a local proxy server.
The server distributes incoming requests to a pool of found proxies.
When the server receives an incoming request, it chooses the optimal
proxy (based on the percentage of errors and average response time)
and passes to it the incoming request.
In addition to the parameters listed below, this method also accepts all
the parameters of the :meth:`.find` method and passes them on to gather
proxies into a pool.
:ref:`Example of usage <proxybroker-examples-server>`.
:param str host: (optional) Host of local proxy server
:param int port: (optional) Port of local proxy server
:param int limit:
(optional) Once the requested number of working proxies has been
found, checking of new proxies is lazily paused.
Checking resumes if all the found proxies are discarded
in the process of working with them (see :attr:`max_error_rate`,
:attr:`max_resp_time`) and continues until one working proxy is
found, then pauses again. The default value is 100
:param int max_tries:
(optional) The maximum number of attempts to handle an incoming
request. If not specified, it will use the value specified during
the creation of the :class:`Broker` object. Attempts can be made
with different proxies. The default value is 3
:param int min_req_proxy:
(optional) The minimum number of processed requests to estimate the
quality of proxy (in accordance with :attr:`max_error_rate` and
:attr:`max_resp_time`). The default value is 5
:param int max_error_rate:
(optional) The maximum percentage of requests that ended with
an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this
value, proxy will be removed from the pool.
The default value is 0.5
:param int max_resp_time:
(optional) The maximum response time in seconds.
If proxy.avg_resp_time exceeds this value, proxy will be removed
from the pool. The default value is 8
:param bool prefer_connect:
(optional) Flag that indicates whether to use the CONNECT method
if possible. For example: If it is set to True and a proxy supports
HTTP proto (GET or POST requests) and CONNECT method, the server
will try to use CONNECT method and only after that send the
original request. The default value is False
:param list http_allowed_codes:
(optional) Acceptable HTTP codes returned by proxy on requests.
If a proxy returns a code not included in this list, it will be
considered a proxy error, not a wrong/unavailable address.
For example, if a proxy will return a ``404 Not Found`` response -
this will be considered as an error of a proxy.
Checks only for HTTP protocol, HTTPS not supported at the moment.
By default the list is empty and the response code is not verified
:param int backlog:
(optional) The maximum number of queued connections passed to
listen. The default value is 100
:raises ValueError:
If :attr:`limit` is less than or equal to zero.
Because a parsing of providers will be endless
.. versionadded:: 0.2.0
"""
if limit <= 0:
raise ValueError(
'In serve mode value of the limit cannot be less than or '
'equal to zero. Otherwise, a parsing of providers will be '
'endless'
)
self._server = Server(
host=host,
port=port,
proxies=self._proxies,
timeout=self._timeout,
max_tries=kwargs.pop('max_tries', self._max_tries),
loop=self._loop,
**kwargs
)
self._server.start()
task = asyncio.ensure_future(self.find(limit=limit, **kwargs))
self._all_tasks.append(task) | Start a local proxy server.
The server distributes incoming requests to a pool of found proxies.
When the server receives an incoming request, it chooses the optimal
proxy (based on the percentage of errors and average response time)
and passes to it the incoming request.
In addition to the parameters listed below, this method also accepts all
the parameters of the :meth:`.find` method and passes them on to gather
proxies into a pool.
:ref:`Example of usage <proxybroker-examples-server>`.
:param str host: (optional) Host of local proxy server
:param int port: (optional) Port of local proxy server
:param int limit:
(optional) Once the requested number of working proxies has been
found, checking of new proxies is lazily paused.
Checking resumes if all the found proxies are discarded
in the process of working with them (see :attr:`max_error_rate`,
:attr:`max_resp_time`) and continues until one working proxy is
found, then pauses again. The default value is 100
:param int max_tries:
(optional) The maximum number of attempts to handle an incoming
request. If not specified, it will use the value specified during
the creation of the :class:`Broker` object. Attempts can be made
with different proxies. The default value is 3
:param int min_req_proxy:
(optional) The minimum number of processed requests to estimate the
quality of proxy (in accordance with :attr:`max_error_rate` and
:attr:`max_resp_time`). The default value is 5
:param int max_error_rate:
(optional) The maximum percentage of requests that ended with
an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this
value, proxy will be removed from the pool.
The default value is 0.5
:param int max_resp_time:
(optional) The maximum response time in seconds.
If proxy.avg_resp_time exceeds this value, proxy will be removed
from the pool. The default value is 8
:param bool prefer_connect:
(optional) Flag that indicates whether to use the CONNECT method
if possible. For example: If it is set to True and a proxy supports
HTTP proto (GET or POST requests) and CONNECT method, the server
will try to use CONNECT method and only after that send the
original request. The default value is False
:param list http_allowed_codes:
(optional) Acceptable HTTP codes returned by proxy on requests.
If a proxy returns a code not included in this list, it will be
considered a proxy error, not a wrong/unavailable address.
For example, if a proxy will return a ``404 Not Found`` response -
this will be considered as an error of a proxy.
Checks only for HTTP protocol, HTTPS not supported at the moment.
By default the list is empty and the response code is not verified
:param int backlog:
(optional) The maximum number of queued connections passed to
listen. The default value is 100
:raises ValueError:
If :attr:`limit` is less than or equal to zero.
Because a parsing of providers will be endless
.. versionadded:: 0.2.0 | Below is the instruction that describes the task:
### Input:
Start a local proxy server.
The server distributes incoming requests to a pool of found proxies.
When the server receives an incoming request, it chooses the optimal
proxy (based on the percentage of errors and average response time)
and passes to it the incoming request.
In addition to the parameters listed below, this method also accepts all
the parameters of the :meth:`.find` method and passes them on to gather
proxies into a pool.
:ref:`Example of usage <proxybroker-examples-server>`.
:param str host: (optional) Host of local proxy server
:param int port: (optional) Port of local proxy server
:param int limit:
(optional) Once the requested number of working proxies has been
found, checking of new proxies is lazily paused.
Checking resumes if all the found proxies are discarded
in the process of working with them (see :attr:`max_error_rate`,
:attr:`max_resp_time`) and continues until one working proxy is
found, then pauses again. The default value is 100
:param int max_tries:
(optional) The maximum number of attempts to handle an incoming
request. If not specified, it will use the value specified during
the creation of the :class:`Broker` object. Attempts can be made
with different proxies. The default value is 3
:param int min_req_proxy:
(optional) The minimum number of processed requests to estimate the
quality of proxy (in accordance with :attr:`max_error_rate` and
:attr:`max_resp_time`). The default value is 5
:param int max_error_rate:
(optional) The maximum percentage of requests that ended with
an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this
value, proxy will be removed from the pool.
The default value is 0.5
:param int max_resp_time:
(optional) The maximum response time in seconds.
If proxy.avg_resp_time exceeds this value, proxy will be removed
from the pool. The default value is 8
:param bool prefer_connect:
(optional) Flag that indicates whether to use the CONNECT method
if possible. For example: If it is set to True and a proxy supports
HTTP proto (GET or POST requests) and CONNECT method, the server
will try to use CONNECT method and only after that send the
original request. The default value is False
:param list http_allowed_codes:
(optional) Acceptable HTTP codes returned by proxy on requests.
If a proxy returns a code not included in this list, it will be
considered a proxy error, not a wrong/unavailable address.
For example, if a proxy will return a ``404 Not Found`` response -
this will be considered as an error of a proxy.
Checks only for HTTP protocol, HTTPS not supported at the moment.
By default the list is empty and the response code is not verified
:param int backlog:
(optional) The maximum number of queued connections passed to
listen. The default value is 100
:raises ValueError:
If :attr:`limit` is less than or equal to zero.
Because a parsing of providers will be endless
.. versionadded:: 0.2.0
### Response:
def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs):
"""Start a local proxy server.
The server distributes incoming requests to a pool of found proxies.
When the server receives an incoming request, it chooses the optimal
proxy (based on the percentage of errors and average response time)
and passes to it the incoming request.
In addition to the parameters listed below, this method also accepts all
the parameters of the :meth:`.find` method and passes them on to gather
proxies into a pool.
:ref:`Example of usage <proxybroker-examples-server>`.
:param str host: (optional) Host of local proxy server
:param int port: (optional) Port of local proxy server
:param int limit:
(optional) Once the requested number of working proxies has been
found, checking of new proxies is lazily paused.
Checking resumes if all the found proxies are discarded
in the process of working with them (see :attr:`max_error_rate`,
:attr:`max_resp_time`) and continues until one working proxy is
found, then pauses again. The default value is 100
:param int max_tries:
(optional) The maximum number of attempts to handle an incoming
request. If not specified, it will use the value specified during
the creation of the :class:`Broker` object. Attempts can be made
with different proxies. The default value is 3
:param int min_req_proxy:
(optional) The minimum number of processed requests to estimate the
quality of proxy (in accordance with :attr:`max_error_rate` and
:attr:`max_resp_time`). The default value is 5
:param int max_error_rate:
(optional) The maximum percentage of requests that ended with
an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this
value, proxy will be removed from the pool.
The default value is 0.5
:param int max_resp_time:
(optional) The maximum response time in seconds.
If proxy.avg_resp_time exceeds this value, proxy will be removed
from the pool. The default value is 8
:param bool prefer_connect:
(optional) Flag that indicates whether to use the CONNECT method
if possible. For example: If it is set to True and a proxy supports
HTTP proto (GET or POST requests) and CONNECT method, the server
will try to use CONNECT method and only after that send the
original request. The default value is False
:param list http_allowed_codes:
(optional) Acceptable HTTP codes returned by proxy on requests.
If a proxy returns a code not included in this list, it will be
considered a proxy error, not a wrong/unavailable address.
For example, if a proxy will return a ``404 Not Found`` response -
this will be considered as an error of a proxy.
Checks only for HTTP protocol, HTTPS not supported at the moment.
By default the list is empty and the response code is not verified
:param int backlog:
(optional) The maximum number of queued connections passed to
listen. The default value is 100
:raises ValueError:
If :attr:`limit` is less than or equal to zero.
Because a parsing of providers will be endless
.. versionadded:: 0.2.0
"""
if limit <= 0:
raise ValueError(
'In serve mode value of the limit cannot be less than or '
'equal to zero. Otherwise, a parsing of providers will be '
'endless'
)
self._server = Server(
host=host,
port=port,
proxies=self._proxies,
timeout=self._timeout,
max_tries=kwargs.pop('max_tries', self._max_tries),
loop=self._loop,
**kwargs
)
self._server.start()
task = asyncio.ensure_future(self.find(limit=limit, **kwargs))
self._all_tasks.append(task) |
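A minimal serve-mode sketch; the constructor arguments and the need to keep the event loop running afterwards are assumptions that may vary between proxybroker versions.

import asyncio
from proxybroker import Broker

broker = Broker(max_tries=3)
broker.serve(host='127.0.0.1', port=8888, types=['HTTP', 'HTTPS'], limit=10)

# The local server now accepts clients on 127.0.0.1:8888, e.g.
#   requests.get('http://httpbin.org/ip',
#                proxies={'http': 'http://127.0.0.1:8888'})
# Keep the loop alive for as long as the proxy server should run:
asyncio.get_event_loop().run_forever()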