code (stringlengths 75-104k) | docstring (stringlengths 1-46.9k) | text (stringlengths 164-112k) |
---|---|---|
def tci_path(self):
"""Return the path to the granules TrueColorImage."""
tci_paths = [
path for path in self.dataset._product_metadata.xpath(
".//Granule[@granuleIdentifier='%s']/IMAGE_FILE/text()"
% self.granule_identifier
) if path.endswith('TCI')
]
try:
tci_path = tci_paths[0]
except IndexError:
return None
return os.path.join(
self.dataset._zip_root if self.dataset.is_zip else self.dataset.path,
tci_path
) + '.jp2' | Return the path to the granules TrueColorImage. | Below is the instruction that describes the task:
### Input:
Return the path to the granules TrueColorImage.
### Response:
def tci_path(self):
"""Return the path to the granules TrueColorImage."""
tci_paths = [
path for path in self.dataset._product_metadata.xpath(
".//Granule[@granuleIdentifier='%s']/IMAGE_FILE/text()"
% self.granule_identifier
) if path.endswith('TCI')
]
try:
tci_path = tci_paths[0]
except IndexError:
return None
return os.path.join(
self.dataset._zip_root if self.dataset.is_zip else self.dataset.path,
tci_path
) + '.jp2' |
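
The snippet above depends on the dataset object's private `_product_metadata` handle. For context, here is a minimal stand-alone sketch of the same lookup using only the standard library and an invented metadata snippet (the XML below is illustrative, not the full Sentinel-2 schema):

```python
# Self-contained sketch of the "find the IMAGE_FILE ending in TCI" lookup,
# using xml.etree.ElementTree instead of lxml xpath. Element names are assumed.
import os
import xml.etree.ElementTree as ET

product_metadata_xml = """
<Product>
  <Granule granuleIdentifier="G1">
    <IMAGE_FILE>GRANULE/G1/IMG_DATA/T32_B04</IMAGE_FILE>
    <IMAGE_FILE>GRANULE/G1/IMG_DATA/T32_TCI</IMAGE_FILE>
  </Granule>
</Product>
"""

def tci_path(root, granule_identifier, base_path):
    """Return the first IMAGE_FILE entry ending in 'TCI', or None."""
    candidates = [
        el.text
        for el in root.findall(
            ".//Granule[@granuleIdentifier='%s']/IMAGE_FILE" % granule_identifier
        )
        if el.text and el.text.endswith("TCI")
    ]
    if not candidates:
        return None
    return os.path.join(base_path, candidates[0]) + ".jp2"

root = ET.fromstring(product_metadata_xml)
print(tci_path(root, "G1", "/data/product"))
# /data/product/GRANULE/G1/IMG_DATA/T32_TCI.jp2
```
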
def contacts_getPublicList(user_id):
"""Gets the contacts (Users) for the user_id"""
method = 'flickr.contacts.getPublicList'
data = _doget(method, auth=False, user_id=user_id)
if isinstance(data.rsp.contacts.contact, list):
return [User(user.nsid, username=user.username) \
for user in data.rsp.contacts.contact]
else:
user = data.rsp.contacts.contact
return [User(user.nsid, username=user.username)] | Gets the contacts (Users) for the user_id | Below is the instruction that describes the task:
### Input:
Gets the contacts (Users) for the user_id
### Response:
def contacts_getPublicList(user_id):
"""Gets the contacts (Users) for the user_id"""
method = 'flickr.contacts.getPublicList'
data = _doget(method, auth=False, user_id=user_id)
if isinstance(data.rsp.contacts.contact, list):
return [User(user.nsid, username=user.username) \
for user in data.rsp.contacts.contact]
else:
user = data.rsp.contacts.contact
return [User(user.nsid, username=user.username)] |
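
The branch in this helper exists because the XML-to-object layer returns a bare node when a user has exactly one contact and a list otherwise. A library-free sketch of that normalization, with a `Contact` namedtuple standing in for the parsed API node:

```python
# Generic "single item vs. list" normalization, independent of the Flickr API.
from collections import namedtuple

Contact = namedtuple("Contact", ["nsid", "username"])

def as_list(value):
    """Wrap a lone item in a list so callers always iterate uniformly."""
    return value if isinstance(value, list) else [value]

single = Contact("123@N00", "alice")
many = [Contact("123@N00", "alice"), Contact("456@N00", "bob")]

print([c.username for c in as_list(single)])  # ['alice']
print([c.username for c in as_list(many)])    # ['alice', 'bob']
```
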
def get_script_module(script_information, package='pylabcontrol', verbose=False):
"""
wrapper to get the module for a script
Args:
script_information: information of the script. This can be
- a dictionary
- a Script instance
- name of Script class
package (optional): name of the package to which the script belongs, i.e. pylabcontrol or b26toolkit only used when script_information is a string
Returns:
module
"""
module, _, _, _, _, _, _ = Script.get_script_information(script_information=script_information, package=package, verbose=verbose)
return module | wrapper to get the module for a script
Args:
script_information: information of the script. This can be
- a dictionary
- a Script instance
- name of Script class
package (optional): name of the package to which the script belongs, i.e. pylabcontrol or b26toolkit only used when script_information is a string
Returns:
module | Below is the instruction that describes the task:
### Input:
wrapper to get the module for a script
Args:
script_information: information of the script. This can be
- a dictionary
- a Script instance
- name of Script class
package (optional): name of the package to which the script belongs, i.e. pylabcontrol or b26toolkit only used when script_information is a string
Returns:
module
### Response:
def get_script_module(script_information, package='pylabcontrol', verbose=False):
"""
wrapper to get the module for a script
Args:
script_information: information of the script. This can be
- a dictionary
- a Script instance
- name of Script class
package (optional): name of the package to which the script belongs, i.e. pylabcontrol or b26toolkit only used when script_information is a string
Returns:
module
"""
module, _, _, _, _, _, _ = Script.get_script_information(script_information=script_information, package=package, verbose=verbose)
return module |
def _open_channels(self) -> bool:
""" Open channels until there are `self.initial_channel_target`
channels open. Do nothing if there are enough channels open already.
Note:
- This method must be called with the lock held.
Return:
- False if no channels could be opened
"""
open_channels = views.get_channelstate_open(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=self.registry_address,
token_address=self.token_address,
)
open_channels = [
channel_state
for channel_state in open_channels
if channel_state.partner_state.address != self.BOOTSTRAP_ADDR
]
funded_channels = [
channel_state for channel_state in open_channels
if channel_state.our_state.contract_balance >= self._initial_funding_per_partner
]
nonfunded_channels = [
channel_state for channel_state in open_channels
if channel_state not in funded_channels
]
possible_new_partners = self._find_new_partners()
if possible_new_partners == 0:
return False
# if we already met our target, break
if len(funded_channels) >= self.initial_channel_target:
return False
# if we didn't, but there's no nonfunded channels and no available partners
# it means the network is smaller than our target, so we should also break
if not nonfunded_channels and possible_new_partners == 0:
return False
n_to_join = self.initial_channel_target - len(funded_channels)
nonfunded_partners = [
channel_state.partner_state.address
for channel_state in nonfunded_channels
]
# first, fund nonfunded channels, then open and fund with possible_new_partners,
# until initial_channel_target of funded channels is met
join_partners = (nonfunded_partners + possible_new_partners)[:n_to_join]
log.debug(
'Spawning greenlets to join partners',
node=pex(self.raiden.address),
num_greenlets=len(join_partners),
)
greenlets = set(
gevent.spawn(self._join_partner, partner)
for partner in join_partners
)
gevent.joinall(greenlets, raise_error=True)
return True | Open channels until there are `self.initial_channel_target`
channels open. Do nothing if there are enough channels open already.
Note:
- This method must be called with the lock held.
Return:
- False if no channels could be opened | Below is the instruction that describes the task:
### Input:
Open channels until there are `self.initial_channel_target`
channels open. Do nothing if there are enough channels open already.
Note:
- This method must be called with the lock held.
Return:
- False if no channels could be opened
### Response:
def _open_channels(self) -> bool:
""" Open channels until there are `self.initial_channel_target`
channels open. Do nothing if there are enough channels open already.
Note:
- This method must be called with the lock held.
Return:
- False if no channels could be opened
"""
open_channels = views.get_channelstate_open(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=self.registry_address,
token_address=self.token_address,
)
open_channels = [
channel_state
for channel_state in open_channels
if channel_state.partner_state.address != self.BOOTSTRAP_ADDR
]
funded_channels = [
channel_state for channel_state in open_channels
if channel_state.our_state.contract_balance >= self._initial_funding_per_partner
]
nonfunded_channels = [
channel_state for channel_state in open_channels
if channel_state not in funded_channels
]
possible_new_partners = self._find_new_partners()
if possible_new_partners == 0:
return False
# if we already met our target, break
if len(funded_channels) >= self.initial_channel_target:
return False
# if we didn't, but there's no nonfunded channels and no available partners
# it means the network is smaller than our target, so we should also break
if not nonfunded_channels and possible_new_partners == 0:
return False
n_to_join = self.initial_channel_target - len(funded_channels)
nonfunded_partners = [
channel_state.partner_state.address
for channel_state in nonfunded_channels
]
# first, fund nonfunded channels, then open and fund with possible_new_partners,
# until initial_channel_target of funded channels is met
join_partners = (nonfunded_partners + possible_new_partners)[:n_to_join]
log.debug(
'Spawning greenlets to join partners',
node=pex(self.raiden.address),
num_greenlets=len(join_partners),
)
greenlets = set(
gevent.spawn(self._join_partner, partner)
for partner in join_partners
)
gevent.joinall(greenlets, raise_error=True)
return True |
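
The core of this method is list partitioning plus a bounded selection, which can be illustrated without Raiden's state views. A pure-Python sketch under invented numbers (the `Channel` record and balances are stand-ins, not Raiden types):

```python
# Partition open channels by funding, then fill up to the target with
# under-funded partners first and brand-new partners second.
from collections import namedtuple

Channel = namedtuple("Channel", ["partner", "our_balance"])

def pick_join_partners(open_channels, new_partners, funding_target, channel_target):
    funded = [c for c in open_channels if c.our_balance >= funding_target]
    nonfunded = [c for c in open_channels if c.our_balance < funding_target]
    if len(funded) >= channel_target:
        return []  # target already met, nothing to join
    n_to_join = channel_target - len(funded)
    return ([c.partner for c in nonfunded] + list(new_partners))[:n_to_join]

channels = [Channel("A", 10), Channel("B", 3), Channel("C", 10)]
print(pick_join_partners(channels, ["D", "E"], funding_target=5, channel_target=4))
# ['B', 'D']
```
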
def _BuildToken(self, request, execution_time):
"""Build an ACLToken from the request."""
token = access_control.ACLToken(
username=request.user,
reason=request.args.get("reason", ""),
process="GRRAdminUI",
expiry=rdfvalue.RDFDatetime.Now() + execution_time)
for field in ["Remote_Addr", "X-Forwarded-For"]:
remote_addr = request.headers.get(field, "")
if remote_addr:
token.source_ips.append(remote_addr)
return token | Build an ACLToken from the request. | Below is the instruction that describes the task:
### Input:
Build an ACLToken from the request.
### Response:
def _BuildToken(self, request, execution_time):
"""Build an ACLToken from the request."""
token = access_control.ACLToken(
username=request.user,
reason=request.args.get("reason", ""),
process="GRRAdminUI",
expiry=rdfvalue.RDFDatetime.Now() + execution_time)
for field in ["Remote_Addr", "X-Forwarded-For"]:
remote_addr = request.headers.get(field, "")
if remote_addr:
token.source_ips.append(remote_addr)
return token |
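
The loop at the end collects candidate source IPs from two headers; a tiny self-contained sketch of just that step, with a plain dict standing in for `request.headers`:

```python
# Collect source IPs from the same two header fields used above.
def collect_source_ips(headers):
    ips = []
    for field in ["Remote_Addr", "X-Forwarded-For"]:
        value = headers.get(field, "")
        if value:
            ips.append(value)
    return ips

print(collect_source_ips({"Remote_Addr": "10.0.0.5"}))
# ['10.0.0.5']
print(collect_source_ips({"Remote_Addr": "10.0.0.5", "X-Forwarded-For": "203.0.113.7"}))
# ['10.0.0.5', '203.0.113.7']
```
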
def init_common(app):
"""Post initialization."""
if app.config['USERPROFILES_EXTEND_SECURITY_FORMS']:
security_ext = app.extensions['security']
security_ext.confirm_register_form = confirm_register_form_factory(
security_ext.confirm_register_form)
security_ext.register_form = register_form_factory(
security_ext.register_form) | Post initialization. | Below is the instruction that describes the task:
### Input:
Post initialization.
### Response:
def init_common(app):
"""Post initialization."""
if app.config['USERPROFILES_EXTEND_SECURITY_FORMS']:
security_ext = app.extensions['security']
security_ext.confirm_register_form = confirm_register_form_factory(
security_ext.confirm_register_form)
security_ext.register_form = register_form_factory(
security_ext.register_form) |
def wait_until_first_element_is_found(self, elements, timeout=None):
"""Search list of elements and wait until one of them is found
:param elements: list of PageElements or element locators as a tuple (locator_type, locator_value) to be found
sequentially
:param timeout: max time to wait
:returns: first element found
:rtype: toolium.pageelements.PageElement or tuple
:raises TimeoutException: If no element in the list is found after the timeout
"""
try:
return self._wait_until(self._expected_condition_find_first_element, elements, timeout)
except TimeoutException as exception:
msg = 'None of the page elements has been found after %s seconds'
timeout = timeout if timeout else self.get_explicitly_wait()
self.logger.error(msg, timeout)
exception.msg += "\n {}".format(msg % timeout)
raise exception | Search list of elements and wait until one of them is found
:param elements: list of PageElements or element locators as a tuple (locator_type, locator_value) to be found
sequentially
:param timeout: max time to wait
:returns: first element found
:rtype: toolium.pageelements.PageElement or tuple
:raises TimeoutException: If no element in the list is found after the timeout | Below is the instruction that describes the task:
### Input:
Search list of elements and wait until one of them is found
:param elements: list of PageElements or element locators as a tuple (locator_type, locator_value) to be found
sequentially
:param timeout: max time to wait
:returns: first element found
:rtype: toolium.pageelements.PageElement or tuple
:raises TimeoutException: If no element in the list is found after the timeout
### Response:
def wait_until_first_element_is_found(self, elements, timeout=None):
"""Search list of elements and wait until one of them is found
:param elements: list of PageElements or element locators as a tuple (locator_type, locator_value) to be found
sequentially
:param timeout: max time to wait
:returns: first element found
:rtype: toolium.pageelements.PageElement or tuple
:raises TimeoutException: If no element in the list is found after the timeout
"""
try:
return self._wait_until(self._expected_condition_find_first_element, elements, timeout)
except TimeoutException as exception:
msg = 'None of the page elements has been found after %s seconds'
timeout = timeout if timeout else self.get_explicitly_wait()
self.logger.error(msg, timeout)
exception.msg += "\n {}".format(msg % timeout)
raise exception |
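
Stripped of Selenium, the method is a polling loop over a list of lookups that returns the first hit or times out. A framework-free sketch (the lookup callables below are stand-ins for page-element locators):

```python
# Poll a list of lookup callables in order; return the first non-None result
# before the deadline, otherwise raise.
import time

def wait_until_first(lookups, timeout=2.0, poll=0.1):
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        for lookup in lookups:
            result = lookup()
            if result is not None:
                return result
        time.sleep(poll)
    raise TimeoutError(
        "None of the page elements has been found after %s seconds" % timeout
    )

start = time.monotonic()

def never():
    return None

def appears_late():
    return "element" if time.monotonic() - start > 0.3 else None

print(wait_until_first([never, appears_late]))  # 'element' (after roughly 0.3 s)
```
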
def push(self, repository, tag=None, stream=False, auth_config=None,
decode=False):
"""
Push an image or a repository to the registry. Similar to the ``docker
push`` command.
Args:
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
Returns:
(generator or str): The output from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for line in cli.push('yourname/app', stream=True, decode=True):
... print(line)
{'status': 'Pushing repository yourname/app (1 tags)'}
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
{'status': 'Image already pushed, skipping', 'progressDetail':{},
'id': '511136ea3c5a'}
...
"""
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
u = self._url("/images/{0}/push", repository)
params = {
'tag': tag
}
headers = {}
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response, decode=decode)
return self._result(response) | Push an image or a repository to the registry. Similar to the ``docker
push`` command.
Args:
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
Returns:
(generator or str): The output from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for line in cli.push('yourname/app', stream=True, decode=True):
... print(line)
{'status': 'Pushing repository yourname/app (1 tags)'}
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
{'status': 'Image already pushed, skipping', 'progressDetail':{},
'id': '511136ea3c5a'}
... | Below is the instruction that describes the task:
### Input:
Push an image or a repository to the registry. Similar to the ``docker
push`` command.
Args:
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
Returns:
(generator or str): The output from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for line in cli.push('yourname/app', stream=True, decode=True):
... print(line)
{'status': 'Pushing repository yourname/app (1 tags)'}
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
{'status': 'Image already pushed, skipping', 'progressDetail':{},
'id': '511136ea3c5a'}
...
### Response:
def push(self, repository, tag=None, stream=False, auth_config=None,
decode=False):
"""
Push an image or a repository to the registry. Similar to the ``docker
push`` command.
Args:
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
Returns:
(generator or str): The output from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for line in cli.push('yourname/app', stream=True, decode=True):
... print(line)
{'status': 'Pushing repository yourname/app (1 tags)'}
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
{'status': 'Image already pushed, skipping', 'progressDetail':{},
'id': '511136ea3c5a'}
...
"""
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
u = self._url("/images/{0}/push", repository)
params = {
'tag': tag
}
headers = {}
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response, decode=decode)
return self._result(response) |
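
When no tag is supplied, `push()` leans on `utils.parse_repository_tag` to split `repository:tag` while leaving registry ports and `@digest` references intact. The following is a simplified approximation of that split, not the library's exact implementation:

```python
# Split a repository reference into (repository, tag-or-digest-or-None).
def parse_repository_tag(repo_name):
    if "@" in repo_name:                      # name@sha256:... -> digest reference
        repo, digest = repo_name.rsplit("@", 1)
        return repo, digest
    parts = repo_name.rsplit(":", 1)
    if len(parts) == 2 and "/" not in parts[1]:
        return parts[0], parts[1]             # the colon introduces a tag
    return repo_name, None                    # the colon was a registry port

print(parse_repository_tag("yourname/app"))           # ('yourname/app', None)
print(parse_repository_tag("yourname/app:latest"))    # ('yourname/app', 'latest')
print(parse_repository_tag("localhost:5000/app"))     # ('localhost:5000/app', None)
```
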
def add_replace(self, selector, replacement, upsert=False,
collation=None):
"""Create a replace document and add it to the list of ops.
"""
validate_ok_for_replace(replacement)
cmd = SON([('q', selector), ('u', replacement),
('multi', False), ('upsert', upsert)])
collation = validate_collation_or_none(collation)
if collation is not None:
self.uses_collation = True
cmd['collation'] = collation
self.ops.append((_UPDATE, cmd)) | Create a replace document and add it to the list of ops. | Below is the instruction that describes the task:
### Input:
Create a replace document and add it to the list of ops.
### Response:
def add_replace(self, selector, replacement, upsert=False,
collation=None):
"""Create a replace document and add it to the list of ops.
"""
validate_ok_for_replace(replacement)
cmd = SON([('q', selector), ('u', replacement),
('multi', False), ('upsert', upsert)])
collation = validate_collation_or_none(collation)
if collation is not None:
self.uses_collation = True
cmd['collation'] = collation
self.ops.append((_UPDATE, cmd)) |
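
`SON` is pymongo's insertion-ordered mapping; since Python 3.7 a plain dict keeps insertion order too, so the shape of the command document can be sketched without pymongo and without the validation helpers:

```python
# Build the same replace-command shape with a plain dict (ordered on 3.7+).
def build_replace_cmd(selector, replacement, upsert=False, collation=None):
    cmd = {"q": selector, "u": replacement, "multi": False, "upsert": upsert}
    if collation is not None:
        cmd["collation"] = collation
    return cmd

print(build_replace_cmd({"_id": 7}, {"_id": 7, "name": "new"}, upsert=True))
# {'q': {'_id': 7}, 'u': {'_id': 7, 'name': 'new'}, 'multi': False, 'upsert': True}
```
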
def basis_functions(degree, knot_vector, spans, knots):
""" Computes the non-vanishing basis functions for a list of parameters.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot vector, :math:`U`
:type knot_vector: list, tuple
:param spans: list of knot spans
:type spans: list, tuple
:param knots: list of knots or parameters
:type knots: list, tuple
:return: basis functions
:rtype: list
"""
basis = []
for span, knot in zip(spans, knots):
basis.append(basis_function(degree, knot_vector, span, knot))
return basis | Computes the non-vanishing basis functions for a list of parameters.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot vector, :math:`U`
:type knot_vector: list, tuple
:param spans: list of knot spans
:type spans: list, tuple
:param knots: list of knots or parameters
:type knots: list, tuple
:return: basis functions
:rtype: list | Below is the instruction that describes the task:
### Input:
Computes the non-vanishing basis functions for a list of parameters.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot vector, :math:`U`
:type knot_vector: list, tuple
:param spans: list of knot spans
:type spans: list, tuple
:param knots: list of knots or parameters
:type knots: list, tuple
:return: basis functions
:rtype: list
### Response:
def basis_functions(degree, knot_vector, spans, knots):
""" Computes the non-vanishing basis functions for a list of parameters.
:param degree: degree, :math:`p`
:type degree: int
:param knot_vector: knot vector, :math:`U`
:type knot_vector: list, tuple
:param spans: list of knot spans
:type spans: list, tuple
:param knots: list of knots or parameters
:type knots: list, tuple
:return: basis functions
:rtype: list
"""
basis = []
for span, knot in zip(spans, knots):
basis.append(basis_function(degree, knot_vector, span, knot))
return basis |
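
The wrapper above delegates the real work to `basis_function` for each span/knot pair. For completeness, here is a self-contained sketch of what such a call computes, using the Cox-de Boor recursion (NURBS Book, Alg. A2.2); `find_span` is re-implemented so the example runs on its own:

```python
# Non-vanishing B-spline basis functions at a parameter value.
def find_span(degree, knot_vector, knot):
    """Linear search for i with knot_vector[i] <= knot < knot_vector[i+1]."""
    n = len(knot_vector) - degree - 2          # index of the last basis function
    if knot >= knot_vector[n + 1]:
        return n                               # clamp the right end of the domain
    span = degree
    while knot >= knot_vector[span + 1]:
        span += 1
    return span

def basis_function(degree, knot_vector, span, knot):
    """The degree+1 non-vanishing basis functions N_{span-degree..span}(knot)."""
    N = [0.0] * (degree + 1)
    left = [0.0] * (degree + 1)
    right = [0.0] * (degree + 1)
    N[0] = 1.0
    for j in range(1, degree + 1):
        left[j] = knot - knot_vector[span + 1 - j]
        right[j] = knot_vector[span + j] - knot
        saved = 0.0
        for r in range(j):
            temp = N[r] / (right[r + 1] + left[j - r])
            N[r] = saved + right[r + 1] * temp
            saved = left[j - r] * temp
        N[j] = saved
    return N

U = [0, 0, 0, 1, 2, 3, 3, 3]                   # clamped knot vector, degree 2
u = 1.5
span = find_span(2, U, u)
print(span, basis_function(2, U, span, u))      # 3 [0.125, 0.75, 0.125] (sums to 1)
```
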
def depuncture(self,soft_bits,puncture_pattern = ('110','101'),
erase_value = 3.5):
"""
Apply de-puncturing to the soft bits coming from the channel. Erasure bits
are inserted to return the soft bit values back to a form that can be
Viterbi decoded.
:param soft_bits:
:param puncture_pattern:
:param erase_value:
:return:
Examples
--------
This example uses the following puncture matrix:
.. math::
\\begin{align*}
\\mathbf{A} = \\begin{bmatrix}
1 & 1 & 0 \\\\
1 & 0 & 1
\\end{bmatrix}
\\end{align*}
The upper row operates on the outputs for the :math:`G_{1}` polynomial and the lower row operates on the outputs of
the :math:`G_{2}` polynomial.
>>> import numpy as np
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> cc = fec_conv(('101','111'))
>>> x = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0])
>>> state = '00'
>>> y, state = cc.conv_encoder(x, state)
>>> yp = cc.puncture(y, ('110','101'))
>>> cc.depuncture(yp, ('110', '101'), 1)
array([ 0., 0., 0., 1., 1., 1., 1., 0., 0., 1., 1., 0., 1., 1., 0., 1., 1., 0.]
"""
# Check to see that the length of soft_bits is consistent with a rate
# 1/2 code.
L_pp = len(puncture_pattern[0])
L_pp1 = len([g1 for g1 in puncture_pattern[0] if g1 == '1'])
L_pp0 = len([g1 for g1 in puncture_pattern[0] if g1 == '0'])
#L_pp0 = len([g1 for g1 in pp1 if g1 == '0'])
N_softwords = int(np.floor(len(soft_bits)/float(2)))
if 2*N_softwords != len(soft_bits):
warnings.warn('Number of soft bits must be even!')
warnings.warn('Truncating bits to be compatible.')
soft_bits = soft_bits[:2*N_softwords]
# Extract the G1p and G2p encoded bits from the serial stream.
# Assume the stream is of the form [G1p G2p G1p G2p ... ],
# which for QPSK may be of the form [Ip Qp Ip Qp Ip Qp ... ]
x_G1 = soft_bits.reshape(N_softwords,2).take([0],
axis=1).reshape(1,N_softwords).flatten()
x_G2 = soft_bits.reshape(N_softwords,2).take([1],
axis=1).reshape(1,N_softwords).flatten()
# Check to see that the length of x_G1 and x_G2 is consistent with the
# puncture length period of the soft bits
N_punct_periods = int(np.floor(N_softwords/float(L_pp1)))
if L_pp1*N_punct_periods != N_softwords:
warnings.warn('Number of soft bits per puncture period is %d' % L_pp1)
warnings.warn('The number of soft bits is not a multiple')
warnings.warn('Truncating soft bits to be compatible.')
x_G1 = x_G1[:L_pp1*N_punct_periods]
x_G2 = x_G2[:L_pp1*N_punct_periods]
x_G1 = x_G1.reshape(N_punct_periods,L_pp1)
x_G2 = x_G2.reshape(N_punct_periods,L_pp1)
#Depuncture x_G1 and x_G1
g1_pp1 = [k for k,g1 in enumerate(puncture_pattern[0]) if g1 == '1']
g1_pp0 = [k for k,g1 in enumerate(puncture_pattern[0]) if g1 == '0']
g2_pp1 = [k for k,g2 in enumerate(puncture_pattern[1]) if g2 == '1']
g2_pp0 = [k for k,g2 in enumerate(puncture_pattern[1]) if g2 == '0']
x_E = erase_value*np.ones((N_punct_periods,L_pp0))
y_G1 = np.hstack((x_G1,x_E))
y_G2 = np.hstack((x_G2,x_E))
[g1_pp1.append(val) for idx,val in enumerate(g1_pp0)]
g1_comp = list(zip(g1_pp1,list(range(L_pp))))
g1_comp.sort()
G1_col_permute = [g1_comp[idx][1] for idx in range(L_pp)]
[g2_pp1.append(val) for idx,val in enumerate(g2_pp0)]
g2_comp = list(zip(g2_pp1,list(range(L_pp))))
g2_comp.sort()
G2_col_permute = [g2_comp[idx][1] for idx in range(L_pp)]
#permute columns to place erasure bits in the correct position
y = np.hstack((y_G1[:,G1_col_permute].reshape(L_pp*N_punct_periods,1),
y_G2[:,G2_col_permute].reshape(L_pp*N_punct_periods,
1))).reshape(1,2*L_pp*N_punct_periods).flatten()
return y | Apply de-puncturing to the soft bits coming from the channel. Erasure bits
are inserted to return the soft bit values back to a form that can be
Viterbi decoded.
:param soft_bits:
:param puncture_pattern:
:param erase_value:
:return:
Examples
--------
This example uses the following puncture matrix:
.. math::
\\begin{align*}
\\mathbf{A} = \\begin{bmatrix}
1 & 1 & 0 \\\\
1 & 0 & 1
\\end{bmatrix}
\\end{align*}
The upper row operates on the outputs for the :math:`G_{1}` polynomial and the lower row operates on the outputs of
the :math:`G_{2}` polynomial.
>>> import numpy as np
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> cc = fec_conv(('101','111'))
>>> x = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0])
>>> state = '00'
>>> y, state = cc.conv_encoder(x, state)
>>> yp = cc.puncture(y, ('110','101'))
>>> cc.depuncture(yp, ('110', '101'), 1)
array([ 0., 0., 0., 1., 1., 1., 1., 0., 0., 1., 1., 0., 1., 1., 0., 1., 1., 0.] | Below is the instruction that describes the task:
### Input:
Apply de-puncturing to the soft bits coming from the channel. Erasure bits
are inserted to return the soft bit values back to a form that can be
Viterbi decoded.
:param soft_bits:
:param puncture_pattern:
:param erase_value:
:return:
Examples
--------
This example uses the following puncture matrix:
.. math::
\\begin{align*}
\\mathbf{A} = \\begin{bmatrix}
1 & 1 & 0 \\\\
1 & 0 & 1
\\end{bmatrix}
\\end{align*}
The upper row operates on the outputs for the :math:`G_{1}` polynomial and the lower row operates on the outputs of
the :math:`G_{2}` polynomial.
>>> import numpy as np
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> cc = fec_conv(('101','111'))
>>> x = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0])
>>> state = '00'
>>> y, state = cc.conv_encoder(x, state)
>>> yp = cc.puncture(y, ('110','101'))
>>> cc.depuncture(yp, ('110', '101'), 1)
array([ 0., 0., 0., 1., 1., 1., 1., 0., 0., 1., 1., 0., 1., 1., 0., 1., 1., 0.]
### Response:
def depuncture(self,soft_bits,puncture_pattern = ('110','101'),
erase_value = 3.5):
"""
Apply de-puncturing to the soft bits coming from the channel. Erasure bits
are inserted to return the soft bit values back to a form that can be
Viterbi decoded.
:param soft_bits:
:param puncture_pattern:
:param erase_value:
:return:
Examples
--------
This example uses the following puncture matrix:
.. math::
\\begin{align*}
\\mathbf{A} = \\begin{bmatrix}
1 & 1 & 0 \\\\
1 & 0 & 1
\\end{bmatrix}
\\end{align*}
The upper row operates on the outputs for the :math:`G_{1}` polynomial and the lower row operates on the outputs of
the :math:`G_{2}` polynomial.
>>> import numpy as np
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> cc = fec_conv(('101','111'))
>>> x = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0])
>>> state = '00'
>>> y, state = cc.conv_encoder(x, state)
>>> yp = cc.puncture(y, ('110','101'))
>>> cc.depuncture(yp, ('110', '101'), 1)
array([ 0., 0., 0., 1., 1., 1., 1., 0., 0., 1., 1., 0., 1., 1., 0., 1., 1., 0.]
"""
# Check to see that the length of soft_bits is consistent with a rate
# 1/2 code.
L_pp = len(puncture_pattern[0])
L_pp1 = len([g1 for g1 in puncture_pattern[0] if g1 == '1'])
L_pp0 = len([g1 for g1 in puncture_pattern[0] if g1 == '0'])
#L_pp0 = len([g1 for g1 in pp1 if g1 == '0'])
N_softwords = int(np.floor(len(soft_bits)/float(2)))
if 2*N_softwords != len(soft_bits):
warnings.warn('Number of soft bits must be even!')
warnings.warn('Truncating bits to be compatible.')
soft_bits = soft_bits[:2*N_softwords]
# Extract the G1p and G2p encoded bits from the serial stream.
# Assume the stream is of the form [G1p G2p G1p G2p ... ],
# which for QPSK may be of the form [Ip Qp Ip Qp Ip Qp ... ]
x_G1 = soft_bits.reshape(N_softwords,2).take([0],
axis=1).reshape(1,N_softwords).flatten()
x_G2 = soft_bits.reshape(N_softwords,2).take([1],
axis=1).reshape(1,N_softwords).flatten()
# Check to see that the length of x_G1 and x_G2 is consistent with the
# puncture length period of the soft bits
N_punct_periods = int(np.floor(N_softwords/float(L_pp1)))
if L_pp1*N_punct_periods != N_softwords:
warnings.warn('Number of soft bits per puncture period is %d' % L_pp1)
warnings.warn('The number of soft bits is not a multiple')
warnings.warn('Truncating soft bits to be compatible.')
x_G1 = x_G1[:L_pp1*N_punct_periods]
x_G2 = x_G2[:L_pp1*N_punct_periods]
x_G1 = x_G1.reshape(N_punct_periods,L_pp1)
x_G2 = x_G2.reshape(N_punct_periods,L_pp1)
#Depuncture x_G1 and x_G1
g1_pp1 = [k for k,g1 in enumerate(puncture_pattern[0]) if g1 == '1']
g1_pp0 = [k for k,g1 in enumerate(puncture_pattern[0]) if g1 == '0']
g2_pp1 = [k for k,g2 in enumerate(puncture_pattern[1]) if g2 == '1']
g2_pp0 = [k for k,g2 in enumerate(puncture_pattern[1]) if g2 == '0']
x_E = erase_value*np.ones((N_punct_periods,L_pp0))
y_G1 = np.hstack((x_G1,x_E))
y_G2 = np.hstack((x_G2,x_E))
[g1_pp1.append(val) for idx,val in enumerate(g1_pp0)]
g1_comp = list(zip(g1_pp1,list(range(L_pp))))
g1_comp.sort()
G1_col_permute = [g1_comp[idx][1] for idx in range(L_pp)]
[g2_pp1.append(val) for idx,val in enumerate(g2_pp0)]
g2_comp = list(zip(g2_pp1,list(range(L_pp))))
g2_comp.sort()
G2_col_permute = [g2_comp[idx][1] for idx in range(L_pp)]
#permute columns to place erasure bits in the correct position
y = np.hstack((y_G1[:,G1_col_permute].reshape(L_pp*N_punct_periods,1),
y_G2[:,G2_col_permute].reshape(L_pp*N_punct_periods,
1))).reshape(1,2*L_pp*N_punct_periods).flatten()
return y |
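
The matrix bookkeeping above obscures a simple idea: drop the outputs the puncture pattern marks with '0' before transmission, and re-insert a neutral erasure value in those slots before decoding. A compact list-based sketch of that round trip (a conceptual illustration, not the class's internal implementation):

```python
# Puncture an interleaved [G1, G2, G1, G2, ...] stream by a pattern, then
# restore the punctured positions as erasure placeholders.
def puncture(bits, pattern=("110", "101")):
    period = len(pattern[0])
    out = []
    for start in range(0, len(bits), 2 * period):
        block = bits[start:start + 2 * period]
        for k in range(len(block) // 2):
            if pattern[0][k] == "1":
                out.append(block[2 * k])       # G1 output kept
            if pattern[1][k] == "1":
                out.append(block[2 * k + 1])   # G2 output kept
    return out

def depuncture(punctured, pattern=("110", "101"), erase=3.5):
    period = len(pattern[0])
    kept_per_period = sum(row.count("1") for row in pattern)
    n_periods = len(punctured) // kept_per_period
    it = iter(punctured)
    out = []
    for _ in range(n_periods):
        for k in range(period):
            for row in pattern:
                out.append(next(it) if row[k] == "1" else erase)
    return out

coded = [0, 1, 1, 0, 1, 1]          # one period of interleaved G1/G2 bits
sent = puncture(coded)
print(sent)                          # [0, 1, 1, 1]
print(depuncture(sent))              # [0, 1, 1, 3.5, 3.5, 1]
```
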
def tintWith(self, red, green, blue):
"""tintWith(self, red, green, blue)"""
if not self.colorspace or self.colorspace.n > 3:
print("warning: colorspace invalid for function")
return
return _fitz.Pixmap_tintWith(self, red, green, blue) | tintWith(self, red, green, blue) | Below is the instruction that describes the task:
### Input:
tintWith(self, red, green, blue)
### Response:
def tintWith(self, red, green, blue):
"""tintWith(self, red, green, blue)"""
if not self.colorspace or self.colorspace.n > 3:
print("warning: colorspace invalid for function")
return
return _fitz.Pixmap_tintWith(self, red, green, blue) |
def user_organization_memberships(self, user_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/organization_memberships#list-memberships"
api_path = "/api/v2/users/{user_id}/organization_memberships.json"
api_path = api_path.format(user_id=user_id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/organization_memberships#list-memberships | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/organization_memberships#list-memberships
### Response:
def user_organization_memberships(self, user_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/organization_memberships#list-memberships"
api_path = "/api/v2/users/{user_id}/organization_memberships.json"
api_path = api_path.format(user_id=user_id)
return self.call(api_path, **kwargs) |
def op_cmd(self, command, req_format='text', xpath_expr=""):
""" Execute an operational mode command.
Purpose: Used to send an operational mode command to the connected
| device. This requires and uses a paramiko.SSHClient() as
| the handler so that we can easily pass and allow all pipe
| commands to be used.
|
| We indiscriminately attach ' | no-more' on the end of
| every command so the device doesn't hold output. The
| req_format parameter can be set to 'xml' to force raw
| xml output in the reply.
@param command: The single command that to retrieve output from the
| device. Any pipes will be taken into account.
@type command: str
@param req_format: The desired format of the response, defaults to
| 'text', but also accepts 'xml'. **NOTE**: 'xml'
| will still return a string, not a libxml ElementTree
@type req_format: str
@returns: The reply from the device.
@rtype: str
"""
if not command:
raise InvalidCommandError("Parameter 'command' cannot be empty")
if req_format.lower() == 'xml' or xpath_expr:
command = command.strip() + ' | display xml'
command = command.strip() + ' | no-more\n'
out = ''
# when logging in as root, we use _shell to get the response.
if self.username == 'root':
self._shell.send(command)
time.sleep(3)
while self._shell.recv_ready():
out += self._shell.recv(999999)
time.sleep(.75)
# take off the command being sent and the prompt at the end.
out = '\n'.join(out.split('\n')[1:-2])
# not logging in as root, and can grab the output as normal.
else:
stdin, stdout, stderr = self._session.exec_command(command=command,
timeout=float(self.session_timeout))
stdin.close()
# read normal output
while not stdout.channel.exit_status_ready():
out += stdout.read()
stdout.close()
# read errors
while not stderr.channel.exit_status_ready():
out += stderr.read()
stderr.close()
return out if not xpath_expr else xpath(out, xpath_expr) | Execute an operational mode command.
Purpose: Used to send an operational mode command to the connected
| device. This requires and uses a paramiko.SSHClient() as
| the handler so that we can easily pass and allow all pipe
| commands to be used.
|
| We indiscriminately attach ' | no-more' on the end of
| every command so the device doesn't hold output. The
| req_format parameter can be set to 'xml' to force raw
| xml output in the reply.
@param command: The single command that to retrieve output from the
| device. Any pipes will be taken into account.
@type command: str
@param req_format: The desired format of the response, defaults to
| 'text', but also accepts 'xml'. **NOTE**: 'xml'
| will still return a string, not a libxml ElementTree
@type req_format: str
@returns: The reply from the device.
@rtype: str | Below is the instruction that describes the task:
### Input:
Execute an operational mode command.
Purpose: Used to send an operational mode command to the connected
| device. This requires and uses a paramiko.SSHClient() as
| the handler so that we can easily pass and allow all pipe
| commands to be used.
|
| We indiscriminately attach ' | no-more' on the end of
| every command so the device doesn't hold output. The
| req_format parameter can be set to 'xml' to force raw
| xml output in the reply.
@param command: The single command that to retrieve output from the
| device. Any pipes will be taken into account.
@type command: str
@param req_format: The desired format of the response, defaults to
| 'text', but also accepts 'xml'. **NOTE**: 'xml'
| will still return a string, not a libxml ElementTree
@type req_format: str
@returns: The reply from the device.
@rtype: str
### Response:
def op_cmd(self, command, req_format='text', xpath_expr=""):
""" Execute an operational mode command.
Purpose: Used to send an operational mode command to the connected
| device. This requires and uses a paramiko.SSHClient() as
| the handler so that we can easily pass and allow all pipe
| commands to be used.
|
| We indiscriminately attach ' | no-more' on the end of
| every command so the device doesn't hold output. The
| req_format parameter can be set to 'xml' to force raw
| xml output in the reply.
@param command: The single command that to retrieve output from the
| device. Any pipes will be taken into account.
@type command: str
@param req_format: The desired format of the response, defaults to
| 'text', but also accepts 'xml'. **NOTE**: 'xml'
| will still return a string, not a libxml ElementTree
@type req_format: str
@returns: The reply from the device.
@rtype: str
"""
if not command:
raise InvalidCommandError("Parameter 'command' cannot be empty")
if req_format.lower() == 'xml' or xpath_expr:
command = command.strip() + ' | display xml'
command = command.strip() + ' | no-more\n'
out = ''
# when logging in as root, we use _shell to get the response.
if self.username == 'root':
self._shell.send(command)
time.sleep(3)
while self._shell.recv_ready():
out += self._shell.recv(999999)
time.sleep(.75)
# take off the command being sent and the prompt at the end.
out = '\n'.join(out.split('\n')[1:-2])
# not logging in as root, and can grab the output as normal.
else:
stdin, stdout, stderr = self._session.exec_command(command=command,
timeout=float(self.session_timeout))
stdin.close()
# read normal output
while not stdout.channel.exit_status_ready():
out += stdout.read()
stdout.close()
# read errors
while not stderr.channel.exit_status_ready():
out += stderr.read()
stderr.close()
return out if not xpath_expr else xpath(out, xpath_expr) |
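
The piping convention ('| display xml', '| no-more') is independent of the SSH plumbing and can be isolated into a small pure function; a sketch of just that normalization step:

```python
# Append the same pipe suffixes op_cmd() uses before sending a command.
def normalize_command(command, req_format="text", xpath_expr=""):
    if not command:
        raise ValueError("Parameter 'command' cannot be empty")
    if req_format.lower() == "xml" or xpath_expr:
        command = command.strip() + " | display xml"
    return command.strip() + " | no-more\n"

print(normalize_command("show interfaces terse"))
# show interfaces terse | no-more
print(normalize_command("show interfaces terse", req_format="xml"))
# show interfaces terse | display xml | no-more
```
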
def status_update(self, crits_id, crits_type, status):
"""
Update the status of the TLO. By default, the options are:
- New
- In Progress
- Analyzed
- Deprecated
Args:
crits_id: The object id of the TLO
crits_type: The type of TLO. This must be 'Indicator', ''
status: The status to change.
Returns:
True if the status was updated. False otherwise.
Raises:
CRITsInvalidTypeError
"""
obj_type = self._type_translation(crits_type)
patch_url = "{0}/{1}/{2}/".format(self.url, obj_type, crits_id)
params = {
'api_key': self.api_key,
'username': self.username,
}
data = {
'action': 'status_update',
'value': status,
}
r = requests.patch(patch_url, params=params, data=data,
verify=self.verify, proxies=self.proxies)
if r.status_code == 200:
log.debug('Object {} set to {}'.format(crits_id, status))
return True
else:
log.error('Attempted to set object id {} to '
'Informational, but did not receive a '
'200'.format(crits_id))
log.error('Error message was: {}'.format(r.text))
return False | Update the status of the TLO. By default, the options are:
- New
- In Progress
- Analyzed
- Deprecated
Args:
crits_id: The object id of the TLO
crits_type: The type of TLO. This must be 'Indicator', ''
status: The status to change.
Returns:
True if the status was updated. False otherwise.
Raises:
CRITsInvalidTypeError | Below is the instruction that describes the task:
### Input:
Update the status of the TLO. By default, the options are:
- New
- In Progress
- Analyzed
- Deprecated
Args:
crits_id: The object id of the TLO
crits_type: The type of TLO. This must be 'Indicator', ''
status: The status to change.
Returns:
True if the status was updated. False otherwise.
Raises:
CRITsInvalidTypeError
### Response:
def status_update(self, crits_id, crits_type, status):
"""
Update the status of the TLO. By default, the options are:
- New
- In Progress
- Analyzed
- Deprecated
Args:
crits_id: The object id of the TLO
crits_type: The type of TLO. This must be 'Indicator', ''
status: The status to change.
Returns:
True if the status was updated. False otherwise.
Raises:
CRITsInvalidTypeError
"""
obj_type = self._type_translation(crits_type)
patch_url = "{0}/{1}/{2}/".format(self.url, obj_type, crits_id)
params = {
'api_key': self.api_key,
'username': self.username,
}
data = {
'action': 'status_update',
'value': status,
}
r = requests.patch(patch_url, params=params, data=data,
verify=self.verify, proxies=self.proxies)
if r.status_code == 200:
log.debug('Object {} set to {}'.format(crits_id, status))
return True
else:
log.error('Attempted to set object id {} to '
'Informational, but did not receive a '
'200'.format(crits_id))
log.error('Error message was: {}'.format(r.text))
return False |
def get_form(self, request, obj=None, **kwargs):
"""
Pass the current language to the form.
"""
form_class = super(TranslatableAdmin, self).get_form(request, obj, **kwargs)
if self._has_translatable_model():
form_class.language_code = self.get_form_language(request, obj)
return form_class | Pass the current language to the form. | Below is the instruction that describes the task:
### Input:
Pass the current language to the form.
### Response:
def get_form(self, request, obj=None, **kwargs):
"""
Pass the current language to the form.
"""
form_class = super(TranslatableAdmin, self).get_form(request, obj, **kwargs)
if self._has_translatable_model():
form_class.language_code = self.get_form_language(request, obj)
return form_class |
def get_integration_module(integration_path):
"""Add custom paths to sys and import integration module.
:param integration_path: Path to integration folder
"""
# add custom paths so imports would work
paths = [
os.path.join(__file__, "..", ".."), # to import integrationmanager
os.path.join(integration_path, ".."), # to import integration itself
os.path.join(integration_path, DEPS_DIR), # to import integration deps
]
for path in paths:
path = os.path.realpath(path)
logger.debug("adding %s to path", path)
sys.path.insert(0, path)
# get our integration class instance
integration_name = os.path.basename(integration_path)
logger.debug("importing %s", ".".join([integration_name, INTEGRATION]))
return importlib.import_module(".".join([integration_name, INTEGRATION])) | Add custom paths to sys and import integration module.
:param integration_path: Path to integration folder | Below is the instruction that describes the task:
### Input:
Add custom paths to sys and import integration module.
:param integration_path: Path to integration folder
### Response:
def get_integration_module(integration_path):
"""Add custom paths to sys and import integration module.
:param integration_path: Path to integration folder
"""
# add custom paths so imports would work
paths = [
os.path.join(__file__, "..", ".."), # to import integrationmanager
os.path.join(integration_path, ".."), # to import integration itself
os.path.join(integration_path, DEPS_DIR), # to import integration deps
]
for path in paths:
path = os.path.realpath(path)
logger.debug("adding %s to path", path)
sys.path.insert(0, path)
# get our integration class instance
integration_name = os.path.basename(integration_path)
logger.debug("importing %s", ".".join([integration_name, INTEGRATION]))
return importlib.import_module(".".join([integration_name, INTEGRATION])) |
def _get_const_info(const_index, const_list):
"""Helper to get optional details about const references
Returns the dereferenced constant and its repr if the constant
list is defined.
Otherwise returns the constant index and its repr().
"""
argval = const_index
if const_list is not None:
argval = const_list[const_index]
# float values nan and inf are not directly representable in Python at least
# before 3.5 and even there it is via a library constant.
# So we will canonicalize their representation as float('nan') and float('inf')
if isinstance(argval, float) and str(argval) in frozenset(['nan', '-nan', 'inf', '-inf']):
return argval, "float('%s')" % argval
return argval, repr(argval) | Helper to get optional details about const references
Returns the dereferenced constant and its repr if the constant
list is defined.
Otherwise returns the constant index and its repr(). | Below is the instruction that describes the task:
### Input:
Helper to get optional details about const references
Returns the dereferenced constant and its repr if the constant
list is defined.
Otherwise returns the constant index and its repr().
### Response:
def _get_const_info(const_index, const_list):
"""Helper to get optional details about const references
Returns the dereferenced constant and its repr if the constant
list is defined.
Otherwise returns the constant index and its repr().
"""
argval = const_index
if const_list is not None:
argval = const_list[const_index]
# float values nan and inf are not directly representable in Python at least
# before 3.5 and even there it is via a library constant.
# So we will canonicalize their representation as float('nan') and float('inf')
if isinstance(argval, float) and str(argval) in frozenset(['nan', '-nan', 'inf', '-inf']):
return argval, "float('%s')" % argval
return argval, repr(argval) |
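
The canonicalization exists because the default repr of nan/inf ('nan', 'inf') is not itself valid Python source, so the helper rewrites those constants as `float('...')` calls. A quick self-contained check of that behaviour:

```python
# Canonicalize nan/inf constants the same way the helper above does.
def const_repr(value):
    if isinstance(value, float) and str(value) in {"nan", "-nan", "inf", "-inf"}:
        return "float('%s')" % value
    return repr(value)

print(repr(float("nan")), "->", const_repr(float("nan")))  # nan -> float('nan')
print(repr(float("inf")), "->", const_repr(float("inf")))  # inf -> float('inf')
print(const_repr(42))                                       # 42
```
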
def get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
data=None,
raw=False,
ignore_invalidation_bits=False,
source=None,
record_offset=0,
record_count=None,
copy_master=True,
):
"""Gets channel samples.
Channel can be specified in two ways:
* using the first positional argument *name*
* if *source* is given this will be first used to validate the
channel selection
* if there are multiple occurances for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurances for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
data : bytes
prevent redundant data read by providing the raw data group samples
raw : bool
return channel samples without appling the conversion rule; default
`False`
ignore_invalidation_bits : bool
only defined to have the same API with the MDF v4
source : str
source name used to select the channel
record_offset : int
if *data=None* use this to select the record offset from which the
group data should be loaded
copy_master : bool
make a copy of the timebase for this channel
Returns
-------
res : (numpy.array, None) | Signal
returns *Signal* if *samples_only*=*False* (default option),
otherwise returns a (numpy.array, None) tuple (for compatibility
with MDF v4 class.
The *Signal* samples are
* numpy recarray for channels that have CDBLOCK or BYTEARRAY
type channels
* numpy array for all the rest
Raises
------
MdfException :
* if the channel name is not found
* if the group index is out of range
* if the channel index is out of range
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> t = np.arange(5)
>>> s = np.ones(5)
>>> mdf = MDF(version='3.30')
>>> for i in range(4):
... sigs = [Signal(s*(i*10+j), t, name='Sig') for j in range(1, 4)]
... mdf.append(sigs)
...
>>> # first group and channel index of the specified channel name
...
>>> mdf.get('Sig')
UserWarning: Multiple occurances for channel "Sig". Using first occurance from data group 4. Provide both "group" and "index" arguments to select another data group
<Signal Sig:
samples=[ 1. 1. 1. 1. 1.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # first channel index in the specified group
...
>>> mdf.get('Sig', 1)
<Signal Sig:
samples=[ 11. 11. 11. 11. 11.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel named Sig from group 1 channel index 2
...
>>> mdf.get('Sig', 1, 2)
<Signal Sig:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel index 1 or group 2
...
>>> mdf.get(None, 2, 1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> mdf.get(group=2, index=1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> mdf.get('Sig', source='VN7060')
<Signal Sig:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
"""
gp_nr, ch_nr = self._validate_channel_selection(
name, group, index, source=source
)
original_data = data
grp = self.groups[gp_nr]
if grp.data_location == v23c.LOCATION_ORIGINAL_FILE:
stream = self._file
else:
stream = self._tempfile
channel = grp.channels[ch_nr]
conversion = channel.conversion
name = channel.name
display_name = channel.display_name
bit_count = channel.bit_count or 64
dep = grp.channel_dependencies[ch_nr]
cycles_nr = grp.channel_group.cycles_nr
encoding = None
# get data group record
if data is None:
data = self._load_data(
grp, record_offset=record_offset, record_count=record_count
)
else:
data = (data,)
# check if this is a channel array
if dep:
if dep.dependency_type == v23c.DEPENDENCY_TYPE_VECTOR:
shape = [dep.sd_nr]
elif dep.dependency_type >= v23c.DEPENDENCY_TYPE_NDIM:
shape = []
i = 0
while True:
try:
dim = dep[f"dim_{i}"]
shape.append(dim)
i += 1
except KeyError:
break
shape = shape[::-1]
record_shape = tuple(shape)
arrays = [
self.get(
group=dg_nr,
index=ch_nr,
samples_only=True,
raw=raw,
data=original_data,
record_offset=record_offset,
record_count=record_count,
)[0]
for dg_nr, ch_nr in dep.referenced_channels
]
shape.insert(0, cycles_nr)
vals = column_stack(arrays).flatten().reshape(tuple(shape))
arrays = [vals]
types = [(channel.name, vals.dtype, record_shape)]
types = dtype(types)
vals = fromarrays(arrays, dtype=types)
if not samples_only or raster:
timestamps = self.get_master(
gp_nr,
original_data,
record_offset=record_offset,
record_count=record_count,
copy_master=copy_master,
)
if raster and len(timestamps) > 1:
num = float(float32((timestamps[-1] - timestamps[0]) / raster))
if num.is_integer():
t = linspace(timestamps[0], timestamps[-1], int(num))
else:
t = arange(timestamps[0], timestamps[-1], raster)
vals = (
Signal(vals, timestamps, name="_")
.interp(t, interpolation_mode=self._integer_interpolation)
.samples
)
timestamps = t
else:
# get channel values
channel_values = []
timestamps = []
count = 0
for fragment in data:
data_bytes, _offset, _count = fragment
parents, dtypes = self._prepare_record(grp)
try:
parent, bit_offset = parents[ch_nr]
except KeyError:
parent, bit_offset = None, None
bits = channel.bit_count
if parent is not None:
if grp.record is None:
if dtypes.itemsize:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = None
else:
record = grp.record
record.setflags(write=False)
vals = record[parent]
data_type = channel.data_type
size = vals.dtype.itemsize
if data_type == v23c.DATA_TYPE_BYTEARRAY:
size *= vals.shape[1]
vals_dtype = vals.dtype.kind
if vals_dtype not in "ui" and (bit_offset or not bits == size * 8):
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
else:
dtype_ = vals.dtype
kind_ = dtype_.kind
if data_type in v23c.INT_TYPES:
if kind_ == 'f':
if bits != size * 8:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
dtype_fmt = get_fmt_v3(data_type, bits)
channel_dtype = dtype(dtype_fmt.split(')')[-1])
vals = vals.view(channel_dtype)
else:
if dtype_.byteorder == ">":
if bit_offset or bits != size << 3:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
if bit_offset:
if dtype_.kind == "i":
vals = vals.astype(
dtype(f"{dtype_.byteorder}u{size}")
)
vals >>= bit_offset
else:
vals = vals >> bit_offset
if bits != size << 3:
if data_type in v23c.SIGNED_INT:
vals = as_non_byte_sized_signed_int(vals, bits)
else:
mask = (1 << bits) - 1
if vals.flags.writeable:
vals &= mask
else:
vals = vals & mask
else:
if bits != size * 8:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
if kind_ in "ui":
dtype_fmt = get_fmt_v3(data_type, bits)
channel_dtype = dtype(dtype_fmt.split(')')[-1])
vals = vals.view(channel_dtype)
else:
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
if not samples_only or raster:
timestamps.append(
self.get_master(gp_nr, fragment, copy_master=copy_master)
)
if bits == 1 and self._single_bit_uint_as_bool:
vals = array(vals, dtype=bool)
else:
data_type = channel.data_type
channel_dtype = array([], dtype=get_fmt_v3(data_type, bits))
if vals.dtype != channel_dtype.dtype:
vals = vals.astype(channel_dtype.dtype)
channel_values.append(vals.copy())
count += 1
if count > 1:
vals = concatenate(channel_values)
elif count == 1:
vals = channel_values[0]
else:
vals = []
if not samples_only or raster:
if count > 1:
timestamps = concatenate(timestamps)
else:
timestamps = timestamps[0]
if raster and len(timestamps) > 1:
num = float(float32((timestamps[-1] - timestamps[0]) / raster))
if num.is_integer():
t = linspace(timestamps[0], timestamps[-1], int(num))
else:
t = arange(timestamps[0], timestamps[-1], raster)
vals = (
Signal(vals, timestamps, name="_")
.interp(t, interpolation_mode=self._integer_interpolation)
.samples
)
timestamps = t
if conversion is None:
conversion_type = v23c.CONVERSION_TYPE_NONE
else:
conversion_type = conversion.conversion_type
if conversion_type == v23c.CONVERSION_TYPE_NONE:
if vals.dtype.kind == "S":
encoding = "latin-1"
if not raw:
if conversion:
vals = conversion.convert(vals)
conversion = None
if samples_only:
res = vals, None
else:
if conversion:
unit = conversion.unit
else:
unit = ""
comment = channel.comment
description = channel.description.decode("latin-1").strip(" \t\n\0")
if comment:
comment = f"{comment}\n{description}"
else:
comment = description
source = channel.source
if source:
if source["type"] == v23c.SOURCE_ECU:
source = SignalSource(
source.name,
source.path,
source.comment,
0, # source type other
0, # bus type none
)
else:
source = SignalSource(
source.name,
source.path,
source.comment,
2, # source type bus
2, # bus type CAN
)
master_metadata = self._master_channel_metadata.get(gp_nr, None)
res = Signal(
samples=vals,
timestamps=timestamps,
unit=unit,
name=channel.name,
comment=comment,
conversion=conversion,
raw=raw,
master_metadata=master_metadata,
display_name=display_name,
source=source,
bit_count=bit_count,
encoding=encoding,
)
return res | Gets channel samples.
Channel can be specified in two ways:
* using the first positional argument *name*
* if *source* is given this will be first used to validate the
channel selection
* if there are multiple occurances for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurances for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
data : bytes
prevent redundant data read by providing the raw data group samples
raw : bool
return channel samples without appling the conversion rule; default
`False`
ignore_invalidation_bits : bool
only defined to have the same API with the MDF v4
source : str
source name used to select the channel
record_offset : int
if *data=None* use this to select the record offset from which the
group data should be loaded
copy_master : bool
make a copy of the timebase for this channel
Returns
-------
res : (numpy.array, None) | Signal
returns *Signal* if *samples_only*=*False* (default option),
otherwise returns a (numpy.array, None) tuple (for compatibility
with MDF v4 class.
The *Signal* samples are
* numpy recarray for channels that have CDBLOCK or BYTEARRAY
type channels
* numpy array for all the rest
Raises
------
MdfException :
* if the channel name is not found
* if the group index is out of range
* if the channel index is out of range
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> t = np.arange(5)
>>> s = np.ones(5)
>>> mdf = MDF(version='3.30')
>>> for i in range(4):
... sigs = [Signal(s*(i*10+j), t, name='Sig') for j in range(1, 4)]
... mdf.append(sigs)
...
>>> # first group and channel index of the specified channel name
...
>>> mdf.get('Sig')
UserWarning: Multiple occurances for channel "Sig". Using first occurance from data group 4. Provide both "group" and "index" arguments to select another data group
<Signal Sig:
samples=[ 1. 1. 1. 1. 1.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # first channel index in the specified group
...
>>> mdf.get('Sig', 1)
<Signal Sig:
samples=[ 11. 11. 11. 11. 11.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel named Sig from group 1 channel index 2
...
>>> mdf.get('Sig', 1, 2)
<Signal Sig:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel index 1 or group 2
...
>>> mdf.get(None, 2, 1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> mdf.get(group=2, index=1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> mdf.get('Sig', source='VN7060')
<Signal Sig:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment=""> | Below is the the instruction that describes the task:
### Input:
Gets channel samples.
Channel can be specified in two ways:
* using the first positional argument *name*
* if *source* is given this will be first used to validate the
channel selection
* if there are multiple occurances for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurances for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
data : bytes
prevent redundant data read by providing the raw data group samples
raw : bool
return channel samples without applying the conversion rule; default
`False`
ignore_invalidation_bits : bool
only defined to have the same API with the MDF v4
source : str
source name used to select the channel
record_offset : int
if *data=None* use this to select the record offset from which the
group data should be loaded
copy_master : bool
make a copy of the timebase for this channel
Returns
-------
res : (numpy.array, None) | Signal
returns *Signal* if *samples_only*=*False* (default option),
otherwise returns a (numpy.array, None) tuple (for compatibility
with MDF v4 class).
The *Signal* samples are
* numpy recarray for channels that have CDBLOCK or BYTEARRAY
type channels
* numpy array for all the rest
Raises
------
MdfException :
* if the channel name is not found
* if the group index is out of range
* if the channel index is out of range
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> t = np.arange(5)
>>> s = np.ones(5)
>>> mdf = MDF(version='3.30')
>>> for i in range(4):
... sigs = [Signal(s*(i*10+j), t, name='Sig') for j in range(1, 4)]
... mdf.append(sigs)
...
>>> # first group and channel index of the specified channel name
...
>>> mdf.get('Sig')
UserWarning: Multiple occurances for channel "Sig". Using first occurance from data group 4. Provide both "group" and "index" arguments to select another data group
<Signal Sig:
samples=[ 1. 1. 1. 1. 1.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # first channel index in the specified group
...
>>> mdf.get('Sig', 1)
<Signal Sig:
samples=[ 11. 11. 11. 11. 11.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel named Sig from group 1 channel index 2
...
>>> mdf.get('Sig', 1, 2)
<Signal Sig:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel index 1 or group 2
...
>>> mdf.get(None, 2, 1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> mdf.get(group=2, index=1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> mdf.get('Sig', source='VN7060')
<Signal Sig:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
### Response:
def get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
data=None,
raw=False,
ignore_invalidation_bits=False,
source=None,
record_offset=0,
record_count=None,
copy_master=True,
):
"""Gets channel samples.
Channel can be specified in two ways:
* using the first positional argument *name*
* if *source* is given this will be first used to validate the
channel selection
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
data : bytes
prevent redundant data read by providing the raw data group samples
raw : bool
return channel samples without applying the conversion rule; default
`False`
ignore_invalidation_bits : bool
only defined to have the same API with the MDF v4
source : str
source name used to select the channel
record_offset : int
if *data=None* use this to select the record offset from which the
group data should be loaded
copy_master : bool
make a copy of the timebase for this channel
Returns
-------
res : (numpy.array, None) | Signal
returns *Signal* if *samples_only*=*False* (default option),
otherwise returns a (numpy.array, None) tuple (for compatibility
with MDF v4 class).
The *Signal* samples are
* numpy recarray for channels that have CDBLOCK or BYTEARRAY
type channels
* numpy array for all the rest
Raises
------
MdfException :
* if the channel name is not found
* if the group index is out of range
* if the channel index is out of range
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> t = np.arange(5)
>>> s = np.ones(5)
>>> mdf = MDF(version='3.30')
>>> for i in range(4):
... sigs = [Signal(s*(i*10+j), t, name='Sig') for j in range(1, 4)]
... mdf.append(sigs)
...
>>> # first group and channel index of the specified channel name
...
>>> mdf.get('Sig')
UserWarning: Multiple occurances for channel "Sig". Using first occurance from data group 4. Provide both "group" and "index" arguments to select another data group
<Signal Sig:
samples=[ 1. 1. 1. 1. 1.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # first channel index in the specified group
...
>>> mdf.get('Sig', 1)
<Signal Sig:
samples=[ 11. 11. 11. 11. 11.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel named Sig from group 1 channel index 2
...
>>> mdf.get('Sig', 1, 2)
<Signal Sig:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel index 1 or group 2
...
>>> mdf.get(None, 2, 1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> mdf.get(group=2, index=1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> mdf.get('Sig', source='VN7060')
<Signal Sig:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
"""
gp_nr, ch_nr = self._validate_channel_selection(
name, group, index, source=source
)
original_data = data
grp = self.groups[gp_nr]
if grp.data_location == v23c.LOCATION_ORIGINAL_FILE:
stream = self._file
else:
stream = self._tempfile
channel = grp.channels[ch_nr]
conversion = channel.conversion
name = channel.name
display_name = channel.display_name
bit_count = channel.bit_count or 64
dep = grp.channel_dependencies[ch_nr]
cycles_nr = grp.channel_group.cycles_nr
encoding = None
# get data group record
if data is None:
data = self._load_data(
grp, record_offset=record_offset, record_count=record_count
)
else:
data = (data,)
# check if this is a channel array
if dep:
if dep.dependency_type == v23c.DEPENDENCY_TYPE_VECTOR:
shape = [dep.sd_nr]
elif dep.dependency_type >= v23c.DEPENDENCY_TYPE_NDIM:
shape = []
i = 0
while True:
try:
dim = dep[f"dim_{i}"]
shape.append(dim)
i += 1
except KeyError:
break
shape = shape[::-1]
record_shape = tuple(shape)
arrays = [
self.get(
group=dg_nr,
index=ch_nr,
samples_only=True,
raw=raw,
data=original_data,
record_offset=record_offset,
record_count=record_count,
)[0]
for dg_nr, ch_nr in dep.referenced_channels
]
shape.insert(0, cycles_nr)
vals = column_stack(arrays).flatten().reshape(tuple(shape))
arrays = [vals]
types = [(channel.name, vals.dtype, record_shape)]
types = dtype(types)
vals = fromarrays(arrays, dtype=types)
if not samples_only or raster:
timestamps = self.get_master(
gp_nr,
original_data,
record_offset=record_offset,
record_count=record_count,
copy_master=copy_master,
)
if raster and len(timestamps) > 1:
num = float(float32((timestamps[-1] - timestamps[0]) / raster))
if num.is_integer():
t = linspace(timestamps[0], timestamps[-1], int(num))
else:
t = arange(timestamps[0], timestamps[-1], raster)
vals = (
Signal(vals, timestamps, name="_")
.interp(t, interpolation_mode=self._integer_interpolation)
.samples
)
timestamps = t
else:
# get channel values
channel_values = []
timestamps = []
count = 0
for fragment in data:
data_bytes, _offset, _count = fragment
parents, dtypes = self._prepare_record(grp)
try:
parent, bit_offset = parents[ch_nr]
except KeyError:
parent, bit_offset = None, None
bits = channel.bit_count
if parent is not None:
if grp.record is None:
if dtypes.itemsize:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = None
else:
record = grp.record
record.setflags(write=False)
vals = record[parent]
data_type = channel.data_type
size = vals.dtype.itemsize
if data_type == v23c.DATA_TYPE_BYTEARRAY:
size *= vals.shape[1]
vals_dtype = vals.dtype.kind
if vals_dtype not in "ui" and (bit_offset or not bits == size * 8):
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
else:
dtype_ = vals.dtype
kind_ = dtype_.kind
if data_type in v23c.INT_TYPES:
if kind_ == 'f':
if bits != size * 8:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
dtype_fmt = get_fmt_v3(data_type, bits)
channel_dtype = dtype(dtype_fmt.split(')')[-1])
vals = vals.view(channel_dtype)
else:
if dtype_.byteorder == ">":
if bit_offset or bits != size << 3:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
if bit_offset:
if dtype_.kind == "i":
vals = vals.astype(
dtype(f"{dtype_.byteorder}u{size}")
)
vals >>= bit_offset
else:
vals = vals >> bit_offset
if bits != size << 3:
if data_type in v23c.SIGNED_INT:
vals = as_non_byte_sized_signed_int(vals, bits)
else:
mask = (1 << bits) - 1
if vals.flags.writeable:
vals &= mask
else:
vals = vals & mask
else:
if bits != size * 8:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
if kind_ in "ui":
dtype_fmt = get_fmt_v3(data_type, bits)
channel_dtype = dtype(dtype_fmt.split(')')[-1])
vals = vals.view(channel_dtype)
else:
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
if not samples_only or raster:
timestamps.append(
self.get_master(gp_nr, fragment, copy_master=copy_master)
)
if bits == 1 and self._single_bit_uint_as_bool:
vals = array(vals, dtype=bool)
else:
data_type = channel.data_type
channel_dtype = array([], dtype=get_fmt_v3(data_type, bits))
if vals.dtype != channel_dtype.dtype:
vals = vals.astype(channel_dtype.dtype)
channel_values.append(vals.copy())
count += 1
if count > 1:
vals = concatenate(channel_values)
elif count == 1:
vals = channel_values[0]
else:
vals = []
if not samples_only or raster:
if count > 1:
timestamps = concatenate(timestamps)
else:
timestamps = timestamps[0]
if raster and len(timestamps) > 1:
num = float(float32((timestamps[-1] - timestamps[0]) / raster))
if num.is_integer():
t = linspace(timestamps[0], timestamps[-1], int(num))
else:
t = arange(timestamps[0], timestamps[-1], raster)
vals = (
Signal(vals, timestamps, name="_")
.interp(t, interpolation_mode=self._integer_interpolation)
.samples
)
timestamps = t
if conversion is None:
conversion_type = v23c.CONVERSION_TYPE_NONE
else:
conversion_type = conversion.conversion_type
if conversion_type == v23c.CONVERSION_TYPE_NONE:
if vals.dtype.kind == "S":
encoding = "latin-1"
if not raw:
if conversion:
vals = conversion.convert(vals)
conversion = None
if samples_only:
res = vals, None
else:
if conversion:
unit = conversion.unit
else:
unit = ""
comment = channel.comment
description = channel.description.decode("latin-1").strip(" \t\n\0")
if comment:
comment = f"{comment}\n{description}"
else:
comment = description
source = channel.source
if source:
if source["type"] == v23c.SOURCE_ECU:
source = SignalSource(
source.name,
source.path,
source.comment,
0, # source type other
0, # bus type none
)
else:
source = SignalSource(
source.name,
source.path,
source.comment,
2, # source type bus
2, # bus type CAN
)
master_metadata = self._master_channel_metadata.get(gp_nr, None)
res = Signal(
samples=vals,
timestamps=timestamps,
unit=unit,
name=channel.name,
comment=comment,
conversion=conversion,
raw=raw,
master_metadata=master_metadata,
display_name=display_name,
source=source,
bit_count=bit_count,
encoding=encoding,
)
return res |
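A hedged usage sketch for the record above, mirroring its own doctest: it assumes the asammdf package is installed and that MDF accepts version='3.30' exactly as shown there; the group/index values used here are an assumption based on the doctest's numbering, not a documented guarantee.
# usage sketch only; mirrors the doctest in the docstring above
import numpy as np
from asammdf import MDF, Signal

t = np.arange(5)
s = np.ones(5)
mdf = MDF(version='3.30')
mdf.append([Signal(s * j, t, name='Sig') for j in range(1, 4)])
sig = mdf.get('Sig', group=0, index=1)  # explicit group/index avoids the multiple-occurrence warning
samples, _ = mdf.get('Sig', group=0, index=1, samples_only=True)
print(sig.samples, samples)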
def inverted_level_order(self) -> Iterator["BSP"]:
"""Iterate over this BSP's hierarchy in inverse level order.
.. versionadded:: 8.3
"""
levels = [] # type: List[List['BSP']]
next = [self] # type: List['BSP']
while next:
levels.append(next)
level = next # type: List['BSP']
next = []
for node in level:
next.extend(node.children)
while levels:
yield from levels.pop() | Iterate over this BSP's hierarchy in inverse level order.
.. versionadded:: 8.3 | Below is the the instruction that describes the task:
### Input:
Iterate over this BSP's hierarchy in inverse level order.
.. versionadded:: 8.3
### Response:
def inverted_level_order(self) -> Iterator["BSP"]:
"""Iterate over this BSP's hierarchy in inverse level order.
.. versionadded:: 8.3
"""
levels = [] # type: List[List['BSP']]
next = [self] # type: List['BSP']
while next:
levels.append(next)
level = next # type: List['BSP']
next = []
for node in level:
next.extend(node.children)
while levels:
yield from levels.pop() |
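To make the traversal above concrete, here is a self-contained sketch with a stand-in Node class (Node and its children attribute are invented for illustration; the real BSP type comes from the library this record was taken from).
# inverse level order: deepest level first, root last
from typing import Iterator, List

class Node:
    def __init__(self, name: str, children: List["Node"] = ()) -> None:
        self.name = name
        self.children = list(children)

    def inverted_level_order(self) -> Iterator["Node"]:
        levels: List[List["Node"]] = []
        next_level: List["Node"] = [self]
        while next_level:
            levels.append(next_level)
            level, next_level = next_level, []
            for node in level:
                next_level.extend(node.children)
        while levels:
            yield from levels.pop()

root = Node("root", [Node("a", [Node("a1"), Node("a2")]), Node("b")])
print([n.name for n in root.inverted_level_order()])  # ['a1', 'a2', 'a', 'b', 'root']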
def pstdev(data, mu=None):
"""Return the square root of the population variance.
See ``pvariance`` for arguments and other details.
"""
var = pvariance(data, mu)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var) | Return the square root of the population variance.
See ``pvariance`` for arguments and other details. | Below is the the instruction that describes the task:
### Input:
Return the square root of the population variance.
See ``pvariance`` for arguments and other details.
### Response:
def pstdev(data, mu=None):
"""Return the square root of the population variance.
See ``pvariance`` for arguments and other details.
"""
var = pvariance(data, mu)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var) |
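The record above relies on a pvariance helper that is not shown; the sketch below supplies a minimal population-variance stand-in (an assumption, not the library's own implementation) so that both return paths (var.sqrt() for Decimal-like values and the math.sqrt fallback) can be exercised.
import math

def pvariance(data, mu=None):
    # minimal stand-in: population variance about mu (or about the mean of data)
    data = list(data)
    if mu is None:
        mu = sum(data) / len(data)
    return sum((x - mu) ** 2 for x in data) / len(data)

def pstdev(data, mu=None):
    var = pvariance(data, mu)
    try:
        return var.sqrt()       # objects such as decimal.Decimal expose .sqrt()
    except AttributeError:
        return math.sqrt(var)   # plain floats fall back to math.sqrt

print(pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75]))  # ~0.9869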
def fill_pool(self):
"""
Add connections as necessary to meet the target pool size. If there
are no nodes to connect to (because we maxed out connections-per-node
on all active connections and any unconnected nodes have pending
reconnect timers), call the on_insufficient_nodes callback.
"""
time_since_last_called = self.fill_pool_throttle
if self.fill_pool_last_called is not None:
time_since_last_called = time() - self.fill_pool_last_called
need = self.target_pool_size - self.num_connectors()
if need <= 0 or (self.throttle_timer is not None and self.throttle_timer.active()):
return
elif time_since_last_called < self.fill_pool_throttle:
self.log("Filling pool too quickly, calling again in %.1f seconds" % self.fill_pool_throttle)
self._set_fill_pool_timer()
return
else:
try:
for num, node in izip(xrange(need), self.choose_nodes_to_connect()):
self.make_conn(node)
self.fill_pool_last_called = time()
except NoNodesAvailable, e:
waittime = e.args[0]
pending_requests = len(self.request_queue.pending)
if self.on_insufficient_nodes:
self.on_insufficient_nodes(self.num_active_conns(),
self.target_pool_size,
pending_requests,
waittime if waittime != float('Inf') else None)
self.schedule_future_fill_pool(e.args[0])
if self.num_connectors() == 0 and pending_requests > 0:
if self.on_insufficient_conns:
self.on_insufficient_conns(self.num_connectors(),
pending_requests) | Add connections as necessary to meet the target pool size. If there
are no nodes to connect to (because we maxed out connections-per-node
on all active connections and any unconnected nodes have pending
reconnect timers), call the on_insufficient_nodes callback. | Below is the the instruction that describes the task:
### Input:
Add connections as necessary to meet the target pool size. If there
are no nodes to connect to (because we maxed out connections-per-node
on all active connections and any unconnected nodes have pending
reconnect timers), call the on_insufficient_nodes callback.
### Response:
def fill_pool(self):
"""
Add connections as necessary to meet the target pool size. If there
are no nodes to connect to (because we maxed out connections-per-node
on all active connections and any unconnected nodes have pending
reconnect timers), call the on_insufficient_nodes callback.
"""
time_since_last_called = self.fill_pool_throttle
if self.fill_pool_last_called is not None:
time_since_last_called = time() - self.fill_pool_last_called
need = self.target_pool_size - self.num_connectors()
if need <= 0 or (self.throttle_timer is not None and self.throttle_timer.active()):
return
elif time_since_last_called < self.fill_pool_throttle:
self.log("Filling pool too quickly, calling again in %.1f seconds" % self.fill_pool_throttle)
self._set_fill_pool_timer()
return
else:
try:
for num, node in izip(xrange(need), self.choose_nodes_to_connect()):
self.make_conn(node)
self.fill_pool_last_called = time()
except NoNodesAvailable, e:
waittime = e.args[0]
pending_requests = len(self.request_queue.pending)
if self.on_insufficient_nodes:
self.on_insufficient_nodes(self.num_active_conns(),
self.target_pool_size,
pending_requests,
waittime if waittime != float('Inf') else None)
self.schedule_future_fill_pool(e.args[0])
if self.num_connectors() == 0 and pending_requests > 0:
if self.on_insufficient_conns:
self.on_insufficient_conns(self.num_connectors(),
pending_requests) |
def execute_policy(self, policy):
"""
Executes the specified policy for this scaling group.
"""
return self.manager.execute_policy(scaling_group=self, policy=policy) | Executes the specified policy for this scaling group. | Below is the the instruction that describes the task:
### Input:
Executes the specified policy for this scaling group.
### Response:
def execute_policy(self, policy):
"""
Executes the specified policy for this scaling group.
"""
return self.manager.execute_policy(scaling_group=self, policy=policy) |
def is_empty(self):
"""
Test interval emptiness.
:return: True if interval is empty, False otherwise.
"""
return (
self._lower > self._upper or
(self._lower == self._upper and (self._left == OPEN or self._right == OPEN))
) | Test interval emptiness.
:return: True if interval is empty, False otherwise. | Below is the the instruction that describes the task:
### Input:
Test interval emptiness.
:return: True if interval is empty, False otherwise.
### Response:
def is_empty(self):
"""
Test interval emptiness.
:return: True if interval is empty, False otherwise.
"""
return (
self._lower > self._upper or
(self._lower == self._upper and (self._left == OPEN or self._right == OPEN))
) |
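A self-contained sketch of the same emptiness rule, using a stand-in Interval and invented OPEN/CLOSED flags (the real class and constants come from whatever interval library this record belongs to).
OPEN, CLOSED = 0, 1  # invented flags, for the sketch only

class Interval:
    def __init__(self, left, lower, upper, right):
        self._left, self._lower, self._upper, self._right = left, lower, upper, right

    def is_empty(self):
        return (
            self._lower > self._upper or
            (self._lower == self._upper and (self._left == OPEN or self._right == OPEN))
        )

print(Interval(CLOSED, 1, 1, CLOSED).is_empty())  # False: [1, 1] is the single point 1
print(Interval(OPEN, 1, 1, CLOSED).is_empty())    # True: (1, 1] contains nothing
print(Interval(CLOSED, 2, 1, CLOSED).is_empty())  # True: lower bound above upper bound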
def format_exc(limit=None):
"""Like print_exc() but return a string. Backport for Python 2.3."""
try:
etype, value, tb = sys.exc_info()
return ''.join(traceback.format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None | Like print_exc() but return a string. Backport for Python 2.3. | Below is the the instruction that describes the task:
### Input:
Like print_exc() but return a string. Backport for Python 2.3.
### Response:
def format_exc(limit=None):
"""Like print_exc() but return a string. Backport for Python 2.3."""
try:
etype, value, tb = sys.exc_info()
return ''.join(traceback.format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None |
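A quick way to exercise the backport above; the function body is repeated verbatim and only the surrounding demo is new.
import sys
import traceback

def format_exc(limit=None):
    try:
        etype, value, tb = sys.exc_info()
        return ''.join(traceback.format_exception(etype, value, tb, limit))
    finally:
        etype = value = tb = None  # drop the traceback references, as in the original

try:
    1 / 0
except ZeroDivisionError:
    print(format_exc())  # prints the formatted traceback ending with the ZeroDivisionError line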
def squish(incs, f):
"""
returns 'flattened' inclination, assuming factor, f and King (1955) formula:
tan (I_o) = f tan (I_f)
Parameters
__________
incs : array of inclination (I_f) data to flatten
f : flattening factor
Returns
_______
I_o : inclinations after flattening
"""
incs = np.radians(incs)
I_o = f * np.tan(incs) # multiply tangent by flattening factor
return np.degrees(np.arctan(I_o)) | returns 'flattened' inclination, assuming factor, f and King (1955) formula:
tan (I_o) = f tan (I_f)
Parameters
__________
incs : array of inclination (I_f) data to flatten
f : flattening factor
Returns
_______
I_o : inclinations after flattening | Below is the the instruction that describes the task:
### Input:
returns 'flattened' inclination, assuming factor, f and King (1955) formula:
tan (I_o) = f tan (I_f)
Parameters
__________
incs : array of inclination (I_f) data to flatten
f : flattening factor
Returns
_______
I_o : inclinations after flattening
### Response:
def squish(incs, f):
"""
returns 'flattened' inclination, assuming factor, f and King (1955) formula:
tan (I_o) = f tan (I_f)
Parameters
__________
incs : array of inclination (I_f) data to flatten
f : flattening factor
Returns
_______
I_o : inclinations after flattening
"""
incs = np.radians(incs)
I_o = f * np.tan(incs) # multiply tangent by flattening factor
return np.degrees(np.arctan(I_o)) |
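A small worked example (numpy assumed available): with f = 0.5, an observed inclination of 45 degrees flattens to arctan(0.5 * tan 45) = arctan(0.5), roughly 26.6 degrees. The helper below restates the record's two lines in equivalent form.
import numpy as np

def squish(incs, f):
    # tan(I_o) = f * tan(I_f), returned in degrees
    return np.degrees(np.arctan(f * np.tan(np.radians(incs))))

print(squish(np.array([0.0, 30.0, 45.0, 60.0]), 0.5))
# approximately [ 0.   16.1  26.57 40.89]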
def _checkMousePositionForFocus(self):
"""Check the mouse position to know if move focus on a option"""
i = 0
cur_pos = pygame.mouse.get_pos()
ml, mt = self.position
for o in self.options:
rect = o.get('label_rect')
if rect:
if rect.collidepoint(cur_pos) and self.mouse_pos != cur_pos:
self.option = i
self.mouse_pos = cur_pos
break
i += 1 | Check the mouse position to know whether to move focus to an option | Below is the the instruction that describes the task:
### Input:
Check the mouse position to know whether to move focus to an option
### Response:
def _checkMousePositionForFocus(self):
"""Check the mouse position to know if move focus on a option"""
i = 0
cur_pos = pygame.mouse.get_pos()
ml, mt = self.position
for o in self.options:
rect = o.get('label_rect')
if rect:
if rect.collidepoint(cur_pos) and self.mouse_pos != cur_pos:
self.option = i
self.mouse_pos = cur_pos
break
i += 1 |
def _mc_error(x, batches=5, circular=False):
"""Calculate the simulation standard error, accounting for non-independent samples.
The trace is divided into batches, and the standard deviation of the batch
means is calculated.
Parameters
----------
x : Numpy array
An array containing MCMC samples
batches : integer
Number of batches
circular : bool
Whether to compute the error taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
Returns
-------
mc_error : float
Simulation standard error
"""
if x.ndim > 1:
dims = np.shape(x)
trace = np.transpose([t.ravel() for t in x])
return np.reshape([_mc_error(t, batches) for t in trace], dims[1:])
else:
if batches == 1:
if circular:
std = st.circstd(x, high=np.pi, low=-np.pi)
else:
std = np.std(x)
return std / np.sqrt(len(x))
batched_traces = np.resize(x, (batches, int(len(x) / batches)))
if circular:
means = st.circmean(batched_traces, high=np.pi, low=-np.pi, axis=1)
std = st.circstd(means, high=np.pi, low=-np.pi)
else:
means = np.mean(batched_traces, 1)
std = np.std(means)
return std / np.sqrt(batches) | Calculate the simulation standard error, accounting for non-independent samples.
The trace is divided into batches, and the standard deviation of the batch
means is calculated.
Parameters
----------
x : Numpy array
An array containing MCMC samples
batches : integer
Number of batches
circular : bool
Whether to compute the error taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
Returns
-------
mc_error : float
Simulation standard error | Below is the the instruction that describes the task:
### Input:
Calculate the simulation standard error, accounting for non-independent samples.
The trace is divided into batches, and the standard deviation of the batch
means is calculated.
Parameters
----------
x : Numpy array
An array containing MCMC samples
batches : integer
Number of batches
circular : bool
Whether to compute the error taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
Returns
-------
mc_error : float
Simulation standard error
### Response:
def _mc_error(x, batches=5, circular=False):
"""Calculate the simulation standard error, accounting for non-independent samples.
The trace is divided into batches, and the standard deviation of the batch
means is calculated.
Parameters
----------
x : Numpy array
An array containing MCMC samples
batches : integer
Number of batches
circular : bool
Whether to compute the error taking into account `x` is a circular variable
(in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
Returns
-------
mc_error : float
Simulation standard error
"""
if x.ndim > 1:
dims = np.shape(x)
trace = np.transpose([t.ravel() for t in x])
return np.reshape([_mc_error(t, batches) for t in trace], dims[1:])
else:
if batches == 1:
if circular:
std = st.circstd(x, high=np.pi, low=-np.pi)
else:
std = np.std(x)
return std / np.sqrt(len(x))
batched_traces = np.resize(x, (batches, int(len(x) / batches)))
if circular:
means = st.circmean(batched_traces, high=np.pi, low=-np.pi, axis=1)
std = st.circstd(means, high=np.pi, low=-np.pi)
else:
means = np.mean(batched_traces, 1)
std = np.std(means)
return std / np.sqrt(batches) |
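For the non-circular branch, the batch-means idea can be sketched standalone with numpy alone (the circular branch additionally needs scipy.stats, which is what `st` refers to above).
import numpy as np

def mc_error_batch_means(x, batches=5):
    # split the trace into `batches` chunks, take each chunk's mean,
    # and report the standard error of those batch means
    batched = np.resize(x, (batches, len(x) // batches))
    means = batched.mean(axis=1)
    return means.std() / np.sqrt(batches)

rng = np.random.default_rng(0)
trace = rng.normal(size=10_000)
print(mc_error_batch_means(trace, batches=5))  # batch-means standard error
print(trace.std() / np.sqrt(trace.size))       # naive i.i.d. standard error, for comparison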
def set_attributes(self, doc, fields, # pylint: disable=arguments-differ
parent_type=None, catch_all_field=None):
"""
:param UnionField catch_all_field: The field designated as the
catch-all. This field should be a member of the list of fields.
See :meth:`Composite.set_attributes` for parameter definitions.
"""
if parent_type:
assert isinstance(parent_type, Union)
super(Union, self).set_attributes(doc, fields, parent_type)
self.catch_all_field = catch_all_field
self.parent_type = parent_type | :param UnionField catch_all_field: The field designated as the
catch-all. This field should be a member of the list of fields.
See :meth:`Composite.set_attributes` for parameter definitions. | Below is the the instruction that describes the task:
### Input:
:param UnionField catch_all_field: The field designated as the
catch-all. This field should be a member of the list of fields.
See :meth:`Composite.set_attributes` for parameter definitions.
### Response:
def set_attributes(self, doc, fields, # pylint: disable=arguments-differ
parent_type=None, catch_all_field=None):
"""
:param UnionField catch_all_field: The field designated as the
catch-all. This field should be a member of the list of fields.
See :meth:`Composite.set_attributes` for parameter definitions.
"""
if parent_type:
assert isinstance(parent_type, Union)
super(Union, self).set_attributes(doc, fields, parent_type)
self.catch_all_field = catch_all_field
self.parent_type = parent_type |
def set_kw_typeahead_input(cls):
"""
Map the typeahead input to remote dataset.
"""
# get reference to parent element
parent_id = cls.intput_el.parent.id
if "typeahead" not in parent_id.lower():
parent_id = cls.intput_el.parent.parent.id
window.make_keyword_typeahead_tag(
"#" + parent_id,
join(settings.API_PATH, "kw_list.json"),
cls.on_select_callback,
) | Map the typeahead input to remote dataset. | Below is the the instruction that describes the task:
### Input:
Map the typeahead input to remote dataset.
### Response:
def set_kw_typeahead_input(cls):
"""
Map the typeahead input to remote dataset.
"""
# get reference to parent element
parent_id = cls.intput_el.parent.id
if "typeahead" not in parent_id.lower():
parent_id = cls.intput_el.parent.parent.id
window.make_keyword_typeahead_tag(
"#" + parent_id,
join(settings.API_PATH, "kw_list.json"),
cls.on_select_callback,
) |
def run_somaticsniper_with_merge(job, tumor_bam, normal_bam, univ_options, somaticsniper_options):
"""
A wrapper for the entire SomaticSniper sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the merged SomaticSniper calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_somaticsniper, tumor_bam, normal_bam, univ_options,
somaticsniper_options, split=False).encapsulate()
job.addChild(spawn)
return spawn.rv() | A wrapper for the entire SomaticSniper sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the merged SomaticSniper calls
:rtype: toil.fileStore.FileID | Below is the the instruction that describes the task:
### Input:
A wrapper for the entire SomaticSniper sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the merged SomaticSniper calls
:rtype: toil.fileStore.FileID
### Response:
def run_somaticsniper_with_merge(job, tumor_bam, normal_bam, univ_options, somaticsniper_options):
"""
A wrapper for the entire SomaticSniper sub-graph.
:param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
:param dict normal_bam: Dict of bam and bai for normal DNA-Seq
:param dict univ_options: Dict of universal options used by almost all tools
:param dict somaticsniper_options: Options specific to SomaticSniper
:return: fsID to the merged SomaticSniper calls
:rtype: toil.fileStore.FileID
"""
spawn = job.wrapJobFn(run_somaticsniper, tumor_bam, normal_bam, univ_options,
somaticsniper_options, split=False).encapsulate()
job.addChild(spawn)
return spawn.rv() |
def view_tickets(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#list-tickets-from-a-view"
api_path = "/api/v2/views/{id}/tickets.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/views#list-tickets-from-a-view | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/views#list-tickets-from-a-view
### Response:
def view_tickets(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#list-tickets-from-a-view"
api_path = "/api/v2/views/{id}/tickets.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) |
def run_remote_command(host, command, timeout_sec=5.0):
"""Retrieves the value of an environment variable of a
remote host over SSH
:param host: (str) host to query
:param command: (str) command
:param timeout_sec (float) seconds to wait before killing the command.
:return: (str) command output
:raises: TypeError, CommandError
"""
log = logging.getLogger(mod_logger + '.run_remote_command')
if not isinstance(host, basestring):
msg = 'host argument must be a string'
raise TypeError(msg)
if not isinstance(command, basestring):
msg = 'command argument must be a string'
raise TypeError(msg)
log.debug('Running remote command on host: {h}: {c}...'.format(h=host, c=command))
command = ['ssh', '{h}'.format(h=host), '{c}'.format(c=command)]
try:
result = run_command(command, timeout_sec=timeout_sec)
code = result['code']
except CommandError:
raise
if code != 0:
msg = 'There was a problem running command [{m}] on host {h} over SSH, return code: {c}, and ' \
'produced output:\n{o}'.format(h=host, c=code, m=' '.join(command), o=result['output'])
raise CommandError(msg)
else:
output_text = result['output'].strip()
log.debug('Running command [{m}] host {h} over SSH produced output: {o}'.format(
m=command, h=host, o=output_text))
output = {
'output': output_text,
'code': code
}
return output | Runs a command on a remote host over SSH and returns its output
:param host: (str) host to query
:param command: (str) command
:param timeout_sec (float) seconds to wait before killing the command.
:return: (str) command output
:raises: TypeError, CommandError | Below is the the instruction that describes the task:
### Input:
Runs a command on a remote host over SSH and returns its output
:param host: (str) host to query
:param command: (str) command
:param timeout_sec (float) seconds to wait before killing the command.
:return: (str) command output
:raises: TypeError, CommandError
### Response:
def run_remote_command(host, command, timeout_sec=5.0):
"""Retrieves the value of an environment variable of a
remote host over SSH
:param host: (str) host to query
:param command: (str) command
:param timeout_sec (float) seconds to wait before killing the command.
:return: (str) command output
:raises: TypeError, CommandError
"""
log = logging.getLogger(mod_logger + '.run_remote_command')
if not isinstance(host, basestring):
msg = 'host argument must be a string'
raise TypeError(msg)
if not isinstance(command, basestring):
msg = 'command argument must be a string'
raise TypeError(msg)
log.debug('Running remote command on host: {h}: {c}...'.format(h=host, c=command))
command = ['ssh', '{h}'.format(h=host), '{c}'.format(c=command)]
try:
result = run_command(command, timeout_sec=timeout_sec)
code = result['code']
except CommandError:
raise
if code != 0:
msg = 'There was a problem running command [{m}] on host {h} over SSH, return code: {c}, and ' \
'produced output:\n{o}'.format(h=host, c=code, m=' '.join(command), o=result['output'])
raise CommandError(msg)
else:
output_text = result['output'].strip()
log.debug('Running command [{m}] host {h} over SSH produced output: {o}'.format(
m=command, h=host, o=output_text))
output = {
'output': output_text,
'code': code
}
return output |
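The record above shells out to the ssh binary; the sketch below only builds and prints the argument list it would pass (no SSH session is opened, and the host name is a placeholder), which is enough to see the list form handed to the subprocess layer.
host = 'devhost.example.com'   # placeholder host
remote_cmd = 'echo $HOSTNAME'
command = ['ssh', '{h}'.format(h=host), '{c}'.format(c=remote_cmd)]
print(command)  # ['ssh', 'devhost.example.com', 'echo $HOSTNAME']
# a real call would then pass this list to the module's run_command helper,
# e.g. run_command(command, timeout_sec=5.0)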
def runPlink(options):
"""Run Plink with the ``mind`` option.
:param options: the options.
:type options: argparse.Namespace
"""
# The plink command
plinkCommand = [
"plink",
"--noweb",
"--bfile" if options.is_bfile else "--tfile",
options.ifile,
"--mind",
str(options.mind),
"--make-bed",
"--out",
options.out,
]
output = None
try:
output = subprocess.check_output(plinkCommand,
stderr=subprocess.STDOUT, shell=False)
except subprocess.CalledProcessError:
msg = "plink: couldn't run plink"
raise ProgramError(msg) | Run Plink with the ``mind`` option.
:param options: the options.
:type options: argparse.Namespace | Below is the the instruction that describes the task:
### Input:
Run Plink with the ``mind`` option.
:param options: the options.
:type options: argparse.Namespace
### Response:
def runPlink(options):
"""Run Plink with the ``mind`` option.
:param options: the options.
:type options: argparse.Namespace
"""
# The plink command
plinkCommand = [
"plink",
"--noweb",
"--bfile" if options.is_bfile else "--tfile",
options.ifile,
"--mind",
str(options.mind),
"--make-bed",
"--out",
options.out,
]
output = None
try:
output = subprocess.check_output(plinkCommand,
stderr=subprocess.STDOUT, shell=False)
except subprocess.CalledProcessError:
msg = "plink: couldn't run plink"
raise ProgramError(msg) |
def add_vlan_int(self, vlan_id):
"""
Add VLAN Interface. VLAN interfaces are required for VLANs even when
not wanting to use the interface for any L3 features.
Args:
vlan_id: ID for the VLAN interface being created. Value of 2-4096.
Returns:
True if command completes successfully or False if not.
Raises:
None
"""
config = ET.Element('config')
vlinterface = ET.SubElement(config, 'interface-vlan',
xmlns=("urn:brocade.com:mgmt:"
"brocade-interface"))
interface = ET.SubElement(vlinterface, 'interface')
vlan = ET.SubElement(interface, 'vlan')
name = ET.SubElement(vlan, 'name')
name.text = vlan_id
try:
self._callback(config)
return True
# TODO add logging and narrow exception window.
except Exception as error:
logging.error(error)
return False | Add VLAN Interface. VLAN interfaces are required for VLANs even when
not wanting to use the interface for any L3 features.
Args:
vlan_id: ID for the VLAN interface being created. Value of 2-4096.
Returns:
True if command completes successfully or False if not.
Raises:
None | Below is the the instruction that describes the task:
### Input:
Add VLAN Interface. VLAN interfaces are required for VLANs even when
not wanting to use the interface for any L3 features.
Args:
vlan_id: ID for the VLAN interface being created. Value of 2-4096.
Returns:
True if command completes successfully or False if not.
Raises:
None
### Response:
def add_vlan_int(self, vlan_id):
"""
Add VLAN Interface. VLAN interfaces are required for VLANs even when
not wanting to use the interface for any L3 features.
Args:
vlan_id: ID for the VLAN interface being created. Value of 2-4096.
Returns:
True if command completes successfully or False if not.
Raises:
None
"""
config = ET.Element('config')
vlinterface = ET.SubElement(config, 'interface-vlan',
xmlns=("urn:brocade.com:mgmt:"
"brocade-interface"))
interface = ET.SubElement(vlinterface, 'interface')
vlan = ET.SubElement(interface, 'vlan')
name = ET.SubElement(vlan, 'name')
name.text = vlan_id
try:
self._callback(config)
return True
# TODO add logging and narrow exception window.
except Exception as error:
logging.error(error)
return False |
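The NETCONF payload assembled above can be inspected without a switch by building the same ElementTree structure and serialising it; only xml.etree is needed, and the namespace string is copied from the record.
import xml.etree.ElementTree as ET

vlan_id = '42'  # sample VLAN id
config = ET.Element('config')
vlinterface = ET.SubElement(config, 'interface-vlan',
                            xmlns='urn:brocade.com:mgmt:brocade-interface')
interface = ET.SubElement(vlinterface, 'interface')
vlan = ET.SubElement(interface, 'vlan')
name = ET.SubElement(vlan, 'name')
name.text = vlan_id
# prints the <config> element with the interface-vlan/interface/vlan/name nesting
print(ET.tostring(config).decode())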
def compute_auth_key(userid, password):
"""
Compute the authentication key for freedns.afraid.org.
This is the SHA1 hash of the string b'userid|password'.
:param userid: ascii username
:param password: ascii password
:return: ascii authentication key (SHA1 at this point)
"""
import sys
if sys.version_info >= (3, 0):
return hashlib.sha1(b"|".join((userid.encode("ascii"), # noqa: S303
password.encode("ascii")))).hexdigest()
return hashlib.sha1("|".join((userid, password))).hexdigest() | Compute the authentication key for freedns.afraid.org.
This is the SHA1 hash of the string b'userid|password'.
:param userid: ascii username
:param password: ascii password
:return: ascii authentication key (SHA1 at this point) | Below is the the instruction that describes the task:
### Input:
Compute the authentication key for freedns.afraid.org.
This is the SHA1 hash of the string b'userid|password'.
:param userid: ascii username
:param password: ascii password
:return: ascii authentication key (SHA1 at this point)
### Response:
def compute_auth_key(userid, password):
"""
Compute the authentication key for freedns.afraid.org.
This is the SHA1 hash of the string b'userid|password'.
:param userid: ascii username
:param password: ascii password
:return: ascii authentication key (SHA1 at this point)
"""
import sys
if sys.version_info >= (3, 0):
return hashlib.sha1(b"|".join((userid.encode("ascii"), # noqa: S303
password.encode("ascii")))).hexdigest()
return hashlib.sha1("|".join((userid, password))).hexdigest() |
def docker(gandi, vm, args):
"""
Manage docker instance
"""
if not [basedir for basedir in os.getenv('PATH', '.:/usr/bin').split(':')
if os.path.exists('%s/docker' % basedir)]:
gandi.echo("""'docker' not found in $PATH, required for this command \
to work
See https://docs.docker.com/installation/#installation to install, or use:
# curl https://get.docker.io/ | sh""")
return
if vm:
gandi.configure(True, 'dockervm', vm)
else:
vm = gandi.get('dockervm')
if not vm:
gandi.echo("""
No docker vm specified. You can create one:
$ gandi vm create --hostname docker --image "Ubuntu 14.04 64 bits LTS (HVM)" \\
--run 'wget -O - https://get.docker.io/ | sh'
Then configure it using:
$ gandi docker --vm docker ps
Or to both change target vm and spawn a process (note the -- separator):
$ gandi docker --vm myvm -- run -i -t debian bash
""") # noqa
return
return gandi.docker.handle(vm, args) | Manage docker instance | Below is the the instruction that describes the task:
### Input:
Manage docker instance
### Response:
def docker(gandi, vm, args):
"""
Manage docker instance
"""
if not [basedir for basedir in os.getenv('PATH', '.:/usr/bin').split(':')
if os.path.exists('%s/docker' % basedir)]:
gandi.echo("""'docker' not found in $PATH, required for this command \
to work
See https://docs.docker.com/installation/#installation to install, or use:
# curl https://get.docker.io/ | sh""")
return
if vm:
gandi.configure(True, 'dockervm', vm)
else:
vm = gandi.get('dockervm')
if not vm:
gandi.echo("""
No docker vm specified. You can create one:
$ gandi vm create --hostname docker --image "Ubuntu 14.04 64 bits LTS (HVM)" \\
--run 'wget -O - https://get.docker.io/ | sh'
Then configure it using:
$ gandi docker --vm docker ps
Or to both change target vm and spawn a process (note the -- separator):
$ gandi docker --vm myvm -- run -i -t debian bash
""") # noqa
return
return gandi.docker.handle(vm, args) |
def render(self, container, descender, state, space_below=0,
first_line_only=False):
"""Typeset the paragraph
The paragraph is typeset in the given container starting below the
current cursor position of the container. When the end of the container
is reached, the rendering state is preserved to continue setting the
rest of the paragraph when this method is called with a new container.
Args:
container (Container): the container to render to
descender (float or None): descender height of the preceding line
state (ParagraphState): the state where rendering will continue
first_line_only (bool): typeset only the first line
"""
indent_first = (float(self.get_style('indent_first', container))
if state.initial else 0)
line_width = float(container.width)
line_spacing = self.get_style('line_spacing', container)
text_align = self.get_style('text_align', container)
tab_stops = self.get_style('tab_stops', container)
if not tab_stops:
tab_width = 2 * self.get_style('font_size', container)
tab_stops = DefaultTabStops(tab_width)
# `saved_state` is updated after successfully rendering each line, so
# that when `container` overflows on rendering a line, the words in that
# line are yielded again on the next typeset() call.
saved_state = copy(state)
prev_state = copy(state)
max_line_width = 0
def typeset_line(line, last_line=False):
"""Typeset `line` and, if no exception is raised, update the
paragraph's internal rendering state."""
nonlocal state, saved_state, max_line_width, descender, space_below
max_line_width = max(max_line_width, line.cursor)
advance = (line.ascender(container) if descender is None
else line_spacing.advance(line, descender, container))
descender = line.descender(container) # descender <= 0
line.advance = advance
total_advance = advance + (space_below if last_line else 0) - descender
if container.remaining_height < total_advance:
raise EndOfContainer(saved_state)
assert container.advance2(advance)
line.typeset(container, text_align, last_line)
assert container.advance2(- descender)
state.initial = False
saved_state = copy(state)
return Line(tab_stops, line_width, container,
significant_whitespace=self.significant_whitespace)
first_line = line = Line(tab_stops, line_width, container,
indent_first, self.significant_whitespace)
while True:
try:
word = state.next_word()
except StopIteration:
break
try:
if not line.append_word(word):
for first, second in word.hyphenate(container):
if line.append_word(first):
state.prepend_word(second) # prepend second part
break
else:
state = prev_state
line = typeset_line(line)
if first_line_only:
break
continue
except NewLineException:
line.append(word.glyphs_span)
line = typeset_line(line, last_line=True)
if first_line_only:
break
prev_state = copy(state)
if line:
typeset_line(line, last_line=True)
# Correct the horizontal text placement for auto-width paragraphs
if self._width(container) == FlowableWidth.AUTO:
if text_align == TextAlign.CENTER:
container.left -= float(container.width - max_line_width) / 2
if text_align == TextAlign.RIGHT:
container.left -= float(container.width - max_line_width)
return max_line_width, first_line.advance, descender | Typeset the paragraph
The paragraph is typeset in the given container starting below the
current cursor position of the container. When the end of the container
is reached, the rendering state is preserved to continue setting the
rest of the paragraph when this method is called with a new container.
Args:
container (Container): the container to render to
descender (float or None): descender height of the preceding line
state (ParagraphState): the state where rendering will continue
first_line_only (bool): typeset only the first line | Below is the the instruction that describes the task:
### Input:
Typeset the paragraph
The paragraph is typeset in the given container starting below the
current cursor position of the container. When the end of the container
is reached, the rendering state is preserved to continue setting the
rest of the paragraph when this method is called with a new container.
Args:
container (Container): the container to render to
descender (float or None): descender height of the preceding line
state (ParagraphState): the state where rendering will continue
first_line_only (bool): typeset only the first line
### Response:
def render(self, container, descender, state, space_below=0,
first_line_only=False):
"""Typeset the paragraph
The paragraph is typeset in the given container starting below the
current cursor position of the container. When the end of the container
is reached, the rendering state is preserved to continue setting the
rest of the paragraph when this method is called with a new container.
Args:
container (Container): the container to render to
descender (float or None): descender height of the preceding line
state (ParagraphState): the state where rendering will continue
first_line_only (bool): typeset only the first line
"""
indent_first = (float(self.get_style('indent_first', container))
if state.initial else 0)
line_width = float(container.width)
line_spacing = self.get_style('line_spacing', container)
text_align = self.get_style('text_align', container)
tab_stops = self.get_style('tab_stops', container)
if not tab_stops:
tab_width = 2 * self.get_style('font_size', container)
tab_stops = DefaultTabStops(tab_width)
# `saved_state` is updated after successfully rendering each line, so
# that when `container` overflows on rendering a line, the words in that
# line are yielded again on the next typeset() call.
saved_state = copy(state)
prev_state = copy(state)
max_line_width = 0
def typeset_line(line, last_line=False):
"""Typeset `line` and, if no exception is raised, update the
paragraph's internal rendering state."""
nonlocal state, saved_state, max_line_width, descender, space_below
max_line_width = max(max_line_width, line.cursor)
advance = (line.ascender(container) if descender is None
else line_spacing.advance(line, descender, container))
descender = line.descender(container) # descender <= 0
line.advance = advance
total_advance = advance + (space_below if last_line else 0) - descender
if container.remaining_height < total_advance:
raise EndOfContainer(saved_state)
assert container.advance2(advance)
line.typeset(container, text_align, last_line)
assert container.advance2(- descender)
state.initial = False
saved_state = copy(state)
return Line(tab_stops, line_width, container,
significant_whitespace=self.significant_whitespace)
first_line = line = Line(tab_stops, line_width, container,
indent_first, self.significant_whitespace)
while True:
try:
word = state.next_word()
except StopIteration:
break
try:
if not line.append_word(word):
for first, second in word.hyphenate(container):
if line.append_word(first):
state.prepend_word(second) # prepend second part
break
else:
state = prev_state
line = typeset_line(line)
if first_line_only:
break
continue
except NewLineException:
line.append(word.glyphs_span)
line = typeset_line(line, last_line=True)
if first_line_only:
break
prev_state = copy(state)
if line:
typeset_line(line, last_line=True)
# Correct the horizontal text placement for auto-width paragraphs
if self._width(container) == FlowableWidth.AUTO:
if text_align == TextAlign.CENTER:
container.left -= float(container.width - max_line_width) / 2
if text_align == TextAlign.RIGHT:
container.left -= float(container.width - max_line_width)
return max_line_width, first_line.advance, descender |
def get(method, hmc, uri, uri_parms, logon_required):
"""Operation: List Password Rules."""
query_str = uri_parms[0]
try:
console = hmc.consoles.lookup_by_oid(None)
except KeyError:
raise InvalidResourceError(method, uri)
result_password_rules = []
filter_args = parse_query_parms(method, uri, query_str)
for password_rule in console.password_rules.list(filter_args):
result_password_rule = {}
for prop in password_rule.properties:
if prop in ('element-uri', 'name', 'type'):
result_password_rule[prop] = password_rule.properties[prop]
result_password_rules.append(result_password_rule)
return {'password-rules': result_password_rules} | Operation: List Password Rules. | Below is the the instruction that describes the task:
### Input:
Operation: List Password Rules.
### Response:
def get(method, hmc, uri, uri_parms, logon_required):
"""Operation: List Password Rules."""
query_str = uri_parms[0]
try:
console = hmc.consoles.lookup_by_oid(None)
except KeyError:
raise InvalidResourceError(method, uri)
result_password_rules = []
filter_args = parse_query_parms(method, uri, query_str)
for password_rule in console.password_rules.list(filter_args):
result_password_rule = {}
for prop in password_rule.properties:
if prop in ('element-uri', 'name', 'type'):
result_password_rule[prop] = password_rule.properties[prop]
result_password_rules.append(result_password_rule)
return {'password-rules': result_password_rules} |
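The response shaping in the handler above (keep only 'element-uri', 'name' and 'type' from each rule) can be illustrated with plain dictionaries; the sample rules below are made up.
# made-up stand-ins for console.password_rules.list(filter_args)
rules = [
    {'element-uri': '/api/console/password-rules/1', 'name': 'Basic',
     'type': 'system-defined', 'min-length': 8},
    {'element-uri': '/api/console/password-rules/2', 'name': 'Strict',
     'type': 'user-defined', 'min-length': 14},
]
result = [{k: v for k, v in rule.items() if k in ('element-uri', 'name', 'type')}
          for rule in rules]
print({'password-rules': result})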
def retrieve_client_credentials(self):
"""Return the client credentials.
:returns: tuple(client_id, client_secret)
"""
client_id = self.params.get('client_id')
client_secret = self.params.get('client_secret')
return (client_id, client_secret) | Return the client credentials.
:returns: tuple(client_id, client_secret) | Below is the the instruction that describes the task:
### Input:
Return the client credentials.
:returns: tuple(client_id, client_secret)
### Response:
def retrieve_client_credentials(self):
"""Return the client credentials.
:returns: tuple(client_id, client_secret)
"""
client_id = self.params.get('client_id')
client_secret = self.params.get('client_secret')
return (client_id, client_secret) |
def write_fcs(filename, chn_names, data,
endianness="big",
compat_chn_names=True,
compat_copy=True,
compat_negative=True,
compat_percent=True,
compat_max_int16=10000):
"""Write numpy data to an .fcs file (FCS3.0 file format)
Parameters
----------
filename: str or pathlib.Path
Path to the output .fcs file
ch_names: list of str, length C
Names of the output channels
data: 2d ndarray of shape (N,C)
The numpy array data to store as .fcs file format.
endianness: str
Set to "little" or "big" to define the byte order used.
compat_chn_names: bool
Compatibility mode for 3rd party flow analysis software:
The characters " ", "?", and "_" are removed in the output
channel names.
compat_copy: bool
Do not override the input array `data` when modified in
compatibility mode.
compat_negative: bool
Compatibility mode for 3rd party flow analysis software:
Flip the sign of `data` if its mean is smaller than zero.
compat_percent: bool
Compatibility mode for 3rd party flow analysis software:
If a column in `data` contains values only between 0 and 1,
they are multiplied by 100.
compat_max_int16: int
Compatibility mode for 3rd party flow analysis software:
If a column in `data` has a maximum above this value,
then the display-maximum is set to 2**15.
Notes
-----
- These commonly used unicode characters are replaced: "µ", "²"
- If the input data contain NaN values, the corresponding rows
are excluded due to incompatibility with the FCS file format.
"""
filename = pathlib.Path(filename)
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=float)
# remove rows with nan values
nanrows = np.isnan(data).any(axis=1)
if np.sum(nanrows):
msg = "Rows containing NaNs are not written to {}!".format(filename)
warnings.warn(msg)
data = data[~nanrows]
if endianness not in ["little", "big"]:
raise ValueError("`endianness` must be 'little' or 'big'!")
msg = "length of `chn_names` must match length of 2nd axis of `data`"
assert len(chn_names) == data.shape[1], msg
rpl = [["µ", "u"],
["²", "2"],
]
if compat_chn_names:
# Compatibility mode: Clean up headers.
rpl += [[" ", ""],
["?", ""],
["_", ""],
]
for ii in range(len(chn_names)):
for (a, b) in rpl:
chn_names[ii] = chn_names[ii].replace(a, b)
# Data with values between 0 and 1
pcnt_cands = []
for ch in range(data.shape[1]):
if data[:, ch].min() >= 0 and data[:, ch].max() <= 1:
pcnt_cands.append(ch)
if compat_percent and pcnt_cands:
# Compatibility mode: Scale values b/w 0 and 1 to percent
if compat_copy:
# copy if requested
data = data.copy()
for ch in pcnt_cands:
data[:, ch] *= 100
if compat_negative:
toflip = []
for ch in range(data.shape[1]):
if np.mean(data[:, ch]) < 0:
toflip.append(ch)
if len(toflip):
if compat_copy:
# copy if requested
data = data.copy()
for ch in toflip:
data[:, ch] *= -1
# DATA segment
data1 = data.flatten().tolist()
DATA = struct.pack('>%sf' % len(data1), *data1)
# TEXT segment
header_size = 256
if endianness == "little":
# use little endian
byteord = '1,2,3,4'
else:
# use big endian
byteord = '4,3,2,1'
TEXT = '/$BEGINANALYSIS/0/$ENDANALYSIS/0'
TEXT += '/$BEGINSTEXT/0/$ENDSTEXT/0'
# Add placeholders for $BEGINDATA and $ENDDATA, because we don't
# know yet how long TEXT is.
TEXT += '/$BEGINDATA/{data_start_byte}/$ENDDATA/{data_end_byte}'
TEXT += '/$BYTEORD/{0}/$DATATYPE/F'.format(byteord)
TEXT += '/$MODE/L/$NEXTDATA/0/$TOT/{0}'.format(data.shape[0])
TEXT += '/$PAR/{0}'.format(data.shape[1])
# Check for content of data columns and set range
for jj in range(data.shape[1]):
# Set data maximum to that of int16
if (compat_max_int16 and
np.max(data[:, jj]) > compat_max_int16 and
np.max(data[:, jj]) < 2**15):
pnrange = int(2**15)
# Set range for data with values between 0 and 1
elif jj in pcnt_cands:
if compat_percent: # scaled to 100%
pnrange = 100
else: # not scaled
pnrange = 1
# default: set range to maximum value found in column
else:
pnrange = int(abs(np.max(data[:, jj])))
# TODO:
# - Set log/lin
fmt_str = '/$P{0}B/32/$P{0}E/0,0/$P{0}N/{1}/$P{0}R/{2}/$P{0}D/Linear'
TEXT += fmt_str.format(jj+1, chn_names[jj], pnrange)
TEXT += '/'
# SET $BEGINDATA and $ENDDATA using the current size of TEXT plus padding.
text_padding = 47 # for visual separation and safety
data_start_byte = header_size + len(TEXT) + text_padding
data_end_byte = data_start_byte + len(DATA) - 1
TEXT = TEXT.format(data_start_byte=data_start_byte,
data_end_byte=data_end_byte)
lentxt = len(TEXT)
# Pad TEXT segment with spaces until data_start_byte
TEXT = TEXT.ljust(data_start_byte - header_size, " ")
# HEADER segment
ver = 'FCS3.0'
textfirst = '{0: >8}'.format(header_size)
textlast = '{0: >8}'.format(lentxt + header_size - 1)
# Starting with FCS 3.0, data segment can end beyond byte 99,999,999,
# in which case a zero is written in each of the two header fields (the
# values are given in the text segment keywords $BEGINDATA and $ENDDATA)
if data_end_byte <= 99999999:
datafirst = '{0: >8}'.format(data_start_byte)
datalast = '{0: >8}'.format(data_end_byte)
else:
datafirst = '{0: >8}'.format(0)
datalast = '{0: >8}'.format(0)
anafirst = '{0: >8}'.format(0)
analast = '{0: >8}'.format(0)
HEADER = '{0: <256}'.format(ver + ' '
+ textfirst
+ textlast
+ datafirst
+ datalast
+ anafirst
+ analast)
# Write data
with filename.open("wb") as fd:
fd.write(HEADER.encode("ascii", "replace"))
fd.write(TEXT.encode("ascii", "replace"))
fd.write(DATA)
fd.write(b'00000000') | Write numpy data to an .fcs file (FCS3.0 file format)
Parameters
----------
filename: str or pathlib.Path
Path to the output .fcs file
ch_names: list of str, length C
Names of the output channels
data: 2d ndarray of shape (N,C)
The numpy array data to store as .fcs file format.
endianness: str
Set to "little" or "big" to define the byte order used.
compat_chn_names: bool
Compatibility mode for 3rd party flow analysis software:
The characters " ", "?", and "_" are removed in the output
channel names.
compat_copy: bool
Do not override the input array `data` when modified in
compatibility mode.
compat_negative: bool
Compatibility mode for 3rd party flow analysis software:
Flip the sign of `data` if its mean is smaller than zero.
compat_percent: bool
Compatibility mode for 3rd party flow analysis software:
If a column in `data` contains values only between 0 and 1,
they are multiplied by 100.
compat_max_int16: int
Compatibility mode for 3rd party flow analysis software:
If a column in `data` has a maximum above this value,
then the display-maximum is set to 2**15.
Notes
-----
- These commonly used unicode characters are replaced: "µ", "²"
- If the input data contain NaN values, the corresponding rows
are excluded due to incompatibility with the FCS file format. | Below is the the instruction that describes the task:
### Input:
Write numpy data to an .fcs file (FCS3.0 file format)
Parameters
----------
filename: str or pathlib.Path
Path to the output .fcs file
ch_names: list of str, length C
Names of the output channels
data: 2d ndarray of shape (N,C)
The numpy array data to store as .fcs file format.
endianness: str
Set to "little" or "big" to define the byte order used.
compat_chn_names: bool
Compatibility mode for 3rd party flow analysis software:
The characters " ", "?", and "_" are removed in the output
channel names.
compat_copy: bool
Do not override the input array `data` when modified in
compatibility mode.
compat_negative: bool
Compatibility mode for 3rd party flow analysis software:
Flip the sign of `data` if its mean is smaller than zero.
compat_percent: bool
Compatibility mode for 3rd party flow analysis software:
If a column in `data` contains values only between 0 and 1,
they are multiplied by 100.
compat_max_int16: int
Compatibility mode for 3rd party flow analysis software:
If a column in `data` has a maximum above this value,
then the display-maximum is set to 2**15.
Notes
-----
- These commonly used unicode characters are replaced: "µ", "²"
- If the input data contain NaN values, the corresponding rows
are excluded due to incompatibility with the FCS file format.
### Response:
def write_fcs(filename, chn_names, data,
endianness="big",
compat_chn_names=True,
compat_copy=True,
compat_negative=True,
compat_percent=True,
compat_max_int16=10000):
"""Write numpy data to an .fcs file (FCS3.0 file format)
Parameters
----------
filename: str or pathlib.Path
Path to the output .fcs file
chn_names: list of str, length C
Names of the output channels
data: 2d ndarray of shape (N,C)
The numpy array data to store as .fcs file format.
endianness: str
Set to "little" or "big" to define the byte order used.
compat_chn_names: bool
Compatibility mode for 3rd party flow analysis software:
The characters " ", "?", and "_" are removed in the output
channel names.
compat_copy: bool
Do not override the input array `data` when modified in
compatibility mode.
compat_negative: bool
Compatibility mode for 3rd party flow analysis software:
Flip the sign of `data` if its mean is smaller than zero.
compat_percent: bool
Compatibility mode for 3rd party flow analysis software:
If a column in `data` contains values only between 0 and 1,
they are multiplied by 100.
compat_max_int16: int
Compatibility mode for 3rd party flow analysis software:
If a column in `data` has a maximum above this value,
then the display-maximum is set to 2**15.
Notes
-----
- These commonly used unicode characters are replaced: "µ", "²"
- If the input data contain NaN values, the corresponding rows
are excluded due to incompatibility with the FCS file format.
"""
filename = pathlib.Path(filename)
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=float)
# remove rows with nan values
nanrows = np.isnan(data).any(axis=1)
if np.sum(nanrows):
msg = "Rows containing NaNs are not written to {}!".format(filename)
warnings.warn(msg)
data = data[~nanrows]
if endianness not in ["little", "big"]:
raise ValueError("`endianness` must be 'little' or 'big'!")
msg = "length of `chn_names` must match length of 2nd axis of `data`"
assert len(chn_names) == data.shape[1], msg
rpl = [["µ", "u"],
["²", "2"],
]
if compat_chn_names:
# Compatibility mode: Clean up headers.
rpl += [[" ", ""],
["?", ""],
["_", ""],
]
for ii in range(len(chn_names)):
for (a, b) in rpl:
chn_names[ii] = chn_names[ii].replace(a, b)
# Data with values between 0 and 1
pcnt_cands = []
for ch in range(data.shape[1]):
if data[:, ch].min() >= 0 and data[:, ch].max() <= 1:
pcnt_cands.append(ch)
if compat_percent and pcnt_cands:
# Compatibility mode: Scale values b/w 0 and 1 to percent
if compat_copy:
# copy if requested
data = data.copy()
for ch in pcnt_cands:
data[:, ch] *= 100
if compat_negative:
toflip = []
for ch in range(data.shape[1]):
if np.mean(data[:, ch]) < 0:
toflip.append(ch)
if len(toflip):
if compat_copy:
# copy if requested
data = data.copy()
for ch in toflip:
data[:, ch] *= -1
# DATA segment
data1 = data.flatten().tolist()
DATA = struct.pack('>%sf' % len(data1), *data1)
# TEXT segment
header_size = 256
if endianness == "little":
# use little endian
byteord = '1,2,3,4'
else:
# use big endian
byteord = '4,3,2,1'
TEXT = '/$BEGINANALYSIS/0/$ENDANALYSIS/0'
TEXT += '/$BEGINSTEXT/0/$ENDSTEXT/0'
# Add placeholders for $BEGINDATA and $ENDDATA, because we don't
# know yet how long TEXT is.
TEXT += '/$BEGINDATA/{data_start_byte}/$ENDDATA/{data_end_byte}'
TEXT += '/$BYTEORD/{0}/$DATATYPE/F'.format(byteord)
TEXT += '/$MODE/L/$NEXTDATA/0/$TOT/{0}'.format(data.shape[0])
TEXT += '/$PAR/{0}'.format(data.shape[1])
# Check for content of data columns and set range
for jj in range(data.shape[1]):
# Set data maximum to that of int16
if (compat_max_int16 and
np.max(data[:, jj]) > compat_max_int16 and
np.max(data[:, jj]) < 2**15):
pnrange = int(2**15)
# Set range for data with values between 0 and 1
elif jj in pcnt_cands:
if compat_percent: # scaled to 100%
pnrange = 100
else: # not scaled
pnrange = 1
# default: set range to maximum value found in column
else:
pnrange = int(abs(np.max(data[:, jj])))
# TODO:
# - Set log/lin
fmt_str = '/$P{0}B/32/$P{0}E/0,0/$P{0}N/{1}/$P{0}R/{2}/$P{0}D/Linear'
TEXT += fmt_str.format(jj+1, chn_names[jj], pnrange)
TEXT += '/'
# SET $BEGINDATA and $ENDDATA using the current size of TEXT plus padding.
text_padding = 47 # for visual separation and safety
data_start_byte = header_size + len(TEXT) + text_padding
data_end_byte = data_start_byte + len(DATA) - 1
TEXT = TEXT.format(data_start_byte=data_start_byte,
data_end_byte=data_end_byte)
lentxt = len(TEXT)
# Pad TEXT segment with spaces until data_start_byte
TEXT = TEXT.ljust(data_start_byte - header_size, " ")
# HEADER segment
ver = 'FCS3.0'
textfirst = '{0: >8}'.format(header_size)
textlast = '{0: >8}'.format(lentxt + header_size - 1)
# Starting with FCS 3.0, data segment can end beyond byte 99,999,999,
# in which case a zero is written in each of the two header fields (the
# values are given in the text segment keywords $BEGINDATA and $ENDDATA)
if data_end_byte <= 99999999:
datafirst = '{0: >8}'.format(data_start_byte)
datalast = '{0: >8}'.format(data_end_byte)
else:
datafirst = '{0: >8}'.format(0)
datalast = '{0: >8}'.format(0)
anafirst = '{0: >8}'.format(0)
analast = '{0: >8}'.format(0)
HEADER = '{0: <256}'.format(ver + ' '
+ textfirst
+ textlast
+ datafirst
+ datalast
+ anafirst
+ analast)
# Write data
with filename.open("wb") as fd:
fd.write(HEADER.encode("ascii", "replace"))
fd.write(TEXT.encode("ascii", "replace"))
fd.write(DATA)
fd.write(b'00000000') |
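A minimal usage sketch for the write_fcs function above; the output path, channel names, and random event data below are illustrative assumptions, not taken from the original source.

import numpy as np

rng = np.random.RandomState(0)
events = rng.uniform(0.0, 5000.0, size=(200, 2))      # 200 events, 2 channels
write_fcs("example.fcs", ["FSC-A", "SSC-A"], events)   # hypothetical output file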
def disconnect_sync(self, conn_id):
"""Synchronously disconnect from a connected device
Args:
conn_id (int): A unique identifier that will refer to this connection
Returns:
dict: A dictionary with two elements
'success': a bool with the result of the disconnection attempt
'failure_reason': a string with the reason for the failure if we failed
"""
done = threading.Event()
result = {}
def disconnect_done(conn_id, adapter_id, status, reason):
result['success'] = status
result['failure_reason'] = reason
done.set()
self.disconnect_async(conn_id, disconnect_done)
done.wait()
return result | Synchronously disconnect from a connected device
Args:
conn_id (int): A unique identifier that will refer to this connection
Returns:
dict: A dictionary with two elements
'success': a bool with the result of the disconnection attempt
'failure_reason': a string with the reason for the failure if we failed | Below is the the instruction that describes the task:
### Input:
Synchronously disconnect from a connected device
Args:
conn_id (int): A unique identifier that will refer to this connection
Returns:
dict: A dictionary with two elements
'success': a bool with the result of the disconnection attempt
'failure_reason': a string with the reason for the failure if we failed
### Response:
def disconnect_sync(self, conn_id):
"""Synchronously disconnect from a connected device
Args:
conn_id (int): A unique identifier that will refer to this connection
Returns:
dict: A dictionary with two elements
'success': a bool with the result of the disconnection attempt
'failure_reason': a string with the reason for the failure if we failed
"""
done = threading.Event()
result = {}
def disconnect_done(conn_id, adapter_id, status, reason):
result['success'] = status
result['failure_reason'] = reason
done.set()
self.disconnect_async(conn_id, disconnect_done)
done.wait()
return result |
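A standalone sketch of the same blocking-wrapper pattern, assuming a hypothetical async_op(callback) that eventually invokes callback(ok, reason) from another thread; it is not part of the adapter API above.

import threading

def call_sync(async_op):
    done = threading.Event()
    result = {}

    def on_done(ok, reason):
        result['success'] = ok
        result['failure_reason'] = reason
        done.set()                    # wake the waiting thread

    async_op(on_done)                 # start the asynchronous operation
    done.wait()                       # block until the callback fires
    return result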
def get_rendered_transform_path_relative(self, relative_transform_ref):
"""
Generates a rendered transform path relative to
parent.
:param relative_transform_ref:
:return:
"""
path = self.transform_path
parent = self.parent
while parent is not None and parent is not relative_transform_ref:
path = "{0}/{1}".format(parent.transform_path, path)
parent = parent.parent
return path | Generates a rendered transform path relative to
parent.
:param relative_transform_ref:
:return: | Below is the the instruction that describes the task:
### Input:
Generates a rendered transform path relative to
parent.
:param relative_transform_ref:
:return:
### Response:
def get_rendered_transform_path_relative(self, relative_transform_ref):
"""
Generates a rendered transform path relative to
parent.
:param relative_transform_ref:
:return:
"""
path = self.transform_path
parent = self.parent
while parent is not None and parent is not relative_transform_ref:
path = "{0}/{1}".format(parent.transform_path, path)
parent = parent.parent
return path |
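A self-contained sketch of the same parent walk, using a hypothetical Node class with transform_path and parent attributes in place of the real scene objects.

class Node:
    def __init__(self, transform_path, parent=None):
        self.transform_path = transform_path
        self.parent = parent

def rendered_path(node, relative_to=None):
    path = node.transform_path
    parent = node.parent
    while parent is not None and parent is not relative_to:
        path = "{0}/{1}".format(parent.transform_path, path)
        parent = parent.parent
    return path

root = Node("Root")
arm = Node("Arm", parent=root)
hand = Node("Hand", parent=arm)
print(rendered_path(hand, root))   # Arm/Hand
print(rendered_path(hand))         # Root/Arm/Hand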
def __create_distance_calculator(self):
"""!
@brief Creates distance calculator in line with algorithms parameters.
@return (callable) Distance calculator.
"""
if self.__data_type == 'points':
return lambda index1, index2: self.__metric(self.__pointer_data[index1], self.__pointer_data[index2])
elif self.__data_type == 'distance_matrix':
if isinstance(self.__pointer_data, numpy.matrix):
return lambda index1, index2: self.__pointer_data.item((index1, index2))
return lambda index1, index2: self.__pointer_data[index1][index2]
else:
raise TypeError("Unknown type of data is specified '%s'" % self.__data_type) | !
@brief Creates distance calculator in line with algorithms parameters.
@return (callable) Distance calculator. | Below is the the instruction that describes the task:
### Input:
!
@brief Creates distance calculator in line with algorithms parameters.
@return (callable) Distance calculator.
### Response:
def __create_distance_calculator(self):
"""!
@brief Creates distance calculator in line with algorithms parameters.
@return (callable) Distance calculator.
"""
if self.__data_type == 'points':
return lambda index1, index2: self.__metric(self.__pointer_data[index1], self.__pointer_data[index2])
elif self.__data_type == 'distance_matrix':
if isinstance(self.__pointer_data, numpy.matrix):
return lambda index1, index2: self.__pointer_data.item((index1, index2))
return lambda index1, index2: self.__pointer_data[index1][index2]
else:
raise TypeError("Unknown type of data is specified '%s'" % self.__data_type) |
def clipboard_get(self):
""" Get text from the clipboard.
"""
from IPython.lib.clipboard import (
osx_clipboard_get, tkinter_clipboard_get,
win32_clipboard_get
)
if sys.platform == 'win32':
chain = [win32_clipboard_get, tkinter_clipboard_get]
elif sys.platform == 'darwin':
chain = [osx_clipboard_get, tkinter_clipboard_get]
else:
chain = [tkinter_clipboard_get]
dispatcher = CommandChainDispatcher()
for func in chain:
dispatcher.add(func)
text = dispatcher()
return text | Get text from the clipboard. | Below is the the instruction that describes the task:
### Input:
Get text from the clipboard.
### Response:
def clipboard_get(self):
""" Get text from the clipboard.
"""
from IPython.lib.clipboard import (
osx_clipboard_get, tkinter_clipboard_get,
win32_clipboard_get
)
if sys.platform == 'win32':
chain = [win32_clipboard_get, tkinter_clipboard_get]
elif sys.platform == 'darwin':
chain = [osx_clipboard_get, tkinter_clipboard_get]
else:
chain = [tkinter_clipboard_get]
dispatcher = CommandChainDispatcher()
for func in chain:
dispatcher.add(func)
text = dispatcher()
return text |
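A rough standalone approximation of the fallback idea: try platform-specific getters in order and return the first one that succeeds. IPython's real CommandChainDispatcher is more elaborate (handlers can defer to the next one in the chain), so this is only a behavioural sketch, not the actual API.

def first_working(getters):
    last_error = None
    for getter in getters:
        try:
            return getter()
        except Exception as exc:      # a failed getter falls through to the next one
            last_error = exc
    raise last_error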
def get_subsequence(self, resnums, new_id=None, copy_letter_annotations=True):
"""Get a subsequence as a new SeqProp object given a list of residue numbers"""
# XTODO: documentation
biop_compound_list = []
for resnum in resnums:
# XTODO can be sped up by separating into ranges based on continuous resnums
feat = FeatureLocation(resnum - 1, resnum)
biop_compound_list.append(feat)
if len(biop_compound_list) == 0:
log.debug('Zero length subsequence')
return
elif len(biop_compound_list) == 1:
log.debug('Subsequence only one residue long')
sub_feature_location = biop_compound_list[0]
else:
sub_feature_location = CompoundLocation(biop_compound_list)
try:
sub_feature = sub_feature_location.extract(self)
except TypeError:
log.critical('SeqProp {}: unknown error when trying to get subsequence - please investigate! '
'Try using a feature to extract a subsequence from the SeqProp'.format(self.id))
return
if not new_id:
new_id = '{}_subseq'.format(self.id)
new_sp = SeqProp(id=new_id, seq=sub_feature.seq)
if copy_letter_annotations:
new_sp.letter_annotations = sub_feature.letter_annotations
return new_sp | Get a subsequence as a new SeqProp object given a list of residue numbers | Below is the the instruction that describes the task:
### Input:
Get a subsequence as a new SeqProp object given a list of residue numbers
### Response:
def get_subsequence(self, resnums, new_id=None, copy_letter_annotations=True):
"""Get a subsequence as a new SeqProp object given a list of residue numbers"""
# XTODO: documentation
biop_compound_list = []
for resnum in resnums:
# XTODO can be sped up by separating into ranges based on continuous resnums
feat = FeatureLocation(resnum - 1, resnum)
biop_compound_list.append(feat)
if len(biop_compound_list) == 0:
log.debug('Zero length subsequence')
return
elif len(biop_compound_list) == 1:
log.debug('Subsequence only one residue long')
sub_feature_location = biop_compound_list[0]
else:
sub_feature_location = CompoundLocation(biop_compound_list)
try:
sub_feature = sub_feature_location.extract(self)
except TypeError:
log.critical('SeqProp {}: unknown error when trying to get subsequence - please investigate! '
'Try using a feature to extract a subsequence from the SeqProp'.format(self.id))
return
if not new_id:
new_id = '{}_subseq'.format(self.id)
new_sp = SeqProp(id=new_id, seq=sub_feature.seq)
if copy_letter_annotations:
new_sp.letter_annotations = sub_feature.letter_annotations
return new_sp |
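A plain Biopython sketch of the location arithmetic used above: 1-based residue numbers become zero-based single-residue FeatureLocations and are joined into a CompoundLocation. The sequence and residue numbers are made up for illustration.

from Bio.Seq import Seq
from Bio.SeqFeature import FeatureLocation, CompoundLocation

seq = Seq("MKVLINN")
resnums = [2, 5]                                    # 1-based residue numbers
parts = [FeatureLocation(r - 1, r) for r in resnums]
location = CompoundLocation(parts)
print(location.extract(seq))                        # KI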
def data(self, data):
""" Sets the font data of this item.
Does type conversion to ensure data is always of the correct type.
Also updates the children (which is the reason for this property to be overloaded).
"""
self._data = self._enforceDataType(data) # Enforce self._data to be a QFont
self.familyCti.data = fontFamilyIndex(self.data, list(self.familyCti.iterConfigValues))
self.pointSizeCti.data = self.data.pointSize()
self.weightCti.data = fontWeightIndex(self.data, list(self.weightCti.iterConfigValues))
self.italicCti.data = self.data.italic() | Sets the font data of this item.
Does type conversion to ensure data is always of the correct type.
Also updates the children (which is the reason for this property to be overloaded). | Below is the the instruction that describes the task:
### Input:
Sets the font data of this item.
Does type conversion to ensure data is always of the correct type.
Also updates the children (which is the reason for this property to be overloaded).
### Response:
def data(self, data):
""" Sets the font data of this item.
Does type conversion to ensure data is always of the correct type.
Also updates the children (which is the reason for this property to be overloaded).
"""
self._data = self._enforceDataType(data) # Enforce self._data to be a QFont
self.familyCti.data = fontFamilyIndex(self.data, list(self.familyCti.iterConfigValues))
self.pointSizeCti.data = self.data.pointSize()
self.weightCti.data = fontWeightIndex(self.data, list(self.weightCti.iterConfigValues))
self.italicCti.data = self.data.italic() |
def selections(self):
"""Build list of extra selections for rectangular selection"""
selections = []
cursors = self.cursors()
if cursors:
background = self._qpart.palette().color(QPalette.Highlight)
foreground = self._qpart.palette().color(QPalette.HighlightedText)
for cursor in cursors:
selection = QTextEdit.ExtraSelection()
selection.format.setBackground(background)
selection.format.setForeground(foreground)
selection.cursor = cursor
selections.append(selection)
return selections | Build list of extra selections for rectangular selection | Below is the the instruction that describes the task:
### Input:
Build list of extra selections for rectangular selection
### Response:
def selections(self):
"""Build list of extra selections for rectangular selection"""
selections = []
cursors = self.cursors()
if cursors:
background = self._qpart.palette().color(QPalette.Highlight)
foreground = self._qpart.palette().color(QPalette.HighlightedText)
for cursor in cursors:
selection = QTextEdit.ExtraSelection()
selection.format.setBackground(background)
selection.format.setForeground(foreground)
selection.cursor = cursor
selections.append(selection)
return selections |
def check_sha1(filename, sha1_hash):
"""Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
return sha1.hexdigest() == sha1_hash | Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash. | Below is the the instruction that describes the task:
### Input:
Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
### Response:
def check_sha1(filename, sha1_hash):
"""Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
return sha1.hexdigest() == sha1_hash |
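A small usage sketch; the file name is hypothetical and the expected hash is computed on the spot rather than taken from a real model zoo.

import hashlib

payload = b"example parameters"
expected = hashlib.sha1(payload).hexdigest()

with open("params.bin", "wb") as f:                 # hypothetical local file
    f.write(payload)

print(check_sha1("params.bin", expected))           # True
print(check_sha1("params.bin", "0" * 40))           # False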
def is_smooth_from(self, previous, warning_on=True):
"""[Warning: The name of this method is somewhat misleading (yet kept
for compatibility with scripts created using svg.path 2.0). This
method is meant only for d string creation and should not be used to
check for kinks. To check a segment for differentiability, use the
joins_smoothly_with() method instead.]"""
if warning_on:
warn(_is_smooth_from_warning)
if isinstance(previous, CubicBezier):
return (self.start == previous.end and
(self.control1 - self.start) == (
previous.end - previous.control2))
else:
return self.control1 == self.start | [Warning: The name of this method is somewhat misleading (yet kept
for compatibility with scripts created using svg.path 2.0). This
method is meant only for d string creation and should not be used to
check for kinks. To check a segment for differentiability, use the
joins_smoothly_with() method instead.] | Below is the the instruction that describes the task:
### Input:
[Warning: The name of this method is somewhat misleading (yet kept
for compatibility with scripts created using svg.path 2.0). This
method is meant only for d string creation and should not be used to
check for kinks. To check a segment for differentiability, use the
joins_smoothly_with() method instead.]
### Response:
def is_smooth_from(self, previous, warning_on=True):
"""[Warning: The name of this method is somewhat misleading (yet kept
for compatibility with scripts created using svg.path 2.0). This
method is meant only for d string creation and should not be used to
check for kinks. To check a segment for differentiability, use the
joins_smoothly_with() method instead.]"""
if warning_on:
warn(_is_smooth_from_warning)
if isinstance(previous, CubicBezier):
return (self.start == previous.end and
(self.control1 - self.start) == (
previous.end - previous.control2))
else:
return self.control1 == self.start |
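An illustrative call with two hand-picked cubic segments (complex-number control points), assuming CubicBezier can be imported from svgpathtools or the local path module defining the method above. The second segment's first control point mirrors the first segment's last control point across the shared endpoint, so the check passes.

from svgpathtools import CubicBezier

seg1 = CubicBezier(0 + 0j, 1 + 1j, 2 + 1j, 3 + 0j)
seg2 = CubicBezier(3 + 0j, 4 - 1j, 5 + 0j, 6 + 0j)
print(seg2.is_smooth_from(seg1, warning_on=False))   # True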
def _get_workflow_by_uuid(workflow_uuid):
"""Get Workflow with UUIDv4.
:param workflow_uuid: UUIDv4 of a Workflow.
:type workflow_uuid: String representing a valid UUIDv4.
:rtype: reana-db.models.Workflow
"""
from reana_db.models import Workflow
workflow = Workflow.query.filter(Workflow.id_ ==
workflow_uuid).first()
if not workflow:
raise ValueError(
'REANA_WORKON is set to {0}, but '
'that workflow does not exist. '
'Please set your REANA_WORKON environment '
'variable appropriately.'.
format(workflow_uuid))
return workflow | Get Workflow with UUIDv4.
:param workflow_uuid: UUIDv4 of a Workflow.
:type workflow_uuid: String representing a valid UUIDv4.
:rtype: reana-db.models.Workflow | Below is the the instruction that describes the task:
### Input:
Get Workflow with UUIDv4.
:param workflow_uuid: UUIDv4 of a Workflow.
:type workflow_uuid: String representing a valid UUIDv4.
:rtype: reana-db.models.Workflow
### Response:
def _get_workflow_by_uuid(workflow_uuid):
"""Get Workflow with UUIDv4.
:param workflow_uuid: UUIDv4 of a Workflow.
:type workflow_uuid: String representing a valid UUIDv4.
:rtype: reana-db.models.Workflow
"""
from reana_db.models import Workflow
workflow = Workflow.query.filter(Workflow.id_ ==
workflow_uuid).first()
if not workflow:
raise ValueError(
'REANA_WORKON is set to {0}, but '
'that workflow does not exist. '
'Please set your REANA_WORKON environment '
'variable appropriately.'.
format(workflow_uuid))
return workflow |
def ntp_server_key(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ntp = ET.SubElement(config, "ntp", xmlns="urn:brocade.com:mgmt:brocade-ntp")
server = ET.SubElement(ntp, "server")
ip_key = ET.SubElement(server, "ip")
ip_key.text = kwargs.pop('ip')
use_vrf_key = ET.SubElement(server, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
key = ET.SubElement(server, "key")
key.text = kwargs.pop('key')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ntp_server_key(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ntp = ET.SubElement(config, "ntp", xmlns="urn:brocade.com:mgmt:brocade-ntp")
server = ET.SubElement(ntp, "server")
ip_key = ET.SubElement(server, "ip")
ip_key.text = kwargs.pop('ip')
use_vrf_key = ET.SubElement(server, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
key = ET.SubElement(server, "key")
key.text = kwargs.pop('key')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
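A standalone sketch of the same ElementTree construction without the callback plumbing; the IP, VRF, and key values are placeholders.

import xml.etree.ElementTree as ET

config = ET.Element("config")
ntp = ET.SubElement(config, "ntp", xmlns="urn:brocade.com:mgmt:brocade-ntp")
server = ET.SubElement(ntp, "server")
ET.SubElement(server, "ip").text = "192.0.2.10"
ET.SubElement(server, "use-vrf").text = "mgmt-vrf"
ET.SubElement(server, "key").text = "42"
print(ET.tostring(config).decode())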
def __onPickEvent(self, event=None):
"""pick events"""
legline = event.artist
trace = self.conf.legend_map.get(legline, None)
visible = True
if trace is not None and self.conf.hidewith_legend:
line, legline, legtext = trace
visible = not line.get_visible()
line.set_visible(visible)
if visible:
legline.set_zorder(10.00)
legline.set_alpha(1.00)
legtext.set_zorder(10.00)
legtext.set_alpha(1.00)
else:
legline.set_alpha(0.50)
legtext.set_alpha(0.50) | pick events | Below is the the instruction that describes the task:
### Input:
pick events
### Response:
def __onPickEvent(self, event=None):
"""pick events"""
legline = event.artist
trace = self.conf.legend_map.get(legline, None)
visible = True
if trace is not None and self.conf.hidewith_legend:
line, legline, legtext = trace
visible = not line.get_visible()
line.set_visible(visible)
if visible:
legline.set_zorder(10.00)
legline.set_alpha(1.00)
legtext.set_zorder(10.00)
legtext.set_alpha(1.00)
else:
legline.set_alpha(0.50)
legtext.set_alpha(0.50) |
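A minimal matplotlib sketch of the same legend-picking idea, independent of the plotting-configuration object used above; the data and pick tolerance are arbitrary.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
line, = ax.plot([0, 1, 2], [0, 1, 4], label="trace")
legend = ax.legend()
legline = legend.get_lines()[0]
legline.set_picker(5)                       # 5-point pick tolerance

def on_pick(event):
    if event.artist is legline:
        visible = not line.get_visible()
        line.set_visible(visible)
        legline.set_alpha(1.0 if visible else 0.5)
        fig.canvas.draw_idle()

fig.canvas.mpl_connect('pick_event', on_pick)
plt.show()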
def list(self, filter_function=None, list_root='', max_results=1, reverse_order=False, previous_file=''):
''' a method to list files on localhost from walk of directories
:param filter_function: (keyword arguments) function used to filter results
:param list_root: string with localhost path from which to root list of files
:param max_results: integer with maximum number of results to return
:param reverse_order: boolean to determine alphabetical direction of walk
:param previous_file: string with absolute path of file to begin search after
:return: list of file absolute path strings
NOTE: the filter_function must be able to accept keyword arguments and
return a value that can evaluate to true or false. while walking
the local file structure, the metadata for each file will be
fed to the filter function. if the function evaluates this input
and returns a true value the file will be included in the list
results.
fields produced by the metadata function are listed in the
self.file_model.schema
'''
__name__ = '%s.list(...)' % self.__class__.__name__
# validate input
input_kwargs = [list_root, max_results, previous_file]
input_names = ['.list_root', '.max_results', '.previous_file']
for i in range(len(input_kwargs)):
if input_kwargs[i]:
self.fields.validate(input_kwargs[i], input_names[i])
# validate filter function
if filter_function:
try:
filter_function(**self.file_model.schema)
except:
err_msg = __name__.replace('...', 'filter_function=%s' % filter_function.__class__.__name__)
raise TypeError('%s must accept keyword arguments.' % err_msg)
# validate that previous file exists
file_exists = False
if previous_file:
if os.path.exists(previous_file):
if os.path.isfile(previous_file):
file_exists = True
if not file_exists:
err_msg = __name__.replace('...', 'previous_file="%s"' % previous_file)
raise ValueError('%s must be a valid file.' % err_msg)
# construct empty results object
results_list = []
# determine root for walk
if list_root:
if not os.path.isdir(list_root):
return results_list
else:
list_root = './'
# walk directory structure to find files
for file_path in self.walk(list_root, reverse_order, previous_file):
if filter_function:
file_metadata = self.metadata(file_path)
if filter_function(**file_metadata):
results_list.append(file_path)
else:
results_list.append(file_path)
# return results list
if len(results_list) == max_results:
return results_list
return results_list | a method to list files on localhost from walk of directories
:param filter_function: (keyword arguments) function used to filter results
:param list_root: string with localhost path from which to root list of files
:param max_results: integer with maximum number of results to return
:param reverse_order: boolean to determine alphabetical direction of walk
:param previous_file: string with absolute path of file to begin search after
:return: list of file absolute path strings
NOTE: the filter_function must be able to accept keyword arguments and
return a value that can evaluate to true or false. while walking
the local file structure, the metadata for each file will be
fed to the filter function. if the function evaluates this input
and returns a true value the file will be included in the list
results.
fields produced by the metadata function are listed in the
self.file_model.schema | Below is the the instruction that describes the task:
### Input:
a method to list files on localhost from walk of directories
:param filter_function: (keyword arguments) function used to filter results
:param list_root: string with localhost path from which to root list of files
:param max_results: integer with maximum number of results to return
:param reverse_order: boolean to determine alphabetical direction of walk
:param previous_file: string with absolute path of file to begin search after
:return: list of file absolute path strings
NOTE: the filter_function must be able to accept keyword arguments and
return a value that can evaluate to true or false. while walking
the local file structure, the metadata for each file will be
fed to the filter function. if the function evaluates this input
and returns a true value the file will be included in the list
results.
fields produced by the metadata function are listed in the
self.file_model.schema
### Response:
def list(self, filter_function=None, list_root='', max_results=1, reverse_order=False, previous_file=''):
''' a method to list files on localhost from walk of directories
:param filter_function: (keyword arguments) function used to filter results
:param list_root: string with localhost path from which to root list of files
:param max_results: integer with maximum number of results to return
:param reverse_order: boolean to determine alphabetical direction of walk
:param previous_file: string with absolute path of file to begin search after
:return: list of file absolute path strings
NOTE: the filter_function must be able to accept keyword arguments and
return a value that can evaluate to true or false. while walking
the local file structure, the metadata for each file will be
fed to the filter function. if the function evaluates this input
and returns a true value the file will be included in the list
results.
fields produced by the metadata function are listed in the
self.file_model.schema
'''
__name__ = '%s.list(...)' % self.__class__.__name__
# validate input
input_kwargs = [list_root, max_results, previous_file]
input_names = ['.list_root', '.max_results', '.previous_file']
for i in range(len(input_kwargs)):
if input_kwargs[i]:
self.fields.validate(input_kwargs[i], input_names[i])
# validate filter function
if filter_function:
try:
filter_function(**self.file_model.schema)
except:
err_msg = __name__.replace('...', 'filter_function=%s' % filter_function.__class__.__name__)
raise TypeError('%s must accept keyword arguments.' % err_msg)
# validate that previous file exists
file_exists = False
if previous_file:
if os.path.exists(previous_file):
if os.path.isfile(previous_file):
file_exists = True
if not file_exists:
err_msg = __name__.replace('...', 'previous_file="%s"' % previous_file)
raise ValueError('%s must be a valid file.' % err_msg)
# construct empty results object
results_list = []
# determine root for walk
if list_root:
if not os.path.isdir(list_root):
return results_list
else:
list_root = './'
# walk directory structure to find files
for file_path in self.walk(list_root, reverse_order, previous_file):
if filter_function:
file_metadata = self.metadata(file_path)
if filter_function(**file_metadata):
results_list.append(file_path)
else:
results_list.append(file_path)
# return results list
if len(results_list) == max_results:
return results_list
return results_list |
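A hypothetical filter function for the method above; it assumes the file model's metadata schema exposes a numeric 'size' field, which may differ in the real schema.

def larger_than_1kb(**file_metadata):
    # assumes a numeric 'size' field in the metadata schema
    return file_metadata.get('size', 0) > 1024

# hypothetical call, given `client` is an instance of the class above:
# matches = client.list(filter_function=larger_than_1kb,
#                       list_root='/tmp',
#                       max_results=10)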
def obfuscate_builtins(module, tokens, name_generator, table=None):
"""
Inserts an assignment, '<obfuscated identifier> = <builtin function>' at
the beginning of *tokens* (after the shebang and encoding if present) for
every Python built-in function that is used inside *tokens*. Also, replaces
all of said builti-in functions in *tokens* with each respective obfuscated
identifer.
Obfuscated identifier names are pulled out of name_generator via next().
If *table* is provided, replacements will be looked up there before
generating a new unique name.
"""
used_builtins = analyze.enumerate_builtins(tokens)
obfuscated_assignments = remap_name(name_generator, used_builtins, table)
replacements = []
for assignment in obfuscated_assignments.split('\n'):
replacements.append(assignment.split('=')[0])
replacement_dict = dict(zip(used_builtins, replacements))
if table:
table[0].update(replacement_dict)
iter_replacements = iter(replacements)
for builtin in used_builtins:
replace_obfuscatables(
module, tokens, obfuscate_unique, builtin, iter_replacements)
# Check for shebangs and encodings before we do anything else
skip_tokens = 0
matched_shebang = False
matched_encoding = False
for tok in tokens[0:4]: # Will always be in the first four tokens
line = tok[4]
if analyze.shebang.match(line): # (e.g. '#!/usr/bin/env python')
if not matched_shebang:
matched_shebang = True
skip_tokens += 1
elif analyze.encoding.match(line): # (e.g. '# -*- coding: utf-8 -*-')
if not matched_encoding:
matched_encoding = True
skip_tokens += 1
insert_in_next_line(tokens, skip_tokens, obfuscated_assignments) | Inserts an assignment, '<obfuscated identifier> = <builtin function>' at
the beginning of *tokens* (after the shebang and encoding if present) for
every Python built-in function that is used inside *tokens*. Also, replaces
all of said built-in functions in *tokens* with each respective obfuscated
identifier.
Obfuscated identifier names are pulled out of name_generator via next().
If *table* is provided, replacements will be looked up there before
generating a new unique name. | Below is the the instruction that describes the task:
### Input:
Inserts an assignment, '<obfuscated identifier> = <builtin function>' at
the beginning of *tokens* (after the shebang and encoding if present) for
every Python built-in function that is used inside *tokens*. Also, replaces
all of said builti-in functions in *tokens* with each respective obfuscated
identifer.
Obfuscated identifier names are pulled out of name_generator via next().
If *table* is provided, replacements will be looked up there before
generating a new unique name.
### Response:
def obfuscate_builtins(module, tokens, name_generator, table=None):
"""
Inserts an assignment, '<obfuscated identifier> = <builtin function>' at
the beginning of *tokens* (after the shebang and encoding if present) for
every Python built-in function that is used inside *tokens*. Also, replaces
all of said built-in functions in *tokens* with each respective obfuscated
identifier.
Obfuscated identifier names are pulled out of name_generator via next().
If *table* is provided, replacements will be looked up there before
generating a new unique name.
"""
used_builtins = analyze.enumerate_builtins(tokens)
obfuscated_assignments = remap_name(name_generator, used_builtins, table)
replacements = []
for assignment in obfuscated_assignments.split('\n'):
replacements.append(assignment.split('=')[0])
replacement_dict = dict(zip(used_builtins, replacements))
if table:
table[0].update(replacement_dict)
iter_replacements = iter(replacements)
for builtin in used_builtins:
replace_obfuscatables(
module, tokens, obfuscate_unique, builtin, iter_replacements)
# Check for shebangs and encodings before we do anything else
skip_tokens = 0
matched_shebang = False
matched_encoding = False
for tok in tokens[0:4]: # Will always be in the first four tokens
line = tok[4]
if analyze.shebang.match(line): # (e.g. '#!/usr/bin/env python')
if not matched_shebang:
matched_shebang = True
skip_tokens += 1
elif analyze.encoding.match(line): # (e.g. '# -*- coding: utf-8 -*-')
if not matched_encoding:
matched_encoding = True
skip_tokens += 1
insert_in_next_line(tokens, skip_tokens, obfuscated_assignments) |
def msetnx(self, *args, **kwargs):
"""
Sets key/values based on a mapping if none of the keys are already set.
Mapping can be supplied as a single dictionary argument or as kwargs.
Returns a boolean indicating if the operation was successful.
"""
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSETNX requires **kwargs or a single dict arg')
mapping = args[0]
else:
mapping = kwargs
if len(mapping) == 0:
raise ResponseError("wrong number of arguments for 'msetnx' command")
for key in mapping.keys():
if self._encode(key) in self.redis:
return False
for key, value in mapping.items():
self.set(key, value)
return True | Sets key/values based on a mapping if none of the keys are already set.
Mapping can be supplied as a single dictionary argument or as kwargs.
Returns a boolean indicating if the operation was successful. | Below is the the instruction that describes the task:
### Input:
Sets key/values based on a mapping if none of the keys are already set.
Mapping can be supplied as a single dictionary argument or as kwargs.
Returns a boolean indicating if the operation was successful.
### Response:
def msetnx(self, *args, **kwargs):
"""
Sets key/values based on a mapping if none of the keys are already set.
Mapping can be supplied as a single dictionary argument or as kwargs.
Returns a boolean indicating if the operation was successful.
"""
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSETNX requires **kwargs or a single dict arg')
mapping = args[0]
else:
mapping = kwargs
if len(mapping) == 0:
raise ResponseError("wrong number of arguments for 'msetnx' command")
for key in mapping.keys():
if self._encode(key) in self.redis:
return False
for key, value in mapping.items():
self.set(key, value)
return True |
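A standalone sketch of the same all-or-nothing semantics over a plain dict, leaving out the mock-redis encoding details.

def msetnx_plain(store, mapping):
    if any(key in store for key in mapping):
        return False                    # refuse to set anything if one key exists
    store.update(mapping)
    return True

store = {}
print(msetnx_plain(store, {'a': 1, 'b': 2}))   # True
print(msetnx_plain(store, {'b': 3, 'c': 4}))   # False, and 'c' stays unset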
def _get_file_from_s3(creds, metadata, saltenv, bucket, path,
cached_file_path):
'''
Checks the local cache for the file, if it's old or missing go grab the
file from S3 and update the cache
'''
# check the local cache...
if os.path.isfile(cached_file_path):
file_meta = _find_file_meta(metadata, bucket, saltenv, path)
file_md5 = "".join(list(filter(str.isalnum, file_meta['ETag']))) \
if file_meta else None
cached_md5 = salt.utils.hashutils.get_hash(cached_file_path, 'md5')
# hashes match we have a cache hit
log.debug('Cached file: path=%s, md5=%s, etag=%s',
cached_file_path, cached_md5, file_md5)
if cached_md5 == file_md5:
return
# ... or get the file from S3
__utils__['s3.query'](
key=creds.key,
keyid=creds.keyid,
kms_keyid=creds.kms_keyid,
bucket=bucket,
service_url=creds.service_url,
path=_quote(path),
local_file=cached_file_path,
verify_ssl=creds.verify_ssl,
location=creds.location,
path_style=creds.path_style,
https_enable=creds.https_enable
) | Checks the local cache for the file, if it's old or missing go grab the
file from S3 and update the cache | Below is the the instruction that describes the task:
### Input:
Checks the local cache for the file, if it's old or missing go grab the
file from S3 and update the cache
### Response:
def _get_file_from_s3(creds, metadata, saltenv, bucket, path,
cached_file_path):
'''
Checks the local cache for the file, if it's old or missing go grab the
file from S3 and update the cache
'''
# check the local cache...
if os.path.isfile(cached_file_path):
file_meta = _find_file_meta(metadata, bucket, saltenv, path)
file_md5 = "".join(list(filter(str.isalnum, file_meta['ETag']))) \
if file_meta else None
cached_md5 = salt.utils.hashutils.get_hash(cached_file_path, 'md5')
# hashes match we have a cache hit
log.debug('Cached file: path=%s, md5=%s, etag=%s',
cached_file_path, cached_md5, file_md5)
if cached_md5 == file_md5:
return
# ... or get the file from S3
__utils__['s3.query'](
key=creds.key,
keyid=creds.keyid,
kms_keyid=creds.kms_keyid,
bucket=bucket,
service_url=creds.service_url,
path=_quote(path),
local_file=cached_file_path,
verify_ssl=creds.verify_ssl,
location=creds.location,
path_style=creds.path_style,
https_enable=creds.https_enable
) |
def create(vm_):
'''
Create a single VM from a data dict
'''
if 'driver' not in vm_:
vm_['driver'] = vm_['provider']
private_networking = config.get_cloud_config_value(
'enable_private_network', vm_, __opts__, search_global=False, default=False,
)
startup_script = config.get_cloud_config_value(
'startup_script_id', vm_, __opts__, search_global=False, default=None,
)
if startup_script and str(startup_script) not in avail_scripts():
log.error('Your Vultr account does not have a startup script with ID %s', str(startup_script))
return False
if private_networking is not None:
if not isinstance(private_networking, bool):
raise SaltCloudConfigError("'private_networking' should be a boolean value.")
if private_networking is True:
enable_private_network = 'yes'
else:
enable_private_network = 'no'
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID')
if not osid:
log.error('Vultr does not have an image with id or name %s', vm_['image'])
return False
vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID')
if not vpsplanid:
log.error('Vultr does not have a size with id or name %s', vm_['size'])
return False
dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID')
if not dcid:
log.error('Vultr does not have a location with id or name %s', vm_['location'])
return False
kwargs = {
'label': vm_['name'],
'OSID': osid,
'VPSPLANID': vpsplanid,
'DCID': dcid,
'hostname': vm_['name'],
'enable_private_network': enable_private_network,
}
if startup_script:
kwargs['SCRIPTID'] = startup_script
log.info('Creating Cloud VM %s', vm_['name'])
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
try:
data = _query('server/create', method='POST', data=_urlencode(kwargs))
if int(data.get('status', '200')) >= 300:
log.error(
'Error creating %s on Vultr\n\n'
'Vultr API returned %s\n', vm_['name'], data
)
log.error('Status 412 may mean that you are requesting an\n'
'invalid location, image, or size.')
__utils__['cloud.fire_event'](
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
args={'kwargs': kwargs},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
return False
except Exception as exc:
log.error(
'Error creating %s on Vultr\n\n'
'The following exception was thrown when trying to '
'run the initial deployment:\n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
__utils__['cloud.fire_event'](
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
args={'kwargs': kwargs},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
return False
def wait_for_hostname():
'''
Wait for the IP address to become available
'''
data = show_instance(vm_['name'], call='action')
main_ip = six.text_type(data.get('main_ip', '0'))
if main_ip.startswith('0'):
time.sleep(3)
return False
return data['main_ip']
def wait_for_default_password():
'''
Wait for the default password to become available
'''
data = show_instance(vm_['name'], call='action')
# print("Waiting for default password")
# pprint.pprint(data)
if six.text_type(data.get('default_password', '')) == '':
time.sleep(1)
return False
return data['default_password']
def wait_for_status():
'''
Wait for the instance status to become active
'''
data = show_instance(vm_['name'], call='action')
# print("Waiting for status normal")
# pprint.pprint(data)
if six.text_type(data.get('status', '')) != 'active':
time.sleep(1)
return False
return data['default_password']
def wait_for_server_state():
'''
Wait for the server state to become ok
'''
data = show_instance(vm_['name'], call='action')
# print("Waiting for server state ok")
# pprint.pprint(data)
if six.text_type(data.get('server_state', '')) != 'ok':
time.sleep(1)
return False
return data['default_password']
vm_['ssh_host'] = __utils__['cloud.wait_for_fun'](
wait_for_hostname,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
vm_['password'] = __utils__['cloud.wait_for_fun'](
wait_for_default_password,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__utils__['cloud.wait_for_fun'](
wait_for_status,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__utils__['cloud.wait_for_fun'](
wait_for_server_state,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__opts__['hard_timeout'] = config.get_cloud_config_value(
'hard_timeout',
get_configured_provider(),
__opts__,
search_global=False,
default=None,
)
# Bootstrap
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(show_instance(vm_['name'], call='action'))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret | Create a single VM from a data dict | Below is the the instruction that describes the task:
### Input:
Create a single VM from a data dict
### Response:
def create(vm_):
'''
Create a single VM from a data dict
'''
if 'driver' not in vm_:
vm_['driver'] = vm_['provider']
private_networking = config.get_cloud_config_value(
'enable_private_network', vm_, __opts__, search_global=False, default=False,
)
startup_script = config.get_cloud_config_value(
'startup_script_id', vm_, __opts__, search_global=False, default=None,
)
if startup_script and str(startup_script) not in avail_scripts():
log.error('Your Vultr account does not have a startup script with ID %s', str(startup_script))
return False
if private_networking is not None:
if not isinstance(private_networking, bool):
raise SaltCloudConfigError("'private_networking' should be a boolean value.")
if private_networking is True:
enable_private_network = 'yes'
else:
enable_private_network = 'no'
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID')
if not osid:
log.error('Vultr does not have an image with id or name %s', vm_['image'])
return False
vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID')
if not vpsplanid:
log.error('Vultr does not have a size with id or name %s', vm_['size'])
return False
dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID')
if not dcid:
log.error('Vultr does not have a location with id or name %s', vm_['location'])
return False
kwargs = {
'label': vm_['name'],
'OSID': osid,
'VPSPLANID': vpsplanid,
'DCID': dcid,
'hostname': vm_['name'],
'enable_private_network': enable_private_network,
}
if startup_script:
kwargs['SCRIPTID'] = startup_script
log.info('Creating Cloud VM %s', vm_['name'])
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args={
'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
try:
data = _query('server/create', method='POST', data=_urlencode(kwargs))
if int(data.get('status', '200')) >= 300:
log.error(
'Error creating %s on Vultr\n\n'
'Vultr API returned %s\n', vm_['name'], data
)
log.error('Status 412 may mean that you are requesting an\n'
'invalid location, image, or size.')
__utils__['cloud.fire_event'](
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
args={'kwargs': kwargs},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
return False
except Exception as exc:
log.error(
'Error creating %s on Vultr\n\n'
'The following exception was thrown when trying to '
'run the initial deployment:\n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
__utils__['cloud.fire_event'](
'event',
'instance request failed',
'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
args={'kwargs': kwargs},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport'],
)
return False
def wait_for_hostname():
'''
Wait for the IP address to become available
'''
data = show_instance(vm_['name'], call='action')
main_ip = six.text_type(data.get('main_ip', '0'))
if main_ip.startswith('0'):
time.sleep(3)
return False
return data['main_ip']
def wait_for_default_password():
'''
Wait for the default password to become available
'''
data = show_instance(vm_['name'], call='action')
# print("Waiting for default password")
# pprint.pprint(data)
if six.text_type(data.get('default_password', '')) == '':
time.sleep(1)
return False
return data['default_password']
def wait_for_status():
'''
Wait for the instance status to become active
'''
data = show_instance(vm_['name'], call='action')
# print("Waiting for status normal")
# pprint.pprint(data)
if six.text_type(data.get('status', '')) != 'active':
time.sleep(1)
return False
return data['default_password']
def wait_for_server_state():
'''
Wait for the server state to become ok
'''
data = show_instance(vm_['name'], call='action')
# print("Waiting for server state ok")
# pprint.pprint(data)
if six.text_type(data.get('server_state', '')) != 'ok':
time.sleep(1)
return False
return data['default_password']
vm_['ssh_host'] = __utils__['cloud.wait_for_fun'](
wait_for_hostname,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
vm_['password'] = __utils__['cloud.wait_for_fun'](
wait_for_default_password,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__utils__['cloud.wait_for_fun'](
wait_for_status,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__utils__['cloud.wait_for_fun'](
wait_for_server_state,
timeout=config.get_cloud_config_value(
'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
)
__opts__['hard_timeout'] = config.get_cloud_config_value(
'hard_timeout',
get_configured_provider(),
__opts__,
search_global=False,
default=None,
)
# Bootstrap
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(show_instance(vm_['name'], call='action'))
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret |
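A generic sketch of the poll-until-ready pattern behind the wait_for_* helpers above, using a hypothetical probe() that returns a falsy value until the resource is ready; the real code hands the waiting and timeout handling to salt's cloud.wait_for_fun utility.

import time

def wait_for(probe, timeout=900, interval=3):
    deadline = time.time() + timeout
    while time.time() < deadline:
        value = probe()                 # e.g. returns '' or False until ready
        if value:
            return value
        time.sleep(interval)
    raise TimeoutError('resource was not ready within %s seconds' % timeout)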
def _write_value_failed(self, dbus_error):
"""
Called when the write request has failed.
"""
error = _error_from_dbus_error(dbus_error)
self.service.device.characteristic_write_value_failed(characteristic=self, error=error) | Called when the write request has failed. | Below is the the instruction that describes the task:
### Input:
Called when the write request has failed.
### Response:
def _write_value_failed(self, dbus_error):
"""
Called when the write request has failed.
"""
error = _error_from_dbus_error(dbus_error)
self.service.device.characteristic_write_value_failed(characteristic=self, error=error) |
def round_robin(members, items):
"""
Default allocator with a round robin approach.
In this algorithm, each member of the group is cycled over and given an
item until there are no items left. This assumes roughly equal capacity
for each member and aims for even distribution of item counts.
"""
allocation = collections.defaultdict(set)
for member, item in zip(itertools.cycle(members), items):
allocation[member].add(item)
return allocation | Default allocator with a round robin approach.
In this algorithm, each member of the group is cycled over and given an
item until there are no items left. This assumes roughly equal capacity
for each member and aims for even distribution of item counts. | Below is the the instruction that describes the task:
### Input:
Default allocator with a round robin approach.
In this algorithm, each member of the group is cycled over and given an
item until there are no items left. This assumes roughly equal capacity
for each member and aims for even distribution of item counts.
### Response:
def round_robin(members, items):
"""
Default allocator with a round robin approach.
In this algorithm, each member of the group is cycled over and given an
item until there are no items left. This assumes roughly equal capacity
for each member and aims for even distribution of item counts.
"""
allocation = collections.defaultdict(set)
for member, item in zip(itertools.cycle(members), items):
allocation[member].add(item)
return allocation |
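A quick usage sketch with made-up member and item names.

members = ['worker-1', 'worker-2']
items = ['a', 'b', 'c', 'd', 'e']
allocation = round_robin(members, items)
# worker-1 -> {'a', 'c', 'e'}, worker-2 -> {'b', 'd'} (set ordering may vary)
for member, assigned in sorted(allocation.items()):
    print(member, sorted(assigned))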
def makeairplantloop(data, commdct):
"""make the edges for the airloop and the plantloop"""
anode = "epnode"
endnode = "EndNode"
# in plantloop get:
# demand inlet, outlet, branchlist
# supply inlet, outlet, branchlist
plantloops = loops.plantloopfields(data, commdct)
# splitters
# inlet
# outlet1
# outlet2
splitters = loops.splitterfields(data, commdct)
#
# mixer
# outlet
# inlet1
# inlet2
mixers = loops.mixerfields(data, commdct)
#
# supply branchlist
# branch1 -> inlet, outlet
# branch2 -> inlet, outlet
# branch3 -> inlet, outlet
#
# CONNECT INLET OUTLETS
edges = []
# get all branches
branchkey = "branch".upper()
branches = data.dt[branchkey]
branch_i_o = {}
for br in branches:
br_name = br[1]
in_out = loops.branch_inlet_outlet(data, commdct, br_name)
branch_i_o[br_name] = dict(list(zip(["inlet", "outlet"], in_out)))
# for br_name, in_out in branch_i_o.items():
# edges.append(((in_out["inlet"], anode), br_name))
# edges.append((br_name, (in_out["outlet"], anode)))
# instead of doing the branch
# do the content of the branch
edges = makebranchcomponents(data, commdct)
# connect splitter to nodes
for splitter in splitters:
# splitter_inlet = inletbranch.node
splittername = splitter[0]
inletbranchname = splitter[1]
splitter_inlet = branch_i_o[inletbranchname]["outlet"]
# edges = splitter_inlet -> splittername
edges.append(((splitter_inlet, anode), splittername))
# splitter_outlets = outletbranches.nodes
outletbranchnames = [br for br in splitter[2:]]
splitter_outlets = [branch_i_o[br]["inlet"] for br in outletbranchnames]
# edges = [splittername -> outlet for outlet in splitter_outlets]
moreedges = [(splittername,
(outlet, anode)) for outlet in splitter_outlets]
edges = edges + moreedges
for mixer in mixers:
# mixer_outlet = outletbranch.node
mixername = mixer[0]
outletbranchname = mixer[1]
mixer_outlet = branch_i_o[outletbranchname]["inlet"]
# edges = mixername -> mixer_outlet
edges.append((mixername, (mixer_outlet, anode)))
# mixer_inlets = inletbranches.nodes
inletbranchnames = [br for br in mixer[2:]]
mixer_inlets = [branch_i_o[br]["outlet"] for br in inletbranchnames]
# edges = [mixername -> inlet for inlet in mixer_inlets]
moreedges = [((inlet, anode), mixername) for inlet in mixer_inlets]
edges = edges + moreedges
# connect demand and supply side
# for plantloop in plantloops:
# supplyinlet = plantloop[1]
# supplyoutlet = plantloop[2]
# demandinlet = plantloop[4]
# demandoutlet = plantloop[5]
# # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet]
# moreedges = [((supplyoutlet, endnode), (demandinlet, endnode)),
# ((demandoutlet, endnode), (supplyinlet, endnode))]
# edges = edges + moreedges
#
# -----------air loop stuff----------------------
# from s_airloop2.py
# Get the demand and supply nodes from 'airloophvac'
# in airloophvac get:
# get branch, supplyinlet, supplyoutlet, demandinlet, demandoutlet
objkey = "airloophvac".upper()
fieldlists = [["Branch List Name",
"Supply Side Inlet Node Name",
"Demand Side Outlet Node Name",
"Demand Side Inlet Node Names",
"Supply Side Outlet Node Names"]] * loops.objectcount(data, objkey)
airloophvacs = loops.extractfields(data, commdct, objkey, fieldlists)
# airloophvac = airloophvacs[0]
# in AirLoopHVAC:ZoneSplitter:
# get Name, inlet, all outlets
objkey = "AirLoopHVAC:ZoneSplitter".upper()
singlefields = ["Name", "Inlet Node Name"]
fld = "Outlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
zonesplitters = loops.extractfields(data, commdct, objkey, fieldlists)
# in AirLoopHVAC:SupplyPlenum:
# get Name, Zone Name, Zone Node Name, inlet, all outlets
objkey = "AirLoopHVAC:SupplyPlenum".upper()
singlefields = ["Name", "Zone Name", "Zone Node Name", "Inlet Node Name"]
fld = "Outlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
supplyplenums = loops.extractfields(data, commdct, objkey, fieldlists)
# in AirLoopHVAC:ZoneMixer:
# get Name, outlet, all inlets
objkey = "AirLoopHVAC:ZoneMixer".upper()
singlefields = ["Name", "Outlet Node Name"]
fld = "Inlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
zonemixers = loops.extractfields(data, commdct, objkey, fieldlists)
# in AirLoopHVAC:ReturnPlenum:
# get Name, Zone Name, Zone Node Name, outlet, all inlets
objkey = "AirLoopHVAC:ReturnPlenum".upper()
singlefields = ["Name", "Zone Name", "Zone Node Name", "Outlet Node Name"]
fld = "Inlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
returnplenums = loops.extractfields(data, commdct, objkey, fieldlists)
# connect room to each equip in equiplist
# in ZoneHVAC:EquipmentConnections:
# get Name, equiplist, zoneairnode, returnnode
objkey = "ZoneHVAC:EquipmentConnections".upper()
singlefields = ["Zone Name", "Zone Conditioning Equipment List Name",
"Zone Air Node Name", "Zone Return Air Node Name"]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
equipconnections = loops.extractfields(data, commdct, objkey, fieldlists)
# in ZoneHVAC:EquipmentList:
# get Name, all equiptype, all equipnames
objkey = "ZoneHVAC:EquipmentList".upper()
singlefields = ["Name", ]
fieldlist = singlefields
flds = ["Zone Equipment %s Object Type", "Zone Equipment %s Name"]
repeatfields = loops.repeatingfields(data, commdct, objkey, flds)
fieldlist = fieldlist + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
equiplists = loops.extractfields(data, commdct, objkey, fieldlists)
equiplistdct = dict([(ep[0], ep[1:]) for ep in equiplists])
for key, equips in list(equiplistdct.items()):
enames = [equips[i] for i in range(1, len(equips), 2)]
equiplistdct[key] = enames
# adistuunit -> room
# adistuunit <- VAVreheat
# airinlet -> VAVreheat
# in ZoneHVAC:AirDistributionUnit:
# get Name, equiplist, zoneairnode, returnnode
objkey = "ZoneHVAC:AirDistributionUnit".upper()
singlefields = ["Name", "Air Terminal Object Type", "Air Terminal Name"]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
adistuunits = loops.extractfields(data, commdct, objkey, fieldlists)
# code only for AirTerminal:SingleDuct:VAV:Reheat
# get airinletnodes for vavreheats
# in AirTerminal:SingleDuct:VAV:Reheat:
# get Name, airinletnode
adistuinlets = loops.makeadistu_inlets(data, commdct)
alladistu_comps = []
for key in list(adistuinlets.keys()):
objkey = key.upper()
singlefields = ["Name"] + adistuinlets[key]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
adistu_components = loops.extractfields(data, commdct, objkey, fieldlists)
alladistu_comps.append(adistu_components)
# in AirTerminal:SingleDuct:Uncontrolled:
# get Name, airinletnode
objkey = "AirTerminal:SingleDuct:Uncontrolled".upper()
singlefields = ["Name", "Zone Supply Air Node Name"]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
uncontrolleds = loops.extractfields(data, commdct, objkey, fieldlists)
anode = "epnode"
endnode = "EndNode"
# edges = []
# connect demand and supply side
# for airloophvac in airloophvacs:
# supplyinlet = airloophvac[1]
# supplyoutlet = airloophvac[4]
# demandinlet = airloophvac[3]
# demandoutlet = airloophvac[2]
# # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet]
# moreedges = [((supplyoutlet, endnode), (demandinlet, endnode)),
# ((demandoutlet, endnode), (supplyinlet, endnode))]
# edges = edges + moreedges
# connect zonesplitter to nodes
for zonesplitter in zonesplitters:
name = zonesplitter[0]
inlet = zonesplitter[1]
outlets = zonesplitter[2:]
edges.append(((inlet, anode), name))
for outlet in outlets:
edges.append((name, (outlet, anode)))
# connect supplyplenum to nodes
for supplyplenum in supplyplenums:
name = supplyplenum[0]
inlet = supplyplenum[3]
outlets = supplyplenum[4:]
edges.append(((inlet, anode), name))
for outlet in outlets:
edges.append((name, (outlet, anode)))
# connect zonemixer to nodes
for zonemixer in zonemixers:
name = zonemixer[0]
outlet = zonemixer[1]
inlets = zonemixer[2:]
edges.append((name, (outlet, anode)))
for inlet in inlets:
edges.append(((inlet, anode), name))
# connect returnplenums to nodes
for returnplenum in returnplenums:
name = returnplenum[0]
outlet = returnplenum[3]
inlets = returnplenum[4:]
edges.append((name, (outlet, anode)))
for inlet in inlets:
edges.append(((inlet, anode), name))
# connect room to return node
for equipconnection in equipconnections:
zonename = equipconnection[0]
returnnode = equipconnection[-1]
edges.append((zonename, (returnnode, anode)))
# connect equips to room
for equipconnection in equipconnections:
zonename = equipconnection[0]
zequiplistname = equipconnection[1]
for zequip in equiplistdct[zequiplistname]:
edges.append((zequip, zonename))
# adistuunit <- adistu_component
for adistuunit in adistuunits:
unitname = adistuunit[0]
compname = adistuunit[2]
edges.append((compname, unitname))
# airinlet -> adistu_component
for adistu_comps in alladistu_comps:
for adistu_comp in adistu_comps:
name = adistu_comp[0]
for airnode in adistu_comp[1:]:
edges.append(((airnode, anode), name))
# supplyairnode -> uncontrolled
for uncontrolled in uncontrolleds:
name = uncontrolled[0]
airnode = uncontrolled[1]
edges.append(((airnode, anode), name))
# edges = edges + moreedges
return edges | make the edges for the airloop and the plantloop | Below is the instruction that describes the task:
### Input:
make the edges for the airloop and the plantloop
### Response:
def makeairplantloop(data, commdct):
"""make the edges for the airloop and the plantloop"""
anode = "epnode"
endnode = "EndNode"
# in plantloop get:
# demand inlet, outlet, branchlist
# supply inlet, outlet, branchlist
plantloops = loops.plantloopfields(data, commdct)
# splitters
# inlet
# outlet1
# outlet2
splitters = loops.splitterfields(data, commdct)
#
# mixer
# outlet
# inlet1
# inlet2
mixers = loops.mixerfields(data, commdct)
#
# supply branchlist
# branch1 -> inlet, outlet
# branch2 -> inlet, outlet
# branch3 -> inlet, outlet
#
# CONNECT INLET OUTLETS
edges = []
# get all branches
branchkey = "branch".upper()
branches = data.dt[branchkey]
branch_i_o = {}
for br in branches:
br_name = br[1]
in_out = loops.branch_inlet_outlet(data, commdct, br_name)
branch_i_o[br_name] = dict(list(zip(["inlet", "outlet"], in_out)))
# for br_name, in_out in branch_i_o.items():
# edges.append(((in_out["inlet"], anode), br_name))
# edges.append((br_name, (in_out["outlet"], anode)))
# instead of doing the branch
# do the content of the branch
edges = makebranchcomponents(data, commdct)
# connect splitter to nodes
for splitter in splitters:
# splitter_inlet = inletbranch.node
splittername = splitter[0]
inletbranchname = splitter[1]
splitter_inlet = branch_i_o[inletbranchname]["outlet"]
# edges = splitter_inlet -> splittername
edges.append(((splitter_inlet, anode), splittername))
# splitter_outlets = outletbranches.nodes
outletbranchnames = [br for br in splitter[2:]]
splitter_outlets = [branch_i_o[br]["inlet"] for br in outletbranchnames]
# edges = [splittername -> outlet for outlet in splitter_outlets]
moreedges = [(splittername,
(outlet, anode)) for outlet in splitter_outlets]
edges = edges + moreedges
for mixer in mixers:
# mixer_outlet = outletbranch.node
mixername = mixer[0]
outletbranchname = mixer[1]
mixer_outlet = branch_i_o[outletbranchname]["inlet"]
# edges = mixername -> mixer_outlet
edges.append((mixername, (mixer_outlet, anode)))
# mixer_inlets = inletbranches.nodes
inletbranchnames = [br for br in mixer[2:]]
mixer_inlets = [branch_i_o[br]["outlet"] for br in inletbranchnames]
# edges = [mixername -> inlet for inlet in mixer_inlets]
moreedges = [((inlet, anode), mixername) for inlet in mixer_inlets]
edges = edges + moreedges
# connect demand and supply side
# for plantloop in plantloops:
# supplyinlet = plantloop[1]
# supplyoutlet = plantloop[2]
# demandinlet = plantloop[4]
# demandoutlet = plantloop[5]
# # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet]
# moreedges = [((supplyoutlet, endnode), (demandinlet, endnode)),
# ((demandoutlet, endnode), (supplyinlet, endnode))]
# edges = edges + moreedges
#
# -----------air loop stuff----------------------
# from s_airloop2.py
# Get the demand and supply nodes from 'airloophvac'
# in airloophvac get:
# get branch, supplyinlet, supplyoutlet, demandinlet, demandoutlet
objkey = "airloophvac".upper()
fieldlists = [["Branch List Name",
"Supply Side Inlet Node Name",
"Demand Side Outlet Node Name",
"Demand Side Inlet Node Names",
"Supply Side Outlet Node Names"]] * loops.objectcount(data, objkey)
airloophvacs = loops.extractfields(data, commdct, objkey, fieldlists)
# airloophvac = airloophvacs[0]
# in AirLoopHVAC:ZoneSplitter:
# get Name, inlet, all outlets
objkey = "AirLoopHVAC:ZoneSplitter".upper()
singlefields = ["Name", "Inlet Node Name"]
fld = "Outlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
zonesplitters = loops.extractfields(data, commdct, objkey, fieldlists)
# in AirLoopHVAC:SupplyPlenum:
# get Name, Zone Name, Zone Node Name, inlet, all outlets
objkey = "AirLoopHVAC:SupplyPlenum".upper()
singlefields = ["Name", "Zone Name", "Zone Node Name", "Inlet Node Name"]
fld = "Outlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
supplyplenums = loops.extractfields(data, commdct, objkey, fieldlists)
# in AirLoopHVAC:ZoneMixer:
# get Name, outlet, all inlets
objkey = "AirLoopHVAC:ZoneMixer".upper()
singlefields = ["Name", "Outlet Node Name"]
fld = "Inlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
zonemixers = loops.extractfields(data, commdct, objkey, fieldlists)
# in AirLoopHVAC:ReturnPlenum:
# get Name, Zone Name, Zone Node Name, outlet, all inlets
objkey = "AirLoopHVAC:ReturnPlenum".upper()
singlefields = ["Name", "Zone Name", "Zone Node Name", "Outlet Node Name"]
fld = "Inlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
returnplenums = loops.extractfields(data, commdct, objkey, fieldlists)
# connect room to each equip in equiplist
# in ZoneHVAC:EquipmentConnections:
# get Name, equiplist, zoneairnode, returnnode
objkey = "ZoneHVAC:EquipmentConnections".upper()
singlefields = ["Zone Name", "Zone Conditioning Equipment List Name",
"Zone Air Node Name", "Zone Return Air Node Name"]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
equipconnections = loops.extractfields(data, commdct, objkey, fieldlists)
# in ZoneHVAC:EquipmentList:
# get Name, all equiptype, all equipnames
objkey = "ZoneHVAC:EquipmentList".upper()
singlefields = ["Name", ]
fieldlist = singlefields
flds = ["Zone Equipment %s Object Type", "Zone Equipment %s Name"]
repeatfields = loops.repeatingfields(data, commdct, objkey, flds)
fieldlist = fieldlist + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
equiplists = loops.extractfields(data, commdct, objkey, fieldlists)
equiplistdct = dict([(ep[0], ep[1:]) for ep in equiplists])
for key, equips in list(equiplistdct.items()):
enames = [equips[i] for i in range(1, len(equips), 2)]
equiplistdct[key] = enames
# adistuunit -> room
# adistuunit <- VAVreheat
# airinlet -> VAVreheat
# in ZoneHVAC:AirDistributionUnit:
# get Name, equiplist, zoneairnode, returnnode
objkey = "ZoneHVAC:AirDistributionUnit".upper()
singlefields = ["Name", "Air Terminal Object Type", "Air Terminal Name"]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
adistuunits = loops.extractfields(data, commdct, objkey, fieldlists)
# code only for AirTerminal:SingleDuct:VAV:Reheat
# get airinletnodes for vavreheats
# in AirTerminal:SingleDuct:VAV:Reheat:
# get Name, airinletnode
adistuinlets = loops.makeadistu_inlets(data, commdct)
alladistu_comps = []
for key in list(adistuinlets.keys()):
objkey = key.upper()
singlefields = ["Name"] + adistuinlets[key]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
adistu_components = loops.extractfields(data, commdct, objkey, fieldlists)
alladistu_comps.append(adistu_components)
# in AirTerminal:SingleDuct:Uncontrolled:
# get Name, airinletnode
objkey = "AirTerminal:SingleDuct:Uncontrolled".upper()
singlefields = ["Name", "Zone Supply Air Node Name"]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
uncontrolleds = loops.extractfields(data, commdct, objkey, fieldlists)
anode = "epnode"
endnode = "EndNode"
# edges = []
# connect demand and supply side
# for airloophvac in airloophvacs:
# supplyinlet = airloophvac[1]
# supplyoutlet = airloophvac[4]
# demandinlet = airloophvac[3]
# demandoutlet = airloophvac[2]
# # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet]
# moreedges = [((supplyoutlet, endnode), (demandinlet, endnode)),
# ((demandoutlet, endnode), (supplyinlet, endnode))]
# edges = edges + moreedges
# connect zonesplitter to nodes
for zonesplitter in zonesplitters:
name = zonesplitter[0]
inlet = zonesplitter[1]
outlets = zonesplitter[2:]
edges.append(((inlet, anode), name))
for outlet in outlets:
edges.append((name, (outlet, anode)))
# connect supplyplenum to nodes
for supplyplenum in supplyplenums:
name = supplyplenum[0]
inlet = supplyplenum[3]
outlets = supplyplenum[4:]
edges.append(((inlet, anode), name))
for outlet in outlets:
edges.append((name, (outlet, anode)))
# connect zonemixer to nodes
for zonemixer in zonemixers:
name = zonemixer[0]
outlet = zonemixer[1]
inlets = zonemixer[2:]
edges.append((name, (outlet, anode)))
for inlet in inlets:
edges.append(((inlet, anode), name))
# connect returnplenums to nodes
for returnplenum in returnplenums:
name = returnplenum[0]
outlet = returnplenum[3]
inlets = returnplenum[4:]
edges.append((name, (outlet, anode)))
for inlet in inlets:
edges.append(((inlet, anode), name))
# connect room to return node
for equipconnection in equipconnections:
zonename = equipconnection[0]
returnnode = equipconnection[-1]
edges.append((zonename, (returnnode, anode)))
# connect equips to room
for equipconnection in equipconnections:
zonename = equipconnection[0]
zequiplistname = equipconnection[1]
for zequip in equiplistdct[zequiplistname]:
edges.append((zequip, zonename))
# adistuunit <- adistu_component
for adistuunit in adistuunits:
unitname = adistuunit[0]
compname = adistuunit[2]
edges.append((compname, unitname))
# airinlet -> adistu_component
for adistu_comps in alladistu_comps:
for adistu_comp in adistu_comps:
name = adistu_comp[0]
for airnode in adistu_comp[1:]:
edges.append(((airnode, anode), name))
# supplyairnode -> uncontrolled
for uncontrolled in uncontrolleds:
name = uncontrolled[0]
airnode = uncontrolled[1]
edges.append(((airnode, anode), name))
# edges = edges + moreedges
return edges |
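The edge list pairs component names with (node, "epnode") tuples so node vertices and component vertices stay distinct in the loop graph. A minimal standalone sketch of the splitter wiring step above, using made-up branch and node names instead of the eppy loops helpers:
anode = "epnode"
# toy inlet/outlet map, keyed by branch name (invented values)
branch_i_o = {
    "supply inlet branch": {"inlet": "n1", "outlet": "n2"},
    "branch a": {"inlet": "n3", "outlet": "n4"},
    "branch b": {"inlet": "n5", "outlet": "n6"},
}
# splitter record layout: [name, inlet branch, outlet branch, outlet branch, ...]
splitter = ["splitter 1", "supply inlet branch", "branch a", "branch b"]
edges = []
splittername, inletbranchname = splitter[0], splitter[1]
edges.append(((branch_i_o[inletbranchname]["outlet"], anode), splittername))
for br in splitter[2:]:
    edges.append((splittername, (branch_i_o[br]["inlet"], anode)))
print(edges)
# [(('n2', 'epnode'), 'splitter 1'), ('splitter 1', ('n3', 'epnode')),
#  ('splitter 1', ('n5', 'epnode'))]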
def _convert_to_array(x, size, name):
"""Check length of array or convert scalar to array.
Check to see if `x` has the given length `size`. If this is true
then return the Numpy array equivalent of `x`. If not then raise
ValueError, using `name` as an identification. If len(x) raises
TypeError, then assume it is a scalar and create a Numpy array of
length `size`. Each item of this array will have the same value as `x`.
"""
try:
l = len(x)
if l != size:
raise ValueError(
"{0} must be scalar or of length {1}".format(
name, size))
except TypeError:
# Only one item
xa = np.array([x] * size) # Each item is a diff. object.
else:
xa = np.array(x)
return xa | Check length of array or convert scalar to array.
Check to see if `x` has the given length `size`. If this is true
then return the Numpy array equivalent of `x`. If not then raise
ValueError, using `name` as an identification. If len(x) raises
TypeError, then assume it is a scalar and create a Numpy array of
length `size`. Each item of this array will have the same value as `x`. | Below is the instruction that describes the task:
### Input:
Check length of array or convert scalar to array.
Check to see if `x` has the given length `size`. If this is true
then return the Numpy array equivalent of `x`. If not then raise
ValueError, using `name` as an identification. If len(x) raises
TypeError, then assume it is a scalar and create a Numpy array of
length `size`. Each item of this array will have the same value as `x`.
### Response:
def _convert_to_array(x, size, name):
"""Check length of array or convert scalar to array.
Check to see if `x` has the given length `size`. If this is true
then return the Numpy array equivalent of `x`. If not then raise
ValueError, using `name` as an identification. If len(x) raises
TypeError, then assume it is a scalar and create a Numpy array of
length `size`. Each item of this array will have the same value as `x`.
"""
try:
l = len(x)
if l != size:
raise ValueError(
"{0} must be scalar or of length {1}".format(
name, size))
except TypeError:
# Only one item
xa = np.array([x] * size) # Each item is a diff. object.
else:
xa = np.array(x)
return xa |
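A short usage sketch of the helper above (standalone copy so it runs on its own; only numpy is required, and the call values are invented):
import numpy as np

def _convert_to_array(x, size, name):
    try:
        if len(x) != size:  # sequences must match the requested size
            raise ValueError("{0} must be scalar or of length {1}".format(name, size))
    except TypeError:       # no len(): treat as a scalar and repeat it
        return np.array([x] * size)
    return np.array(x)

print(_convert_to_array(2.5, 3, "mass"))              # [2.5 2.5 2.5]
print(_convert_to_array([1.0, 2.0, 3.0], 3, "mass"))  # [1. 2. 3.]
try:
    _convert_to_array([1.0, 2.0], 3, "mass")
except ValueError as exc:
    print(exc)                                        # mass must be scalar or of length 3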
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high()) | .. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True | Below is the instruction that describes the task:
### Input:
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
### Response:
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high()) |
def _iter_descendants_preorder(self, is_leaf_fn=None):
""" Iterator over all descendant nodes. """
to_visit = deque()
node = self
while node is not None:
yield node
if not is_leaf_fn or not is_leaf_fn(node):
to_visit.extendleft(reversed(node.children))
try:
node = to_visit.popleft()
except:
node = None | Iterator over all descendant nodes. | Below is the instruction that describes the task:
### Input:
Iterator over all descendant nodes.
### Response:
def _iter_descendants_preorder(self, is_leaf_fn=None):
""" Iterator over all descendant nodes. """
to_visit = deque()
node = self
while node is not None:
yield node
if not is_leaf_fn or not is_leaf_fn(node):
to_visit.extendleft(reversed(node.children))
try:
node = to_visit.popleft()
except:
node = None |
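A self-contained sketch of the same preorder walk, using a throwaway Node class in place of the original tree type (names and tree shape below are invented):
from collections import deque

class Node:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)

def preorder(node):
    to_visit = deque()
    while node is not None:
        yield node
        to_visit.extendleft(reversed(node.children))
        node = to_visit.popleft() if to_visit else None

root = Node("a", [Node("b", [Node("d"), Node("e")]), Node("c")])
print([n.name for n in preorder(root)])  # ['a', 'b', 'd', 'e', 'c']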
def print_signals_and_slots(self):
"""
List all active Slots and Signal.
Credits to: http://visitusers.org/index.php?title=PySide_Recipes#Debugging
"""
for i in xrange(self.metaObject().methodCount()):
m = self.metaObject().method(i)
if m.methodType() == QMetaMethod.MethodType.Signal:
print("SIGNAL: sig=", m.signature(), "hooked to nslots=", self.receivers(SIGNAL(m.signature())))
elif m.methodType() == QMetaMethod.MethodType.Slot:
print("SLOT: sig=", m.signature()) | List all active Slots and Signal.
Credits to: http://visitusers.org/index.php?title=PySide_Recipes#Debugging | Below is the instruction that describes the task:
### Input:
List all active Slots and Signal.
Credits to: http://visitusers.org/index.php?title=PySide_Recipes#Debugging
### Response:
def print_signals_and_slots(self):
"""
List all active Slots and Signal.
Credits to: http://visitusers.org/index.php?title=PySide_Recipes#Debugging
"""
for i in xrange(self.metaObject().methodCount()):
m = self.metaObject().method(i)
if m.methodType() == QMetaMethod.MethodType.Signal:
print("SIGNAL: sig=", m.signature(), "hooked to nslots=", self.receivers(SIGNAL(m.signature())))
elif m.methodType() == QMetaMethod.MethodType.Slot:
print("SLOT: sig=", m.signature()) |
def find_doi(self, curr_dict):
"""
Recursively search the file for the DOI id. More taxing, but more flexible when dictionary structuring isn't absolute
:param dict curr_dict: Current dictionary being searched
:return dict bool: Recursive - Current dictionary, False flag that DOI was not found
:return str bool: Final - DOI id, True flag that DOI was found
"""
try:
if 'id' in curr_dict:
return curr_dict['id'], True
elif isinstance(curr_dict, list):
for i in curr_dict:
return self.find_doi(i)
elif isinstance(curr_dict, dict):
for k, v in curr_dict.items():
if k == 'identifier':
return self.find_doi(v)
return curr_dict, False
else:
return curr_dict, False
# If the ID key doesn't exist, then return the original dict with a flag
except TypeError:
return curr_dict, False | Recursively search the file for the DOI id. More taxing, but more flexible when dictionary structuring isn't absolute
:param dict curr_dict: Current dictionary being searched
:return dict bool: Recursive - Current dictionary, False flag that DOI was not found
:return str bool: Final - DOI id, True flag that DOI was found | Below is the instruction that describes the task:
### Input:
Recursively search the file for the DOI id. More taxing, but more flexible when dictionary structuring isn't absolute
:param dict curr_dict: Current dictionary being searched
:return dict bool: Recursive - Current dictionary, False flag that DOI was not found
:return str bool: Final - DOI id, True flag that DOI was found
### Response:
def find_doi(self, curr_dict):
"""
Recursively search the file for the DOI id. More taxing, but more flexible when dictionary structuring isn't absolute
:param dict curr_dict: Current dictionary being searched
:return dict bool: Recursive - Current dictionary, False flag that DOI was not found
:return str bool: Final - DOI id, True flag that DOI was found
"""
try:
if 'id' in curr_dict:
return curr_dict['id'], True
elif isinstance(curr_dict, list):
for i in curr_dict:
return self.find_doi(i)
elif isinstance(curr_dict, dict):
for k, v in curr_dict.items():
if k == 'identifier':
return self.find_doi(v)
return curr_dict, False
else:
return curr_dict, False
# If the ID key doesn't exist, then return the original dict with a flag
except TypeError:
return curr_dict, False |
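A usage sketch with a toy metadata record (standalone copy of the method with self dropped and indentation reconstructed; the nesting below is invented for illustration):
def find_doi(curr_dict):
    try:
        if 'id' in curr_dict:
            return curr_dict['id'], True
        elif isinstance(curr_dict, list):
            for i in curr_dict:
                return find_doi(i)
        elif isinstance(curr_dict, dict):
            for k, v in curr_dict.items():
                if k == 'identifier':
                    return find_doi(v)
            return curr_dict, False
        else:
            return curr_dict, False
    except TypeError:
        return curr_dict, False

record = {"identifier": [{"type": "doi", "id": "10.1000/xyz123"}]}
print(find_doi(record))          # ('10.1000/xyz123', True)
print(find_doi({"title": "x"}))  # ({'title': 'x'}, False)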
def write(self, descrs):
"""
Convert descriptions into names
"""
# example: '(poe-[\d\.]+):float32' -> 'poe-[\d\.]+'
names = []
for descr in descrs:
mo = re.match(self.long_regex, descr)
if mo:
names.append(mo.group(mo.lastindex) + descr[mo.end():])
else:
names.append(descr)
return names | Convert descriptions into names | Below is the instruction that describes the task:
### Input:
Convert descriptions into names
### Response:
def write(self, descrs):
"""
Convert descriptions into names
"""
# example: '(poe-[\d\.]+):float32' -> 'poe-[\d\.]+'
names = []
for descr in descrs:
mo = re.match(self.long_regex, descr)
if mo:
names.append(mo.group(mo.lastindex) + descr[mo.end():])
else:
names.append(descr)
return names |
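A sketch of the same rewrite loop with an assumed long_regex (hypothetical pattern; the real one lives on the writer object and is not shown in this record):
import re

long_regex = r'\((.+?)\):\w+'   # assumed: strip a ':<dtype>' suffix, keep the named group
descrs = [r'(poe-[\d\.]+):float32', 'plain-name']
names = []
for descr in descrs:
    mo = re.match(long_regex, descr)
    if mo:
        names.append(mo.group(mo.lastindex) + descr[mo.end():])
    else:
        names.append(descr)
print(names)  # ['poe-[\\d\\.]+', 'plain-name']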
def ip_hide_ext_community_list_holder_extcommunity_list_ext_community_expr(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
hide_ext_community_list_holder = ET.SubElement(ip, "hide-ext-community-list-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
extcommunity_list = ET.SubElement(hide_ext_community_list_holder, "extcommunity-list")
extcommunity_list_num_key = ET.SubElement(extcommunity_list, "extcommunity-list-num")
extcommunity_list_num_key.text = kwargs.pop('extcommunity_list_num')
ext_community_expr = ET.SubElement(extcommunity_list, "ext-community-expr")
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ip_hide_ext_community_list_holder_extcommunity_list_ext_community_expr(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
hide_ext_community_list_holder = ET.SubElement(ip, "hide-ext-community-list-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
extcommunity_list = ET.SubElement(hide_ext_community_list_holder, "extcommunity-list")
extcommunity_list_num_key = ET.SubElement(extcommunity_list, "extcommunity-list-num")
extcommunity_list_num_key.text = kwargs.pop('extcommunity_list_num')
ext_community_expr = ET.SubElement(extcommunity_list, "ext-community-expr")
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def writeCommit(self, varBind, **context):
"""Commit new value of the Managed Object Instance.
Implements the second of the multi-step workflow of the SNMP SET
command processing (:RFC:`1905#section-4.2.5`).
The goal of the second phase is to actually modify the requested Managed
Object Instance. When multiple Managed Objects Instances are modified at
once (likely coming all in one SNMP PDU), each of them has to run through
the second (*commit*) phase successfully for the system to transition to
the third (*cleanup*) phase. If any single *commit* step fails, the system
transitions into the *undo* state for each of Managed Objects Instances
being processed at once.
The role of this object in the MIB tree is non-terminal. It does not
access the actual Managed Object Instance, but just traverses one level
down the MIB tree and hands off the query to the underlying objects.
Parameters
----------
varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
new Managed Object Instance value to set
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
Notes
-----
The callback functions (e.g. `cbFun`) have the same signature as this
method where `varBind` contains the new Managed Object Instance value.
In case of an error, the `error` key in the `context` dict will contain
an exception object.
"""
name, val = varBind
(debug.logger & debug.FLAG_INS and
debug.logger('%s: writeCommit(%s, %r)' % (self, name, val)))
cbFun = context['cbFun']
instances = context['instances'].setdefault(self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
idx = context['idx']
if idx in instances[self.ST_CREATE]:
self.createCommit(varBind, **context)
return
if idx in instances[self.ST_DESTROY]:
self.destroyCommit(varBind, **context)
return
try:
node = self.getBranch(name, **context)
except (error.NoSuchInstanceError, error.NoSuchObjectError) as exc:
cbFun(varBind, **dict(context, error=exc))
else:
node.writeCommit(varBind, **context) | Commit new value of the Managed Object Instance.
Implements the second of the multi-step workflow of the SNMP SET
command processing (:RFC:`1905#section-4.2.5`).
The goal of the second phase is to actually modify the requested Managed
Object Instance. When multiple Managed Objects Instances are modified at
once (likely coming all in one SNMP PDU), each of them has to run through
the second (*commit*) phase successfully for the system to transition to
the third (*cleanup*) phase. If any single *commit* step fails, the system
transitions into the *undo* state for each of Managed Objects Instances
being processed at once.
The role of this object in the MIB tree is non-terminal. It does not
access the actual Managed Object Instance, but just traverses one level
down the MIB tree and hands off the query to the underlying objects.
Parameters
----------
varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
new Managed Object Instance value to set
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
Notes
-----
The callback functions (e.g. `cbFun`) have the same signature as this
method where `varBind` contains the new Managed Object Instance value.
In case of an error, the `error` key in the `context` dict will contain
an exception object. | Below is the instruction that describes the task:
### Input:
Commit new value of the Managed Object Instance.
Implements the second of the multi-step workflow of the SNMP SET
command processing (:RFC:`1905#section-4.2.5`).
The goal of the second phase is to actually modify the requested Managed
Object Instance. When multiple Managed Objects Instances are modified at
once (likely coming all in one SNMP PDU), each of them has to run through
the second (*commit*) phase successfully for the system to transition to
the third (*cleanup*) phase. If any single *commit* step fails, the system
transitions into the *undo* state for each of Managed Objects Instances
being processed at once.
The role of this object in the MIB tree is non-terminal. It does not
access the actual Managed Object Instance, but just traverses one level
down the MIB tree and hands off the query to the underlying objects.
Parameters
----------
varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
new Managed Object Instance value to set
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
Notes
-----
The callback functions (e.g. `cbFun`) have the same signature as this
method where `varBind` contains the new Managed Object Instance value.
In case of an error, the `error` key in the `context` dict will contain
an exception object.
### Response:
def writeCommit(self, varBind, **context):
"""Commit new value of the Managed Object Instance.
Implements the second of the multi-step workflow of the SNMP SET
command processing (:RFC:`1905#section-4.2.5`).
The goal of the second phase is to actually modify the requested Managed
Object Instance. When multiple Managed Objects Instances are modified at
once (likely coming all in one SNMP PDU), each of them has to run through
the second (*commit*) phase successfully for the system to transition to
the third (*cleanup*) phase. If any single *commit* step fails, the system
transitions into the *undo* state for each of Managed Objects Instances
being processed at once.
The role of this object in the MIB tree is non-terminal. It does not
access the actual Managed Object Instance, but just traverses one level
down the MIB tree and hands off the query to the underlying objects.
Parameters
----------
varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
new Managed Object Instance value to set
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
Notes
-----
The callback functions (e.g. `cbFun`) have the same signature as this
method where `varBind` contains the new Managed Object Instance value.
In case of an error, the `error` key in the `context` dict will contain
an exception object.
"""
name, val = varBind
(debug.logger & debug.FLAG_INS and
debug.logger('%s: writeCommit(%s, %r)' % (self, name, val)))
cbFun = context['cbFun']
instances = context['instances'].setdefault(self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
idx = context['idx']
if idx in instances[self.ST_CREATE]:
self.createCommit(varBind, **context)
return
if idx in instances[self.ST_DESTROY]:
self.destroyCommit(varBind, **context)
return
try:
node = self.getBranch(name, **context)
except (error.NoSuchInstanceError, error.NoSuchObjectError) as exc:
cbFun(varBind, **dict(context, error=exc))
else:
node.writeCommit(varBind, **context) |
def handleError(self, test, err, capt=None):
"""
If the database plugin is not present, we have to handle capturing
"errors" that shouldn't be reported as such in base.
"""
if not hasattr(test.test, "testcase_guid"):
if err[0] == errors.BlockedTest:
raise SkipTest(err[1])
return True
elif err[0] == errors.DeprecatedTest:
raise SkipTest(err[1])
return True
elif err[0] == errors.SkipTest:
raise SkipTest(err[1])
return True | If the database plugin is not present, we have to handle capturing
"errors" that shouldn't be reported as such in base. | Below is the the instruction that describes the task:
### Input:
If the database plugin is not present, we have to handle capturing
"errors" that shouldn't be reported as such in base.
### Response:
def handleError(self, test, err, capt=None):
"""
If the database plugin is not present, we have to handle capturing
"errors" that shouldn't be reported as such in base.
"""
if not hasattr(test.test, "testcase_guid"):
if err[0] == errors.BlockedTest:
raise SkipTest(err[1])
return True
elif err[0] == errors.DeprecatedTest:
raise SkipTest(err[1])
return True
elif err[0] == errors.SkipTest:
raise SkipTest(err[1])
return True |
def request(self, method, url, **kwargs):
"""Override request method disabling verify on token renewal if disabled on session."""
if not url.startswith('https'):
url = '{}{}'.format(self.args.tc_api_path, url)
return super(TcExSession, self).request(method, url, **kwargs) | Override request method disabling verify on token renewal if disabled on session. | Below is the instruction that describes the task:
### Input:
Override request method disabling verify on token renewal if disabled on session.
### Response:
def request(self, method, url, **kwargs):
"""Override request method disabling verify on token renewal if disabled on session."""
if not url.startswith('https'):
url = '{}{}'.format(self.args.tc_api_path, url)
return super(TcExSession, self).request(method, url, **kwargs) |
def build_clusters(data, sample, maxindels):
"""
Combines information from .utemp and .htemp files to create .clust files,
which contain un-aligned clusters. Hits to seeds are only kept in the
cluster if the number of internal indels is less than 'maxindels'.
By default, we set maxindels=6 for this step (within-sample clustering).
"""
## If reference assembly then here we're clustering the unmapped reads
if "reference" in data.paramsdict["assembly_method"]:
derepfile = os.path.join(data.dirs.edits, sample.name+"-refmap_derep.fastq")
else:
derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
## i/o vsearch files
uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
usort = os.path.join(data.dirs.clusts, sample.name+".utemp.sort")
hhandle = os.path.join(data.dirs.clusts, sample.name+".htemp")
## create an output file to write clusters to
sample.files.clusters = os.path.join(data.dirs.clusts, sample.name+".clust.gz")
clustsout = gzip.open(sample.files.clusters, 'wb')
## Sort the uhandle file so we can read through matches efficiently
cmd = ["sort", "-k", "2", uhandle, "-o", usort]
proc = sps.Popen(cmd, close_fds=True)
_ = proc.communicate()[0]
## load ALL derep reads into a dictionary (this can be a few GB of RAM)
## and is larger if names are larger. We are grabbing two lines at a time.
alldereps = {}
with open(derepfile, 'rb') as ioderep:
dereps = itertools.izip(*[iter(ioderep)]*2)
for namestr, seq in dereps:
nnn, sss = [i.strip() for i in namestr, seq]
alldereps[nnn[1:]] = sss
## store observed seeds (this could count up to >million in bad data sets)
seedsseen = set()
## Iterate through the usort file grabbing matches to build clusters
with open(usort, 'rb') as insort:
## iterator, seed null, seqlist null
isort = iter(insort)
lastseed = 0
fseqs = []
seqlist = []
seqsize = 0
while 1:
## grab the next line
try:
hit, seed, _, ind, ori, _ = isort.next().strip().split()
LOGGER.debug(">{} {} {}".format(hit, seed, ori, seq))
except StopIteration:
break
## same seed, append match
if seed != lastseed:
seedsseen.add(seed)
## store the last cluster (fseq), count it, and clear fseq
if fseqs:
## sort fseqs by derep after pulling out the seed
fseqs = [fseqs[0]] + sorted(fseqs[1:], key=lambda x: \
int(x.split(";size=")[1].split(";")[0]), reverse=True)
seqlist.append("\n".join(fseqs))
seqsize += 1
fseqs = []
## occasionally write/dump stored clusters to file and clear mem
if not seqsize % 10000:
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## reset list and counter
seqlist = []
## store the new seed on top of fseq list
fseqs.append(">{}*\n{}".format(seed, alldereps[seed]))
lastseed = seed
## add match to the seed
## revcomp if orientation is reversed (comp preserves nnnn)
if ori == "-":
seq = comp(alldereps[hit])[::-1]
else:
seq = alldereps[hit]
## only save if not too many indels
if int(ind) <= maxindels:
fseqs.append(">{}{}\n{}".format(hit, ori, seq))
else:
LOGGER.info("filtered by maxindels: %s %s", ind, seq)
## write whatever is left over to the clusts file
if fseqs:
seqlist.append("\n".join(fseqs))
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## now write the seeds that had no hits. Make dict from htemp
with open(hhandle, 'rb') as iotemp:
nohits = itertools.izip(*[iter(iotemp)]*2)
seqlist = []
seqsize = 0
while 1:
try:
nnn, _ = [i.strip() for i in nohits.next()]
except StopIteration:
break
## occasionally write to file
if not seqsize % 10000:
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## reset list and counter
seqlist = []
## append to list if new seed
if nnn[1:] not in seedsseen:
seqlist.append("{}*\n{}".format(nnn, alldereps[nnn[1:]]))
seqsize += 1
## write whatever is left over to the clusts file
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist))#+"\n//\n//\n")
## close the file handle
clustsout.close()
del alldereps | Combines information from .utemp and .htemp files to create .clust files,
which contain un-aligned clusters. Hits to seeds are only kept in the
cluster if the number of internal indels is less than 'maxindels'.
By default, we set maxindels=6 for this step (within-sample clustering). | Below is the instruction that describes the task:
### Input:
Combines information from .utemp and .htemp files to create .clust files,
which contain un-aligned clusters. Hits to seeds are only kept in the
cluster if the number of internal indels is less than 'maxindels'.
By default, we set maxindels=6 for this step (within-sample clustering).
### Response:
def build_clusters(data, sample, maxindels):
"""
Combines information from .utemp and .htemp files to create .clust files,
which contain un-aligned clusters. Hits to seeds are only kept in the
cluster if the number of internal indels is less than 'maxindels'.
By default, we set maxindels=6 for this step (within-sample clustering).
"""
## If reference assembly then here we're clustering the unmapped reads
if "reference" in data.paramsdict["assembly_method"]:
derepfile = os.path.join(data.dirs.edits, sample.name+"-refmap_derep.fastq")
else:
derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
## i/o vsearch files
uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
usort = os.path.join(data.dirs.clusts, sample.name+".utemp.sort")
hhandle = os.path.join(data.dirs.clusts, sample.name+".htemp")
## create an output file to write clusters to
sample.files.clusters = os.path.join(data.dirs.clusts, sample.name+".clust.gz")
clustsout = gzip.open(sample.files.clusters, 'wb')
## Sort the uhandle file so we can read through matches efficiently
cmd = ["sort", "-k", "2", uhandle, "-o", usort]
proc = sps.Popen(cmd, close_fds=True)
_ = proc.communicate()[0]
## load ALL derep reads into a dictionary (this can be a few GB of RAM)
## and is larger if names are larger. We are grabbing two lines at a time.
alldereps = {}
with open(derepfile, 'rb') as ioderep:
dereps = itertools.izip(*[iter(ioderep)]*2)
for namestr, seq in dereps:
nnn, sss = [i.strip() for i in namestr, seq]
alldereps[nnn[1:]] = sss
## store observed seeds (this could count up to >million in bad data sets)
seedsseen = set()
## Iterate through the usort file grabbing matches to build clusters
with open(usort, 'rb') as insort:
## iterator, seed null, seqlist null
isort = iter(insort)
lastseed = 0
fseqs = []
seqlist = []
seqsize = 0
while 1:
## grab the next line
try:
hit, seed, _, ind, ori, _ = isort.next().strip().split()
LOGGER.debug(">{} {} {}".format(hit, seed, ori, seq))
except StopIteration:
break
## same seed, append match
if seed != lastseed:
seedsseen.add(seed)
## store the last cluster (fseq), count it, and clear fseq
if fseqs:
## sort fseqs by derep after pulling out the seed
fseqs = [fseqs[0]] + sorted(fseqs[1:], key=lambda x: \
int(x.split(";size=")[1].split(";")[0]), reverse=True)
seqlist.append("\n".join(fseqs))
seqsize += 1
fseqs = []
## occasionally write/dump stored clusters to file and clear mem
if not seqsize % 10000:
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## reset list and counter
seqlist = []
## store the new seed on top of fseq list
fseqs.append(">{}*\n{}".format(seed, alldereps[seed]))
lastseed = seed
## add match to the seed
## revcomp if orientation is reversed (comp preserves nnnn)
if ori == "-":
seq = comp(alldereps[hit])[::-1]
else:
seq = alldereps[hit]
## only save if not too many indels
if int(ind) <= maxindels:
fseqs.append(">{}{}\n{}".format(hit, ori, seq))
else:
LOGGER.info("filtered by maxindels: %s %s", ind, seq)
## write whatever is left over to the clusts file
if fseqs:
seqlist.append("\n".join(fseqs))
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## now write the seeds that had no hits. Make dict from htemp
with open(hhandle, 'rb') as iotemp:
nohits = itertools.izip(*[iter(iotemp)]*2)
seqlist = []
seqsize = 0
while 1:
try:
nnn, _ = [i.strip() for i in nohits.next()]
except StopIteration:
break
## occasionally write to file
if not seqsize % 10000:
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## reset list and counter
seqlist = []
## append to list if new seed
if nnn[1:] not in seedsseen:
seqlist.append("{}*\n{}".format(nnn, alldereps[nnn[1:]]))
seqsize += 1
## write whatever is left over to the clusts file
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist))#+"\n//\n//\n")
## close the file handle
clustsout.close()
del alldereps |
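A toy, in-memory sketch of the core grouping step above: hits sorted by seed are folded into clusters, '-' hits are reverse-complemented, and hits with too many indels are dropped (rows and sequences are fake; the real input is vsearch .utemp output):
usort_rows = [
    "readA seed1 99.0 0 + *",
    "readB seed1 97.5 2 - *",
    "readC seed2 98.1 9 + *",   # dropped: indels > maxindels
]
alldereps = {"seed1": "AAAA", "seed2": "CCCC",
             "readA": "AAAT", "readB": "TTTA", "readC": "CCCG"}
maxindels = 6
comp = lambda s: s.translate(str.maketrans("ACGT", "TGCA"))
clusters, fseqs, lastseed = [], [], None
for row in usort_rows:
    hit, seed, _, ind, ori, _ = row.split()
    if seed != lastseed:               # a new seed starts a new cluster
        if fseqs:
            clusters.append(fseqs)
        fseqs = [">{}*\n{}".format(seed, alldereps[seed])]
        lastseed = seed
    seq = comp(alldereps[hit])[::-1] if ori == "-" else alldereps[hit]
    if int(ind) <= maxindels:
        fseqs.append(">{}{}\n{}".format(hit, ori, seq))
if fseqs:
    clusters.append(fseqs)
print(len(clusters))   # 2
print(clusters[0])     # seed1 header plus the readA and readB entries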
def getargspecs(func):
"""Bridges inspect.getargspec and inspect.getfullargspec.
Automatically selects the proper one depending on the current Python version.
Automatically bypasses wrappers from typechecked- and override-decorators.
"""
if func is None:
raise TypeError('None is not a Python function')
if hasattr(func, 'ch_func'):
return getargspecs(func.ch_func)
elif hasattr(func, 'ov_func'):
return getargspecs(func.ov_func)
if hasattr(inspect, 'getfullargspec'):
return inspect.getfullargspec(func) # Python 3
else:
return inspect.getargspec(func) | Bridges inspect.getargspec and inspect.getfullargspec.
Automatically selects the proper one depending on the current Python version.
Automatically bypasses wrappers from typechecked- and override-decorators. | Below is the instruction that describes the task:
### Input:
Bridges inspect.getargspec and inspect.getfullargspec.
Automatically selects the proper one depending on the current Python version.
Automatically bypasses wrappers from typechecked- and override-decorators.
### Response:
def getargspecs(func):
"""Bridges inspect.getargspec and inspect.getfullargspec.
Automatically selects the proper one depending on the current Python version.
Automatically bypasses wrappers from typechecked- and override-decorators.
"""
if func is None:
raise TypeError('None is not a Python function')
if hasattr(func, 'ch_func'):
return getargspecs(func.ch_func)
elif hasattr(func, 'ov_func'):
return getargspecs(func.ov_func)
if hasattr(inspect, 'getfullargspec'):
return inspect.getfullargspec(func) # Python 3
else:
return inspect.getargspec(func) |
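A short sketch of what the Python 3 branch returns (inspect.getfullargspec called directly, with no decorator wrappers involved):
import inspect

def greet(name, punctuation="!"):
    return name + punctuation

spec = inspect.getfullargspec(greet)
print(spec.args)      # ['name', 'punctuation']
print(spec.defaults)  # ('!',)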
def wait(self, limit=None):
"""Go into consume mode.
Mostly for testing purposes and simple programs, you probably
want :meth:`iterconsume` or :meth:`iterqueue` instead.
This runs an infinite loop, processing all incoming messages
using :meth:`receive` to apply the message to all registered
callbacks.
"""
it = self.iterconsume(limit)
while True:
it.next() | Go into consume mode.
Mostly for testing purposes and simple programs, you probably
want :meth:`iterconsume` or :meth:`iterqueue` instead.
This runs an infinite loop, processing all incoming messages
using :meth:`receive` to apply the message to all registered
callbacks. | Below is the instruction that describes the task:
### Input:
Go into consume mode.
Mostly for testing purposes and simple programs, you probably
want :meth:`iterconsume` or :meth:`iterqueue` instead.
This runs an infinite loop, processing all incoming messages
using :meth:`receive` to apply the message to all registered
callbacks.
### Response:
def wait(self, limit=None):
"""Go into consume mode.
Mostly for testing purposes and simple programs, you probably
want :meth:`iterconsume` or :meth:`iterqueue` instead.
This runs an infinite loop, processing all incoming messages
using :meth:`receive` to apply the message to all registered
callbacks.
"""
it = self.iterconsume(limit)
while True:
it.next() |
def is_http_running_on(port):
""" Check if an http server runs on a given port.
Args:
The port to check.
Returns:
True if it is used by an http server. False otherwise.
"""
try:
conn = httplib.HTTPConnection('127.0.0.1:' + str(port))
conn.connect()
conn.close()
return True
except Exception:
return False | Check if an http server runs on a given port.
Args:
The port to check.
Returns:
True if it is used by an http server. False otherwise. | Below is the instruction that describes the task:
### Input:
Check if an http server runs on a given port.
Args:
The port to check.
Returns:
True if it is used by an http server. False otherwise.
### Response:
def is_http_running_on(port):
""" Check if an http server runs on a given port.
Args:
The port to check.
Returns:
True if it is used by an http server. False otherwise.
"""
try:
conn = httplib.HTTPConnection('127.0.0.1:' + str(port))
conn.connect()
conn.close()
return True
except Exception:
return False |
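A self-contained Python 3 sketch of the same probe (http.client stands in for the Python 2 httplib used above), exercised against a throwaway local server:
import http.client
import threading
from http.server import HTTPServer, SimpleHTTPRequestHandler

def is_http_running_on(port):
    try:
        conn = http.client.HTTPConnection('127.0.0.1:' + str(port))
        conn.connect()
        conn.close()
        return True
    except Exception:
        return False

server = HTTPServer(("127.0.0.1", 0), SimpleHTTPRequestHandler)
port = server.server_address[1]
threading.Thread(target=server.serve_forever, daemon=True).start()
print(is_http_running_on(port))   # True
server.shutdown()
server.server_close()
print(is_http_running_on(port))   # False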
def acquire(self, replace_stale=False, force=False):
"""
Acquire the lock.
A lock can be claimed if any of these conditions are true:
1. The lock is unheld by anyone.
2. The lock is held but the 'force' argument is set.
3. The lock is held by the current process.
Arguments:
replace_stale (bool, optional): If true, lock can be acquired from
stale processes. A stale process is one which currently owns
the parent lock, but no process with that PID is alive.
force (bool, optional): If true, ignore any existing
lock. If false, fail if lock already claimed.
Returns:
LockFile: self.
Raises:
UnableToAcquireLockError: If the lock is already claimed
(not raised if force option is used).
"""
def _create_lock():
LockFile.write(self.path, os.getpid(), time.time())
if self.islocked:
lock_owner_pid = self.pid
if self.owned_by_self:
pass # don't replace existing lock
elif force:
_create_lock()
elif replace_stale and not system.isprocess(lock_owner_pid):
_create_lock()
else:
raise UnableToAcquireLockError(self)
else: # new lock
_create_lock()
return self | Acquire the lock.
A lock can be claimed if any of these conditions are true:
1. The lock is unheld by anyone.
2. The lock is held but the 'force' argument is set.
3. The lock is held by the current process.
Arguments:
replace_stale (bool, optional): If true, lock can be acquired from
stale processes. A stale process is one which currently owns
the parent lock, but no process with that PID is alive.
force (bool, optional): If true, ignore any existing
lock. If false, fail if lock already claimed.
Returns:
LockFile: self.
Raises:
UnableToAcquireLockError: If the lock is already claimed
(not raised if force option is used). | Below is the instruction that describes the task:
### Input:
Acquire the lock.
A lock can be claimed if any of these conditions are true:
1. The lock is unheld by anyone.
2. The lock is held but the 'force' argument is set.
3. The lock is held by the current process.
Arguments:
replace_stale (bool, optional): If true, lock can be acquired from
stale processes. A stale process is one which currently owns
the parent lock, but no process with that PID is alive.
force (bool, optional): If true, ignore any existing
lock. If false, fail if lock already claimed.
Returns:
LockFile: self.
Raises:
UnableToAcquireLockError: If the lock is already claimed
(not raised if force option is used).
### Response:
def acquire(self, replace_stale=False, force=False):
"""
Acquire the lock.
A lock can be claimed if any of these conditions are true:
1. The lock is unheld by anyone.
2. The lock is held but the 'force' argument is set.
3. The lock is held by the current process.
Arguments:
replace_stale (bool, optional): If true, lock can be acquired from
stale processes. A stale process is one which currently owns
the parent lock, but no process with that PID is alive.
force (bool, optional): If true, ignore any existing
lock. If false, fail if lock already claimed.
Returns:
LockFile: self.
Raises:
UnableToAcquireLockError: If the lock is already claimed
(not raised if force option is used).
"""
def _create_lock():
LockFile.write(self.path, os.getpid(), time.time())
if self.islocked:
lock_owner_pid = self.pid
if self.owned_by_self:
pass # don't replace existing lock
elif force:
_create_lock()
elif replace_stale and not system.isprocess(lock_owner_pid):
_create_lock()
else:
raise UnableToAcquireLockError(self)
else: # new lock
_create_lock()
return self |
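The claim rules above reduce to a small decision table. A toy sketch of that logic, with plain booleans standing in for the lock-file and PID checks the class really performs:
def can_claim(islocked, owned_by_self, owner_alive, replace_stale, force):
    if not islocked:
        return True      # condition 1: lock is unheld
    if owned_by_self:
        return True      # condition 3: current process already owns it
    if force:
        return True      # condition 2: forced takeover
    if replace_stale and not owner_alive:
        return True      # stale owner, allowed to replace
    return False         # otherwise acquire() raises UnableToAcquireLockError

print(can_claim(islocked=True, owned_by_self=False, owner_alive=False,
                replace_stale=True, force=False))   # True  (stale lock replaced)
print(can_claim(islocked=True, owned_by_self=False, owner_alive=True,
                replace_stale=True, force=False))   # False (healthy owner keeps it)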
def on_finish(self):
"""Invoked once the request has been finished. Increments a counter
created in the format:
.. code::
<PREFIX>.counters.<host>.package[.module].Class.METHOD.STATUS
sprockets.counters.localhost.tornado.web.RequestHandler.GET.200
Adds a value to a timer in the following format:
.. code::
<PREFIX>.timers.<host>.package[.module].Class.METHOD.STATUS
sprockets.timers.localhost.tornado.web.RequestHandler.GET.200
"""
if self.statsd_prefix != statsd.STATSD_PREFIX:
statsd.set_prefix(self.statsd_prefix)
if hasattr(self, 'request') and self.request:
if self.statsd_use_hostname:
timer_prefix = 'timers.{0}'.format(socket.gethostname())
counter_prefix = 'counters.{0}'.format(socket.gethostname())
else:
timer_prefix = 'timers'
counter_prefix = 'counters'
statsd.add_timing(timer_prefix,
self.__module__,
str(self.__class__.__name__),
self.request.method,
str(self._status_code),
value=self.request.request_time() * 1000)
statsd.incr(counter_prefix,
self.__module__,
self.__class__.__name__,
self.request.method,
str(self._status_code))
super(RequestMetricsMixin, self).on_finish() | Invoked once the request has been finished. Increments a counter
created in the format:
.. code::
<PREFIX>.counters.<host>.package[.module].Class.METHOD.STATUS
sprockets.counters.localhost.tornado.web.RequestHandler.GET.200
Adds a value to a timer in the following format:
.. code::
<PREFIX>.timers.<host>.package[.module].Class.METHOD.STATUS
sprockets.timers.localhost.tornado.web.RequestHandler.GET.200 | Below is the instruction that describes the task:
### Input:
Invoked once the request has been finished. Increments a counter
created in the format:
.. code::
<PREFIX>.counters.<host>.package[.module].Class.METHOD.STATUS
sprockets.counters.localhost.tornado.web.RequestHandler.GET.200
Adds a value to a timer in the following format:
.. code::
<PREFIX>.timers.<host>.package[.module].Class.METHOD.STATUS
sprockets.timers.localhost.tornado.web.RequestHandler.GET.200
### Response:
def on_finish(self):
"""Invoked once the request has been finished. Increments a counter
created in the format:
.. code::
<PREFIX>.counters.<host>.package[.module].Class.METHOD.STATUS
sprockets.counters.localhost.tornado.web.RequestHandler.GET.200
Adds a value to a timer in the following format:
.. code::
<PREFIX>.timers.<host>.package[.module].Class.METHOD.STATUS
sprockets.timers.localhost.tornado.web.RequestHandler.GET.200
"""
if self.statsd_prefix != statsd.STATSD_PREFIX:
statsd.set_prefix(self.statsd_prefix)
if hasattr(self, 'request') and self.request:
if self.statsd_use_hostname:
timer_prefix = 'timers.{0}'.format(socket.gethostname())
counter_prefix = 'counters.{0}'.format(socket.gethostname())
else:
timer_prefix = 'timers'
counter_prefix = 'counters'
statsd.add_timing(timer_prefix,
self.__module__,
str(self.__class__.__name__),
self.request.method,
str(self._status_code),
value=self.request.request_time() * 1000)
statsd.incr(counter_prefix,
self.__module__,
self.__class__.__name__,
self.request.method,
str(self._status_code))
super(RequestMetricsMixin, self).on_finish() |
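A sketch of the metric names this mixin emits, assembled as plain strings (the 'sprockets' prefix, handler class, and status are example values taken from the docstring, not live data):
import socket

prefix = "sprockets"                 # assumed example prefix
host = socket.gethostname()          # only included when statsd_use_hostname is true
module, cls, method, status = "tornado.web", "RequestHandler", "GET", "200"
timer_key = ".".join([prefix, "timers", host, module, cls, method, status])
counter_key = ".".join([prefix, "counters", host, module, cls, method, status])
print(timer_key)    # e.g. sprockets.timers.<host>.tornado.web.RequestHandler.GET.200
print(counter_key)  # e.g. sprockets.counters.<host>.tornado.web.RequestHandler.GET.200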
def is_on(self):
"""
Get sensor state.
Assume offline or open (worst case).
"""
return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE,
CONST.STATUS_CLOSED, CONST.STATUS_OPEN) | Get sensor state.
Assume offline or open (worst case). | Below is the instruction that describes the task:
### Input:
Get sensor state.
Assume offline or open (worst case).
### Response:
def is_on(self):
"""
Get sensor state.
Assume offline or open (worst case).
"""
return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE,
CONST.STATUS_CLOSED, CONST.STATUS_OPEN) |
def _getOLRootNumber(self):
"""_getOLRootNumber(self) -> PyObject *"""
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
return _fitz.Document__getOLRootNumber(self) | _getOLRootNumber(self) -> PyObject * | Below is the instruction that describes the task:
### Input:
_getOLRootNumber(self) -> PyObject *
### Response:
def _getOLRootNumber(self):
"""_getOLRootNumber(self) -> PyObject *"""
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
return _fitz.Document__getOLRootNumber(self) |
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
f = open(sitepy, 'rb')
current = f.read()
# we want str, not bytes
if PY3:
current = current.decode()
f.close()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy, 'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True | Make sure there's a site.py in the target dir, if needed | Below is the instruction that describes the task:
### Input:
Make sure there's a site.py in the target dir, if needed
### Response:
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
f = open(sitepy, 'rb')
current = f.read()
# we want str, not bytes
if PY3:
current = current.decode()
f.close()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy, 'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True |
def items(self, section, raw=False, vars=None):
"""Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
options = d.keys()
if "__name__" in options:
options.remove("__name__")
if raw:
return [(option, d[option])
for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options] | Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special. | Below is the the instruction that describes the task:
### Input:
Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
### Response:
def items(self, section, raw=False, vars=None):
"""Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
options = d.keys()
if "__name__" in options:
options.remove("__name__")
if raw:
return [(option, d[option])
for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options] |
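The `items` implementation above is the Python 2-era `ConfigParser` source; the modern stdlib `configparser` behaves the same way. A minimal sketch of interpolation, the `vars` override, and `raw` mode — the section and option names here are made up for illustration:

import configparser

cp = configparser.ConfigParser()
cp.read_string("""
[DEFAULT]
root = /srv/app

[paths]
data = %(root)s/data
""")

# Defaults are merged into every section; %(...)s values are interpolated.
print(cp.items("paths"))                         # root -> /srv/app, data -> /srv/app/data
# vars overrides defaults and section values before interpolation runs.
print(cp.items("paths", vars={"root": "/tmp"}))  # data becomes /tmp/data
# raw=True returns the stored strings untouched.
print(cp.items("paths", raw=True))               # data stays %(root)s/data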
def save_images(images, size, image_path='_temp.png'):
"""Save multiple images into one single image.
Parameters
-----------
images : numpy array
(batch, w, h, c)
size : list of 2 ints
row and column number.
number of images should be equal or less than size[0] * size[1]
image_path : str
save path
Examples
---------
>>> import numpy as np
>>> import tensorlayer as tl
>>> images = np.random.rand(64, 100, 100, 3)
>>> tl.visualize.save_images(images, [8, 8], 'temp.png')
"""
if len(images.shape) == 3: # Greyscale [batch, h, w] --> [batch, h, w, 1]
images = images[:, :, :, np.newaxis]
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1], 3), dtype=images.dtype)
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
def imsave(images, size, path):
if np.max(images) <= 1 and (-1 <= np.min(images) < 0):
images = ((images + 1) * 127.5).astype(np.uint8)
elif np.max(images) <= 1 and np.min(images) >= 0:
images = (images * 255).astype(np.uint8)
return imageio.imwrite(path, merge(images, size))
if len(images) > size[0] * size[1]:
raise AssertionError("number of images should be equal or less than size[0] * size[1] {}".format(len(images)))
return imsave(images, size, image_path) | Save multiple images into one single image.
Parameters
-----------
images : numpy array
(batch, w, h, c)
size : list of 2 ints
row and column number.
number of images should be equal or less than size[0] * size[1]
image_path : str
save path
Examples
---------
>>> import numpy as np
>>> import tensorlayer as tl
>>> images = np.random.rand(64, 100, 100, 3)
>>> tl.visualize.save_images(images, [8, 8], 'temp.png') | Below is the the instruction that describes the task:
### Input:
Save multiple images into one single image.
Parameters
-----------
images : numpy array
(batch, w, h, c)
size : list of 2 ints
row and column number.
number of images should be equal or less than size[0] * size[1]
image_path : str
save path
Examples
---------
>>> import numpy as np
>>> import tensorlayer as tl
>>> images = np.random.rand(64, 100, 100, 3)
>>> tl.visualize.save_images(images, [8, 8], 'temp.png')
### Response:
def save_images(images, size, image_path='_temp.png'):
"""Save multiple images into one single image.
Parameters
-----------
images : numpy array
(batch, w, h, c)
size : list of 2 ints
row and column number.
number of images should be equal or less than size[0] * size[1]
image_path : str
save path
Examples
---------
>>> import numpy as np
>>> import tensorlayer as tl
>>> images = np.random.rand(64, 100, 100, 3)
>>> tl.visualize.save_images(images, [8, 8], 'temp.png')
"""
if len(images.shape) == 3: # Greyscale [batch, h, w] --> [batch, h, w, 1]
images = images[:, :, :, np.newaxis]
def merge(images, size):
h, w = images.shape[1], images.shape[2]
img = np.zeros((h * size[0], w * size[1], 3), dtype=images.dtype)
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
def imsave(images, size, path):
if np.max(images) <= 1 and (-1 <= np.min(images) < 0):
images = ((images + 1) * 127.5).astype(np.uint8)
elif np.max(images) <= 1 and np.min(images) >= 0:
images = (images * 255).astype(np.uint8)
return imageio.imwrite(path, merge(images, size))
if len(images) > size[0] * size[1]:
raise AssertionError("number of images should be equal or less than size[0] * size[1] {}".format(len(images)))
return imsave(images, size, image_path) |
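The heart of `save_images` is the `merge` helper that tiles a batch into a `size[0] x size[1]` grid. A standalone NumPy-only sketch of that placement logic; the 6-image batch and the 2x3 grid are arbitrary choices, not values from the function above:

import numpy as np

def merge_grid(images, rows, cols):
    # images: (batch, h, w, 3) with batch <= rows * cols
    h, w = images.shape[1], images.shape[2]
    canvas = np.zeros((h * rows, w * cols, 3), dtype=images.dtype)
    for idx, img in enumerate(images):
        r, c = divmod(idx, cols)               # row-major: idx // cols, idx % cols
        canvas[r * h:(r + 1) * h, c * w:(c + 1) * w, :] = img
    return canvas

batch = np.random.rand(6, 32, 32, 3)
print(merge_grid(batch, 2, 3).shape)           # (64, 96, 3)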
def create_flat_start_model(feature_filename,
state_stay_probabilities,
symbol_list,
output_model_directory,
output_prototype_filename,
htk_trace):
"""
Creates a flat start model by using HCompV to compute the global mean and variance.
Then uses these global mean and variance to create an N-state model for each symbol in the given list.
:param feature_filename: The filename containing the audio and feature file pairs
:param output_model_directory: The directory where to write the created model
:param output_prototype_filename: The prototype model filename
:param htk_trace: Trace level for HTK
:rtype : None
"""
# Create a prototype model
create_prototype_model(feature_filename,
output_prototype_filename,
state_stay_probabilities=state_stay_probabilities)
# Compute the global mean and variance
config.htk_command("HCompV -A -D -T {} -f 0.01 "
"-S {} -m -o {} -M {} {}".format(htk_trace,
feature_filename,
'proto',
output_model_directory,
output_prototype_filename))
# Create an hmmdefs using the global mean and variance for all states and symbols
# Duplicate the model 'proto' -> symbol_list
proto_model_filename = config.path(output_model_directory, 'proto')
model = htk.load_model(proto_model_filename)
model = htk_model_utils.map_hmms(model, {'proto': symbol_list})
# vFloors -> macros
vfloors_filename = config.path(output_model_directory, 'vFloors')
variance_model = htk.load_model(vfloors_filename)
model['macros'] += variance_model['macros']
macros, hmmdefs = htk_model_utils.split_model(model)
htk.save_model(macros, config.path(output_model_directory, 'macros'))
htk.save_model(hmmdefs, config.path(output_model_directory, 'hmmdefs')) | Creates a flat start model by using HCompV to compute the global mean and variance.
Then uses these global mean and variance to create an N-state model for each symbol in the given list.
:param feature_filename: The filename containing the audio and feature file pairs
:param output_model_directory: The directory where to write the created model
:param output_prototype_filename: The prototype model filename
:param htk_trace: Trace level for HTK
:rtype : None | Below is the the instruction that describes the task:
### Input:
Creates a flat start model by using HCompV to compute the global mean and variance.
Then uses these global mean and variance to create an N-state model for each symbol in the given list.
:param feature_filename: The filename containing the audio and feature file pairs
:param output_model_directory: The directory where to write the created model
:param output_prototype_filename: The prototype model filename
:param htk_trace: Trace level for HTK
:rtype : None
### Response:
def create_flat_start_model(feature_filename,
state_stay_probabilities,
symbol_list,
output_model_directory,
output_prototype_filename,
htk_trace):
"""
Creates a flat start model by using HCompV to compute the global mean and variance.
Then uses these global mean and variance to create an N-state model for each symbol in the given list.
:param feature_filename: The filename containing the audio and feature file pairs
:param output_model_directory: The directory where to write the created model
:param output_prototype_filename: The prototype model filename
:param htk_trace: Trace level for HTK
:rtype : None
"""
# Create a prototype model
create_prototype_model(feature_filename,
output_prototype_filename,
state_stay_probabilities=state_stay_probabilities)
# Compute the global mean and variance
config.htk_command("HCompV -A -D -T {} -f 0.01 "
"-S {} -m -o {} -M {} {}".format(htk_trace,
feature_filename,
'proto',
output_model_directory,
output_prototype_filename))
# Create an hmmdefs using the global mean and variance for all states and symbols
# Duplicate the model 'proto' -> symbol_list
proto_model_filename = config.path(output_model_directory, 'proto')
model = htk.load_model(proto_model_filename)
model = htk_model_utils.map_hmms(model, {'proto': symbol_list})
# vFloors -> macros
vfloors_filename = config.path(output_model_directory, 'vFloors')
variance_model = htk.load_model(vfloors_filename)
model['macros'] += variance_model['macros']
macros, hmmdefs = htk_model_utils.split_model(model)
htk.save_model(macros, config.path(output_model_directory, 'macros'))
htk.save_model(hmmdefs, config.path(output_model_directory, 'hmmdefs')) |
def get(self):
"""
This method computes and returns a hitting set. The hitting set is
obtained using the underlying oracle operating the MaxSAT problem
formulation. The computed solution is mapped back to objects of the
problem domain.
:rtype: list(obj)
"""
model = self.oracle.compute()
if model:
if self.htype == 'rc2':
# extracting a hitting set
self.hset = filter(lambda v: v > 0, model)
else:
self.hset = model
return list(map(lambda vid: self.idpool.id2obj[vid], self.hset)) | This method computes and returns a hitting set. The hitting set is
obtained using the underlying oracle operating the MaxSAT problem
formulation. The computed solution is mapped back to objects of the
problem domain.
:rtype: list(obj) | Below is the the instruction that describes the task:
### Input:
This method computes and returns a hitting set. The hitting set is
obtained using the underlying oracle operating the MaxSAT problem
formulation. The computed solution is mapped back to objects of the
problem domain.
:rtype: list(obj)
### Response:
def get(self):
"""
This method computes and returns a hitting set. The hitting set is
obtained using the underlying oracle operating the MaxSAT problem
formulation. The computed solution is mapped back to objects of the
problem domain.
:rtype: list(obj)
"""
model = self.oracle.compute()
if model:
if self.htype == 'rc2':
# extracting a hitting set
self.hset = filter(lambda v: v > 0, model)
else:
self.hset = model
return list(map(lambda vid: self.idpool.id2obj[vid], self.hset)) |
def drop(self, repo, args=[]):
"""
Cleanup the repo
"""
# Clean up the rootdir
rootdir = repo.rootdir
if os.path.exists(rootdir):
print("Cleaning repo directory: {}".format(rootdir))
shutil.rmtree(rootdir)
# Cleanup the local version of the repo (this could be on
# the server etc.
server_repodir = self.server_rootdir_from_repo(repo,
create=False)
if os.path.exists(server_repodir):
print("Cleaning data from local git 'server': {}".format(server_repodir))
shutil.rmtree(server_repodir)
super(GitRepoManager, self).drop(repo)
return {
'status': 'success',
'message': "successful cleanup"
} | Cleanup the repo | Below is the the instruction that describes the task:
### Input:
Cleanup the repo
### Response:
def drop(self, repo, args=[]):
"""
Cleanup the repo
"""
# Clean up the rootdir
rootdir = repo.rootdir
if os.path.exists(rootdir):
print("Cleaning repo directory: {}".format(rootdir))
shutil.rmtree(rootdir)
# Cleanup the local version of the repo (this could be on
# the server etc.
server_repodir = self.server_rootdir_from_repo(repo,
create=False)
if os.path.exists(server_repodir):
print("Cleaning data from local git 'server': {}".format(server_repodir))
shutil.rmtree(server_repodir)
super(GitRepoManager, self).drop(repo)
return {
'status': 'success',
'message': "successful cleanup"
} |
def q_diffuser(sed_inputs=sed_dict):
"""Return the flow through each diffuser.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Flow through each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
"""
return (sed_inputs['tank']['vel_up'].to(u.m/u.s) *
sed_inputs['tank']['W'].to(u.m) *
L_diffuser_outer(sed_inputs)).magnitude | Return the flow through each diffuser.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Flow through each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>> | Below is the the instruction that describes the task:
### Input:
Return the flow through each diffuser.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Flow through each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
### Response:
def q_diffuser(sed_inputs=sed_dict):
"""Return the flow through each diffuser.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Flow through each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
"""
return (sed_inputs['tank']['vel_up'].to(u.m/u.s) *
sed_inputs['tank']['W'].to(u.m) *
L_diffuser_outer(sed_inputs)).magnitude |
def print_node(self, name): # noqa: D302
r"""
Print node information (parent, children and data).
:param name: Node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> print(tobj.print_node('root.branch1'))
Name: root.branch1
Parent: root
Children: leaf1, leaf2
Data: [5, 7]
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
node = self._db[name]
children = (
[self._split_node_name(child)[-1] for child in node["children"]]
if node["children"]
else node["children"]
)
data = (
node["data"][0]
if node["data"] and (len(node["data"]) == 1)
else node["data"]
)
return (
"Name: {node_name}\n"
"Parent: {parent_name}\n"
"Children: {children_list}\n"
"Data: {node_data}".format(
node_name=name,
parent_name=node["parent"] if node["parent"] else None,
children_list=", ".join(children) if children else None,
node_data=data if data else None,
)
) | r"""
Print node information (parent, children and data).
:param name: Node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> print(tobj.print_node('root.branch1'))
Name: root.branch1
Parent: root
Children: leaf1, leaf2
Data: [5, 7] | Below is the the instruction that describes the task:
### Input:
r"""
Print node information (parent, children and data).
:param name: Node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> print(tobj.print_node('root.branch1'))
Name: root.branch1
Parent: root
Children: leaf1, leaf2
Data: [5, 7]
### Response:
def print_node(self, name): # noqa: D302
r"""
Print node information (parent, children and data).
:param name: Node name
:type name: :ref:`NodeName`
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
Using the same example tree created in
:py:meth:`ptrie.Trie.add_nodes`::
>>> from __future__ import print_function
>>> import docs.support.ptrie_example
>>> tobj = docs.support.ptrie_example.create_tree()
>>> print(tobj)
root
├branch1 (*)
│├leaf1
││└subleaf1 (*)
│└leaf2 (*)
│ └subleaf2
└branch2
>>> print(tobj.print_node('root.branch1'))
Name: root.branch1
Parent: root
Children: leaf1, leaf2
Data: [5, 7]
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
node = self._db[name]
children = (
[self._split_node_name(child)[-1] for child in node["children"]]
if node["children"]
else node["children"]
)
data = (
node["data"][0]
if node["data"] and (len(node["data"]) == 1)
else node["data"]
)
return (
"Name: {node_name}\n"
"Parent: {parent_name}\n"
"Children: {children_list}\n"
"Data: {node_data}".format(
node_name=name,
parent_name=node["parent"] if node["parent"] else None,
children_list=", ".join(children) if children else None,
node_data=data if data else None,
)
) |
def _set_symlink_ownership(path, user, group, win_owner):
'''
Set the ownership of a symlink and return a boolean indicating
success/failure
'''
if salt.utils.platform.is_windows():
try:
salt.utils.win_dacl.set_owner(path, win_owner)
except CommandExecutionError:
pass
else:
try:
__salt__['file.lchown'](path, user, group)
except OSError:
pass
return _check_symlink_ownership(path, user, group, win_owner) | Set the ownership of a symlink and return a boolean indicating
success/failure | Below is the the instruction that describes the task:
### Input:
Set the ownership of a symlink and return a boolean indicating
success/failure
### Response:
def _set_symlink_ownership(path, user, group, win_owner):
'''
Set the ownership of a symlink and return a boolean indicating
success/failure
'''
if salt.utils.platform.is_windows():
try:
salt.utils.win_dacl.set_owner(path, win_owner)
except CommandExecutionError:
pass
else:
try:
__salt__['file.lchown'](path, user, group)
except OSError:
pass
return _check_symlink_ownership(path, user, group, win_owner) |
def getSizes(self):
"""
Get all the available sizes of the current image, and all available
data about them.
Returns: A list of dicts with the size data.
"""
method = 'flickr.photos.getSizes'
data = _doget(method, photo_id=self.id)
ret = []
# The given props are those that we return and the according types, since
# return width and height as string would make "75">"100" be True, which
# is just error prone.
props = {'url':str,'width':int,'height':int,'label':str,'source':str,'text':str}
for psize in data.rsp.sizes.size:
d = {}
for prop,convert_to_type in props.items():
d[prop] = convert_to_type(getattr(psize, prop))
ret.append(d)
return ret | Get all the available sizes of the current image, and all available
data about them.
Returns: A list of dicts with the size data. | Below is the the instruction that describes the task:
### Input:
Get all the available sizes of the current image, and all available
data about them.
Returns: A list of dicts with the size data.
### Response:
def getSizes(self):
"""
Get all the available sizes of the current image, and all available
data about them.
Returns: A list of dicts with the size data.
"""
method = 'flickr.photos.getSizes'
data = _doget(method, photo_id=self.id)
ret = []
# The given props are those that we return and the according types, since
# return width and height as string would make "75">"100" be True, which
# is just error prone.
props = {'url':str,'width':int,'height':int,'label':str,'source':str,'text':str}
for psize in data.rsp.sizes.size:
d = {}
for prop,convert_to_type in props.items():
d[prop] = convert_to_type(getattr(psize, prop))
ret.append(d)
return ret |
def lowstrip(term):
"""Convert to lowercase and strip spaces"""
term = re.sub(r'\s+', ' ', term)
term = term.lower()
return term | Convert to lowercase and strip spaces | Below is the the instruction that describes the task:
### Input:
Convert to lowercase and strip spaces
### Response:
def lowstrip(term):
"""Convert to lowercase and strip spaces"""
term = re.sub(r'\s+', ' ', term)
term = term.lower()
return term |
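A quick check of what `lowstrip` actually does: internal runs of whitespace collapse to a single space and the text is lower-cased, but leading/trailing whitespace is collapsed to one space rather than removed, despite the docstring. Restated standalone (the gene-name inputs are arbitrary):

import re

def lowstrip(term):
    term = re.sub(r'\s+', ' ', term)
    return term.lower()

print(lowstrip("Gene\tName  Alias"))   # gene name alias
print(repr(lowstrip("  BRCA1 ")))      # ' brca1 ' -- edges are not fully stripped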
def kpath_from_seekpath(cls, seekpath, point_coords):
r"""Convert seekpath-formatted kpoints path to sumo-preferred format.
If 'GAMMA' is used as a label this will be replaced by '\Gamma'.
Args:
seekpath (list): A :obj:`list` of 2-tuples containing the labels at
each side of each segment of the k-point path::
[(A, B), (B, C), (C, D), ...]
where a break in the sequence is indicated by a non-repeating
label. E.g.::
[(A, B), (B, C), (D, E), ...]
for a break between C and D.
point_coords (dict): Dict of coordinates corresponding to k-point
labels::
{'GAMMA': [0., 0., 0.], ...}
Returns:
dict: The path and k-points as::
{
'path', [[l1, l2, l3], [l4, l5], ...],
'kpoints', {l1: [a1, b1, c1], l2: [a2, b2, c2], ...}
}
"""
# convert from seekpath format e.g. [(l1, l2), (l2, l3), (l4, l5)]
# to our preferred representation [[l1, l2, l3], [l4, l5]]
path = [[seekpath[0][0]]]
for (k1, k2) in seekpath:
if path[-1] and path[-1][-1] == k1:
path[-1].append(k2)
else:
path.append([k1, k2])
# Rebuild kpoints dictionary skipping any positions not on path
# (chain(*list) flattens nested list; set() removes duplicates.)
kpoints = {p: point_coords[p] for p in set(chain(*path))}
# Every path should include Gamma-point. Change the label to \Gamma
assert 'GAMMA' in kpoints
kpoints[r'\Gamma'] = kpoints.pop('GAMMA')
path = [[label.replace('GAMMA', r'\Gamma') for label in subpath]
for subpath in path]
return {'kpoints': kpoints, 'path': path} | r"""Convert seekpath-formatted kpoints path to sumo-preferred format.
If 'GAMMA' is used as a label this will be replaced by '\Gamma'.
Args:
seekpath (list): A :obj:`list` of 2-tuples containing the labels at
each side of each segment of the k-point path::
[(A, B), (B, C), (C, D), ...]
where a break in the sequence is indicated by a non-repeating
label. E.g.::
[(A, B), (B, C), (D, E), ...]
for a break between C and D.
point_coords (dict): Dict of coordinates corresponding to k-point
labels::
{'GAMMA': [0., 0., 0.], ...}
Returns:
dict: The path and k-points as::
{
'path', [[l1, l2, l3], [l4, l5], ...],
'kpoints', {l1: [a1, b1, c1], l2: [a2, b2, c2], ...}
} | Below is the the instruction that describes the task:
### Input:
r"""Convert seekpath-formatted kpoints path to sumo-preferred format.
If 'GAMMA' is used as a label this will be replaced by '\Gamma'.
Args:
seekpath (list): A :obj:`list` of 2-tuples containing the labels at
each side of each segment of the k-point path::
[(A, B), (B, C), (C, D), ...]
where a break in the sequence is indicated by a non-repeating
label. E.g.::
[(A, B), (B, C), (D, E), ...]
for a break between C and D.
point_coords (dict): Dict of coordinates corresponding to k-point
labels::
{'GAMMA': [0., 0., 0.], ...}
Returns:
dict: The path and k-points as::
{
'path', [[l1, l2, l3], [l4, l5], ...],
'kpoints', {l1: [a1, b1, c1], l2: [a2, b2, c2], ...}
}
### Response:
def kpath_from_seekpath(cls, seekpath, point_coords):
r"""Convert seekpath-formatted kpoints path to sumo-preferred format.
If 'GAMMA' is used as a label this will be replaced by '\Gamma'.
Args:
seekpath (list): A :obj:`list` of 2-tuples containing the labels at
each side of each segment of the k-point path::
[(A, B), (B, C), (C, D), ...]
where a break in the sequence is indicated by a non-repeating
label. E.g.::
[(A, B), (B, C), (D, E), ...]
for a break between C and D.
point_coords (dict): Dict of coordinates corresponding to k-point
labels::
{'GAMMA': [0., 0., 0.], ...}
Returns:
dict: The path and k-points as::
{
'path', [[l1, l2, l3], [l4, l5], ...],
'kpoints', {l1: [a1, b1, c1], l2: [a2, b2, c2], ...}
}
"""
# convert from seekpath format e.g. [(l1, l2), (l2, l3), (l4, l5)]
# to our preferred representation [[l1, l2, l3], [l4, l5]]
path = [[seekpath[0][0]]]
for (k1, k2) in seekpath:
if path[-1] and path[-1][-1] == k1:
path[-1].append(k2)
else:
path.append([k1, k2])
# Rebuild kpoints dictionary skipping any positions not on path
# (chain(*list) flattens nested list; set() removes duplicates.)
kpoints = {p: point_coords[p] for p in set(chain(*path))}
# Every path should include Gamma-point. Change the label to \Gamma
assert 'GAMMA' in kpoints
kpoints[r'\Gamma'] = kpoints.pop('GAMMA')
path = [[label.replace('GAMMA', r'\Gamma') for label in subpath]
for subpath in path]
return {'kpoints': kpoints, 'path': path} |
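The first half of `kpath_from_seekpath` is simply a merge of consecutive 2-tuples into branches. A standalone sketch of that step with made-up labels, where the break between M and K starts a new branch:

pairs = [(r'\Gamma', 'X'), ('X', 'M'), ('K', r'\Gamma')]

path = [[pairs[0][0]]]
for k1, k2 in pairs:
    if path[-1][-1] == k1:        # the segment continues the current branch
        path[-1].append(k2)
    else:                         # non-repeating label -> start a new branch
        path.append([k1, k2])

print(path)                       # two branches: Gamma-X-M and K-Gamma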
def bgmagenta(cls, string, auto=False):
"""Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
"""
return cls.colorize('bgmagenta', string, auto=auto) | Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color | Below is the the instruction that describes the task:
### Input:
Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
### Response:
def bgmagenta(cls, string, auto=False):
"""Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
"""
return cls.colorize('bgmagenta', string, auto=auto) |
def validate_params(request):
"""Validate request params."""
if 'params' in request:
correct_params = isinstance(request['params'], (list, dict))
error = 'Incorrect parameter values'
assert correct_params, error | Validate request params. | Below is the the instruction that describes the task:
### Input:
Validate request params.
### Response:
def validate_params(request):
"""Validate request params."""
if 'params' in request:
correct_params = isinstance(request['params'], (list, dict))
error = 'Incorrect parameter values'
assert correct_params, error |
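A condensed restatement of the check above with a few illustrative JSON-RPC requests; the method names are arbitrary:

def validate_params(request):
    if 'params' in request:
        assert isinstance(request['params'], (list, dict)), 'Incorrect parameter values'

validate_params({'jsonrpc': '2.0', 'method': 'sum', 'params': [1, 2, 3]})    # positional params: ok
validate_params({'jsonrpc': '2.0', 'method': 'ping'})                        # no params key: ok
try:
    validate_params({'jsonrpc': '2.0', 'method': 'sum', 'params': '1,2,3'})  # a string is rejected
except AssertionError as err:
    print(err)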
def create_bootstrap_dataframe(orig_df,
obs_id_col,
resampled_obs_ids_1d,
groupby_dict,
boot_id_col="bootstrap_id"):
"""
Will create the altered dataframe of data needed to estimate a choice model
with the particular observations that belong to the current bootstrap
sample.
Parameters
----------
orig_df : pandas DataFrame.
Should be long-format dataframe containing the data used to estimate
the desired choice model.
obs_id_col : str.
Should be a column name within `orig_df`. Should denote the original
observation id column.
resampled_obs_ids_1d : 1D ndarray of ints.
Each value should represent the alternative id of a given bootstrap
replicate.
groupby_dict : dict.
Each key will be a unique value in `orig_df[obs_id_col]` and each value
will be the rows of `orig_df` where `orig_df[obs_id_col] == key`.
boot_id_col : str, optional.
Denotes the new column that will be created to specify the bootstrap
observation ids for choice model estimation.
Returns
-------
bootstrap_df : pandas Dataframe.
Will contain all the same columns as `orig_df` as well as the
additional `boot_id_col`. For each value in `resampled_obs_ids_1d`,
`bootstrap_df` will contain the long format rows from `orig_df` that
have the given observation id.
"""
# Check the validity of the passed arguments.
check_column_existence(obs_id_col, orig_df, presence=True)
check_column_existence(boot_id_col, orig_df, presence=False)
# Alias the observation id column
obs_id_values = orig_df[obs_id_col].values
# Check the validity of the resampled observation ids.
ensure_resampled_obs_ids_in_df(resampled_obs_ids_1d, obs_id_values)
# Initialize a list to store the component dataframes that will be
# concatenated to form the final bootstrap_df
component_dfs = []
# Populate component_dfs
for boot_id, obs_id in enumerate(resampled_obs_ids_1d):
# Extract the dataframe that we desire.
extracted_df = groupby_dict[obs_id].copy()
# Add the bootstrap id value.
extracted_df[boot_id_col] = boot_id + 1
# Store the component dataframe
component_dfs.append(extracted_df)
# Create and return the desired dataframe.
bootstrap_df = pd.concat(component_dfs, axis=0, ignore_index=True)
return bootstrap_df | Will create the altered dataframe of data needed to estimate a choice model
with the particular observations that belong to the current bootstrap
sample.
Parameters
----------
orig_df : pandas DataFrame.
Should be long-format dataframe containing the data used to estimate
the desired choice model.
obs_id_col : str.
Should be a column name within `orig_df`. Should denote the original
observation id column.
resampled_obs_ids_1d : 1D ndarray of ints.
Each value should represent the alternative id of a given bootstrap
replicate.
groupby_dict : dict.
Each key will be a unique value in `orig_df[obs_id_col]` and each value
will be the rows of `orig_df` where `orig_df[obs_id_col] == key`.
boot_id_col : str, optional.
Denotes the new column that will be created to specify the bootstrap
observation ids for choice model estimation.
Returns
-------
bootstrap_df : pandas Dataframe.
Will contain all the same columns as `orig_df` as well as the
additional `boot_id_col`. For each value in `resampled_obs_ids_1d`,
`bootstrap_df` will contain the long format rows from `orig_df` that
have the given observation id. | Below is the the instruction that describes the task:
### Input:
Will create the altered dataframe of data needed to estimate a choice model
with the particular observations that belong to the current bootstrap
sample.
Parameters
----------
orig_df : pandas DataFrame.
Should be long-format dataframe containing the data used to estimate
the desired choice model.
obs_id_col : str.
Should be a column name within `orig_df`. Should denote the original
observation id column.
resampled_obs_ids_1d : 1D ndarray of ints.
Each value should represent the alternative id of a given bootstrap
replicate.
groupby_dict : dict.
Each key will be a unique value in `orig_df[obs_id_col]` and each value
will be the rows of `orig_df` where `orig_df[obs_id_col] == key`.
boot_id_col : str, optional.
Denotes the new column that will be created to specify the bootstrap
observation ids for choice model estimation.
Returns
-------
bootstrap_df : pandas Dataframe.
Will contain all the same columns as `orig_df` as well as the
additional `boot_id_col`. For each value in `resampled_obs_ids_1d`,
`bootstrap_df` will contain the long format rows from `orig_df` that
have the given observation id.
### Response:
def create_bootstrap_dataframe(orig_df,
obs_id_col,
resampled_obs_ids_1d,
groupby_dict,
boot_id_col="bootstrap_id"):
"""
Will create the altered dataframe of data needed to estimate a choice model
with the particular observations that belong to the current bootstrap
sample.
Parameters
----------
orig_df : pandas DataFrame.
Should be long-format dataframe containing the data used to estimate
the desired choice model.
obs_id_col : str.
Should be a column name within `orig_df`. Should denote the original
observation id column.
resampled_obs_ids_1d : 1D ndarray of ints.
Each value should represent the alternative id of a given bootstrap
replicate.
groupby_dict : dict.
Each key will be a unique value in `orig_df[obs_id_col]` and each value
will be the rows of `orig_df` where `orig_df[obs_id_col] == key`.
boot_id_col : str, optional.
Denotes the new column that will be created to specify the bootstrap
observation ids for choice model estimation.
Returns
-------
bootstrap_df : pandas Dataframe.
Will contain all the same columns as `orig_df` as well as the
additional `boot_id_col`. For each value in `resampled_obs_ids_1d`,
`bootstrap_df` will contain the long format rows from `orig_df` that
have the given observation id.
"""
# Check the validity of the passed arguments.
check_column_existence(obs_id_col, orig_df, presence=True)
check_column_existence(boot_id_col, orig_df, presence=False)
# Alias the observation id column
obs_id_values = orig_df[obs_id_col].values
# Check the validity of the resampled observation ids.
ensure_resampled_obs_ids_in_df(resampled_obs_ids_1d, obs_id_values)
# Initialize a list to store the component dataframes that will be
# concatenated to form the final bootstrap_df
component_dfs = []
# Populate component_dfs
for boot_id, obs_id in enumerate(resampled_obs_ids_1d):
# Extract the dataframe that we desire.
extracted_df = groupby_dict[obs_id].copy()
# Add the bootstrap id value.
extracted_df[boot_id_col] = boot_id + 1
# Store the component dataframe
component_dfs.append(extracted_df)
# Create and return the desired dataframe.
bootstrap_df = pd.concat(component_dfs, axis=0, ignore_index=True)
return bootstrap_df |
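A small pandas-only sketch of the core loop: resampled observation ids pull whole blocks out of the groupby dict and receive a fresh bootstrap id so repeated draws stay distinct. The toy data and column names are illustrative, not taken from the library above:

import numpy as np
import pandas as pd

# toy long-format data: two alternatives per observation
orig_df = pd.DataFrame({'obs_id': [1, 1, 2, 2, 3, 3],
                        'alt_id': [0, 1, 0, 1, 0, 1],
                        'x':      [0.2, 0.8, 0.5, 0.5, 0.9, 0.1]})
groupby_dict = {k: g for k, g in orig_df.groupby('obs_id')}

resampled_obs_ids = np.array([3, 1, 3])        # a bootstrap draw with replacement
parts = []
for boot_id, obs_id in enumerate(resampled_obs_ids, start=1):
    block = groupby_dict[obs_id].copy()
    block['bootstrap_id'] = boot_id
    parts.append(block)

bootstrap_df = pd.concat(parts, axis=0, ignore_index=True)
print(bootstrap_df[['bootstrap_id', 'obs_id', 'alt_id']])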
def _createSlink(self, slinks):
"""
Create GSSHAPY SuperLink, Pipe, and SuperNode Objects Method
"""
for slink in slinks:
# Create GSSHAPY SuperLink object
superLink = SuperLink(slinkNumber=slink['slinkNumber'],
numPipes=slink['numPipes'])
# Associate SuperLink with StormPipeNetworkFile
superLink.stormPipeNetworkFile = self
for node in slink['nodes']:
# Create GSSHAPY SuperNode objects
superNode = SuperNode(nodeNumber=node['nodeNumber'],
groundSurfaceElev=node['groundSurfaceElev'],
invertElev=node['invertElev'],
manholeSA=node['manholeSA'],
nodeInletCode=node['inletCode'],
cellI=node['cellI'],
cellJ=node['cellJ'],
weirSideLength=node['weirSideLength'],
orificeDiameter=node['orificeDiameter'])
# Associate SuperNode with SuperLink
superNode.superLink = superLink
for p in slink['pipes']:
# Create GSSHAPY Pipe objects
pipe = Pipe(pipeNumber=p['pipeNumber'],
xSecType=p['xSecType'],
diameterOrHeight=p['diameterOrHeight'],
width=p['width'],
slope=p['slope'],
roughness=p['roughness'],
length=p['length'],
conductance=p['conductance'],
drainSpacing=p['drainSpacing'])
# Associate Pipe with SuperLink
pipe.superLink = superLink | Create GSSHAPY SuperLink, Pipe, and SuperNode Objects Method | Below is the the instruction that describes the task:
### Input:
Create GSSHAPY SuperLink, Pipe, and SuperNode Objects Method
### Response:
def _createSlink(self, slinks):
"""
Create GSSHAPY SuperLink, Pipe, and SuperNode Objects Method
"""
for slink in slinks:
# Create GSSHAPY SuperLink object
superLink = SuperLink(slinkNumber=slink['slinkNumber'],
numPipes=slink['numPipes'])
# Associate SuperLink with StormPipeNetworkFile
superLink.stormPipeNetworkFile = self
for node in slink['nodes']:
# Create GSSHAPY SuperNode objects
superNode = SuperNode(nodeNumber=node['nodeNumber'],
groundSurfaceElev=node['groundSurfaceElev'],
invertElev=node['invertElev'],
manholeSA=node['manholeSA'],
nodeInletCode=node['inletCode'],
cellI=node['cellI'],
cellJ=node['cellJ'],
weirSideLength=node['weirSideLength'],
orificeDiameter=node['orificeDiameter'])
# Associate SuperNode with SuperLink
superNode.superLink = superLink
for p in slink['pipes']:
# Create GSSHAPY Pipe objects
pipe = Pipe(pipeNumber=p['pipeNumber'],
xSecType=p['xSecType'],
diameterOrHeight=p['diameterOrHeight'],
width=p['width'],
slope=p['slope'],
roughness=p['roughness'],
length=p['length'],
conductance=p['conductance'],
drainSpacing=p['drainSpacing'])
# Associate Pipe with SuperLink
pipe.superLink = superLink |
def perform(self):
"""
Performs a straightforward TCP request and response.
Sends the TCP `query` to the proper host and port, and loops over the
socket, gathering response chunks until a full line is acquired.
If the response line matches the expected value, the check passes. If
not, the check fails. The check will also fail if there's an error
during any step of the send/receive process.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
# if no query/response is defined, a successful connection is a pass
if not self.query:
sock.close()
return True
try:
sock.sendall(self.query)
except Exception:
logger.exception("Error sending TCP query message.")
sock.close()
return False
response, extra = sockutils.get_response(sock)
logger.debug("response: %s (extra: %s)", response, extra)
if response != self.expected_response:
logger.warn(
"Response does not match expected value: %s (expected %s)",
response, self.expected_response
)
sock.close()
return False
sock.close()
return True | Performs a straightforward TCP request and response.
Sends the TCP `query` to the proper host and port, and loops over the
socket, gathering response chunks until a full line is acquired.
If the response line matches the expected value, the check passes. If
not, the check fails. The check will also fail if there's an error
during any step of the send/receive process. | Below is the the instruction that describes the task:
### Input:
Performs a straightforward TCP request and response.
Sends the TCP `query` to the proper host and port, and loops over the
socket, gathering response chunks until a full line is acquired.
If the response line matches the expected value, the check passes. If
not, the check fails. The check will also fail if there's an error
during any step of the send/receive process.
### Response:
def perform(self):
"""
Performs a straightforward TCP request and response.
Sends the TCP `query` to the proper host and port, and loops over the
socket, gathering response chunks until a full line is acquired.
If the response line matches the expected value, the check passes. If
not, the check fails. The check will also fail if there's an error
during any step of the send/receive process.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
# if no query/response is defined, a successful connection is a pass
if not self.query:
sock.close()
return True
try:
sock.sendall(self.query)
except Exception:
logger.exception("Error sending TCP query message.")
sock.close()
return False
response, extra = sockutils.get_response(sock)
logger.debug("response: %s (extra: %s)", response, extra)
if response != self.expected_response:
logger.warn(
"Response does not match expected value: %s (expected %s)",
response, self.expected_response
)
sock.close()
return False
sock.close()
return True |
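Stripped of the `sockutils` helper, the same query/expected-response pattern can be sketched with the stdlib alone; the host, port, query, and expected bytes in the commented call are placeholders, not values used by the check above:

import socket

def tcp_check(host, port, query=None, expected=None, timeout=5.0):
    try:
        sock = socket.create_connection((host, port), timeout=timeout)
    except OSError:
        return False
    try:
        if query is None:                 # a bare successful connect counts as a pass
            return True
        sock.sendall(query)
        buf = b''
        while b'\n' not in buf:           # gather chunks until one full line arrives
            data = sock.recv(4096)
            if not data:
                break
            buf += data
        line = buf.split(b'\n', 1)[0].rstrip(b'\r')
        return line == expected
    except OSError:
        return False
    finally:
        sock.close()

# tcp_check('127.0.0.1', 11211, b'version\r\n', expected=b'VERSION 1.6.21')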
def unregister(self, device, callback):
"""Remove a registered a callback.
device: device that has the subscription
callback: callback used in original registration
"""
if not device:
logger.error("Received an invalid device: %r", device)
return
logger.debug("Removing subscription for {}".format(device.name))
self._callbacks[device].remove(callback)
self._devices[device.vera_device_id].remove(device) | Remove a registered a callback.
device: device that has the subscription
callback: callback used in original registration | Below is the the instruction that describes the task:
### Input:
Remove a registered a callback.
device: device that has the subscription
callback: callback used in original registration
### Response:
def unregister(self, device, callback):
"""Remove a registered a callback.
device: device that has the subscription
callback: callback used in original registration
"""
if not device:
logger.error("Received an invalid device: %r", device)
return
logger.debug("Removing subscription for {}".format(device.name))
self._callbacks[device].remove(callback)
self._devices[device.vera_device_id].remove(device) |
def setup_logfile_logger(log_path, log_level='error', log_format=None,
date_format=None, max_bytes=0, backup_count=0):
'''
Setup the logfile logger
Since version 0.10.6 we support logging to syslog, some examples:
tcp://localhost:514/LOG_USER
tcp://localhost/LOG_DAEMON
udp://localhost:5145/LOG_KERN
udp://localhost
file:///dev/log
file:///dev/log/LOG_SYSLOG
file:///dev/log/LOG_DAEMON
The above examples are self explanatory, but:
<file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
If you're thinking on doing remote logging you might also be thinking that
you could point salt's logging to the remote syslog. **Please Don't!**
An issue has been reported when doing this over TCP when the logged lines
get concatenated. See #3061.
The preferred way to do remote logging is setup a local syslog, point
salt's logging to the local syslog(unix socket is much faster) and then
have the local syslog forward the log messages to the remote syslog.
'''
if is_logfile_configured():
logging.getLogger(__name__).warning('Logfile logging already configured')
return
if log_path is None:
logging.getLogger(__name__).warning(
'log_path setting is set to `None`. Nothing else to do'
)
return
# Remove the temporary logging handler
__remove_temp_logging_handler()
if log_level is None:
log_level = 'warning'
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
parsed_log_path = urlparse(log_path)
root_logger = logging.getLogger()
if parsed_log_path.scheme in ('tcp', 'udp', 'file'):
syslog_opts = {
'facility': SysLogHandler.LOG_USER,
'socktype': socket.SOCK_DGRAM
}
if parsed_log_path.scheme == 'file' and parsed_log_path.path:
facility_name = parsed_log_path.path.split(os.sep)[-1].upper()
if not facility_name.startswith('LOG_'):
# The user is not specifying a syslog facility
facility_name = 'LOG_USER' # Syslog default
syslog_opts['address'] = parsed_log_path.path
else:
# The user has set a syslog facility, let's update the path to
# the logging socket
syslog_opts['address'] = os.sep.join(
parsed_log_path.path.split(os.sep)[:-1]
)
elif parsed_log_path.path:
# In case of udp or tcp with a facility specified
facility_name = parsed_log_path.path.lstrip(os.sep).upper()
if not facility_name.startswith('LOG_'):
# Logging facilities start with LOG_ if this is not the case
# fail right now!
raise RuntimeError(
'The syslog facility \'{0}\' is not known'.format(
facility_name
)
)
else:
# This is the case of udp or tcp without a facility specified
facility_name = 'LOG_USER' # Syslog default
facility = getattr(
SysLogHandler, facility_name, None
)
if facility is None:
# This python syslog version does not know about the user provided
# facility name
raise RuntimeError(
'The syslog facility \'{0}\' is not known'.format(
facility_name
)
)
syslog_opts['facility'] = facility
if parsed_log_path.scheme == 'tcp':
# tcp syslog support was only added on python versions >= 2.7
if sys.version_info < (2, 7):
raise RuntimeError(
'Python versions lower than 2.7 do not support logging '
'to syslog using tcp sockets'
)
syslog_opts['socktype'] = socket.SOCK_STREAM
if parsed_log_path.scheme in ('tcp', 'udp'):
syslog_opts['address'] = (
parsed_log_path.hostname,
parsed_log_path.port or logging.handlers.SYSLOG_UDP_PORT
)
if sys.version_info < (2, 7) or parsed_log_path.scheme == 'file':
# There's not socktype support on python versions lower than 2.7
syslog_opts.pop('socktype', None)
try:
# Et voilá! Finally our syslog handler instance
handler = SysLogHandler(**syslog_opts)
except socket.error as err:
logging.getLogger(__name__).error(
'Failed to setup the Syslog logging handler: %s', err
)
shutdown_multiprocessing_logging_listener()
sys.exit(2)
else:
# make sure, the logging directory exists and attempt to create it if necessary
log_dir = os.path.dirname(log_path)
if not os.path.exists(log_dir):
logging.getLogger(__name__).info(
'Log directory not found, trying to create it: %s', log_dir
)
try:
os.makedirs(log_dir, mode=0o700)
except OSError as ose:
logging.getLogger(__name__).warning(
'Failed to create directory for log file: %s (%s)', log_dir, ose
)
return
try:
# Logfile logging is UTF-8 on purpose.
# Since salt uses YAML and YAML uses either UTF-8 or UTF-16, if a
# user is not using plain ASCII, their system should be ready to
# handle UTF-8.
if max_bytes > 0:
handler = RotatingFileHandler(log_path,
mode='a',
maxBytes=max_bytes,
backupCount=backup_count,
encoding='utf-8',
delay=0)
else:
handler = WatchedFileHandler(log_path, mode='a', encoding='utf-8', delay=0)
except (IOError, OSError):
logging.getLogger(__name__).warning(
'Failed to open log file, do you have permission to write to %s?', log_path
)
# Do not proceed with any more configuration since it will fail, we
# have the console logging already setup and the user should see
# the error.
return
handler.setLevel(level)
# Set the default console formatter config
if not log_format:
log_format = '%(asctime)s [%(name)-15s][%(levelname)-8s] %(message)s'
if not date_format:
date_format = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(log_format, datefmt=date_format)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
global __LOGFILE_CONFIGURED
global __LOGGING_LOGFILE_HANDLER
__LOGFILE_CONFIGURED = True
__LOGGING_LOGFILE_HANDLER = handler | Setup the logfile logger
Since version 0.10.6 we support logging to syslog, some examples:
tcp://localhost:514/LOG_USER
tcp://localhost/LOG_DAEMON
udp://localhost:5145/LOG_KERN
udp://localhost
file:///dev/log
file:///dev/log/LOG_SYSLOG
file:///dev/log/LOG_DAEMON
The above examples are self explanatory, but:
<file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
If you're thinking on doing remote logging you might also be thinking that
you could point salt's logging to the remote syslog. **Please Don't!**
An issue has been reported when doing this over TCP when the logged lines
get concatenated. See #3061.
The preferred way to do remote logging is setup a local syslog, point
salt's logging to the local syslog(unix socket is much faster) and then
have the local syslog forward the log messages to the remote syslog. | Below is the the instruction that describes the task:
### Input:
Setup the logfile logger
Since version 0.10.6 we support logging to syslog, some examples:
tcp://localhost:514/LOG_USER
tcp://localhost/LOG_DAEMON
udp://localhost:5145/LOG_KERN
udp://localhost
file:///dev/log
file:///dev/log/LOG_SYSLOG
file:///dev/log/LOG_DAEMON
The above examples are self explanatory, but:
<file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
If you're thinking on doing remote logging you might also be thinking that
you could point salt's logging to the remote syslog. **Please Don't!**
An issue has been reported when doing this over TCP when the logged lines
get concatenated. See #3061.
The preferred way to do remote logging is setup a local syslog, point
salt's logging to the local syslog(unix socket is much faster) and then
have the local syslog forward the log messages to the remote syslog.
### Response:
def setup_logfile_logger(log_path, log_level='error', log_format=None,
date_format=None, max_bytes=0, backup_count=0):
'''
Setup the logfile logger
Since version 0.10.6 we support logging to syslog, some examples:
tcp://localhost:514/LOG_USER
tcp://localhost/LOG_DAEMON
udp://localhost:5145/LOG_KERN
udp://localhost
file:///dev/log
file:///dev/log/LOG_SYSLOG
file:///dev/log/LOG_DAEMON
The above examples are self explanatory, but:
<file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
If you're thinking on doing remote logging you might also be thinking that
you could point salt's logging to the remote syslog. **Please Don't!**
An issue has been reported when doing this over TCP when the logged lines
get concatenated. See #3061.
The preferred way to do remote logging is setup a local syslog, point
salt's logging to the local syslog(unix socket is much faster) and then
have the local syslog forward the log messages to the remote syslog.
'''
if is_logfile_configured():
logging.getLogger(__name__).warning('Logfile logging already configured')
return
if log_path is None:
logging.getLogger(__name__).warning(
'log_path setting is set to `None`. Nothing else to do'
)
return
# Remove the temporary logging handler
__remove_temp_logging_handler()
if log_level is None:
log_level = 'warning'
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
parsed_log_path = urlparse(log_path)
root_logger = logging.getLogger()
if parsed_log_path.scheme in ('tcp', 'udp', 'file'):
syslog_opts = {
'facility': SysLogHandler.LOG_USER,
'socktype': socket.SOCK_DGRAM
}
if parsed_log_path.scheme == 'file' and parsed_log_path.path:
facility_name = parsed_log_path.path.split(os.sep)[-1].upper()
if not facility_name.startswith('LOG_'):
# The user is not specifying a syslog facility
facility_name = 'LOG_USER' # Syslog default
syslog_opts['address'] = parsed_log_path.path
else:
# The user has set a syslog facility, let's update the path to
# the logging socket
syslog_opts['address'] = os.sep.join(
parsed_log_path.path.split(os.sep)[:-1]
)
elif parsed_log_path.path:
# In case of udp or tcp with a facility specified
facility_name = parsed_log_path.path.lstrip(os.sep).upper()
if not facility_name.startswith('LOG_'):
# Logging facilities start with LOG_ if this is not the case
# fail right now!
raise RuntimeError(
'The syslog facility \'{0}\' is not known'.format(
facility_name
)
)
else:
# This is the case of udp or tcp without a facility specified
facility_name = 'LOG_USER' # Syslog default
facility = getattr(
SysLogHandler, facility_name, None
)
if facility is None:
# This python syslog version does not know about the user provided
# facility name
raise RuntimeError(
'The syslog facility \'{0}\' is not known'.format(
facility_name
)
)
syslog_opts['facility'] = facility
if parsed_log_path.scheme == 'tcp':
# tcp syslog support was only added on python versions >= 2.7
if sys.version_info < (2, 7):
raise RuntimeError(
'Python versions lower than 2.7 do not support logging '
'to syslog using tcp sockets'
)
syslog_opts['socktype'] = socket.SOCK_STREAM
if parsed_log_path.scheme in ('tcp', 'udp'):
syslog_opts['address'] = (
parsed_log_path.hostname,
parsed_log_path.port or logging.handlers.SYSLOG_UDP_PORT
)
if sys.version_info < (2, 7) or parsed_log_path.scheme == 'file':
# There's not socktype support on python versions lower than 2.7
syslog_opts.pop('socktype', None)
try:
# Et voilá! Finally our syslog handler instance
handler = SysLogHandler(**syslog_opts)
except socket.error as err:
logging.getLogger(__name__).error(
'Failed to setup the Syslog logging handler: %s', err
)
shutdown_multiprocessing_logging_listener()
sys.exit(2)
else:
# make sure, the logging directory exists and attempt to create it if necessary
log_dir = os.path.dirname(log_path)
if not os.path.exists(log_dir):
logging.getLogger(__name__).info(
'Log directory not found, trying to create it: %s', log_dir
)
try:
os.makedirs(log_dir, mode=0o700)
except OSError as ose:
logging.getLogger(__name__).warning(
'Failed to create directory for log file: %s (%s)', log_dir, ose
)
return
try:
# Logfile logging is UTF-8 on purpose.
# Since salt uses YAML and YAML uses either UTF-8 or UTF-16, if a
# user is not using plain ASCII, their system should be ready to
# handle UTF-8.
if max_bytes > 0:
handler = RotatingFileHandler(log_path,
mode='a',
maxBytes=max_bytes,
backupCount=backup_count,
encoding='utf-8',
delay=0)
else:
handler = WatchedFileHandler(log_path, mode='a', encoding='utf-8', delay=0)
except (IOError, OSError):
logging.getLogger(__name__).warning(
'Failed to open log file, do you have permission to write to %s?', log_path
)
# Do not proceed with any more configuration since it will fail, we
# have the console logging already setup and the user should see
# the error.
return
handler.setLevel(level)
# Set the default console formatter config
if not log_format:
log_format = '%(asctime)s [%(name)-15s][%(levelname)-8s] %(message)s'
if not date_format:
date_format = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(log_format, datefmt=date_format)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
global __LOGFILE_CONFIGURED
global __LOGGING_LOGFILE_HANDLER
__LOGFILE_CONFIGURED = True
__LOGGING_LOGFILE_HANDLER = handler |
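Stripped of the syslog plumbing, the file branch above reduces to choosing between RotatingFileHandler and WatchedFileHandler and attaching it to the root logger. A minimal stdlib sketch; the path, level, and size values are placeholders:

import logging
from logging.handlers import RotatingFileHandler, WatchedFileHandler

def attach_file_handler(log_path, level=logging.ERROR, max_bytes=0, backup_count=0):
    if max_bytes > 0:                      # size-based rotation handled by Python itself
        handler = RotatingFileHandler(log_path, mode='a', maxBytes=max_bytes,
                                      backupCount=backup_count, encoding='utf-8')
    else:                                  # follow external rotation (e.g. logrotate)
        handler = WatchedFileHandler(log_path, mode='a', encoding='utf-8')
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s [%(name)-15s][%(levelname)-8s] %(message)s', '%Y-%m-%d %H:%M:%S'))
    logging.getLogger().addHandler(handler)
    return handler

logging.getLogger().setLevel(logging.INFO)   # let INFO records reach the handler
attach_file_handler('/tmp/demo.log', level=logging.INFO, max_bytes=1024 * 1024, backup_count=3)
logging.getLogger('demo').info('file logging configured')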
def col(self):
"""Gives direct access to the columns only (useful for tab completion).
Convenient when working with ipython in combination with small DataFrames, since this gives tab-completion.
Columns can be accesed by there names, which are attributes. The attribues are currently expressions, so you can
do computations with them.
Example
>>> ds = vaex.example()
>>> df.plot(df.col.x, df.col.y)
"""
class ColumnList(object):
pass
data = ColumnList()
for name in self.get_column_names():
expression = getattr(self, name, None)
if not isinstance(expression, Expression):
expression = Expression(self, name)
setattr(data, name, expression)
return data | Gives direct access to the columns only (useful for tab completion).
Convenient when working with ipython in combination with small DataFrames, since this gives tab-completion.
Columns can be accesed by there names, which are attributes. The attribues are currently expressions, so you can
do computations with them.
Example
>>> ds = vaex.example()
>>> df.plot(df.col.x, df.col.y) | Below is the the instruction that describes the task:
### Input:
Gives direct access to the columns only (useful for tab completion).
Convenient when working with ipython in combination with small DataFrames, since this gives tab-completion.
Columns can be accesed by there names, which are attributes. The attribues are currently expressions, so you can
do computations with them.
Example
>>> ds = vaex.example()
>>> df.plot(df.col.x, df.col.y)
### Response:
def col(self):
"""Gives direct access to the columns only (useful for tab completion).
Convenient when working with ipython in combination with small DataFrames, since this gives tab-completion.
Columns can be accesed by there names, which are attributes. The attribues are currently expressions, so you can
do computations with them.
Example
>>> ds = vaex.example()
>>> df.plot(df.col.x, df.col.y)
"""
class ColumnList(object):
pass
data = ColumnList()
for name in self.get_column_names():
expression = getattr(self, name, None)
if not isinstance(expression, Expression):
expression = Expression(self, name)
setattr(data, name, expression)
return data |
def zero(duration: int, name: str = None) -> SamplePulse:
"""Generates zero-sampled `SamplePulse`.
Args:
duration: Duration of pulse. Must be greater than zero.
name: Name of pulse.
"""
return _sampled_zero_pulse(duration, name=name) | Generates zero-sampled `SamplePulse`.
Args:
duration: Duration of pulse. Must be greater than zero.
name: Name of pulse. | Below is the the instruction that describes the task:
### Input:
Generates zero-sampled `SamplePulse`.
Args:
duration: Duration of pulse. Must be greater than zero.
name: Name of pulse.
### Response:
def zero(duration: int, name: str = None) -> SamplePulse:
"""Generates zero-sampled `SamplePulse`.
Args:
duration: Duration of pulse. Must be greater than zero.
name: Name of pulse.
"""
return _sampled_zero_pulse(duration, name=name) |