code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k)
---|---|---|
def WriteClientSnapshot(self, snapshot):
"""Writes new client snapshot."""
client_id = snapshot.client_id
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
startup_info = snapshot.startup_info
snapshot.startup_info = None
ts = rdfvalue.RDFDatetime.Now()
history = self.clients.setdefault(client_id, {})
history[ts] = snapshot.SerializeToString()
history = self.startup_history.setdefault(client_id, {})
history[ts] = startup_info.SerializeToString()
snapshot.startup_info = startup_info | Writes new client snapshot. | Below is the instruction that describes the task:
### Input:
Writes new client snapshot.
### Response:
def WriteClientSnapshot(self, snapshot):
"""Writes new client snapshot."""
client_id = snapshot.client_id
if client_id not in self.metadatas:
raise db.UnknownClientError(client_id)
startup_info = snapshot.startup_info
snapshot.startup_info = None
ts = rdfvalue.RDFDatetime.Now()
history = self.clients.setdefault(client_id, {})
history[ts] = snapshot.SerializeToString()
history = self.startup_history.setdefault(client_id, {})
history[ts] = startup_info.SerializeToString()
snapshot.startup_info = startup_info |
def alter_retention_policy(self, name, database=None,
duration=None, replication=None,
default=None, shard_duration=None):
"""Modify an existing retention policy for a database.
:param name: the name of the retention policy to modify
:type name: str
:param database: the database for which the retention policy is
modified. Defaults to current client's database
:type database: str
:param duration: the new duration of the existing retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
and mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
respectively. For infinite retention, meaning the data will
never be deleted, use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the new replication of the existing
retention policy
:type replication: int
:param default: whether or not to set the modified policy as default
:type default: bool
:param shard_duration: the shard duration of the retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported and
mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
respectively. Infinite retention is not supported. As a workaround,
specify a "1000w" duration to achieve an extremely long shard group
duration.
The minimum shard group duration is 1 hour.
:type shard_duration: str
.. note:: at least one of duration, replication, or default flag
should be set. Otherwise the operation will fail.
"""
query_string = (
"ALTER RETENTION POLICY {0} ON {1}"
).format(quote_ident(name),
quote_ident(database or self._database))
if duration:
query_string += " DURATION {0}".format(duration)
if shard_duration:
query_string += " SHARD DURATION {0}".format(shard_duration)
if replication:
query_string += " REPLICATION {0}".format(replication)
if default is True:
query_string += " DEFAULT"
self.query(query_string, method="POST") | Modify an existing retention policy for a database.
:param name: the name of the retention policy to modify
:type name: str
:param database: the database for which the retention policy is
modified. Defaults to current client's database
:type database: str
:param duration: the new duration of the existing retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
and mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
respectively. For infinite retention, meaning the data will
never be deleted, use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the new replication of the existing
retention policy
:type replication: int
:param default: whether or not to set the modified policy as default
:type default: bool
:param shard_duration: the shard duration of the retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported and
mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
respectively. Infinite retention is not supported. As a workaround,
specify a "1000w" duration to achieve an extremely long shard group
duration.
The minimum shard group duration is 1 hour.
:type shard_duration: str
.. note:: at least one of duration, replication, or default flag
should be set. Otherwise the operation will fail. | Below is the instruction that describes the task:
### Input:
Modify an existing retention policy for a database.
:param name: the name of the retention policy to modify
:type name: str
:param database: the database for which the retention policy is
modified. Defaults to current client's database
:type database: str
:param duration: the new duration of the existing retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
and mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
respectively. For infinite retention, meaning the data will
never be deleted, use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the new replication of the existing
retention policy
:type replication: int
:param default: whether or not to set the modified policy as default
:type default: bool
:param shard_duration: the shard duration of the retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported and
mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
respectively. Infinite retention is not supported. As a workaround,
specify a "1000w" duration to achieve an extremely long shard group
duration.
The minimum shard group duration is 1 hour.
:type shard_duration: str
.. note:: at least one of duration, replication, or default flag
should be set. Otherwise the operation will fail.
### Response:
def alter_retention_policy(self, name, database=None,
duration=None, replication=None,
default=None, shard_duration=None):
"""Modify an existing retention policy for a database.
:param name: the name of the retention policy to modify
:type name: str
:param database: the database for which the retention policy is
modified. Defaults to current client's database
:type database: str
:param duration: the new duration of the existing retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported
and mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
respectively. For infinite retention, meaning the data will
never be deleted, use 'INF' for duration.
The minimum retention period is 1 hour.
:type duration: str
:param replication: the new replication of the existing
retention policy
:type replication: int
:param default: whether or not to set the modified policy as default
:type default: bool
:param shard_duration: the shard duration of the retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported and
mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
respectively. Infinite retention is not supported. As a workaround,
specify a "1000w" duration to achieve an extremely long shard group
duration.
The minimum shard group duration is 1 hour.
:type shard_duration: str
.. note:: at least one of duration, replication, or default flag
should be set. Otherwise the operation will fail.
"""
query_string = (
"ALTER RETENTION POLICY {0} ON {1}"
).format(quote_ident(name),
quote_ident(database or self._database))
if duration:
query_string += " DURATION {0}".format(duration)
if shard_duration:
query_string += " SHARD DURATION {0}".format(shard_duration)
if replication:
query_string += " REPLICATION {0}".format(replication)
if default is True:
query_string += " DEFAULT"
self.query(query_string, method="POST") |
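A hedged usage sketch for this method via influxdb-python's `InfluxDBClient`; the host, port, database, and policy name below are illustrative assumptions, not values taken from the code above:

```python
from influxdb import InfluxDBClient

# Illustrative connection settings -- adjust to your own InfluxDB server.
client = InfluxDBClient(host="localhost", port=8086, database="metrics")

# Lengthen the retention window to four weeks and make the policy the default.
# Per the note in the docstring, at least one of duration, replication, or
# default must be supplied, otherwise the ALTER statement fails.
client.alter_retention_policy(
    "one_week",          # hypothetical existing policy name
    database="metrics",
    duration="4w",
    replication=1,
    default=True,
)
```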
def _M2_dense(X, Y, weights=None, diag_only=False):
""" 2nd moment matrix using dense matrix computations.
This function is encapsulated such that we can make easy modifications of the basic algorithms
"""
if weights is not None:
if diag_only:
return np.sum(weights[:, None] * X * Y, axis=0)
else:
return np.dot((weights[:, None] * X).T, Y)
else:
if diag_only:
return np.sum(X * Y, axis=0)
else:
return np.dot(X.T, Y) | 2nd moment matrix using dense matrix computations.
This function is encapsulated such that we can make easy modifications of the basic algorithms | Below is the instruction that describes the task:
### Input:
2nd moment matrix using dense matrix computations.
This function is encapsulated such that we can make easy modifications of the basic algorithms
### Response:
def _M2_dense(X, Y, weights=None, diag_only=False):
""" 2nd moment matrix using dense matrix computations.
This function is encapsulated such that we can make easy modifications of the basic algorithms
"""
if weights is not None:
if diag_only:
return np.sum(weights[:, None] * X * Y, axis=0)
else:
return np.dot((weights[:, None] * X).T, Y)
else:
if diag_only:
return np.sum(X * Y, axis=0)
else:
return np.dot(X.T, Y) |
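As a quick check of what `_M2_dense` computes (not part of the original source), the weighted branch is the matrix `X^T diag(weights) Y`, and `diag_only=True` returns just its diagonal; the snippet assumes only NumPy:

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))
Y = rng.normal(size=(100, 5))
w = rng.uniform(size=100)

# Weighted second-moment matrix, as returned by the dense branch above.
M2 = np.dot((w[:, None] * X).T, Y)
assert np.allclose(M2, X.T @ np.diag(w) @ Y)

# diag_only=True corresponds to the diagonal of that matrix.
d = np.sum(w[:, None] * X * Y, axis=0)
assert np.allclose(d, np.diag(M2))
```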
def _build_new_virtualenv(self):
'''Build a new virtual environment if self._virtualenv is set to None'''
if self._virtualenv is None:
# virtualenv was "None" which means "do default"
self._pkg_venv = os.path.join(self._temp_workspace, 'venv')
self._venv_pip = 'bin/pip'
if sys.platform == 'win32' or sys.platform == 'cygwin':
self._venv_pip = 'Scripts\\pip.exe'
python_exe = self._python_executable()
proc = Popen(["virtualenv", "-p", python_exe,
self._pkg_venv], stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
LOG.debug("Virtualenv stdout: %s" % stdout)
LOG.debug("Virtualenv stderr: %s" % stderr)
if proc.returncode != 0:
raise Exception('virtualenv returned unsuccessfully')
else:
raise Exception('cannot build a new virtualenv when asked to omit') | Build a new virtual environment if self._virtualenv is set to None | Below is the instruction that describes the task:
### Input:
Build a new virtual environment if self._virtualenv is set to None
### Response:
def _build_new_virtualenv(self):
'''Build a new virtual environment if self._virtualenv is set to None'''
if self._virtualenv is None:
# virtualenv was "None" which means "do default"
self._pkg_venv = os.path.join(self._temp_workspace, 'venv')
self._venv_pip = 'bin/pip'
if sys.platform == 'win32' or sys.platform == 'cygwin':
self._venv_pip = 'Scripts\\pip.exe'
python_exe = self._python_executable()
proc = Popen(["virtualenv", "-p", python_exe,
self._pkg_venv], stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
LOG.debug("Virtualenv stdout: %s" % stdout)
LOG.debug("Virtualenv stderr: %s" % stderr)
if proc.returncode != 0:
raise Exception('virtualenv returned unsuccessfully')
else:
raise Exception('cannot build a new virtualenv when asked to omit') |
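A minimal alternative sketch using only the standard library's `venv` module instead of the external `virtualenv` command; the workspace path is a made-up placeholder, and this is not how the class above actually builds its environment:

```python
import os
import sys
import venv

workspace = "/tmp/pkg_workspace"  # hypothetical temp workspace
pkg_venv = os.path.join(workspace, "venv")

# Create an isolated environment with pip installed into it.
venv.EnvBuilder(with_pip=True).create(pkg_venv)

# Locate pip in a cross-platform way, mirroring the bin/ vs. Scripts/ switch above.
if sys.platform in ("win32", "cygwin"):
    venv_pip = os.path.join(pkg_venv, "Scripts", "pip.exe")
else:
    venv_pip = os.path.join(pkg_venv, "bin", "pip")
```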
def simulate_custom_policy(PolicyInputList=None, ActionNames=None, ResourceArns=None, ResourcePolicy=None, ResourceOwner=None, CallerArn=None, ContextEntries=None, ResourceHandlingOption=None, MaxItems=None, Marker=None):
"""
Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API actions and AWS resources to determine the policies' effective permissions. The policies are provided as strings.
The simulation does not perform the API actions; it only checks the authorization to determine if the simulated policies allow or deny the actions.
If you want to simulate existing policies attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.
Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy .
If the output is long, you can use MaxItems and Marker parameters to paginate the results.
See also: AWS API Documentation
:example: response = client.simulate_custom_policy(
PolicyInputList=[
'string',
],
ActionNames=[
'string',
],
ResourceArns=[
'string',
],
ResourcePolicy='string',
ResourceOwner='string',
CallerArn='string',
ContextEntries=[
{
'ContextKeyName': 'string',
'ContextKeyValues': [
'string',
],
'ContextKeyType': 'string'|'stringList'|'numeric'|'numericList'|'boolean'|'booleanList'|'ip'|'ipList'|'binary'|'binaryList'|'date'|'dateList'
},
],
ResourceHandlingOption='string',
MaxItems=123,
Marker='string'
)
:type PolicyInputList: list
:param PolicyInputList: [REQUIRED]
A list of policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy. Do not include any resource-based policies in this parameter. Any resource-based policy must be submitted with the ResourcePolicy parameter. The policies cannot be 'scope-down' policies, such as you could include in a call to GetFederationToken or one of the AssumeRole APIs to restrict what a user can do while using the temporary credentials.
The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D).
(string) --
:type ActionNames: list
:param ActionNames: [REQUIRED]
A list of names of API actions to evaluate in the simulation. Each action is evaluated against each resource. Each action must include the service identifier, such as iam:CreateUser .
(string) --
:type ResourceArns: list
:param ResourceArns: A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response.
The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.
If you include a ResourcePolicy , then it must be applicable to all of the resources included in the simulation or you receive an invalid input error.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference .
(string) --
:type ResourcePolicy: string
:param ResourcePolicy: A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.
The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D).
:type ResourceOwner: string
:param ResourceOwner: An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn . This parameter is required only if you specify a resource-based policy and account that owns the resource is different from the account that owns the simulated calling user CallerArn .
:type CallerArn: string
:param CallerArn: The ARN of the IAM user that you want to use as the simulated caller of the APIs. CallerArn is required if you include a ResourcePolicy so that the policy's Principal element has a value to use in evaluating the policy.
You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.
:type ContextEntries: list
:param ContextEntries: A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated in one of the simulated IAM permission policies, the corresponding value is supplied.
(dict) --Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.
This data type is used as an input parameter to `` SimulateCustomPolicy `` and `` SimulatePrincipalPolicy `` .
ContextKeyName (string) --The full name of a condition context key, including the service prefix. For example, aws:SourceIp or s3:VersionId .
ContextKeyValues (list) --The value (or values, if the condition context key supports multiple values) to provide to the simulation for use when the key is referenced by a Condition element in an input policy.
(string) --
ContextKeyType (string) --The data type of the value (or values) specified in the ContextKeyValues parameter.
:type ResourceHandlingOption: string
:param ResourceHandlingOption: Specifies the type of simulation to run. Different APIs that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.
Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the AWS EC2 User Guide .
EC2-Classic-InstanceStore instance, image, security-group
EC2-Classic-EBS instance, image, security-group, volume
EC2-VPC-InstanceStore instance, image, security-group, network-interface
EC2-VPC-InstanceStore-Subnet instance, image, security-group, network-interface, subnet
EC2-VPC-EBS instance, image, security-group, network-interface, volume
EC2-VPC-EBS-Subnet instance, image, security-group, network-interface, subnet, volume
:type MaxItems: integer
:param MaxItems: (Optional) Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true .
If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.
:type Marker: string
:param Marker: Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.
:rtype: dict
:return: {
'EvaluationResults': [
{
'EvalActionName': 'string',
'EvalResourceName': 'string',
'EvalDecision': 'allowed'|'explicitDeny'|'implicitDeny',
'MatchedStatements': [
{
'SourcePolicyId': 'string',
'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none',
'StartPosition': {
'Line': 123,
'Column': 123
},
'EndPosition': {
'Line': 123,
'Column': 123
}
},
],
'MissingContextValues': [
'string',
],
'OrganizationsDecisionDetail': {
'AllowedByOrganizations': True|False
},
'EvalDecisionDetails': {
'string': 'allowed'|'explicitDeny'|'implicitDeny'
},
'ResourceSpecificResults': [
{
'EvalResourceName': 'string',
'EvalResourceDecision': 'allowed'|'explicitDeny'|'implicitDeny',
'MatchedStatements': [
{
'SourcePolicyId': 'string',
'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none',
'StartPosition': {
'Line': 123,
'Column': 123
},
'EndPosition': {
'Line': 123,
'Column': 123
}
},
],
'MissingContextValues': [
'string',
],
'EvalDecisionDetails': {
'string': 'allowed'|'explicitDeny'|'implicitDeny'
}
},
]
},
],
'IsTruncated': True|False,
'Marker': 'string'
}
:returns:
(string) --
"""
pass | Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API actions and AWS resources to determine the policies' effective permissions. The policies are provided as strings.
The simulation does not perform the API actions; it only checks the authorization to determine if the simulated policies allow or deny the actions.
If you want to simulate existing policies attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.
Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy .
If the output is long, you can use MaxItems and Marker parameters to paginate the results.
See also: AWS API Documentation
:example: response = client.simulate_custom_policy(
PolicyInputList=[
'string',
],
ActionNames=[
'string',
],
ResourceArns=[
'string',
],
ResourcePolicy='string',
ResourceOwner='string',
CallerArn='string',
ContextEntries=[
{
'ContextKeyName': 'string',
'ContextKeyValues': [
'string',
],
'ContextKeyType': 'string'|'stringList'|'numeric'|'numericList'|'boolean'|'booleanList'|'ip'|'ipList'|'binary'|'binaryList'|'date'|'dateList'
},
],
ResourceHandlingOption='string',
MaxItems=123,
Marker='string'
)
:type PolicyInputList: list
:param PolicyInputList: [REQUIRED]
A list of policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy. Do not include any resource-based policies in this parameter. Any resource-based policy must be submitted with the ResourcePolicy parameter. The policies cannot be 'scope-down' policies, such as you could include in a call to GetFederationToken or one of the AssumeRole APIs to restrict what a user can do while using the temporary credentials.
The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D).
(string) --
:type ActionNames: list
:param ActionNames: [REQUIRED]
A list of names of API actions to evaluate in the simulation. Each action is evaluated against each resource. Each action must include the service identifier, such as iam:CreateUser .
(string) --
:type ResourceArns: list
:param ResourceArns: A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response.
The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.
If you include a ResourcePolicy , then it must be applicable to all of the resources included in the simulation or you receive an invalid input error.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference .
(string) --
:type ResourcePolicy: string
:param ResourcePolicy: A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.
The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D).
:type ResourceOwner: string
:param ResourceOwner: An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn . This parameter is required only if you specify a resource-based policy and account that owns the resource is different from the account that owns the simulated calling user CallerArn .
:type CallerArn: string
:param CallerArn: The ARN of the IAM user that you want to use as the simulated caller of the APIs. CallerArn is required if you include a ResourcePolicy so that the policy's Principal element has a value to use in evaluating the policy.
You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.
:type ContextEntries: list
:param ContextEntries: A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated in one of the simulated IAM permission policies, the corresponding value is supplied.
(dict) --Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.
This data type is used as an input parameter to `` SimulateCustomPolicy `` and `` SimulatePrincipalPolicy `` .
ContextKeyName (string) --The full name of a condition context key, including the service prefix. For example, aws:SourceIp or s3:VersionId .
ContextKeyValues (list) --The value (or values, if the condition context key supports multiple values) to provide to the simulation for use when the key is referenced by a Condition element in an input policy.
(string) --
ContextKeyType (string) --The data type of the value (or values) specified in the ContextKeyValues parameter.
:type ResourceHandlingOption: string
:param ResourceHandlingOption: Specifies the type of simulation to run. Different APIs that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.
Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the AWS EC2 User Guide .
EC2-Classic-InstanceStore instance, image, security-group
EC2-Classic-EBS instance, image, security-group, volume
EC2-VPC-InstanceStore instance, image, security-group, network-interface
EC2-VPC-InstanceStore-Subnet instance, image, security-group, network-interface, subnet
EC2-VPC-EBS instance, image, security-group, network-interface, volume
EC2-VPC-EBS-Subnet instance, image, security-group, network-interface, subnet, volume
:type MaxItems: integer
:param MaxItems: (Optional) Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true .
If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.
:type Marker: string
:param Marker: Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.
:rtype: dict
:return: {
'EvaluationResults': [
{
'EvalActionName': 'string',
'EvalResourceName': 'string',
'EvalDecision': 'allowed'|'explicitDeny'|'implicitDeny',
'MatchedStatements': [
{
'SourcePolicyId': 'string',
'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none',
'StartPosition': {
'Line': 123,
'Column': 123
},
'EndPosition': {
'Line': 123,
'Column': 123
}
},
],
'MissingContextValues': [
'string',
],
'OrganizationsDecisionDetail': {
'AllowedByOrganizations': True|False
},
'EvalDecisionDetails': {
'string': 'allowed'|'explicitDeny'|'implicitDeny'
},
'ResourceSpecificResults': [
{
'EvalResourceName': 'string',
'EvalResourceDecision': 'allowed'|'explicitDeny'|'implicitDeny',
'MatchedStatements': [
{
'SourcePolicyId': 'string',
'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none',
'StartPosition': {
'Line': 123,
'Column': 123
},
'EndPosition': {
'Line': 123,
'Column': 123
}
},
],
'MissingContextValues': [
'string',
],
'EvalDecisionDetails': {
'string': 'allowed'|'explicitDeny'|'implicitDeny'
}
},
]
},
],
'IsTruncated': True|False,
'Marker': 'string'
}
:returns:
(string) -- | Below is the instruction that describes the task:
### Input:
Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API actions and AWS resources to determine the policies' effective permissions. The policies are provided as strings.
The simulation does not perform the API actions; it only checks the authorization to determine if the simulated policies allow or deny the actions.
If you want to simulate existing policies attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.
Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy .
If the output is long, you can use MaxItems and Marker parameters to paginate the results.
See also: AWS API Documentation
:example: response = client.simulate_custom_policy(
PolicyInputList=[
'string',
],
ActionNames=[
'string',
],
ResourceArns=[
'string',
],
ResourcePolicy='string',
ResourceOwner='string',
CallerArn='string',
ContextEntries=[
{
'ContextKeyName': 'string',
'ContextKeyValues': [
'string',
],
'ContextKeyType': 'string'|'stringList'|'numeric'|'numericList'|'boolean'|'booleanList'|'ip'|'ipList'|'binary'|'binaryList'|'date'|'dateList'
},
],
ResourceHandlingOption='string',
MaxItems=123,
Marker='string'
)
:type PolicyInputList: list
:param PolicyInputList: [REQUIRED]
A list of policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy. Do not include any resource-based policies in this parameter. Any resource-based policy must be submitted with the ResourcePolicy parameter. The policies cannot be 'scope-down' policies, such as you could include in a call to GetFederationToken or one of the AssumeRole APIs to restrict what a user can do while using the temporary credentials.
The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D).
(string) --
:type ActionNames: list
:param ActionNames: [REQUIRED]
A list of names of API actions to evaluate in the simulation. Each action is evaluated against each resource. Each action must include the service identifier, such as iam:CreateUser .
(string) --
:type ResourceArns: list
:param ResourceArns: A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response.
The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.
If you include a ResourcePolicy , then it must be applicable to all of the resources included in the simulation or you receive an invalid input error.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference .
(string) --
:type ResourcePolicy: string
:param ResourcePolicy: A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.
The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D).
:type ResourceOwner: string
:param ResourceOwner: An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn . This parameter is required only if you specify a resource-based policy and account that owns the resource is different from the account that owns the simulated calling user CallerArn .
:type CallerArn: string
:param CallerArn: The ARN of the IAM user that you want to use as the simulated caller of the APIs. CallerArn is required if you include a ResourcePolicy so that the policy's Principal element has a value to use in evaluating the policy.
You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.
:type ContextEntries: list
:param ContextEntries: A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated in one of the simulated IAM permission policies, the corresponding value is supplied.
(dict) --Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.
This data type is used as an input parameter to `` SimulateCustomPolicy `` and `` SimulatePrincipalPolicy `` .
ContextKeyName (string) --The full name of a condition context key, including the service prefix. For example, aws:SourceIp or s3:VersionId .
ContextKeyValues (list) --The value (or values, if the condition context key supports multiple values) to provide to the simulation for use when the key is referenced by a Condition element in an input policy.
(string) --
ContextKeyType (string) --The data type of the value (or values) specified in the ContextKeyValues parameter.
:type ResourceHandlingOption: string
:param ResourceHandlingOption: Specifies the type of simulation to run. Different APIs that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.
Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the AWS EC2 User Guide .
EC2-Classic-InstanceStore instance, image, security-group
EC2-Classic-EBS instance, image, security-group, volume
EC2-VPC-InstanceStore instance, image, security-group, network-interface
EC2-VPC-InstanceStore-Subnet instance, image, security-group, network-interface, subnet
EC2-VPC-EBS instance, image, security-group, network-interface, volume
EC2-VPC-EBS-Subnet instance, image, security-group, network-interface, subnet, volume
:type MaxItems: integer
:param MaxItems: (Optional) Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true .
If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.
:type Marker: string
:param Marker: Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.
:rtype: dict
:return: {
'EvaluationResults': [
{
'EvalActionName': 'string',
'EvalResourceName': 'string',
'EvalDecision': 'allowed'|'explicitDeny'|'implicitDeny',
'MatchedStatements': [
{
'SourcePolicyId': 'string',
'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none',
'StartPosition': {
'Line': 123,
'Column': 123
},
'EndPosition': {
'Line': 123,
'Column': 123
}
},
],
'MissingContextValues': [
'string',
],
'OrganizationsDecisionDetail': {
'AllowedByOrganizations': True|False
},
'EvalDecisionDetails': {
'string': 'allowed'|'explicitDeny'|'implicitDeny'
},
'ResourceSpecificResults': [
{
'EvalResourceName': 'string',
'EvalResourceDecision': 'allowed'|'explicitDeny'|'implicitDeny',
'MatchedStatements': [
{
'SourcePolicyId': 'string',
'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none',
'StartPosition': {
'Line': 123,
'Column': 123
},
'EndPosition': {
'Line': 123,
'Column': 123
}
},
],
'MissingContextValues': [
'string',
],
'EvalDecisionDetails': {
'string': 'allowed'|'explicitDeny'|'implicitDeny'
}
},
]
},
],
'IsTruncated': True|False,
'Marker': 'string'
}
:returns:
(string) --
### Response:
def simulate_custom_policy(PolicyInputList=None, ActionNames=None, ResourceArns=None, ResourcePolicy=None, ResourceOwner=None, CallerArn=None, ContextEntries=None, ResourceHandlingOption=None, MaxItems=None, Marker=None):
"""
Simulate how a set of IAM policies and optionally a resource-based policy works with a list of API actions and AWS resources to determine the policies' effective permissions. The policies are provided as strings.
The simulation does not perform the API actions; it only checks the authorization to determine if the simulated policies allow or deny the actions.
If you want to simulate existing policies attached to an IAM user, group, or role, use SimulatePrincipalPolicy instead.
Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy .
If the output is long, you can use MaxItems and Marker parameters to paginate the results.
See also: AWS API Documentation
:example: response = client.simulate_custom_policy(
PolicyInputList=[
'string',
],
ActionNames=[
'string',
],
ResourceArns=[
'string',
],
ResourcePolicy='string',
ResourceOwner='string',
CallerArn='string',
ContextEntries=[
{
'ContextKeyName': 'string',
'ContextKeyValues': [
'string',
],
'ContextKeyType': 'string'|'stringList'|'numeric'|'numericList'|'boolean'|'booleanList'|'ip'|'ipList'|'binary'|'binaryList'|'date'|'dateList'
},
],
ResourceHandlingOption='string',
MaxItems=123,
Marker='string'
)
:type PolicyInputList: list
:param PolicyInputList: [REQUIRED]
A list of policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy. Do not include any resource-based policies in this parameter. Any resource-based policy must be submitted with the ResourcePolicy parameter. The policies cannot be 'scope-down' policies, such as you could include in a call to GetFederationToken or one of the AssumeRole APIs to restrict what a user can do while using the temporary credentials.
The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D).
(string) --
:type ActionNames: list
:param ActionNames: [REQUIRED]
A list of names of API actions to evaluate in the simulation. Each action is evaluated against each resource. Each action must include the service identifier, such as iam:CreateUser .
(string) --
:type ResourceArns: list
:param ResourceArns: A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response.
The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter.
If you include a ResourcePolicy , then it must be applicable to all of the resources included in the simulation or you receive an invalid input error.
For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference .
(string) --
:type ResourcePolicy: string
:param ResourcePolicy: A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation.
The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D).
:type ResourceOwner: string
:param ResourceOwner: An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn . This parameter is required only if you specify a resource-based policy and account that owns the resource is different from the account that owns the simulated calling user CallerArn .
:type CallerArn: string
:param CallerArn: The ARN of the IAM user that you want to use as the simulated caller of the APIs. CallerArn is required if you include a ResourcePolicy so that the policy's Principal element has a value to use in evaluating the policy.
You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal.
:type ContextEntries: list
:param ContextEntries: A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated in one of the simulated IAM permission policies, the corresponding value is supplied.
(dict) --Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies.
This data type is used as an input parameter to `` SimulateCustomPolicy `` and `` SimulatePrincipalPolicy `` .
ContextKeyName (string) --The full name of a condition context key, including the service prefix. For example, aws:SourceIp or s3:VersionId .
ContextKeyValues (list) --The value (or values, if the condition context key supports multiple values) to provide to the simulation for use when the key is referenced by a Condition element in an input policy.
(string) --
ContextKeyType (string) --The data type of the value (or values) specified in the ContextKeyValues parameter.
:type ResourceHandlingOption: string
:param ResourceHandlingOption: Specifies the type of simulation to run. Different APIs that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation.
Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the AWS EC2 User Guide .
EC2-Classic-InstanceStore instance, image, security-group
EC2-Classic-EBS instance, image, security-group, volume
EC2-VPC-InstanceStore instance, image, security-group, network-interface
EC2-VPC-InstanceStore-Subnet instance, image, security-group, network-interface, subnet
EC2-VPC-EBS instance, image, security-group, network-interface, volume
EC2-VPC-EBS-Subnet instance, image, security-group, network-interface, subnet, volume
:type MaxItems: integer
:param MaxItems: (Optional) Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true .
If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from.
:type Marker: string
:param Marker: Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start.
:rtype: dict
:return: {
'EvaluationResults': [
{
'EvalActionName': 'string',
'EvalResourceName': 'string',
'EvalDecision': 'allowed'|'explicitDeny'|'implicitDeny',
'MatchedStatements': [
{
'SourcePolicyId': 'string',
'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none',
'StartPosition': {
'Line': 123,
'Column': 123
},
'EndPosition': {
'Line': 123,
'Column': 123
}
},
],
'MissingContextValues': [
'string',
],
'OrganizationsDecisionDetail': {
'AllowedByOrganizations': True|False
},
'EvalDecisionDetails': {
'string': 'allowed'|'explicitDeny'|'implicitDeny'
},
'ResourceSpecificResults': [
{
'EvalResourceName': 'string',
'EvalResourceDecision': 'allowed'|'explicitDeny'|'implicitDeny',
'MatchedStatements': [
{
'SourcePolicyId': 'string',
'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none',
'StartPosition': {
'Line': 123,
'Column': 123
},
'EndPosition': {
'Line': 123,
'Column': 123
}
},
],
'MissingContextValues': [
'string',
],
'EvalDecisionDetails': {
'string': 'allowed'|'explicitDeny'|'implicitDeny'
}
},
]
},
],
'IsTruncated': True|False,
'Marker': 'string'
}
:returns:
(string) --
"""
pass |
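A hedged example of calling this operation through boto3; the policy document and action names are illustrative, and error handling and pagination are omitted:

```python
import json
import boto3

iam = boto3.client("iam")

# Hypothetical identity-based policy to evaluate -- not taken from the docs above.
policy = {
    "Version": "2012-10-17",
    "Statement": [
        {"Effect": "Allow", "Action": "s3:ListBucket", "Resource": "*"}
    ],
}

response = iam.simulate_custom_policy(
    PolicyInputList=[json.dumps(policy)],
    ActionNames=["s3:ListBucket", "s3:GetObject"],
)

for result in response["EvaluationResults"]:
    print(result["EvalActionName"], result["EvalDecision"])
```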
def canny(img, threshold1=255/3, threshold2=255, **kwargs):
""" canny edge """
import cv2
# edges=None, apertureSize=None, L2gradient=None
if img.ndim <= 3:
edge = cv2.Canny(img, threshold1, threshold2, **kwargs)
if edge.ndim == 2:
edge = np.expand_dims(edge, 2)
elif img.ndim == 4:
# batch
edge = np.asarray([cv2.Canny(i, threshold1, threshold2, **kwargs) for i in img])
if edge.ndim == 3:
edge = np.expand_dims(edge, 3)
else:
raise ValueError('above 5d?')
return edge | canny edge | Below is the instruction that describes the task:
### Input:
canny edge
### Response:
def canny(img, threshold1=255/3, threshold2=255, **kwargs):
""" canny edge """
import cv2
# edges=None, apertureSize=None, L2gradient=None
if img.ndim <= 3:
edge = cv2.Canny(img, threshold1, threshold2, **kwargs)
if edge.ndim == 2:
edge = np.expand_dims(edge, 2)
elif img.ndim == 4:
# batch
edge = np.asarray([cv2.Canny(i, threshold1, threshold2, **kwargs) for i in img])
if edge.ndim == 3:
edge = np.expand_dims(edge, 3)
else:
raise ValueError('above 5d?')
return edge |
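A short usage sketch for the helper above, assuming OpenCV (`cv2`) and NumPy are installed; the synthetic images are placeholders:

```python
import numpy as np

# Single grayscale image: (H, W) uint8 -> (H, W, 1) edge map.
img = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)
edges = canny(img, threshold1=85, threshold2=255)
print(edges.shape)        # (64, 64, 1)

# Batch of single-channel images: (N, H, W, 1) -> (N, H, W, 1).
batch = np.random.randint(0, 256, size=(4, 64, 64, 1), dtype=np.uint8)
batch_edges = canny(batch)
print(batch_edges.shape)  # (4, 64, 64, 1)
```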
def remove_permission(self, file_id, permission_id):
"""Deletes a permission from a file.
:param file_id: a spreadsheet ID (aka file ID.)
:type file_id: str
:param permission_id: an ID for the permission.
:type permission_id: str
"""
url = '{0}/{1}/permissions/{2}'.format(
DRIVE_FILES_API_V2_URL,
file_id,
permission_id
)
self.request('delete', url) | Deletes a permission from a file.
:param file_id: a spreadsheet ID (aka file ID.)
:type file_id: str
:param permission_id: an ID for the permission.
:type permission_id: str | Below is the instruction that describes the task:
### Input:
Deletes a permission from a file.
:param file_id: a spreadsheet ID (aka file ID.)
:type file_id: str
:param permission_id: an ID for the permission.
:type permission_id: str
### Response:
def remove_permission(self, file_id, permission_id):
"""Deletes a permission from a file.
:param file_id: a spreadsheet ID (aka file ID.)
:type file_id: str
:param permission_id: an ID for the permission.
:type permission_id: str
"""
url = '{0}/{1}/permissions/{2}'.format(
DRIVE_FILES_API_V2_URL,
file_id,
permission_id
)
self.request('delete', url) |
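A hedged flow in gspread (which this method appears to belong to): list the file's permissions first, then revoke the matching one. The spreadsheet ID and email are placeholders, and a configured service-account credential is assumed:

```python
import gspread

# Assumes a service_account.json credential set up for gspread.
gc = gspread.service_account()

file_id = "your-spreadsheet-id"  # placeholder
for perm in gc.list_permissions(file_id):
    if perm.get("emailAddress") == "someone@example.com":
        gc.remove_permission(file_id, perm["id"])
```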
def main(port, export, css, files):
"""
\b
Examples:
$ moo README.md # live preview README.md
$ moo -e *.md # export all markdown files
$ moo --no-css -e README.md # export README.md without CSS
$ cat README.md | moo -e - | less # export STDIN to STDOUT
"""
options = { 'css': css, 'port': port }
try:
if not export:
if len(files) != 1:
error("please specify just one file to preview")
preview(files[0], options)
else:
export_files(files, options)
except KeyboardInterrupt:
sys.exit(0)
except Exception as exc:
die() | \b
Examples:
$ moo README.md # live preview README.md
$ moo -e *.md # export all markdown files
$ moo --no-css -e README.md # export README.md without CSS
$ cat README.md | moo -e - | less # export STDIN to STDOUT | Below is the instruction that describes the task:
### Input:
\b
Examples:
$ moo README.md # live preview README.md
$ moo -e *.md # export all markdown files
$ moo --no-css -e README.md # export README.md without CSS
$ cat README.md | moo -e - | less # export STDIN to STDOUT
### Response:
def main(port, export, css, files):
"""
\b
Examples:
$ moo README.md # live preview README.md
$ moo -e *.md # export all markdown files
$ moo --no-css -e README.md # export README.md without CSS
$ cat README.md | moo -e - | less # export STDIN to STDOUT
"""
options = { 'css': css, 'port': port }
try:
if not export:
if len(files) != 1:
error("please specify just one file to preview")
preview(files[0], options)
else:
export_files(files, options)
except KeyboardInterrupt:
sys.exit(0)
except Exception as exc:
die() |
def update_letter(self, letter_id, letter_dict):
"""
Updates a letter
:param letter_id: the letter id
:param letter_dict: dict
:return: dict
"""
return self._create_put_request(
resource=LETTERS,
billomat_id=letter_id,
send_data=letter_dict
) | Updates a letter
:param letter_id: the letter id
:param letter_dict: dict
:return: dict | Below is the instruction that describes the task:
### Input:
Updates a letter
:param letter_id: the letter id
:param letter_dict: dict
:return: dict
### Response:
def update_letter(self, letter_id, letter_dict):
"""
Updates a letter
:param letter_id: the letter id
:param letter_dict: dict
:return: dict
"""
return self._create_put_request(
resource=LETTERS,
billomat_id=letter_id,
send_data=letter_dict
) |
def Compile(self, filter_implemention):
"""Returns the data_store filter implementation from the attribute."""
return self.operator_method(self.attribute_obj, filter_implemention,
*self.args) | Returns the data_store filter implementation from the attribute. | Below is the instruction that describes the task:
### Input:
Returns the data_store filter implementation from the attribute.
### Response:
def Compile(self, filter_implemention):
"""Returns the data_store filter implementation from the attribute."""
return self.operator_method(self.attribute_obj, filter_implemention,
*self.args) |
def _generate_next_task(self):
"""
submit consumer framework defined task
:return:
"""
if self.consumer_status == ConsumerStatus.INITIALIZING:
self.current_task_exist = True
self.task_future = self.executor.submit(consumer_initialize_task, self.processor, self.log_client,
self.shard_id, self.cursor_position, self.cursor_start_time, self.cursor_end_time)
elif self.consumer_status == ConsumerStatus.PROCESSING:
if self.last_fetch_log_group is not None:
self.checkpoint_tracker.set_cursor(self.last_fetch_log_group.end_cursor)
self.current_task_exist = True
# must deep copy because some revision will happen
last_fetch_log_group = copy.deepcopy(self.last_fetch_log_group)
self.last_fetch_log_group = None
if self.last_fetch_count > 0:
self.task_future = self.executor.submit(consumer_process_task, self.processor,
last_fetch_log_group.fetched_log_group_list,
self.checkpoint_tracker)
elif self.consumer_status == ConsumerStatus.SHUTTING_DOWN:
self.current_task_exist = True
self.logger.info("start to cancel fetch job")
self.cancel_current_fetch()
self.task_future = self.executor.submit(consumer_shutdown_task, self.processor, self.checkpoint_tracker) | submit consumer framework defined task
:return: | Below is the instruction that describes the task:
### Input:
submit consumer framework defined task
:return:
### Response:
def _generate_next_task(self):
"""
submit consumer framework defined task
:return:
"""
if self.consumer_status == ConsumerStatus.INITIALIZING:
self.current_task_exist = True
self.task_future = self.executor.submit(consumer_initialize_task, self.processor, self.log_client,
self.shard_id, self.cursor_position, self.cursor_start_time, self.cursor_end_time)
elif self.consumer_status == ConsumerStatus.PROCESSING:
if self.last_fetch_log_group is not None:
self.checkpoint_tracker.set_cursor(self.last_fetch_log_group.end_cursor)
self.current_task_exist = True
# must deep copy because some revision will happen
last_fetch_log_group = copy.deepcopy(self.last_fetch_log_group)
self.last_fetch_log_group = None
if self.last_fetch_count > 0:
self.task_future = self.executor.submit(consumer_process_task, self.processor,
last_fetch_log_group.fetched_log_group_list,
self.checkpoint_tracker)
elif self.consumer_status == ConsumerStatus.SHUTTING_DOWN:
self.current_task_exist = True
self.logger.info("start to cancel fetch job")
self.cancel_current_fetch()
self.task_future = self.executor.submit(consumer_shutdown_task, self.processor, self.checkpoint_tracker) |
def send_packet(self, packet):
"""Send a UDP packet along the wire."""
data = json.dumps(packet)
byte = data.encode('utf-8')
self.udp_socket.sendto(byte, (self._host, self._udp_port)) | Send a UDP packet along the wire. | Below is the instruction that describes the task:
### Input:
Send a UDP packet along the wire.
### Response:
def send_packet(self, packet):
"""Send a UDP packet along the wire."""
data = json.dumps(packet)
byte = data.encode('utf-8')
self.udp_socket.sendto(byte, (self._host, self._udp_port)) |
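Because send_packet above is just json.dumps plus a single UDP datagram, a matching receiver makes the wire format concrete. The sketch below is an illustration, not part of the library: the host, port, and payload are placeholder values that would have to match (self._host, self._udp_port).
# Hedged sketch of a receiver for the JSON-over-UDP packets produced by send_packet.
import json
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 9999))             # placeholder; must match the sender's host/port
data, addr = sock.recvfrom(65535)          # one datagram per send_packet() call
packet = json.loads(data.decode("utf-8"))  # reverses the sender's dumps/encode
print(addr, packet)
Each send_packet call maps to exactly one datagram, so there is no framing to handle on the receiving side.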
def load_font(self, prefix, ttf_filename, charmap_filename, directory=None):
"""Loads a font file and the associated charmap.
If ``directory`` is None, the files will be looked for in ``./fonts/``.
Parameters
----------
prefix: str
Prefix string to be used when accessing a given font set
ttf_filename: str
Ttf font filename
charmap_filename: str
Charmap filename
directory: str or None, optional
Directory for font and charmap files
"""
def hook(obj):
result = {}
for key in obj:
result[key] = unichr(int(obj[key], 16))
return result
if directory is None:
directory = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'fonts')
# Load font
if QApplication.instance() is not None:
id_ = QFontDatabase.addApplicationFont(os.path.join(directory,
ttf_filename))
loadedFontFamilies = QFontDatabase.applicationFontFamilies(id_)
if(loadedFontFamilies):
self.fontname[prefix] = loadedFontFamilies[0]
else:
raise FontError(u"Font at '{0}' appears to be empty. "
"If you are on Windows 10, please read "
"https://support.microsoft.com/"
"en-us/kb/3053676 "
"to know how to prevent Windows from blocking "
"the fonts that come with QtAwesome.".format(
os.path.join(directory, ttf_filename)))
with open(os.path.join(directory, charmap_filename), 'r') as codes:
self.charmap[prefix] = json.load(codes, object_hook=hook)
# Verify that vendorized fonts are not corrupt
if not SYSTEM_FONTS:
ttf_hash = MD5_HASHES.get(ttf_filename, None)
if ttf_hash is not None:
hasher = hashlib.md5()
with open(os.path.join(directory, ttf_filename),
'rb') as f:
content = f.read()
hasher.update(content)
ttf_calculated_hash_code = hasher.hexdigest()
if ttf_calculated_hash_code != ttf_hash:
raise FontError(u"Font is corrupt at: '{0}'".format(
os.path.join(directory, ttf_filename))) | Loads a font file and the associated charmap.
If ``directory`` is None, the files will be looked for in ``./fonts/``.
Parameters
----------
prefix: str
Prefix string to be used when accessing a given font set
ttf_filename: str
Ttf font filename
charmap_filename: str
Charmap filename
directory: str or None, optional
Directory for font and charmap files | Below is the instruction that describes the task:
### Input:
Loads a font file and the associated charmap.
If ``directory`` is None, the files will be looked for in ``./fonts/``.
Parameters
----------
prefix: str
Prefix string to be used when accessing a given font set
ttf_filename: str
Ttf font filename
charmap_filename: str
Charmap filename
directory: str or None, optional
Directory for font and charmap files
### Response:
def load_font(self, prefix, ttf_filename, charmap_filename, directory=None):
"""Loads a font file and the associated charmap.
If ``directory`` is None, the files will be looked for in ``./fonts/``.
Parameters
----------
prefix: str
Prefix string to be used when accessing a given font set
ttf_filename: str
Ttf font filename
charmap_filename: str
Charmap filename
directory: str or None, optional
Directory for font and charmap files
"""
def hook(obj):
result = {}
for key in obj:
result[key] = unichr(int(obj[key], 16))
return result
if directory is None:
directory = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'fonts')
# Load font
if QApplication.instance() is not None:
id_ = QFontDatabase.addApplicationFont(os.path.join(directory,
ttf_filename))
loadedFontFamilies = QFontDatabase.applicationFontFamilies(id_)
if(loadedFontFamilies):
self.fontname[prefix] = loadedFontFamilies[0]
else:
raise FontError(u"Font at '{0}' appears to be empty. "
"If you are on Windows 10, please read "
"https://support.microsoft.com/"
"en-us/kb/3053676 "
"to know how to prevent Windows from blocking "
"the fonts that come with QtAwesome.".format(
os.path.join(directory, ttf_filename)))
with open(os.path.join(directory, charmap_filename), 'r') as codes:
self.charmap[prefix] = json.load(codes, object_hook=hook)
# Verify that vendorized fonts are not corrupt
if not SYSTEM_FONTS:
ttf_hash = MD5_HASHES.get(ttf_filename, None)
if ttf_hash is not None:
hasher = hashlib.md5()
with open(os.path.join(directory, ttf_filename),
'rb') as f:
content = f.read()
hasher.update(content)
ttf_calculated_hash_code = hasher.hexdigest()
if ttf_calculated_hash_code != ttf_hash:
raise FontError(u"Font is corrupt at: '{0}'".format(
os.path.join(directory, ttf_filename))) |
def print_cli(msg, retries=10, step=0.01):
'''
Wrapper around print() that suppresses tracebacks on broken pipes (i.e.
when salt output is piped to less and less is stopped prematurely).
'''
while retries:
try:
try:
print(msg)
except UnicodeEncodeError:
print(msg.encode('utf-8'))
except IOError as exc:
err = "{0}".format(exc)
if exc.errno != errno.EPIPE:
if (
("temporarily unavailable" in err or
exc.errno in (errno.EAGAIN,)) and
retries
):
time.sleep(step)
retries -= 1
continue
else:
raise
break | Wrapper around print() that suppresses tracebacks on broken pipes (i.e.
when salt output is piped to less and less is stopped prematurely). | Below is the instruction that describes the task:
### Input:
Wrapper around print() that suppresses tracebacks on broken pipes (i.e.
when salt output is piped to less and less is stopped prematurely).
### Response:
def print_cli(msg, retries=10, step=0.01):
'''
Wrapper around print() that suppresses tracebacks on broken pipes (i.e.
when salt output is piped to less and less is stopped prematurely).
'''
while retries:
try:
try:
print(msg)
except UnicodeEncodeError:
print(msg.encode('utf-8'))
except IOError as exc:
err = "{0}".format(exc)
if exc.errno != errno.EPIPE:
if (
("temporarily unavailable" in err or
exc.errno in (errno.EAGAIN,)) and
retries
):
time.sleep(step)
retries -= 1
continue
else:
raise
break |
def ip_hide_ext_community_list_holder_extcommunity_list_extcommunity_list_num(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
hide_ext_community_list_holder = ET.SubElement(ip, "hide-ext-community-list-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
extcommunity_list = ET.SubElement(hide_ext_community_list_holder, "extcommunity-list")
extcommunity_list_num = ET.SubElement(extcommunity_list, "extcommunity-list-num")
extcommunity_list_num.text = kwargs.pop('extcommunity_list_num')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ip_hide_ext_community_list_holder_extcommunity_list_extcommunity_list_num(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
hide_ext_community_list_holder = ET.SubElement(ip, "hide-ext-community-list-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
extcommunity_list = ET.SubElement(hide_ext_community_list_holder, "extcommunity-list")
extcommunity_list_num = ET.SubElement(extcommunity_list, "extcommunity-list-num")
extcommunity_list_num.text = kwargs.pop('extcommunity_list_num')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def raise_(type_, value=None, traceback=None): # pylint: disable=W0613
"""
Does the same as an ordinary ``raise`` with arguments does in Python 2.
But works in Python 3 (>= 3.3) also!
Please check out the README at https://github.com/9seconds/pep3134
to get an idea about possible pitfalls. But the short story is: please
be pretty careful with tracebacks. If it is possible, use sys.exc_info
instead. But in most cases it will work as you expect.
"""
if type_.__traceback__ is not traceback:
raise type_.with_traceback(traceback)
raise type_ | Does the same as an ordinary ``raise`` with arguments does in Python 2.
But works in Python 3 (>= 3.3) also!
Please check out the README at https://github.com/9seconds/pep3134
to get an idea about possible pitfalls. But the short story is: please
be pretty careful with tracebacks. If it is possible, use sys.exc_info
instead. But in most cases it will work as you expect. | Below is the instruction that describes the task:
### Input:
Does the same as an ordinary ``raise`` with arguments does in Python 2.
But works in Python 3 (>= 3.3) also!
Please check out the README at https://github.com/9seconds/pep3134
to get an idea about possible pitfalls. But the short story is: please
be pretty careful with tracebacks. If it is possible, use sys.exc_info
instead. But in most cases it will work as you expect.
### Response:
def raise_(type_, value=None, traceback=None): # pylint: disable=W0613
"""
Does the same as an ordinary ``raise`` with arguments does in Python 2.
But works in Python 3 (>= 3.3) also!
Please check out the README at https://github.com/9seconds/pep3134
to get an idea about possible pitfalls. But the short story is: please
be pretty careful with tracebacks. If it is possible, use sys.exc_info
instead. But in most cases it will work as you expect.
"""
if type_.__traceback__ is not traceback:
raise type_.with_traceback(traceback)
raise type_ |
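A brief sketch of raise_ in use, assuming only the function shown above. With no traceback argument the exception instance is raised unchanged; passing a captured traceback re-attaches it before raising.
# Minimal sketch exercising raise_ as defined above.
import sys

try:
    raise_(ValueError("boom"))                       # no traceback given -> plain raise
except ValueError:
    exc_type, exc_value, exc_tb = sys.exc_info()     # capture the traceback

try:
    raise_(RuntimeError("wrapped"), None, exc_tb)    # re-attach the captured traceback
except RuntimeError as err:
    assert err.__traceback__ is not None
The value argument is unused by this implementation (hence the pylint disable), so None is passed purely to reach the traceback slot.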
def get_cell(self, row, column):
""" Gets the range object containing the single cell based on row and column numbers. """
url = self.build_url(self._endpoints.get('get_cell').format(row=row, column=column))
response = self.session.get(url)
if not response:
return None
return self.range_constructor(parent=self, **{self._cloud_data_key: response.json()}) | Gets the range object containing the single cell based on row and column numbers. | Below is the instruction that describes the task:
### Input:
Gets the range object containing the single cell based on row and column numbers.
### Response:
def get_cell(self, row, column):
""" Gets the range object containing the single cell based on row and column numbers. """
url = self.build_url(self._endpoints.get('get_cell').format(row=row, column=column))
response = self.session.get(url)
if not response:
return None
return self.range_constructor(parent=self, **{self._cloud_data_key: response.json()}) |
def get_role(self, item, state_root, from_state=False):
"""
Used to retrieve an identity role.
Args:
item (string): the name of the role to be fetched
state_root(string): The state root of the previous block.
from_state (bool): Whether the identity value should be read
directly from state, instead of using the cached values.
This should be used when the state_root passed is not from
the current chain head.
"""
if from_state:
# if from state use identity_view and do not add to cache
if self._identity_view is None:
self.update_view(state_root)
value = self._identity_view.get_role(item)
return value
value = self._cache.get(item)
if value is None:
if self._identity_view is None:
self.update_view(state_root)
value = self._identity_view.get_role(item)
self._cache[item] = value
return value | Used to retrieve an identity role.
Args:
item (string): the name of the role to be fetched
state_root(string): The state root of the previous block.
from_state (bool): Whether the identity value should be read
directly from state, instead of using the cached values.
This should be used when the state_root passed is not from
the current chain head. | Below is the instruction that describes the task:
### Input:
Used to retrieve an identity role.
Args:
item (string): the name of the role to be fetched
state_root(string): The state root of the previous block.
from_state (bool): Whether the identity value should be read
directly from state, instead of using the cached values.
This should be used when the state_root passed is not from
the current chain head.
### Response:
def get_role(self, item, state_root, from_state=False):
"""
Used to retrieve an identity role.
Args:
item (string): the name of the role to be fetched
state_root(string): The state root of the previous block.
from_state (bool): Whether the identity value should be read
directly from state, instead of using the cached values.
This should be used when the state_root passed is not from
the current chain head.
"""
if from_state:
# if from state use identity_view and do not add to cache
if self._identity_view is None:
self.update_view(state_root)
value = self._identity_view.get_role(item)
return value
value = self._cache.get(item)
if value is None:
if self._identity_view is None:
self.update_view(state_root)
value = self._identity_view.get_role(item)
self._cache[item] = value
return value |
def get_filename_ds(ds, dump=True, paths=None, **kwargs):
"""
Return the filename corresponding to a dataset
This method returns the path to the `ds` or saves the dataset
if there exists no filename
Parameters
----------
ds: xarray.Dataset
The dataset you want the path information for
dump: bool
If True and the dataset has not been dumped so far, it is dumped to a
temporary file or the one generated by `paths` is used
paths: iterable or True
An iterator over filenames to use if a dataset has no filename.
If paths is ``True``, an iterator over temporary files will be
created without raising a warning
Other Parameters
----------------
``**kwargs``
Any other keyword for the :func:`to_netcdf` function
%(xarray.Dataset.to_netcdf.parameters)s
Returns
-------
str or None
None, if the dataset has not yet been dumped to the harddisk and
`dump` is False, otherwise the complete path to the input
file
str
The module of the :class:`xarray.backends.common.AbstractDataStore`
instance that is used to hold the data
str
The class name of the
:class:`xarray.backends.common.AbstractDataStore` instance that is
used to open the data
"""
from tempfile import NamedTemporaryFile
# if already specified, return that filename
if ds.psy._filename is not None:
return tuple([ds.psy._filename] + list(ds.psy.data_store))
def dump_nc():
# make sure that the data store is not closed by providing a
# write argument
if xr_version < (0, 11):
kwargs.setdefault('writer', xarray_api.ArrayWriter())
store = to_netcdf(ds, fname, **kwargs)
else:
# `writer` parameter was removed by
# https://github.com/pydata/xarray/pull/2261
kwargs.setdefault('multifile', True)
store = to_netcdf(ds, fname, **kwargs)[1]
store_mod = store.__module__
store_cls = store.__class__.__name__
ds._file_obj = store
return store_mod, store_cls
def tmp_it():
while True:
yield NamedTemporaryFile(suffix='.nc').name
fname = None
if paths is True or (dump and paths is None):
paths = tmp_it()
elif paths is not None:
if isstring(paths):
paths = iter([paths])
else:
paths = iter(paths)
# try to get the filename from the data store of the obj
store_mod, store_cls = ds.psy.data_store
if store_mod is not None:
store = ds._file_obj
# try several engines
if hasattr(store, 'file_objs'):
fname = []
store_mod = []
store_cls = []
for obj in store.file_objs: # mfdataset
_fname = None
for func in get_fname_funcs:
if _fname is None:
_fname = func(obj)
if _fname is not None:
fname.append(_fname)
store_mod.append(obj.__module__)
store_cls.append(obj.__class__.__name__)
fname = tuple(fname)
store_mod = tuple(store_mod)
store_cls = tuple(store_cls)
else:
for func in get_fname_funcs:
fname = func(store)
if fname is not None:
break
# check if paths is provided and if yes, save the file
if fname is None and paths is not None:
fname = next(paths, None)
if dump and fname is not None:
store_mod, store_cls = dump_nc()
ds.psy.filename = fname
ds.psy.data_store = (store_mod, store_cls)
return fname, store_mod, store_cls | Return the filename corresponding to a dataset
This method returns the path to the `ds` or saves the dataset
if there exists no filename
Parameters
----------
ds: xarray.Dataset
The dataset you want the path information for
dump: bool
If True and the dataset has not been dumped so far, it is dumped to a
temporary file or the one generated by `paths` is used
paths: iterable or True
An iterator over filenames to use if a dataset has no filename.
If paths is ``True``, an iterator over temporary files will be
created without raising a warning
Other Parameters
----------------
``**kwargs``
Any other keyword for the :func:`to_netcdf` function
%(xarray.Dataset.to_netcdf.parameters)s
Returns
-------
str or None
None, if the dataset has not yet been dumped to the harddisk and
`dump` is False, otherwise the complete path to the input
file
str
The module of the :class:`xarray.backends.common.AbstractDataStore`
instance that is used to hold the data
str
The class name of the
:class:`xarray.backends.common.AbstractDataStore` instance that is
used to open the data | Below is the instruction that describes the task:
### Input:
Return the filename corresponding to a dataset
This method returns the path to the `ds` or saves the dataset
if there exists no filename
Parameters
----------
ds: xarray.Dataset
The dataset you want the path information for
dump: bool
If True and the dataset has not been dumped so far, it is dumped to a
temporary file or the one generated by `paths` is used
paths: iterable or True
An iterator over filenames to use if a dataset has no filename.
If paths is ``True``, an iterator over temporary files will be
created without raising a warning
Other Parameters
----------------
``**kwargs``
Any other keyword for the :func:`to_netcdf` function
%(xarray.Dataset.to_netcdf.parameters)s
Returns
-------
str or None
None, if the dataset has not yet been dumped to the harddisk and
`dump` is False, otherwise the complete path to the input
file
str
The module of the :class:`xarray.backends.common.AbstractDataStore`
instance that is used to hold the data
str
The class name of the
:class:`xarray.backends.common.AbstractDataStore` instance that is
used to open the data
### Response:
def get_filename_ds(ds, dump=True, paths=None, **kwargs):
"""
Return the filename corresponding to a dataset
This method returns the path to the `ds` or saves the dataset
if there exists no filename
Parameters
----------
ds: xarray.Dataset
The dataset you want the path information for
dump: bool
If True and the dataset has not been dumped so far, it is dumped to a
temporary file or the one generated by `paths` is used
paths: iterable or True
An iterator over filenames to use if a dataset has no filename.
If paths is ``True``, an iterator over temporary files will be
created without raising a warning
Other Parameters
----------------
``**kwargs``
Any other keyword for the :func:`to_netcdf` function
%(xarray.Dataset.to_netcdf.parameters)s
Returns
-------
str or None
None, if the dataset has not yet been dumped to the harddisk and
`dump` is False, otherwise the complete path to the input
file
str
The module of the :class:`xarray.backends.common.AbstractDataStore`
instance that is used to hold the data
str
The class name of the
:class:`xarray.backends.common.AbstractDataStore` instance that is
used to open the data
"""
from tempfile import NamedTemporaryFile
# if already specified, return that filename
if ds.psy._filename is not None:
return tuple([ds.psy._filename] + list(ds.psy.data_store))
def dump_nc():
# make sure that the data store is not closed by providing a
# write argument
if xr_version < (0, 11):
kwargs.setdefault('writer', xarray_api.ArrayWriter())
store = to_netcdf(ds, fname, **kwargs)
else:
# `writer` parameter was removed by
# https://github.com/pydata/xarray/pull/2261
kwargs.setdefault('multifile', True)
store = to_netcdf(ds, fname, **kwargs)[1]
store_mod = store.__module__
store_cls = store.__class__.__name__
ds._file_obj = store
return store_mod, store_cls
def tmp_it():
while True:
yield NamedTemporaryFile(suffix='.nc').name
fname = None
if paths is True or (dump and paths is None):
paths = tmp_it()
elif paths is not None:
if isstring(paths):
paths = iter([paths])
else:
paths = iter(paths)
# try to get the filename from the data store of the obj
store_mod, store_cls = ds.psy.data_store
if store_mod is not None:
store = ds._file_obj
# try several engines
if hasattr(store, 'file_objs'):
fname = []
store_mod = []
store_cls = []
for obj in store.file_objs: # mfdataset
_fname = None
for func in get_fname_funcs:
if _fname is None:
_fname = func(obj)
if _fname is not None:
fname.append(_fname)
store_mod.append(obj.__module__)
store_cls.append(obj.__class__.__name__)
fname = tuple(fname)
store_mod = tuple(store_mod)
store_cls = tuple(store_cls)
else:
for func in get_fname_funcs:
fname = func(store)
if fname is not None:
break
# check if paths is provided and if yes, save the file
if fname is None and paths is not None:
fname = next(paths, None)
if dump and fname is not None:
store_mod, store_cls = dump_nc()
ds.psy.filename = fname
ds.psy.data_store = (store_mod, store_cls)
return fname, store_mod, store_cls |
def role_and_interface_to_relations(role, interface_name):
"""
Given a role and interface name, return a list of relation names for the
current charm that use that interface under that role (where role is one
of ``provides``, ``requires``, or ``peers``).
:returns: A list of relation names.
"""
_metadata = metadata()
results = []
for relation_name, relation in _metadata.get(role, {}).items():
if relation['interface'] == interface_name:
results.append(relation_name)
return results | Given a role and interface name, return a list of relation names for the
current charm that use that interface under that role (where role is one
of ``provides``, ``requires``, or ``peers``).
:returns: A list of relation names. | Below is the instruction that describes the task:
### Input:
Given a role and interface name, return a list of relation names for the
current charm that use that interface under that role (where role is one
of ``provides``, ``requires``, or ``peers``).
:returns: A list of relation names.
### Response:
def role_and_interface_to_relations(role, interface_name):
"""
Given a role and interface name, return a list of relation names for the
current charm that use that interface under that role (where role is one
of ``provides``, ``requires``, or ``peers``).
:returns: A list of relation names.
"""
_metadata = metadata()
results = []
for relation_name, relation in _metadata.get(role, {}).items():
if relation['interface'] == interface_name:
results.append(relation_name)
return results |
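A hedged usage sketch for role_and_interface_to_relations: it only makes sense inside a charm hook environment where metadata() can read the charm's metadata.yaml, and the role and interface names below are illustrative.
# Hedged sketch: list relation names declared for an interface in metadata.yaml.
db_relations = role_and_interface_to_relations('requires', 'mysql')    # illustrative interface
peer_relations = role_and_interface_to_relations('peers', 'cluster')   # illustrative interface
for name in db_relations + peer_relations:
    print(name)
Outside a hook context metadata() has no metadata.yaml to read, so this sketch is illustrative only.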
def lis_to_bio_map(folder):
"""
Senators have a lis_id that is used in some places. That's dumb. Build a
dict from lis_id to bioguide_id which every member of congress has.
"""
logger.info("Opening legislator csv for lis_dct creation")
lis_dic = {}
leg_path = "{0}/legislators.csv".format(folder)
logger.info(leg_path)
with open(leg_path, 'r') as csvfile:
leg_reader = csv.reader(csvfile)
for row in leg_reader:
if row[22]:
lis_dic[row[22]] = row[19]
return lis_dic | Senators have a lis_id that is used in some places. That's dumb. Build a
dict from lis_id to bioguide_id which every member of congress has. | Below is the instruction that describes the task:
### Input:
Senators have a lis_id that is used in some places. That's dumb. Build a
dict from lis_id to bioguide_id which every member of congress has.
### Response:
def lis_to_bio_map(folder):
"""
Senators have a lis_id that is used in some places. That's dumb. Build a
dict from lis_id to bioguide_id which every member of congress has.
"""
logger.info("Opening legislator csv for lis_dct creation")
lis_dic = {}
leg_path = "{0}/legislators.csv".format(folder)
logger.info(leg_path)
with open(leg_path, 'r') as csvfile:
leg_reader = csv.reader(csvfile)
for row in leg_reader:
if row[22]:
lis_dic[row[22]] = row[19]
return lis_dic |
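A runnable sketch for lis_to_bio_map under stated assumptions: the function above is in scope, a module-level logger exists, and the CSV layout puts bioguide_id in column 19 and lis_id in column 22 exactly as the code indexes; the id values are made up.
# Hedged sketch: fabricate a one-row legislators.csv and build the lis -> bioguide map.
import csv
import logging
import os
import tempfile

logger = logging.getLogger(__name__)       # the function above expects a module-level logger

folder = tempfile.mkdtemp()
row = [''] * 23
row[19] = 'B000575'                         # bioguide_id column read by the function (made up)
row[22] = 'S317'                            # lis_id column read by the function (made up)
with open(os.path.join(folder, 'legislators.csv'), 'w', newline='') as f:
    csv.writer(f).writerow(row)

print(lis_to_bio_map(folder))               # expected: {'S317': 'B000575'}
Rows whose lis column is empty are skipped by the `if row[22]` guard, which is why the resulting map only contains members who actually have a lis_id.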
def ok(self, text=u"OK", err=False):
"""Set Ok (success) finalizer to a spinner."""
# Do not display spin text for ok state
self._text = None
_text = to_text(text) if text else u"OK"
err = err or not self.write_to_stdout
self._freeze(_text, err=err) | Set Ok (success) finalizer to a spinner. | Below is the instruction that describes the task:
### Input:
Set Ok (success) finalizer to a spinner.
### Response:
def ok(self, text=u"OK", err=False):
"""Set Ok (success) finalizer to a spinner."""
# Do not display spin text for ok state
self._text = None
_text = to_text(text) if text else u"OK"
err = err or not self.write_to_stdout
self._freeze(_text, err=err) |
def getsize(o_file):
"""
get the size, either by seeking to the end.
"""
startpos = o_file.tell()
o_file.seek(0)
o_file.seek(0, SEEK_END)
size = o_file.tell()
o_file.seek(startpos)
return size | get the size, either by seeking to the end. | Below is the instruction that describes the task:
### Input:
get the size, either by seeking to the end.
### Response:
def getsize(o_file):
"""
get the size, either by seeking to the end.
"""
startpos = o_file.tell()
o_file.seek(0)
o_file.seek(0, SEEK_END)
size = o_file.tell()
o_file.seek(startpos)
return size |
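A small sketch of getsize on an in-memory file object, assuming the function above (and the SEEK_END it references) is in scope; it shows that the caller's position is restored after the size is measured.
# Minimal sketch: measure size without disturbing the caller's file position.
import io

buf = io.BytesIO(b"hello world")
buf.seek(5)                     # caller is somewhere in the middle of the stream
assert getsize(buf) == 11       # full size, found by seeking to the end
assert buf.tell() == 5          # original position restored before returning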
def wc_wrap(text, length):
"""
Wrap text to given length, breaking on whitespace and taking into account
character width.
Meant for use on a single line or paragraph. Will destroy spacing between
words and paragraphs and any indentation.
"""
line_words = []
line_len = 0
words = re.split(r"\s+", text.strip())
for word in words:
word_len = wcswidth(word)
if line_words and line_len + word_len > length:
line = " ".join(line_words)
if line_len <= length:
yield line
else:
yield from _wc_hard_wrap(line, length)
line_words = []
line_len = 0
line_words.append(word)
line_len += word_len + 1 # add 1 to account for space between words
if line_words:
line = " ".join(line_words)
if line_len <= length:
yield line
else:
yield from _wc_hard_wrap(line, length) | Wrap text to given length, breaking on whitespace and taking into account
character width.
Meant for use on a single line or paragraph. Will destroy spacing between
words and paragraphs and any indentation. | Below is the instruction that describes the task:
### Input:
Wrap text to given length, breaking on whitespace and taking into account
character width.
Meant for use on a single line or paragraph. Will destroy spacing between
words and paragraphs and any indentation.
### Response:
def wc_wrap(text, length):
"""
Wrap text to given length, breaking on whitespace and taking into account
character width.
Meant for use on a single line or paragraph. Will destroy spacing between
words and paragraphs and any indentation.
"""
line_words = []
line_len = 0
words = re.split(r"\s+", text.strip())
for word in words:
word_len = wcswidth(word)
if line_words and line_len + word_len > length:
line = " ".join(line_words)
if line_len <= length:
yield line
else:
yield from _wc_hard_wrap(line, length)
line_words = []
line_len = 0
line_words.append(word)
line_len += word_len + 1 # add 1 to account for space between words
if line_words:
line = " ".join(line_words)
if line_len <= length:
yield line
else:
yield from _wc_hard_wrap(line, length) |
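A short usage sketch for wc_wrap, assuming the function above plus its helpers (wcswidth and _wc_hard_wrap) are in scope; the sample text is arbitrary.
# Hedged sketch: wrap a paragraph to 20 display columns, width-aware.
text = "The quick brown fox jumps over the lazy dog near the riverbank."
for line in wc_wrap(text, 20):
    print(line)
# Every yielded line fits in 20 columns unless a single word is wider,
# in which case _wc_hard_wrap breaks that word across lines.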
def dispatch_missing(op, left, right, result):
"""
Fill nulls caused by division by zero, casting to a different dtype
if necessary.
Parameters
----------
op : function (operator.add, operator.div, ...)
left : object (Index for non-reversed ops)
right : object (Index for reversed ops)
result : ndarray
Returns
-------
result : ndarray
"""
opstr = '__{opname}__'.format(opname=op.__name__).replace('____', '__')
if op in [operator.truediv, operator.floordiv,
getattr(operator, 'div', None)]:
result = mask_zero_div_zero(left, right, result)
elif op is operator.mod:
result = fill_zeros(result, left, right, opstr, np.nan)
elif op is divmod:
res0 = mask_zero_div_zero(left, right, result[0])
res1 = fill_zeros(result[1], left, right, opstr, np.nan)
result = (res0, res1)
return result | Fill nulls caused by division by zero, casting to a different dtype
if necessary.
Parameters
----------
op : function (operator.add, operator.div, ...)
left : object (Index for non-reversed ops)
right : object (Index for reversed ops)
result : ndarray
Returns
-------
result : ndarray | Below is the instruction that describes the task:
### Input:
Fill nulls caused by division by zero, casting to a different dtype
if necessary.
Parameters
----------
op : function (operator.add, operator.div, ...)
left : object (Index for non-reversed ops)
right : object (Index for reversed ops)
result : ndarray
Returns
-------
result : ndarray
### Response:
def dispatch_missing(op, left, right, result):
"""
Fill nulls caused by division by zero, casting to a different dtype
if necessary.
Parameters
----------
op : function (operator.add, operator.div, ...)
left : object (Index for non-reversed ops)
right : object (Index for reversed ops)
result : ndarray
Returns
-------
result : ndarray
"""
opstr = '__{opname}__'.format(opname=op.__name__).replace('____', '__')
if op in [operator.truediv, operator.floordiv,
getattr(operator, 'div', None)]:
result = mask_zero_div_zero(left, right, result)
elif op is operator.mod:
result = fill_zeros(result, left, right, opstr, np.nan)
elif op is divmod:
res0 = mask_zero_div_zero(left, right, result[0])
res1 = fill_zeros(result[1], left, right, opstr, np.nan)
result = (res0, res1)
return result |
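A short worked example of the zero-division problem this dispatcher exists for, using plain NumPy only; it deliberately does not call the pandas-internal helpers (mask_zero_div_zero, fill_zeros), which are assumed to be defined elsewhere in the same module.
# Why masking is needed: NumPy integer floor division by zero quietly yields 0,
# which would look like a legitimate result if left unmasked.
import numpy as np

left = np.array([1, -1, 0, 2])
right = np.array([0, 0, 0, 2])
with np.errstate(divide="ignore", invalid="ignore"):
    raw = left // right
print(raw)      # [0 0 0 1] -- the three divisions by zero all collapse to 0
# dispatch_missing(operator.floordiv, left, right, raw) is expected to remap those
# positions to inf, -inf and nan respectively via mask_zero_div_zero.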
def parse(self, stride=None):
"""Read and cache the file as a numpy array.
Store every *stride* line of data; if ``None`` then the class default is used.
The array is returned with column-first indexing, i.e. for a data file with
columns X Y1 Y2 Y3 ... the array a will be a[0] = X, a[1] = Y1, ... .
"""
if stride is None:
stride = self.stride
self.corrupted_lineno = []
irow = 0 # count rows of data
# cannot use numpy.loadtxt() because xvg can have two types of 'comment' lines
with utilities.openany(self.real_filename) as xvg:
rows = []
ncol = None
for lineno,line in enumerate(xvg):
line = line.strip()
if len(line) == 0:
continue
if "label" in line and "xaxis" in line:
self.xaxis = line.split('"')[-2]
if "label" in line and "yaxis" in line:
self.yaxis = line.split('"')[-2]
if line.startswith("@ legend"):
if not "legend" in self.metadata: self.metadata["legend"] = []
self.metadata["legend"].append(line.split("legend ")[-1])
if line.startswith("@ s") and "subtitle" not in line:
name = line.split("legend ")[-1].replace('"','').strip()
self.names.append(name)
if line.startswith(('#', '@')) :
continue
if line.startswith('&'):
raise NotImplementedError('{0!s}: Multi-data not supported, only simple NXY format.'.format(self.real_filename))
# parse line as floats
try:
row = [float(el) for el in line.split()]
except:
if self.permissive:
self.logger.warn("%s: SKIPPING unparsable line %d: %r",
self.real_filename, lineno+1, line)
self.corrupted_lineno.append(lineno+1)
continue
self.logger.error("%s: Cannot parse line %d: %r",
self.real_filename, lineno+1, line)
raise
# check for same number of columns as in previous step
if ncol is not None and len(row) != ncol:
if self.permissive:
self.logger.warn("%s: SKIPPING line %d with wrong number of columns: %r",
self.real_filename, lineno+1, line)
self.corrupted_lineno.append(lineno+1)
continue
errmsg = "{0!s}: Wrong number of columns in line {1:d}: {2!r}".format(self.real_filename, lineno+1, line)
self.logger.error(errmsg)
raise IOError(errno.ENODATA, errmsg, self.real_filename)
# finally: a good line
if irow % stride == 0:
ncol = len(row)
rows.append(row)
irow += 1
try:
self.__array = numpy.array(rows).transpose() # cache result
except:
self.logger.error("%s: Failed reading XVG file, possibly data corrupted. "
"Check the last line of the file...", self.real_filename)
raise
finally:
del rows | Read and cache the file as a numpy array.
Store every *stride* line of data; if ``None`` then the class default is used.
The array is returned with column-first indexing, i.e. for a data file with
columns X Y1 Y2 Y3 ... the array a will be a[0] = X, a[1] = Y1, ... . | Below is the instruction that describes the task:
### Input:
Read and cache the file as a numpy array.
Store every *stride* line of data; if ``None`` then the class default is used.
The array is returned with column-first indexing, i.e. for a data file with
columns X Y1 Y2 Y3 ... the array a will be a[0] = X, a[1] = Y1, ... .
### Response:
def parse(self, stride=None):
"""Read and cache the file as a numpy array.
Store every *stride* line of data; if ``None`` then the class default is used.
The array is returned with column-first indexing, i.e. for a data file with
columns X Y1 Y2 Y3 ... the array a will be a[0] = X, a[1] = Y1, ... .
"""
if stride is None:
stride = self.stride
self.corrupted_lineno = []
irow = 0 # count rows of data
# cannot use numpy.loadtxt() because xvg can have two types of 'comment' lines
with utilities.openany(self.real_filename) as xvg:
rows = []
ncol = None
for lineno,line in enumerate(xvg):
line = line.strip()
if len(line) == 0:
continue
if "label" in line and "xaxis" in line:
self.xaxis = line.split('"')[-2]
if "label" in line and "yaxis" in line:
self.yaxis = line.split('"')[-2]
if line.startswith("@ legend"):
if not "legend" in self.metadata: self.metadata["legend"] = []
self.metadata["legend"].append(line.split("legend ")[-1])
if line.startswith("@ s") and "subtitle" not in line:
name = line.split("legend ")[-1].replace('"','').strip()
self.names.append(name)
if line.startswith(('#', '@')) :
continue
if line.startswith('&'):
raise NotImplementedError('{0!s}: Multi-data not supported, only simple NXY format.'.format(self.real_filename))
# parse line as floats
try:
row = [float(el) for el in line.split()]
except:
if self.permissive:
self.logger.warn("%s: SKIPPING unparsable line %d: %r",
self.real_filename, lineno+1, line)
self.corrupted_lineno.append(lineno+1)
continue
self.logger.error("%s: Cannot parse line %d: %r",
self.real_filename, lineno+1, line)
raise
# check for same number of columns as in previous step
if ncol is not None and len(row) != ncol:
if self.permissive:
self.logger.warn("%s: SKIPPING line %d with wrong number of columns: %r",
self.real_filename, lineno+1, line)
self.corrupted_lineno.append(lineno+1)
continue
errmsg = "{0!s}: Wrong number of columns in line {1:d}: {2!r}".format(self.real_filename, lineno+1, line)
self.logger.error(errmsg)
raise IOError(errno.ENODATA, errmsg, self.real_filename)
# finally: a good line
if irow % stride == 0:
ncol = len(row)
rows.append(row)
irow += 1
try:
self.__array = numpy.array(rows).transpose() # cache result
except:
self.logger.error("%s: Failed reading XVG file, possibly data corrupted. "
"Check the last line of the file...", self.real_filename)
raise
finally:
del rows |
def require(method):
"""
Decorator for managing chained dependencies of different class
properties. The @require decorator allows developers to specify
that a function call must be operated on before another property
or function call is accessed, so that data and processing for an
entire class can be evaluated in a lazy way (i.e. not all upon
instantiation).
Examples:
>>> class Foo(Bar):
>>>
>>> def a(self):
>>> print 'a!'
>>> return 1
>>>
>>> @require('a')
>>> @property
>>> def b(self):
>>> print 'b!'
>>> return self.a + 1
>>>
>>> foo = Foo()
>>> print foo.b
>>>
'a!'
'b!'
2
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
# throw exception if input class doesn't have requirement
if not hasattr(args[0], method):
raise AssertionError('{} class has no method {}()'.format(args[0].__class__.__name__, method))
# create property to record that method has been called
callmethod = method + '_called'
if not hasattr(args[0], callmethod):
setattr(args[0], callmethod, False)
# call the method if it hasn't yet been called
if not getattr(args[0], callmethod):
getattr(args[0], method)()
setattr(args[0], callmethod, True)
return func(*args, **kwargs)
return wrapper
return decorator | Decorator for managing chained dependencies of different class
properties. The @require decorator allows developers to specify
that a function call must be operated on before another property
or function call is accessed, so that data and processing for an
entire class can be evaluated in a lazy way (i.e. not all upon
instantiation).
Examples:
>>> class Foo(Bar):
>>>
>>> def a(self):
>>> print 'a!'
>>> return 1
>>>
>>> @require('a')
>>> @property
>>> def b(self):
>>> print 'b!'
>>> return self.a + 1
>>>
>>> foo = Foo()
>>> print foo.b
>>>
'a!'
'b!'
2 | Below is the instruction that describes the task:
### Input:
Decorator for managing chained dependencies of different class
properties. The @require decorator allows developers to specify
that a function call must be operated on before another property
or function call is accessed, so that data and processing for an
entire class can be evaluated in a lazy way (i.e. not all upon
instantiation).
Examples:
>>> class Foo(Bar):
>>>
>>> def a(self):
>>> print 'a!'
>>> return 1
>>>
>>> @require('a')
>>> @property
>>> def b(self):
>>> print 'b!'
>>> return self.a + 1
>>>
>>> foo = Foo()
>>> print foo.b
>>>
'a!'
'b!'
2
### Response:
def require(method):
"""
Decorator for managing chained dependencies of different class
properties. The @require decorator allows developers to specify
that a function call must be operated on before another property
or function call is accessed, so that data and processing for an
entire class can be evaluated in a lazy way (i.e. not all upon
instantiation).
Examples:
>>> class Foo(Bar):
>>>
>>> def a(self):
>>> print 'a!'
>>> return 1
>>>
>>> @require('a')
>>> @property
>>> def b(self):
>>> print 'b!'
>>> return self.a + 1
>>>
>>> foo = Foo()
>>> print foo.b
>>>
'a!'
'b!'
2
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
# throw exception if input class doesn't have requirement
if not hasattr(args[0], method):
raise AssertionError('{} class has no method {}()'.format(args[0].__class__.__name__, method))
# create property to record that method has been called
callmethod = method + '_called'
if not hasattr(args[0], callmethod):
setattr(args[0], callmethod, False)
# call the method if it hasn't yet been called
if not getattr(args[0], callmethod):
getattr(args[0], method)()
setattr(args[0], callmethod, True)
return func(*args, **kwargs)
return wrapper
return decorator |
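A Python 3 variant of the docstring's example, assuming only the require decorator above. Note the decorator order: @property goes outermost so that require wraps the getter function rather than the property object, and a() records the state that b later reads.
# Hedged sketch: b depends on a() having run first; require('a') enforces that once.
class Foo:
    def a(self):
        print('a!')
        self.base = 1            # state that b depends on (illustrative attribute)

    @property
    @require('a')
    def b(self):
        print('b!')
        return self.base + 1

foo = Foo()
print(foo.b)     # prints a!, then b!, then 2
print(foo.b)     # prints b!, then 2 -- a() is not called again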
def saveNetwork(self, filename, makeWrapper = 1, mode = "pickle", counter = None):
"""
Saves network to file using pickle.
"""
self.saveNetworkToFile(filename, makeWrapper, mode, counter) | Saves network to file using pickle. | Below is the instruction that describes the task:
### Input:
Saves network to file using pickle.
### Response:
def saveNetwork(self, filename, makeWrapper = 1, mode = "pickle", counter = None):
"""
Saves network to file using pickle.
"""
self.saveNetworkToFile(filename, makeWrapper, mode, counter) |
def build(port=8000, fixtures=None):
"""
Builds a server file.
1. Extract mock response details from all valid docstrings in existing views
2. Parse and generate mock values
3. Create a store of all endpoints and data
4. Construct server file
"""
extractor = Extractor()
parser = Parser(extractor.url_details, fixtures)
parser.parse()
url_details = parser.results
_store = get_store(url_details)
store = json.dumps(_store)
variables = str(Variable('let', 'store', store))
functions = DATA_FINDER + GET_HANDLER + MODIFY_HANDLER + POST_HANDLER
endpoints = []
endpoint_uris = []
for u in parser.results:
endpoint = Endpoint()
if u['method'].lower() in ['get', 'post']:
method = u['method'].lower()
else:
method = 'modify'
response = str(ResponseBody(method))
# Check in store if the base url has individual instances
u['url'], list_url = clean_url(u['full_url'], _store, u['method'].lower())
if list_url is not None and u['method'].lower() == 'get':
list_endpoint = Endpoint()
list_endpoint.construct('get', list_url, response)
if str(list_endpoint) not in endpoints:
endpoints.append(str(list_endpoint))
if list_endpoint.uri not in endpoint_uris:
endpoint_uris.append(list_endpoint.uri)
if method == 'modify':
without_prefix = re.sub(r'\/(\w+)\_\_', '', u['url'])
for k, v in _store.items():
if without_prefix in k:
options = v.get('options', '{}')
options = ast.literal_eval(options)
modifiers = []
if options is not None:
modifiers = options.get('modifiers', [])
if modifiers:
for mod in modifiers:
if u['method'].lower() == mod:
mod_endpoint = Endpoint()
uri = without_prefix
if v.get('position') is not None and v['position'] == 'url':
uri = re.sub(r'\/?\_\_key', '/:id', u['full_url'])
mod_endpoint.construct(u['method'].lower(), uri, response)
if str(mod_endpoint) not in endpoints:
endpoints.append(str(mod_endpoint))
if mod_endpoint.uri not in endpoint_uris:
endpoint_uris.append(mod_endpoint.uri)
else:
endpoint.construct(u['method'], u['url'], response)
if str(endpoint) not in endpoints:
endpoints.append(str(endpoint))
if endpoint.uri not in endpoint_uris:
endpoint_uris.append(endpoint.uri)
endpoints = ''.join(endpoints)
express = ExpressServer()
express.construct(variables, functions, endpoints, port)
return express | Builds a server file.
1. Extract mock response details from all valid docstrings in existing views
2. Parse and generate mock values
3. Create a store of all endpoints and data
4. Construct server file | Below is the instruction that describes the task:
### Input:
Builds a server file.
1. Extract mock response details from all valid docstrings in existing views
2. Parse and generate mock values
3. Create a store of all endpoints and data
4. Construct server file
### Response:
def build(port=8000, fixtures=None):
"""
Builds a server file.
1. Extract mock response details from all valid docstrings in existing views
2. Parse and generate mock values
3. Create a store of all endpoints and data
4. Construct server file
"""
extractor = Extractor()
parser = Parser(extractor.url_details, fixtures)
parser.parse()
url_details = parser.results
_store = get_store(url_details)
store = json.dumps(_store)
variables = str(Variable('let', 'store', store))
functions = DATA_FINDER + GET_HANDLER + MODIFY_HANDLER + POST_HANDLER
endpoints = []
endpoint_uris = []
for u in parser.results:
endpoint = Endpoint()
if u['method'].lower() in ['get', 'post']:
method = u['method'].lower()
else:
method = 'modify'
response = str(ResponseBody(method))
# Check in store if the base url has individual instances
u['url'], list_url = clean_url(u['full_url'], _store, u['method'].lower())
if list_url is not None and u['method'].lower() == 'get':
list_endpoint = Endpoint()
list_endpoint.construct('get', list_url, response)
if str(list_endpoint) not in endpoints:
endpoints.append(str(list_endpoint))
if list_endpoint.uri not in endpoint_uris:
endpoint_uris.append(list_endpoint.uri)
if method == 'modify':
without_prefix = re.sub(r'\/(\w+)\_\_', '', u['url'])
for k, v in _store.items():
if without_prefix in k:
options = v.get('options', '{}')
options = ast.literal_eval(options)
modifiers = []
if options is not None:
modifiers = options.get('modifiers', [])
if modifiers:
for mod in modifiers:
if u['method'].lower() == mod:
mod_endpoint = Endpoint()
uri = without_prefix
if v.get('position') is not None and v['position'] == 'url':
uri = re.sub(r'\/?\_\_key', '/:id', u['full_url'])
mod_endpoint.construct(u['method'].lower(), uri, response)
if str(mod_endpoint) not in endpoints:
endpoints.append(str(mod_endpoint))
if mod_endpoint.uri not in endpoint_uris:
endpoint_uris.append(mod_endpoint.uri)
else:
endpoint.construct(u['method'], u['url'], response)
if str(endpoint) not in endpoints:
endpoints.append(str(endpoint))
if endpoint.uri not in endpoint_uris:
endpoint_uris.append(endpoint.uri)
endpoints = ''.join(endpoints)
express = ExpressServer()
express.construct(variables, functions, endpoints, port)
return express |
def dbcon(func):
"""Set up connection before executing function, commit and close connection
afterwards. Unless a connection already has been created."""
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if self.dbcon is None:
# set up connection
self.dbcon = sqlite3.connect(self.db)
self.dbcur = self.dbcon.cursor()
self.dbcur.execute(SQL_SENSOR_TABLE)
self.dbcur.execute(SQL_TMPO_TABLE)
# execute function
try:
result = func(*args, **kwargs)
except Exception as e:
# on exception, first close connection and then raise
self.dbcon.rollback()
self.dbcon.commit()
self.dbcon.close()
self.dbcon = None
self.dbcur = None
raise e
else:
# commit everything and close connection
self.dbcon.commit()
self.dbcon.close()
self.dbcon = None
self.dbcur = None
else:
result = func(*args, **kwargs)
return result
return wrapper | Set up connection before executing function, commit and close connection
afterwards. Unless a connection already has been created. | Below is the instruction that describes the task:
### Input:
Set up connection before executing function, commit and close connection
afterwards. Unless a connection already has been created.
### Response:
def dbcon(func):
"""Set up connection before executing function, commit and close connection
afterwards. Unless a connection already has been created."""
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if self.dbcon is None:
# set up connection
self.dbcon = sqlite3.connect(self.db)
self.dbcur = self.dbcon.cursor()
self.dbcur.execute(SQL_SENSOR_TABLE)
self.dbcur.execute(SQL_TMPO_TABLE)
# execute function
try:
result = func(*args, **kwargs)
except Exception as e:
# on exception, first close connection and then raise
self.dbcon.rollback()
self.dbcon.commit()
self.dbcon.close()
self.dbcon = None
self.dbcur = None
raise e
else:
# commit everything and close connection
self.dbcon.commit()
self.dbcon.close()
self.dbcon = None
self.dbcur = None
else:
result = func(*args, **kwargs)
return result
return wrapper |
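A hedged sketch of the class shape the dbcon decorator above expects: an object with db (an sqlite path), dbcon, and dbcur attributes, where the decorated method just uses self.dbcur. The SQL constants the decorator executes (SQL_SENSOR_TABLE, SQL_TMPO_TABLE) are assumed to be defined alongside it, so the query below sticks to sqlite_master rather than guessing table names.
# Hedged sketch of a consumer of @dbcon; the attribute names follow the decorator above.
class Store:
    def __init__(self, path):
        self.db = path           # sqlite file the decorator connects to
        self.dbcon = None        # opened, committed and closed around each call
        self.dbcur = None

    @dbcon
    def table_names(self):
        self.dbcur.execute("SELECT name FROM sqlite_master WHERE type='table'")
        return [row[0] for row in self.dbcur.fetchall()]

store = Store("data.sqlite3")
print(store.table_names())       # includes whatever tables the decorator's setup SQL created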
def setup_statemachine(self):
"""Setup and start state machine"""
machine = QtCore.QStateMachine()
# _______________
# | |
# | |
# | |
# |_______________|
#
group = util.QState("group", QtCore.QState.ParallelStates, machine)
# _______________
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________| - Parallel State
#
visibility = util.QState("visibility", group)
hidden = util.QState("hidden", visibility)
visible = util.QState("visible", visibility)
# _______________
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________| - Parallel State
#
operation = util.QState("operation", group)
ready = util.QState("ready", operation)
collecting = util.QState("collecting", operation)
validating = util.QState("validating", operation)
extracting = util.QState("extracting", operation)
integrating = util.QState("integrating", operation)
finished = util.QState("finished", operation)
repairing = util.QState("repairing", operation)
initialising = util.QState("initialising", operation)
stopping = util.QState("stopping", operation)
stopped = util.QState("stopped", operation)
saving = util.QState("saving", operation)
# _______________
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________| - Parallel State
#
errored = util.QState("errored", group)
clean = util.QState("clean", errored)
dirty = util.QState("dirty", errored)
# _______________
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________| - Parallel State
# States that block the underlying GUI
suspended = util.QState("suspended", group)
alive = util.QState("alive", suspended)
acting = util.QState("acting", suspended)
acted = QtCore.QHistoryState(operation)
acted.setDefaultState(ready)
# _______________
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________|
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________|
#
hidden.addTransition(self.show, visible)
visible.addTransition(self.hide, hidden)
ready.addTransition(self.acting, acting)
ready.addTransition(self.validating, validating)
ready.addTransition(self.initialising, initialising)
ready.addTransition(self.repairing, repairing)
ready.addTransition(self.saving, saving)
saving.addTransition(self.saved, ready)
collecting.addTransition(self.initialised, ready)
collecting.addTransition(self.stopping, stopping)
validating.addTransition(self.stopping, stopping)
validating.addTransition(self.finished, finished)
validating.addTransition(self.extracting, extracting)
extracting.addTransition(self.stopping, stopping)
extracting.addTransition(self.finished, finished)
extracting.addTransition(self.integrating, integrating)
integrating.addTransition(self.stopping, stopping)
integrating.addTransition(self.finished, finished)
finished.addTransition(self.initialising, initialising)
finished.addTransition(self.acting, acting)
initialising.addTransition(self.collecting, collecting)
stopping.addTransition(self.acted, acted)
stopping.addTransition(self.finished, finished)
dirty.addTransition(self.initialising, clean)
clean.addTransition(self.changed, dirty)
alive.addTransition(self.acting, acting)
acting.addTransition(self.acted, acted)
# Set initial states
for compound, state in {machine: group,
visibility: hidden,
operation: ready,
errored: clean,
suspended: alive}.items():
compound.setInitialState(state)
# Make connections
for state in (hidden,
visible,
ready,
collecting,
validating,
extracting,
integrating,
finished,
repairing,
initialising,
stopping,
saving,
stopped,
dirty,
clean,
acting,
alive,
acted):
state.entered.connect(
lambda state=state: self.state_changed.emit(state.name))
machine.start()
return machine | Setup and start state machine | Below is the the instruction that describes the task:
### Input:
Setup and start state machine
### Response:
def setup_statemachine(self):
"""Setup and start state machine"""
machine = QtCore.QStateMachine()
# _______________
# | |
# | |
# | |
# |_______________|
#
group = util.QState("group", QtCore.QState.ParallelStates, machine)
# _______________
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________| - Parallel State
#
visibility = util.QState("visibility", group)
hidden = util.QState("hidden", visibility)
visible = util.QState("visible", visibility)
# _______________
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________| - Parallel State
#
operation = util.QState("operation", group)
ready = util.QState("ready", operation)
collecting = util.QState("collecting", operation)
validating = util.QState("validating", operation)
extracting = util.QState("extracting", operation)
integrating = util.QState("integrating", operation)
finished = util.QState("finished", operation)
repairing = util.QState("repairing", operation)
initialising = util.QState("initialising", operation)
stopping = util.QState("stopping", operation)
stopped = util.QState("stopped", operation)
saving = util.QState("saving", operation)
# _______________
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________| - Parallel State
#
errored = util.QState("errored", group)
clean = util.QState("clean", errored)
dirty = util.QState("dirty", errored)
# _______________
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________| - Parallel State
# States that block the underlying GUI
suspended = util.QState("suspended", group)
alive = util.QState("alive", suspended)
acting = util.QState("acting", suspended)
acted = QtCore.QHistoryState(operation)
acted.setDefaultState(ready)
# _______________
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________|
# | ____ ____ |
# || |---| ||
# ||____|---|____||
# |_______________|
#
hidden.addTransition(self.show, visible)
visible.addTransition(self.hide, hidden)
ready.addTransition(self.acting, acting)
ready.addTransition(self.validating, validating)
ready.addTransition(self.initialising, initialising)
ready.addTransition(self.repairing, repairing)
ready.addTransition(self.saving, saving)
saving.addTransition(self.saved, ready)
collecting.addTransition(self.initialised, ready)
collecting.addTransition(self.stopping, stopping)
validating.addTransition(self.stopping, stopping)
validating.addTransition(self.finished, finished)
validating.addTransition(self.extracting, extracting)
extracting.addTransition(self.stopping, stopping)
extracting.addTransition(self.finished, finished)
extracting.addTransition(self.integrating, integrating)
integrating.addTransition(self.stopping, stopping)
integrating.addTransition(self.finished, finished)
finished.addTransition(self.initialising, initialising)
finished.addTransition(self.acting, acting)
initialising.addTransition(self.collecting, collecting)
stopping.addTransition(self.acted, acted)
stopping.addTransition(self.finished, finished)
dirty.addTransition(self.initialising, clean)
clean.addTransition(self.changed, dirty)
alive.addTransition(self.acting, acting)
acting.addTransition(self.acted, acted)
# Set initial states
for compound, state in {machine: group,
visibility: hidden,
operation: ready,
errored: clean,
suspended: alive}.items():
compound.setInitialState(state)
# Make connections
for state in (hidden,
visible,
ready,
collecting,
validating,
extracting,
integrating,
finished,
repairing,
initialising,
stopping,
saving,
stopped,
dirty,
clean,
acting,
alive,
acted):
state.entered.connect(
lambda state=state: self.state_changed.emit(state.name))
machine.start()
return machine |
def delete_table_column_statistics(self, db_name, tbl_name, col_name):
"""
Parameters:
- db_name
- tbl_name
- col_name
"""
self.send_delete_table_column_statistics(db_name, tbl_name, col_name)
return self.recv_delete_table_column_statistics() | Parameters:
- db_name
- tbl_name
- col_name | Below is the instruction that describes the task:
### Input:
Parameters:
- db_name
- tbl_name
- col_name
### Response:
def delete_table_column_statistics(self, db_name, tbl_name, col_name):
"""
Parameters:
- db_name
- tbl_name
- col_name
"""
self.send_delete_table_column_statistics(db_name, tbl_name, col_name)
return self.recv_delete_table_column_statistics() |
def up_capture(self, benchmark, threshold=0.0, compare_op="ge"):
"""Upside capture ratio.
Measures the performance of `self` relative to benchmark
conditioned on periods where `benchmark` is gt or ge to
`threshold`.
Upside capture ratios are calculated by taking the fund's
monthly return during the periods of positive benchmark
performance and dividing it by the benchmark return.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is gt/ge `threshold`.
compare_op : {'ge', 'gt'}
Comparison operator used to compare to `threshold`.
'gt' is greater-than; 'ge' is greater-than-or-equal.
Returns
-------
float
Note
----
This metric uses geometric, not arithmetic, mean return.
"""
slf, bm = self.upmarket_filter(
benchmark=benchmark,
threshold=threshold,
compare_op=compare_op,
include_benchmark=True,
)
return slf.geomean() / bm.geomean() | Upside capture ratio.
Measures the performance of `self` relative to benchmark
conditioned on periods where `benchmark` is gt or ge to
`threshold`.
Upside capture ratios are calculated by taking the fund's
monthly return during the periods of positive benchmark
performance and dividing it by the benchmark return.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is gt/ge `threshold`.
compare_op : {'ge', 'gt'}
Comparison operator used to compare to `threshold`.
'gt' is greater-than; 'ge' is greater-than-or-equal.
Returns
-------
float
Note
----
This metric uses geometric, not arithmetic, mean return. | Below is the the instruction that describes the task:
### Input:
Upside capture ratio.
Measures the performance of `self` relative to benchmark
conditioned on periods where `benchmark` is gt or ge to
`threshold`.
Upside capture ratios are calculated by taking the fund's
monthly return during the periods of positive benchmark
performance and dividing it by the benchmark return.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is gt/ge `threshold`.
compare_op : {'ge', 'gt'}
Comparison operator used to compare to `threshold`.
'gt' is greater-than; 'ge' is greater-than-or-equal.
Returns
-------
float
Note
----
This metric uses geometric, not arithmetic, mean return.
### Response:
def up_capture(self, benchmark, threshold=0.0, compare_op="ge"):
"""Upside capture ratio.
Measures the performance of `self` relative to benchmark
conditioned on periods where `benchmark` is gt or ge to
`threshold`.
Upside capture ratios are calculated by taking the fund's
monthly return during the periods of positive benchmark
performance and dividing it by the benchmark return.
[Source: CFA Institute]
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
threshold : float, default 0.
The threshold at which the comparison should be done.
`self` and `benchmark` are "filtered" to periods where
`benchmark` is gt/ge `threshold`.
compare_op : {'ge', 'gt'}
Comparison operator used to compare to `threshold`.
'gt' is greater-than; 'ge' is greater-than-or-equal.
Returns
-------
float
Note
----
This metric uses geometric, not arithmetic, mean return.
"""
slf, bm = self.upmarket_filter(
benchmark=benchmark,
threshold=threshold,
compare_op=compare_op,
include_benchmark=True,
)
return slf.geomean() / bm.geomean() |
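For intuition, here is a rough numpy-only sketch of the same calculation; `up_capture_sketch` and its geometric-mean helper are illustrative stand-ins and may differ in detail from the library's `upmarket_filter` and `geomean`:

import numpy as np

def up_capture_sketch(fund, benchmark, threshold=0.0):
    def geomean(returns):
        # geometric mean per-period return
        return np.prod(1.0 + returns) ** (1.0 / returns.size) - 1.0

    fund = np.asarray(fund, dtype=float)
    benchmark = np.asarray(benchmark, dtype=float)
    mask = benchmark >= threshold  # the 'ge' comparison against the threshold
    return geomean(fund[mask]) / geomean(benchmark[mask])

# The fund beats the benchmark in both up-market periods, so the ratio is above 1.
print(up_capture_sketch([0.02, -0.01, 0.03], [0.01, -0.02, 0.02]))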
def get_annotated_fields(cls, instance, select=lambda *p: True):
"""Get dict of {annotated fields: annotations} by cls of
input instance.
        :return: a dict of {annotated fields: annotations}.
:rtype: dict
"""
result = {}
for _, member in getmembers(instance):
try:
annotations = cls.get_annotations(
target=member, ctx=instance, select=select
)
except TypeError:
pass
else:
try:
if annotations:
result[member] = annotations
except TypeError: # if field is an object proxy
pass
return result | Get dict of {annotated fields: annotations} by cls of
input instance.
    :return: a dict of {annotated fields: annotations}.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Get dict of {annotated fields: annotations} by cls of
input instance.
:return: a dict of {annotated fields: annotations}.
:rtype: dict
### Response:
def get_annotated_fields(cls, instance, select=lambda *p: True):
"""Get dict of {annotated fields: annotations} by cls of
input instance.
        :return: a dict of {annotated fields: annotations}.
:rtype: dict
"""
result = {}
for _, member in getmembers(instance):
try:
annotations = cls.get_annotations(
target=member, ctx=instance, select=select
)
except TypeError:
pass
else:
try:
if annotations:
result[member] = annotations
except TypeError: # if field is an object proxy
pass
return result |
def is_sub_to_all_kinds(self, *super_entity_kinds):
"""
        Each returned entity will have super entities whose combined entity_kinds include *super_entity_kinds
"""
if super_entity_kinds:
if len(super_entity_kinds) == 1:
# Optimize for the case of just one
has_subset = EntityRelationship.objects.filter(
super_entity__entity_kind=super_entity_kinds[0]).values_list('sub_entity', flat=True)
else:
# Get a list of entities that have super entities with all types
has_subset = EntityRelationship.objects.filter(
super_entity__entity_kind__in=super_entity_kinds).values('sub_entity').annotate(
Count('super_entity')).filter(super_entity__count=len(set(super_entity_kinds))).values_list(
'sub_entity', flat=True)
return self.filter(pk__in=has_subset)
else:
            return self | Each returned entity will have super entities whose combined entity_kinds include *super_entity_kinds | Below is the the instruction that describes the task:
### Input:
Each returned entity will have super entities whose combined entity_kinds include *super_entity_kinds
### Response:
def is_sub_to_all_kinds(self, *super_entity_kinds):
"""
        Each returned entity will have super entities whose combined entity_kinds include *super_entity_kinds
"""
if super_entity_kinds:
if len(super_entity_kinds) == 1:
# Optimize for the case of just one
has_subset = EntityRelationship.objects.filter(
super_entity__entity_kind=super_entity_kinds[0]).values_list('sub_entity', flat=True)
else:
# Get a list of entities that have super entities with all types
has_subset = EntityRelationship.objects.filter(
super_entity__entity_kind__in=super_entity_kinds).values('sub_entity').annotate(
Count('super_entity')).filter(super_entity__count=len(set(super_entity_kinds))).values_list(
'sub_entity', flat=True)
return self.filter(pk__in=has_subset)
else:
return self |
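The annotate/Count query expresses, in SQL, a "must have super entities of every requested kind" check. A plain-Python sketch of the same grouping logic, using made-up relationship data:

relationships = [  # (sub_entity, super_entity_kind) pairs, purely illustrative
    ("sub1", "team"), ("sub1", "org"),
    ("sub2", "team"),
]
wanted_kinds = {"team", "org"}

kinds_by_sub = {}
for sub, kind in relationships:
    kinds_by_sub.setdefault(sub, set()).add(kind)

# Keep only sub entities whose super-entity kinds cover every wanted kind.
print([sub for sub, kinds in kinds_by_sub.items() if wanted_kinds <= kinds])  # ['sub1']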
def connect(uri, factory=pymongo.MongoClient):
"""
Use the factory to establish a connection to uri.
"""
warnings.warn(
"do not use. Just call MongoClient directly.", DeprecationWarning)
return factory(uri) | Use the factory to establish a connection to uri. | Below is the the instruction that describes the task:
### Input:
Use the factory to establish a connection to uri.
### Response:
def connect(uri, factory=pymongo.MongoClient):
"""
Use the factory to establish a connection to uri.
"""
warnings.warn(
"do not use. Just call MongoClient directly.", DeprecationWarning)
return factory(uri) |
def delete_module(modname, paranoid=None):
""" Delete a module.http://stackoverflow.com/a/1668289
:param modname:
:param paranoid:
:return:
"""
from sys import modules
try:
thismod = modules[modname]
except KeyError:
raise ValueError(modname)
these_symbols = dir(thismod)
if paranoid:
try:
paranoid[:] # sequence support
except:
raise ValueError('must supply a finite list for paranoid')
else:
these_symbols = paranoid[:]
del modules[modname]
for mod in modules.values():
try:
delattr(mod, modname)
except AttributeError:
pass
if paranoid:
for symbol in these_symbols:
if symbol[:2] == '__': # ignore special symbols
continue
try:
delattr(mod, symbol)
except AttributeError:
                pass | Delete a module. http://stackoverflow.com/a/1668289
:param modname:
:param paranoid:
:return: | Below is the the instruction that describes the task:
### Input:
Delete a module. http://stackoverflow.com/a/1668289
:param modname:
:param paranoid:
:return:
### Response:
def delete_module(modname, paranoid=None):
""" Delete a module.http://stackoverflow.com/a/1668289
:param modname:
:param paranoid:
:return:
"""
from sys import modules
try:
thismod = modules[modname]
except KeyError:
raise ValueError(modname)
these_symbols = dir(thismod)
if paranoid:
try:
paranoid[:] # sequence support
except:
raise ValueError('must supply a finite list for paranoid')
else:
these_symbols = paranoid[:]
del modules[modname]
for mod in modules.values():
try:
delattr(mod, modname)
except AttributeError:
pass
if paranoid:
for symbol in these_symbols:
if symbol[:2] == '__': # ignore special symbols
continue
try:
delattr(mod, symbol)
except AttributeError:
pass |
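A hypothetical usage sketch (assuming `delete_module` from above is in scope): once `sys.modules` forgets a module, the next import re-executes it, which can be handy during interactive development.

import sys
import json

delete_module('json')
assert 'json' not in sys.modules
import json  # re-imported and re-executed from scratch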
def _make_png(data, level=6):
"""Convert numpy array to PNG byte array.
Parameters
----------
data : numpy.ndarray
Data must be (H, W, 3 | 4) with dtype = np.ubyte (np.uint8)
level : int
https://docs.python.org/2/library/zlib.html#zlib.compress
An integer from 0 to 9 controlling the level of compression:
* 1 is fastest and produces the least compression,
* 9 is slowest and produces the most.
* 0 is no compression.
The default value is 6.
Returns
-------
png : array
PNG formatted array
"""
# Eventually we might want to use ext/png.py for this, but this
    # routine *should* be faster b/c it's specialized for our use case
def mkchunk(data, name):
if isinstance(data, np.ndarray):
size = data.nbytes
else:
size = len(data)
chunk = np.empty(size + 12, dtype=np.ubyte)
chunk.data[0:4] = np.array(size, '>u4').tostring()
chunk.data[4:8] = name.encode('ASCII')
chunk.data[8:8 + size] = data
# and-ing may not be necessary, but is done for safety:
# https://docs.python.org/3/library/zlib.html#zlib.crc32
chunk.data[-4:] = np.array(zlib.crc32(chunk[4:-4]) & 0xffffffff,
'>u4').tostring()
return chunk
if data.dtype != np.ubyte:
raise TypeError('data.dtype must be np.ubyte (np.uint8)')
dim = data.shape[2] # Dimension
if dim not in (3, 4):
raise TypeError('data.shape[2] must be in (3, 4)')
# www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IHDR
if dim == 4:
ctyp = 0b0110 # RGBA
else:
ctyp = 0b0010 # RGB
# www.libpng.org/pub/png/spec/1.2/PNG-Structure.html
header = b'\x89PNG\x0d\x0a\x1a\x0a' # header
h, w = data.shape[:2]
depth = data.itemsize * 8
ihdr = struct.pack('!IIBBBBB', w, h, depth, ctyp, 0, 0, 0)
c1 = mkchunk(ihdr, 'IHDR')
# www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IDAT
# insert filter byte at each scanline
idat = np.empty((h, w * dim + 1), dtype=np.ubyte)
idat[:, 1:] = data.reshape(h, w * dim)
idat[:, 0] = 0
comp_data = zlib.compress(idat, level)
c2 = mkchunk(comp_data, 'IDAT')
c3 = mkchunk(np.empty((0,), dtype=np.ubyte), 'IEND')
# concatenate
lh = len(header)
png = np.empty(lh + c1.nbytes + c2.nbytes + c3.nbytes, dtype=np.ubyte)
png.data[:lh] = header
p = lh
for chunk in (c1, c2, c3):
png[p:p + len(chunk)] = chunk
p += chunk.nbytes
return png | Convert numpy array to PNG byte array.
Parameters
----------
data : numpy.ndarray
Data must be (H, W, 3 | 4) with dtype = np.ubyte (np.uint8)
level : int
https://docs.python.org/2/library/zlib.html#zlib.compress
An integer from 0 to 9 controlling the level of compression:
* 1 is fastest and produces the least compression,
* 9 is slowest and produces the most.
* 0 is no compression.
The default value is 6.
Returns
-------
png : array
PNG formatted array | Below is the the instruction that describes the task:
### Input:
Convert numpy array to PNG byte array.
Parameters
----------
data : numpy.ndarray
Data must be (H, W, 3 | 4) with dtype = np.ubyte (np.uint8)
level : int
https://docs.python.org/2/library/zlib.html#zlib.compress
An integer from 0 to 9 controlling the level of compression:
* 1 is fastest and produces the least compression,
* 9 is slowest and produces the most.
* 0 is no compression.
The default value is 6.
Returns
-------
png : array
PNG formatted array
### Response:
def _make_png(data, level=6):
"""Convert numpy array to PNG byte array.
Parameters
----------
data : numpy.ndarray
Data must be (H, W, 3 | 4) with dtype = np.ubyte (np.uint8)
level : int
https://docs.python.org/2/library/zlib.html#zlib.compress
An integer from 0 to 9 controlling the level of compression:
* 1 is fastest and produces the least compression,
* 9 is slowest and produces the most.
* 0 is no compression.
The default value is 6.
Returns
-------
png : array
PNG formatted array
"""
# Eventually we might want to use ext/png.py for this, but this
    # routine *should* be faster b/c it's specialized for our use case
def mkchunk(data, name):
if isinstance(data, np.ndarray):
size = data.nbytes
else:
size = len(data)
chunk = np.empty(size + 12, dtype=np.ubyte)
chunk.data[0:4] = np.array(size, '>u4').tostring()
chunk.data[4:8] = name.encode('ASCII')
chunk.data[8:8 + size] = data
# and-ing may not be necessary, but is done for safety:
# https://docs.python.org/3/library/zlib.html#zlib.crc32
chunk.data[-4:] = np.array(zlib.crc32(chunk[4:-4]) & 0xffffffff,
'>u4').tostring()
return chunk
if data.dtype != np.ubyte:
raise TypeError('data.dtype must be np.ubyte (np.uint8)')
dim = data.shape[2] # Dimension
if dim not in (3, 4):
raise TypeError('data.shape[2] must be in (3, 4)')
# www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IHDR
if dim == 4:
ctyp = 0b0110 # RGBA
else:
ctyp = 0b0010 # RGB
# www.libpng.org/pub/png/spec/1.2/PNG-Structure.html
header = b'\x89PNG\x0d\x0a\x1a\x0a' # header
h, w = data.shape[:2]
depth = data.itemsize * 8
ihdr = struct.pack('!IIBBBBB', w, h, depth, ctyp, 0, 0, 0)
c1 = mkchunk(ihdr, 'IHDR')
# www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IDAT
# insert filter byte at each scanline
idat = np.empty((h, w * dim + 1), dtype=np.ubyte)
idat[:, 1:] = data.reshape(h, w * dim)
idat[:, 0] = 0
comp_data = zlib.compress(idat, level)
c2 = mkchunk(comp_data, 'IDAT')
c3 = mkchunk(np.empty((0,), dtype=np.ubyte), 'IEND')
# concatenate
lh = len(header)
png = np.empty(lh + c1.nbytes + c2.nbytes + c3.nbytes, dtype=np.ubyte)
png.data[:lh] = header
p = lh
for chunk in (c1, c2, c3):
png[p:p + len(chunk)] = chunk
p += chunk.nbytes
return png |
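A usage sketch (assuming `_make_png` from above is in scope): encode a small RGB gradient and write the resulting bytes to disk. The file name and ramp values are arbitrary.

import numpy as np

img = np.zeros((32, 32, 3), dtype=np.ubyte)
img[..., 0] = np.arange(32, dtype=np.ubyte) * 8             # red ramp across columns
img[..., 1] = (np.arange(32, dtype=np.ubyte) * 8)[:, None]  # green ramp down rows

png_bytes = _make_png(img, level=9)
with open('gradient.png', 'wb') as f:
    f.write(png_bytes.tobytes())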
def serialize(self):
"""Convert Entity to python dictionary."""
# Serialize fields to a dict
data = {}
for field_name in self:
value = self._values.get(field_name)
field = self.fields.get(field_name)
if value is not None:
if field.all:
value = [field.serialize(v) for v in value]
else:
value = field.serialize(value)
# Skip empty fields unless field.null
if not field.null and ((field.all and value == []) or (not field.all and value in {None, ''})):
continue
data[field.name] = value
return data | Convert Entity to python dictionary. | Below is the the instruction that describes the task:
### Input:
Convert Entity to python dictionary.
### Response:
def serialize(self):
"""Convert Entity to python dictionary."""
# Serialize fields to a dict
data = {}
for field_name in self:
value = self._values.get(field_name)
field = self.fields.get(field_name)
if value is not None:
if field.all:
value = [field.serialize(v) for v in value]
else:
value = field.serialize(value)
# Skip empty fields unless field.null
if not field.null and ((field.all and value == []) or (not field.all and value in {None, ''})):
continue
data[field.name] = value
return data |
def get_code_num(s: str) -> Optional[int]:
""" Get code number from an escape code.
Raises InvalidEscapeCode if an invalid number is found.
"""
if ';' in s:
# Extended fore/back codes.
numberstr = s.rpartition(';')[-1][:-1]
else:
# Fore, back, style, codes.
numberstr = s.rpartition('[')[-1][:-1]
num = try_parse_int(
numberstr,
default=None,
minimum=0,
maximum=255
)
if num is None:
raise InvalidEscapeCode(numberstr)
return num | Get code number from an escape code.
Raises InvalidEscapeCode if an invalid number is found. | Below is the the instruction that describes the task:
### Input:
Get code number from an escape code.
Raises InvalidEscapeCode if an invalid number is found.
### Response:
def get_code_num(s: str) -> Optional[int]:
""" Get code number from an escape code.
Raises InvalidEscapeCode if an invalid number is found.
"""
if ';' in s:
# Extended fore/back codes.
numberstr = s.rpartition(';')[-1][:-1]
else:
# Fore, back, style, codes.
numberstr = s.rpartition('[')[-1][:-1]
num = try_parse_int(
numberstr,
default=None,
minimum=0,
maximum=255
)
if num is None:
raise InvalidEscapeCode(numberstr)
return num |
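Illustrative calls (assuming `get_code_num` and its `try_parse_int` helper are in scope): the basic form takes the digits after '[', while the extended 256-color form takes the digits after the last ';'.

print(get_code_num('\x1b[31m'))        # 31  -> basic red foreground code
print(get_code_num('\x1b[38;5;160m'))  # 160 -> extended 256-color foreground code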
def var_dump(*obs):
"""
    shows structured information of an object, list, tuple, etc.
"""
i = 0
for x in obs:
str = var_dump_output(x, 0, ' ', '\n', True)
print (str.strip())
#dump(x, 0, i, '', object)
        i += 1 | shows structured information of an object, list, tuple, etc. | Below is the the instruction that describes the task:
### Input:
shows structured information of an object, list, tuple, etc.
### Response:
def var_dump(*obs):
"""
    shows structured information of an object, list, tuple, etc.
"""
i = 0
for x in obs:
str = var_dump_output(x, 0, ' ', '\n', True)
print (str.strip())
#dump(x, 0, i, '', object)
i += 1 |
def maybe_from_tuple(tup_or_range):
"""Convert a tuple into a range but pass ranges through silently.
This is useful to ensure that input is a range so that attributes may
be accessed with `.start`, `.stop` or so that containment checks are
constant time.
Parameters
----------
tup_or_range : tuple or range
A tuple to pass to from_tuple or a range to return.
Returns
-------
range : range
The input to convert to a range.
Raises
------
ValueError
Raised when the input is not a tuple or a range. ValueError is also
raised if the input is a tuple whose length is not 2 or 3.
"""
if isinstance(tup_or_range, tuple):
return from_tuple(tup_or_range)
elif isinstance(tup_or_range, range):
return tup_or_range
raise ValueError(
'maybe_from_tuple expects a tuple or range, got %r: %r' % (
type(tup_or_range).__name__,
tup_or_range,
),
) | Convert a tuple into a range but pass ranges through silently.
This is useful to ensure that input is a range so that attributes may
be accessed with `.start`, `.stop` or so that containment checks are
constant time.
Parameters
----------
tup_or_range : tuple or range
A tuple to pass to from_tuple or a range to return.
Returns
-------
range : range
The input to convert to a range.
Raises
------
ValueError
Raised when the input is not a tuple or a range. ValueError is also
raised if the input is a tuple whose length is not 2 or 3. | Below is the the instruction that describes the task:
### Input:
Convert a tuple into a range but pass ranges through silently.
This is useful to ensure that input is a range so that attributes may
be accessed with `.start`, `.stop` or so that containment checks are
constant time.
Parameters
----------
tup_or_range : tuple or range
A tuple to pass to from_tuple or a range to return.
Returns
-------
range : range
The input to convert to a range.
Raises
------
ValueError
Raised when the input is not a tuple or a range. ValueError is also
raised if the input is a tuple whose length is not 2 or 3.
### Response:
def maybe_from_tuple(tup_or_range):
"""Convert a tuple into a range but pass ranges through silently.
This is useful to ensure that input is a range so that attributes may
be accessed with `.start`, `.stop` or so that containment checks are
constant time.
Parameters
----------
tup_or_range : tuple or range
A tuple to pass to from_tuple or a range to return.
Returns
-------
range : range
The input to convert to a range.
Raises
------
ValueError
Raised when the input is not a tuple or a range. ValueError is also
raised if the input is a tuple whose length is not 2 or 3.
"""
if isinstance(tup_or_range, tuple):
return from_tuple(tup_or_range)
elif isinstance(tup_or_range, range):
return tup_or_range
raise ValueError(
'maybe_from_tuple expects a tuple or range, got %r: %r' % (
type(tup_or_range).__name__,
tup_or_range,
),
) |
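Illustrative behaviour, assuming `from_tuple` maps a `(start, stop[, step])` tuple to the corresponding `range`:

print(maybe_from_tuple((0, 10)))   # range(0, 10), converted via from_tuple
print(maybe_from_tuple(range(5)))  # range(0, 5), passed through unchanged
maybe_from_tuple([0, 10])          # raises ValueError: neither a tuple nor a range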
def appendSpacePadding(str, blocksize=AES_blocksize):
'Pad with spaces'
pad_len = paddingLength(len(str), blocksize)
    padding = ' '*pad_len
return str + padding | Pad with spaces | Below is the the instruction that describes the task:
### Input:
Pad with spaces
### Response:
def appendSpacePadding(str, blocksize=AES_blocksize):
'Pad with spaces'
pad_len = paddingLength(len(str), blocksize)
    padding = ' '*pad_len
return str + padding |
def from_config(cls, cfg, **kwargs):
"""return an instance configured with the ``cfg`` dict"""
cfg = dict(cfg, **kwargs)
pythonpath = cfg.get('pythonpath', [])
if 'here' in cfg:
pythonpath.append(cfg['here'])
for path in pythonpath:
sys.path.append(os.path.expanduser(path))
prog = cls.server and 'irc3d' or 'irc3'
if cfg.get('debug'):
cls.venusian_categories.append(prog + '.debug')
if cfg.get('interactive'): # pragma: no cover
import irc3.testing
context = getattr(irc3.testing, cls.__name__)(**cfg)
else:
context = cls(**cfg)
if cfg.get('raw'):
context.include('irc3.plugins.log',
venusian_categories=[prog + '.debug'])
return context | return an instance configured with the ``cfg`` dict | Below is the the instruction that describes the task:
### Input:
return an instance configured with the ``cfg`` dict
### Response:
def from_config(cls, cfg, **kwargs):
"""return an instance configured with the ``cfg`` dict"""
cfg = dict(cfg, **kwargs)
pythonpath = cfg.get('pythonpath', [])
if 'here' in cfg:
pythonpath.append(cfg['here'])
for path in pythonpath:
sys.path.append(os.path.expanduser(path))
prog = cls.server and 'irc3d' or 'irc3'
if cfg.get('debug'):
cls.venusian_categories.append(prog + '.debug')
if cfg.get('interactive'): # pragma: no cover
import irc3.testing
context = getattr(irc3.testing, cls.__name__)(**cfg)
else:
context = cls(**cfg)
if cfg.get('raw'):
context.include('irc3.plugins.log',
venusian_categories=[prog + '.debug'])
return context |
def validate_depth(self, depth: DepthDefinitionType) -> Optional[int]:
""" Converts the depth to int and validates that the value can be used.
:raise ValueError: If the provided depth is not valid
"""
if depth is not None:
try:
depth = int(depth)
except ValueError:
raise ValueError(f"Depth '{depth}' can't be converted to int.")
if depth < 1:
raise ValueError(f"Depth '{depth}' isn't a positive number")
return depth
return None | Converts the depth to int and validates that the value can be used.
:raise ValueError: If the provided depth is not valid | Below is the the instruction that describes the task:
### Input:
Converts the depth to int and validates that the value can be used.
:raise ValueError: If the provided depth is not valid
### Response:
def validate_depth(self, depth: DepthDefinitionType) -> Optional[int]:
""" Converts the depth to int and validates that the value can be used.
:raise ValueError: If the provided depth is not valid
"""
if depth is not None:
try:
depth = int(depth)
except ValueError:
raise ValueError(f"Depth '{depth}' can't be converted to int.")
if depth < 1:
raise ValueError(f"Depth '{depth}' isn't a positive number")
return depth
return None |
def read(self):
"""Return a single byte from the output buffer
"""
if self._output_buffer:
b, self._output_buffer = (self._output_buffer[0:1],
self._output_buffer[1:])
return b
return b'' | Return a single byte from the output buffer | Below is the the instruction that describes the task:
### Input:
Return a single byte from the output buffer
### Response:
def read(self):
"""Return a single byte from the output buffer
"""
if self._output_buffer:
b, self._output_buffer = (self._output_buffer[0:1],
self._output_buffer[1:])
return b
return b'' |
def beta_to_uni(text, strict=False):
"""
Converts the given text from betacode to unicode.
Args:
text: The beta code text to convert. All of this text must be betacode.
strict: Flag to allow for flexible diacritic order on input.
Returns:
The converted text.
"""
# Check if the requested configuration for conversion already has a trie
# stored otherwise convert it.
param_key = (strict,)
try:
t = _BETA_CONVERSION_TRIES[param_key]
except KeyError:
t = _create_conversion_trie(*param_key)
_BETA_CONVERSION_TRIES[param_key] = t
transform = []
idx = 0
possible_word_boundary = False
while idx < len(text):
if possible_word_boundary and _penultimate_sigma_word_final(transform):
transform[-2] = _FINAL_LC_SIGMA
step = t.longest_prefix(text[idx:idx + _MAX_BETA_TOKEN_LEN])
if step:
possible_word_boundary = text[idx] in _BETA_PUNCTUATION
key, value = step
transform.append(value)
idx += len(key)
else:
possible_word_boundary = True
transform.append(text[idx])
idx += 1
# Check one last time in case there is some whitespace or punctuation at the
# end and check if the last character is a sigma.
if possible_word_boundary and _penultimate_sigma_word_final(transform):
transform[-2] = _FINAL_LC_SIGMA
elif len(transform) > 0 and transform[-1] == _MEDIAL_LC_SIGMA:
transform[-1] = _FINAL_LC_SIGMA
converted = ''.join(transform)
return converted | Converts the given text from betacode to unicode.
Args:
text: The beta code text to convert. All of this text must be betacode.
strict: Flag to allow for flexible diacritic order on input.
Returns:
The converted text. | Below is the the instruction that describes the task:
### Input:
Converts the given text from betacode to unicode.
Args:
text: The beta code text to convert. All of this text must be betacode.
strict: Flag to allow for flexible diacritic order on input.
Returns:
The converted text.
### Response:
def beta_to_uni(text, strict=False):
"""
Converts the given text from betacode to unicode.
Args:
text: The beta code text to convert. All of this text must be betacode.
strict: Flag to allow for flexible diacritic order on input.
Returns:
The converted text.
"""
# Check if the requested configuration for conversion already has a trie
# stored otherwise convert it.
param_key = (strict,)
try:
t = _BETA_CONVERSION_TRIES[param_key]
except KeyError:
t = _create_conversion_trie(*param_key)
_BETA_CONVERSION_TRIES[param_key] = t
transform = []
idx = 0
possible_word_boundary = False
while idx < len(text):
if possible_word_boundary and _penultimate_sigma_word_final(transform):
transform[-2] = _FINAL_LC_SIGMA
step = t.longest_prefix(text[idx:idx + _MAX_BETA_TOKEN_LEN])
if step:
possible_word_boundary = text[idx] in _BETA_PUNCTUATION
key, value = step
transform.append(value)
idx += len(key)
else:
possible_word_boundary = True
transform.append(text[idx])
idx += 1
# Check one last time in case there is some whitespace or punctuation at the
# end and check if the last character is a sigma.
if possible_word_boundary and _penultimate_sigma_word_final(transform):
transform[-2] = _FINAL_LC_SIGMA
elif len(transform) > 0 and transform[-1] == _MEDIAL_LC_SIGMA:
transform[-1] = _FINAL_LC_SIGMA
converted = ''.join(transform)
return converted |
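An illustrative call (assuming `beta_to_uni` from above): in betacode, ')' marks smooth breathing, '/' an acute accent and '=' a circumflex, so the opening words of the Iliad should come out as accented Greek.

print(beta_to_uni('mh=nin a)/eide qea/'))  # expected roughly: μῆνιν ἄειδε θεά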
def activate(self, key):
"""
Activates a new registree on the LEX with given activation key
:rtype: None
"""
url = self._base + 'user/activate'
r = requests.get(url, params={
'activation_key': key
})
r.raise_for_status() | Activates a new registree on the LEX with given activation key
:rtype: None | Below is the the instruction that describes the task:
### Input:
Activates a new registree on the LEX with given activation key
:rtype: None
### Response:
def activate(self, key):
"""
Activates a new registree on the LEX with given activation key
:rtype: None
"""
url = self._base + 'user/activate'
r = requests.get(url, params={
'activation_key': key
})
r.raise_for_status() |
def _maybe_parse_basic_type(self):
"""Try to parse a basic type (str, bool, number)."""
token_value = ''
# Allow a leading dash to handle negative numbers.
if self._current_token.value == '-':
token_value += self._current_token.value
self._advance()
basic_type_tokens = [tokenize.NAME, tokenize.NUMBER, tokenize.STRING]
continue_parsing = self._current_token.kind in basic_type_tokens
if not continue_parsing:
return False, None
while continue_parsing:
token_value += self._current_token.value
try:
value = ast.literal_eval(token_value)
except Exception as e: # pylint: disable=broad-except
err_str = "{}\n Failed to parse token '{}'"
self._raise_syntax_error(err_str.format(e, token_value))
was_string = self._current_token.kind == tokenize.STRING
self._advance()
is_string = self._current_token.kind == tokenize.STRING
continue_parsing = was_string and is_string
return True, value | Try to parse a basic type (str, bool, number). | Below is the the instruction that describes the task:
### Input:
Try to parse a basic type (str, bool, number).
### Response:
def _maybe_parse_basic_type(self):
"""Try to parse a basic type (str, bool, number)."""
token_value = ''
# Allow a leading dash to handle negative numbers.
if self._current_token.value == '-':
token_value += self._current_token.value
self._advance()
basic_type_tokens = [tokenize.NAME, tokenize.NUMBER, tokenize.STRING]
continue_parsing = self._current_token.kind in basic_type_tokens
if not continue_parsing:
return False, None
while continue_parsing:
token_value += self._current_token.value
try:
value = ast.literal_eval(token_value)
except Exception as e: # pylint: disable=broad-except
err_str = "{}\n Failed to parse token '{}'"
self._raise_syntax_error(err_str.format(e, token_value))
was_string = self._current_token.kind == tokenize.STRING
self._advance()
is_string = self._current_token.kind == tokenize.STRING
continue_parsing = was_string and is_string
return True, value |
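The trailing while-loop exists because Python's tokenizer reports adjacent string literals as separate STRING tokens; joining their raw text before `ast.literal_eval` reproduces the implicit concatenation. A minimal sketch of that idea:

import ast

raw_tokens = ["'hello '", "'world'"]          # two consecutive STRING tokens
print(ast.literal_eval(''.join(raw_tokens)))  # 'hello world' via implicit concatenation
print(ast.literal_eval('-3.5'))               # the leading '-' case handled up front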
def resolve_calls(func):
"""Parse a function into an AST with function calls resolved.
Since the calls are resolved using the global and local namespace of the
function it means that procedural parameters (i.e. functions passed as
arguments) won't be resolved.
Similarly, functions defined inside of the function that we are trying to
resolve won't be resolved, since they are not in the local namespace of the
outer function.
The function definition itself is also annotated, so that it can be matched
to calls to it in other functions.
Args:
func: The function whose calls are being resolved.
Returns:
node: An AST where each `Call` node has a `func` annotation with the
function handle that the call resolves to.
Raises:
    AttributeError: When a function used on the RHS of an assignment cannot
be resolved (because it was passed as an argument or was defined in the
body of the function).
"""
node = quoting.parse_function(func)
ResolveCalls(func).visit(node)
return node | Parse a function into an AST with function calls resolved.
Since the calls are resolved using the global and local namespace of the
function it means that procedural parameters (i.e. functions passed as
arguments) won't be resolved.
Similarly, functions defined inside of the function that we are trying to
resolve won't be resolved, since they are not in the local namespace of the
outer function.
The function definition itself is also annotated, so that it can be matched
to calls to it in other functions.
Args:
func: The function whose calls are being resolved.
Returns:
node: An AST where each `Call` node has a `func` annotation with the
function handle that the call resolves to.
Raises:
    AttributeError: When a function used on the RHS of an assignment cannot
be resolved (because it was passed as an argument or was defined in the
body of the function). | Below is the the instruction that describes the task:
### Input:
Parse a function into an AST with function calls resolved.
Since the calls are resolved using the global and local namespace of the
function it means that procedural parameters (i.e. functions passed as
arguments) won't be resolved.
Similarly, functions defined inside of the function that we are trying to
resolve won't be resolved, since they are not in the local namespace of the
outer function.
The function definition itself is also annotated, so that it can be matched
to calls to it in other functions.
Args:
func: The function whose calls are being resolved.
Returns:
node: An AST where each `Call` node has a `func` annotation with the
function handle that the call resolves to.
Raises:
AttributeError: When a function used on the RHS of an assignment cannot
be resolved (because it was passed as an argument or was defined in the
body of the function).
### Response:
def resolve_calls(func):
"""Parse a function into an AST with function calls resolved.
Since the calls are resolved using the global and local namespace of the
function it means that procedural parameters (i.e. functions passed as
arguments) won't be resolved.
Similarly, functions defined inside of the function that we are trying to
resolve won't be resolved, since they are not in the local namespace of the
outer function.
The function definition itself is also annotated, so that it can be matched
to calls to it in other functions.
Args:
func: The function whose calls are being resolved.
Returns:
node: An AST where each `Call` node has a `func` annotation with the
function handle that the call resolves to.
Raises:
    AttributeError: When a function used on the RHS of an assignment cannot
be resolved (because it was passed as an argument or was defined in the
body of the function).
"""
node = quoting.parse_function(func)
ResolveCalls(func).visit(node)
return node |
def update_momentum_by_name(self, name, **kwargs):
"""Updates a momentum by the given name.
:param name: the momentum name.
:param velocity: (keyword-only) a new value for `velocity`.
:param since: (keyword-only) a new value for `since`.
:param until: (keyword-only) a new value for `until`.
:returns: a momentum updated.
:raises TypeError: `name` is ``None``.
:raises KeyError: failed to find a momentum named `name`.
"""
momentum = self.pop_momentum_by_name(name)
velocity, since, until = momentum[:3]
velocity = kwargs.get('velocity', velocity)
since = kwargs.get('since', since)
until = kwargs.get('until', until)
return self.add_momentum(velocity, since, until, name) | Updates a momentum by the given name.
:param name: the momentum name.
:param velocity: (keyword-only) a new value for `velocity`.
:param since: (keyword-only) a new value for `since`.
:param until: (keyword-only) a new value for `until`.
:returns: a momentum updated.
:raises TypeError: `name` is ``None``.
:raises KeyError: failed to find a momentum named `name`. | Below is the the instruction that describes the task:
### Input:
Updates a momentum by the given name.
:param name: the momentum name.
:param velocity: (keyword-only) a new value for `velocity`.
:param since: (keyword-only) a new value for `since`.
:param until: (keyword-only) a new value for `until`.
:returns: a momentum updated.
:raises TypeError: `name` is ``None``.
:raises KeyError: failed to find a momentum named `name`.
### Response:
def update_momentum_by_name(self, name, **kwargs):
"""Updates a momentum by the given name.
:param name: the momentum name.
:param velocity: (keyword-only) a new value for `velocity`.
:param since: (keyword-only) a new value for `since`.
:param until: (keyword-only) a new value for `until`.
:returns: a momentum updated.
:raises TypeError: `name` is ``None``.
:raises KeyError: failed to find a momentum named `name`.
"""
momentum = self.pop_momentum_by_name(name)
velocity, since, until = momentum[:3]
velocity = kwargs.get('velocity', velocity)
since = kwargs.get('since', since)
until = kwargs.get('until', until)
return self.add_momentum(velocity, since, until, name) |
def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
VPCName=None, VPCRegion=None, Comment=None,
region=None, key=None, keyid=None, profile=None):
'''
Associates an Amazon VPC with a private hosted zone.
To perform the association, the VPC and the private hosted zone must already exist. You can't
convert a public hosted zone into a private hosted zone. If you want to associate a VPC from
one AWS account with a zone from a another, the AWS account owning the hosted zone must first
submit a CreateVPCAssociationAuthorization (using create_vpc_association_authorization() or by
other means, such as the AWS console). With that done, the account owning the VPC can then call
associate_vpc_with_hosted_zone() to create the association.
Note that if both sides happen to be within the same account, associate_vpc_with_hosted_zone()
is enough on its own, and there is no need for the CreateVPCAssociationAuthorization step.
Also note that looking up hosted zones by name (e.g. using the Name parameter) only works
within a single account - if you're associating a VPC to a zone in a different account, as
outlined above, you unfortunately MUST use the HostedZoneId parameter exclusively.
HostedZoneId
The unique Zone Identifier for the Hosted Zone.
Name
The domain name associated with the Hosted Zone(s).
VPCId
When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCName.
VPCName
When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCId.
VPCRegion
When working with a private hosted zone, the region of the associated VPC is required. If
not provided, an effort will be made to determine it from VPCId or VPCName, if possible. If
this fails, you'll need to provide an explicit value for VPCRegion.
Comment
Any comments you want to include about the change being made.
CLI Example::
salt myminion boto3_route53.associate_vpc_with_hosted_zone \
Name=example.org. VPCName=myVPC \
VPCRegion=us-east-1 Comment="Whoo-hoo! I added another VPC."
'''
if not _exactly_one((HostedZoneId, Name)):
raise SaltInvocationError('Exactly one of either HostedZoneId or Name is required.')
if not _exactly_one((VPCId, VPCName)):
raise SaltInvocationError('Exactly one of either VPCId or VPCName is required.')
if Name:
# {'PrivateZone': True} because you can only associate VPCs with private hosted zones.
args = {'Name': Name, 'PrivateZone': True, 'region': region,
'key': key, 'keyid': keyid, 'profile': profile}
zone = find_hosted_zone(**args)
if not zone:
log.error(
"Couldn't resolve domain name %s to a private hosted zone ID.",
Name
)
return False
HostedZoneId = zone[0]['HostedZone']['Id']
vpcs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId, name=VPCName, region=region, key=key,
keyid=keyid, profile=profile).get('vpcs', [])
if VPCRegion and vpcs:
vpcs = [v for v in vpcs if v['region'] == VPCRegion]
if not vpcs:
log.error('No VPC matching the given criteria found.')
return False
if len(vpcs) > 1:
log.error('Multiple VPCs matching the given criteria found: %s.',
', '.join([v['id'] for v in vpcs]))
return False
vpc = vpcs[0]
if VPCName:
VPCId = vpc['id']
if not VPCRegion:
VPCRegion = vpc['region']
args = {'HostedZoneId': HostedZoneId, 'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}}
args.update({'Comment': Comment}) if Comment is not None else None
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tries = 10
while tries:
try:
r = conn.associate_vpc_with_hosted_zone(**args)
return _wait_for_sync(r['ChangeInfo']['Id'], conn)
except ClientError as e:
if e.response.get('Error', {}).get('Code') == 'ConflictingDomainExists':
log.debug('VPC Association already exists.')
# return True since the current state is the desired one
return True
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)
tries -= 1
continue
log.error('Failed to associate VPC %s with hosted zone %s: %s',
VPCName or VPCId, Name or HostedZoneId, e)
return False | Associates an Amazon VPC with a private hosted zone.
To perform the association, the VPC and the private hosted zone must already exist. You can't
convert a public hosted zone into a private hosted zone. If you want to associate a VPC from
    one AWS account with a zone from another, the AWS account owning the hosted zone must first
submit a CreateVPCAssociationAuthorization (using create_vpc_association_authorization() or by
other means, such as the AWS console). With that done, the account owning the VPC can then call
associate_vpc_with_hosted_zone() to create the association.
Note that if both sides happen to be within the same account, associate_vpc_with_hosted_zone()
is enough on its own, and there is no need for the CreateVPCAssociationAuthorization step.
Also note that looking up hosted zones by name (e.g. using the Name parameter) only works
within a single account - if you're associating a VPC to a zone in a different account, as
outlined above, you unfortunately MUST use the HostedZoneId parameter exclusively.
HostedZoneId
The unique Zone Identifier for the Hosted Zone.
Name
The domain name associated with the Hosted Zone(s).
VPCId
When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCName.
VPCName
When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCId.
VPCRegion
When working with a private hosted zone, the region of the associated VPC is required. If
not provided, an effort will be made to determine it from VPCId or VPCName, if possible. If
this fails, you'll need to provide an explicit value for VPCRegion.
Comment
Any comments you want to include about the change being made.
CLI Example::
salt myminion boto3_route53.associate_vpc_with_hosted_zone \
Name=example.org. VPCName=myVPC \
VPCRegion=us-east-1 Comment="Whoo-hoo! I added another VPC." | Below is the the instruction that describes the task:
### Input:
Associates an Amazon VPC with a private hosted zone.
To perform the association, the VPC and the private hosted zone must already exist. You can't
convert a public hosted zone into a private hosted zone. If you want to associate a VPC from
one AWS account with a zone from another, the AWS account owning the hosted zone must first
submit a CreateVPCAssociationAuthorization (using create_vpc_association_authorization() or by
other means, such as the AWS console). With that done, the account owning the VPC can then call
associate_vpc_with_hosted_zone() to create the association.
Note that if both sides happen to be within the same account, associate_vpc_with_hosted_zone()
is enough on its own, and there is no need for the CreateVPCAssociationAuthorization step.
Also note that looking up hosted zones by name (e.g. using the Name parameter) only works
within a single account - if you're associating a VPC to a zone in a different account, as
outlined above, you unfortunately MUST use the HostedZoneId parameter exclusively.
HostedZoneId
The unique Zone Identifier for the Hosted Zone.
Name
The domain name associated with the Hosted Zone(s).
VPCId
When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCName.
VPCName
When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCId.
VPCRegion
When working with a private hosted zone, the region of the associated VPC is required. If
not provided, an effort will be made to determine it from VPCId or VPCName, if possible. If
this fails, you'll need to provide an explicit value for VPCRegion.
Comment
Any comments you want to include about the change being made.
CLI Example::
salt myminion boto3_route53.associate_vpc_with_hosted_zone \
Name=example.org. VPCName=myVPC \
VPCRegion=us-east-1 Comment="Whoo-hoo! I added another VPC."
### Response:
def associate_vpc_with_hosted_zone(HostedZoneId=None, Name=None, VPCId=None,
VPCName=None, VPCRegion=None, Comment=None,
region=None, key=None, keyid=None, profile=None):
'''
Associates an Amazon VPC with a private hosted zone.
To perform the association, the VPC and the private hosted zone must already exist. You can't
convert a public hosted zone into a private hosted zone. If you want to associate a VPC from
    one AWS account with a zone from another, the AWS account owning the hosted zone must first
submit a CreateVPCAssociationAuthorization (using create_vpc_association_authorization() or by
other means, such as the AWS console). With that done, the account owning the VPC can then call
associate_vpc_with_hosted_zone() to create the association.
Note that if both sides happen to be within the same account, associate_vpc_with_hosted_zone()
is enough on its own, and there is no need for the CreateVPCAssociationAuthorization step.
Also note that looking up hosted zones by name (e.g. using the Name parameter) only works
within a single account - if you're associating a VPC to a zone in a different account, as
outlined above, you unfortunately MUST use the HostedZoneId parameter exclusively.
HostedZoneId
The unique Zone Identifier for the Hosted Zone.
Name
The domain name associated with the Hosted Zone(s).
VPCId
When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCName.
VPCName
When working with a private hosted zone, either the VPC ID or VPC Name to associate with is
required. Exclusive with VPCId.
VPCRegion
When working with a private hosted zone, the region of the associated VPC is required. If
not provided, an effort will be made to determine it from VPCId or VPCName, if possible. If
this fails, you'll need to provide an explicit value for VPCRegion.
Comment
Any comments you want to include about the change being made.
CLI Example::
salt myminion boto3_route53.associate_vpc_with_hosted_zone \
Name=example.org. VPCName=myVPC \
VPCRegion=us-east-1 Comment="Whoo-hoo! I added another VPC."
'''
if not _exactly_one((HostedZoneId, Name)):
raise SaltInvocationError('Exactly one of either HostedZoneId or Name is required.')
if not _exactly_one((VPCId, VPCName)):
raise SaltInvocationError('Exactly one of either VPCId or VPCName is required.')
if Name:
# {'PrivateZone': True} because you can only associate VPCs with private hosted zones.
args = {'Name': Name, 'PrivateZone': True, 'region': region,
'key': key, 'keyid': keyid, 'profile': profile}
zone = find_hosted_zone(**args)
if not zone:
log.error(
"Couldn't resolve domain name %s to a private hosted zone ID.",
Name
)
return False
HostedZoneId = zone[0]['HostedZone']['Id']
vpcs = __salt__['boto_vpc.describe_vpcs'](vpc_id=VPCId, name=VPCName, region=region, key=key,
keyid=keyid, profile=profile).get('vpcs', [])
if VPCRegion and vpcs:
vpcs = [v for v in vpcs if v['region'] == VPCRegion]
if not vpcs:
log.error('No VPC matching the given criteria found.')
return False
if len(vpcs) > 1:
log.error('Multiple VPCs matching the given criteria found: %s.',
', '.join([v['id'] for v in vpcs]))
return False
vpc = vpcs[0]
if VPCName:
VPCId = vpc['id']
if not VPCRegion:
VPCRegion = vpc['region']
args = {'HostedZoneId': HostedZoneId, 'VPC': {'VPCId': VPCId, 'VPCRegion': VPCRegion}}
args.update({'Comment': Comment}) if Comment is not None else None
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tries = 10
while tries:
try:
r = conn.associate_vpc_with_hosted_zone(**args)
return _wait_for_sync(r['ChangeInfo']['Id'], conn)
except ClientError as e:
if e.response.get('Error', {}).get('Code') == 'ConflictingDomainExists':
log.debug('VPC Association already exists.')
# return True since the current state is the desired one
return True
if tries and e.response.get('Error', {}).get('Code') == 'Throttling':
log.debug('Throttled by AWS API.')
time.sleep(3)
tries -= 1
continue
log.error('Failed to associate VPC %s with hosted zone %s: %s',
VPCName or VPCId, Name or HostedZoneId, e)
return False |
def get_usb_controller_count_by_type(self, type_p):
"""Returns the number of USB controllers of the given type attached to the VM.
in type_p of type :class:`USBControllerType`
return controllers of type int
"""
if not isinstance(type_p, USBControllerType):
raise TypeError("type_p can only be an instance of type USBControllerType")
controllers = self._call("getUSBControllerCountByType",
in_p=[type_p])
return controllers | Returns the number of USB controllers of the given type attached to the VM.
in type_p of type :class:`USBControllerType`
return controllers of type int | Below is the the instruction that describes the task:
### Input:
Returns the number of USB controllers of the given type attached to the VM.
in type_p of type :class:`USBControllerType`
return controllers of type int
### Response:
def get_usb_controller_count_by_type(self, type_p):
"""Returns the number of USB controllers of the given type attached to the VM.
in type_p of type :class:`USBControllerType`
return controllers of type int
"""
if not isinstance(type_p, USBControllerType):
raise TypeError("type_p can only be an instance of type USBControllerType")
controllers = self._call("getUSBControllerCountByType",
in_p=[type_p])
return controllers |
def handle_cd(self, cmd):
"""changes dir """
if len(cmd) != 2:
print("Invalid syntax: cd path", file=self.output)
return
path = os.path.expandvars(os.path.expanduser(cmd[1]))
try:
os.chdir(path)
except OSError as ex:
print("cd: %s\n" % ex, file=self.output) | changes dir | Below is the the instruction that describes the task:
### Input:
changes dir
### Response:
def handle_cd(self, cmd):
"""changes dir """
if len(cmd) != 2:
print("Invalid syntax: cd path", file=self.output)
return
path = os.path.expandvars(os.path.expanduser(cmd[1]))
try:
os.chdir(path)
except OSError as ex:
print("cd: %s\n" % ex, file=self.output) |
def downstream_index_dinf(dinfdir_value, i, j):
"""find downslope coordinate for Dinf of TauDEM
Args:
dinfdir_value: dinf direction value
i: current row
j: current col
Returns:
downstream (row, col)s
"""
down_dirs = DinfUtil.dinf_downslope_direction(dinfdir_value)
down_coors = []
for dir_code in down_dirs:
row, col = D8Util.downstream_index(dir_code, i, j)
down_coors.append([row, col])
return down_coors | find downslope coordinate for Dinf of TauDEM
Args:
dinfdir_value: dinf direction value
i: current row
j: current col
Returns:
downstream (row, col)s | Below is the the instruction that describes the task:
### Input:
find downslope coordinate for Dinf of TauDEM
Args:
dinfdir_value: dinf direction value
i: current row
j: current col
Returns:
downstream (row, col)s
### Response:
def downstream_index_dinf(dinfdir_value, i, j):
"""find downslope coordinate for Dinf of TauDEM
Args:
dinfdir_value: dinf direction value
i: current row
j: current col
Returns:
downstream (row, col)s
"""
down_dirs = DinfUtil.dinf_downslope_direction(dinfdir_value)
down_coors = []
for dir_code in down_dirs:
row, col = D8Util.downstream_index(dir_code, i, j)
down_coors.append([row, col])
return down_coors |
def of(jvalue, bigdl_type="float"):
"""
    Create a Python Layer based on the given java value and the real type.
    :param jvalue: Java object created by Py4j
:return: A Python Layer
"""
def get_py_name(jclass_name):
if jclass_name == "StaticGraph" or jclass_name == "DynamicGraph":
return "Model"
elif jclass_name == "Input":
return "Layer"
else:
return jclass_name
jname = callBigDlFunc(bigdl_type,
"getRealClassNameOfJValue",
jvalue)
jpackage_name = ".".join(jname.split(".")[:-1])
pclass_name = get_py_name(jname.split(".")[-1])
if "com.intel.analytics.bigdl.nn.keras.Model" == jname or \
"com.intel.analytics.bigdl.nn.keras.Sequential" == jname:
base_module = importlib.import_module('bigdl.nn.keras.topology')
elif "com.intel.analytics.bigdl.nn.keras" == jpackage_name:
base_module = importlib.import_module('bigdl.nn.keras.layer')
else:
base_module = importlib.import_module('bigdl.nn.layer')
realClassName = "Layer" # The top base class
if pclass_name in dir(base_module):
realClassName = pclass_name
module = getattr(base_module, realClassName)
jvalue_creator = getattr(module, "from_jvalue")
model = jvalue_creator(jvalue, bigdl_type)
    return model | Create a Python Layer based on the given java value and the real type.
    :param jvalue: Java object created by Py4j
:return: A Python Layer | Below is the the instruction that describes the task:
### Input:
Create a Python Layer based on the given java value and the real type.
:param jvalue: Java object created by Py4j
:return: A Python Layer
### Response:
def of(jvalue, bigdl_type="float"):
"""
    Create a Python Layer based on the given java value and the real type.
    :param jvalue: Java object created by Py4j
:return: A Python Layer
"""
def get_py_name(jclass_name):
if jclass_name == "StaticGraph" or jclass_name == "DynamicGraph":
return "Model"
elif jclass_name == "Input":
return "Layer"
else:
return jclass_name
jname = callBigDlFunc(bigdl_type,
"getRealClassNameOfJValue",
jvalue)
jpackage_name = ".".join(jname.split(".")[:-1])
pclass_name = get_py_name(jname.split(".")[-1])
if "com.intel.analytics.bigdl.nn.keras.Model" == jname or \
"com.intel.analytics.bigdl.nn.keras.Sequential" == jname:
base_module = importlib.import_module('bigdl.nn.keras.topology')
elif "com.intel.analytics.bigdl.nn.keras" == jpackage_name:
base_module = importlib.import_module('bigdl.nn.keras.layer')
else:
base_module = importlib.import_module('bigdl.nn.layer')
realClassName = "Layer" # The top base class
if pclass_name in dir(base_module):
realClassName = pclass_name
module = getattr(base_module, realClassName)
jvalue_creator = getattr(module, "from_jvalue")
model = jvalue_creator(jvalue, bigdl_type)
return model |
def visit_create_library_command(element, compiler, **kw):
"""
Returns the actual sql query for the CreateLibraryCommand class.
"""
query = """
CREATE {or_replace} LIBRARY {name}
        LANGUAGE plpythonu
FROM :location
WITH CREDENTIALS AS :credentials
{region}
"""
bindparams = [
sa.bindparam(
'location',
value=element.location,
type_=sa.String,
),
sa.bindparam(
'credentials',
value=element.credentials,
type_=sa.String,
),
]
if element.region is not None:
bindparams.append(sa.bindparam(
'region',
value=element.region,
type_=sa.String,
))
quoted_lib_name = compiler.preparer.quote_identifier(element.library_name)
query = query.format(name=quoted_lib_name,
or_replace='OR REPLACE' if element.replace else '',
region='REGION :region' if element.region else '')
return compiler.process(sa.text(query).bindparams(*bindparams), **kw) | Returns the actual sql query for the CreateLibraryCommand class. | Below is the the instruction that describes the task:
### Input:
Returns the actual sql query for the CreateLibraryCommand class.
### Response:
def visit_create_library_command(element, compiler, **kw):
"""
Returns the actual sql query for the CreateLibraryCommand class.
"""
query = """
CREATE {or_replace} LIBRARY {name}
        LANGUAGE plpythonu
FROM :location
WITH CREDENTIALS AS :credentials
{region}
"""
bindparams = [
sa.bindparam(
'location',
value=element.location,
type_=sa.String,
),
sa.bindparam(
'credentials',
value=element.credentials,
type_=sa.String,
),
]
if element.region is not None:
bindparams.append(sa.bindparam(
'region',
value=element.region,
type_=sa.String,
))
quoted_lib_name = compiler.preparer.quote_identifier(element.library_name)
query = query.format(name=quoted_lib_name,
or_replace='OR REPLACE' if element.replace else '',
region='REGION :region' if element.region else '')
return compiler.process(sa.text(query).bindparams(*bindparams), **kw) |
def git_check():
"""
    Check that all changes, besides versioning files, are committed
:return:
"""
# check that changes staged for commit are pushed to origin
output = local('git diff --name-only | egrep -v "^(pynb/version.py)|(version.py)$" | tr "\\n" " "',
capture=True).strip()
if output:
fatal('Stage for commit and commit all changes first: {}'.format(output))
output = local('git diff --cached --name-only | egrep -v "^(pynb/version.py)|(version.py)$" | tr "\\n" " "',
capture=True).strip()
if output:
        fatal('Commit all changes first: {}'.format(output)) | Check that all changes, besides versioning files, are committed
:return: | Below is the the instruction that describes the task:
### Input:
Check that all changes, besides versioning files, are committed
:return:
### Response:
def git_check():
"""
    Check that all changes, besides versioning files, are committed
:return:
"""
# check that changes staged for commit are pushed to origin
output = local('git diff --name-only | egrep -v "^(pynb/version.py)|(version.py)$" | tr "\\n" " "',
capture=True).strip()
if output:
fatal('Stage for commit and commit all changes first: {}'.format(output))
output = local('git diff --cached --name-only | egrep -v "^(pynb/version.py)|(version.py)$" | tr "\\n" " "',
capture=True).strip()
if output:
fatal('Commit all changes first: {}'.format(output)) |
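A rough equivalent using only the standard library instead of Fabric's `local()`; `git diff --name-only` lists unstaged changes and `--cached` lists staged-but-uncommitted ones (the excluded file names and the message are illustrative):

import subprocess

def dirty_files(*extra_args):
    out = subprocess.run(['git', 'diff', '--name-only', *extra_args],
                         capture_output=True, text=True, check=True).stdout.split()
    return [f for f in out if f not in ('pynb/version.py', 'version.py')]

if dirty_files() or dirty_files('--cached'):
    raise SystemExit('Commit all changes (besides version files) first.')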
def _init(self, server, nick, user, real_name, password, port=None,
tls=True, tls_verify=True,
proxy=False, proxy_type='SOCKS5', proxy_server=None,
proxy_port=None, proxy_username=None, proxy_password=None):
"""
Connect and register with the IRC server and -
set server-related information variables.
Required arguments:
* server - Server to connect to.
* nick - Nick to use.
If a tuple/list is specified it will try to use the first,
and if the first is already used -
it will try to use the second and so on.
* user - Username to use.
* real_name - Real name to use.
* password=None - IRC server password.
Optional arguments:
* port - Port to use.
* tls=True - Should we use TLS/SSL?
* tls_verify=True - Verify the TLS certificate?
Only works with Python 3.
* proxy=False - Should we use a proxy?
* proxy_type='SOCKS5' - Proxy type: SOCKS5, SOCKS4 or HTTP
* proxy_server=None - Proxy server's address
* proxy_port=None - Proxy server's port
* proxy_username=None - If SOCKS5 is used,
a proxy username/password can be specified.
* proxy_password=None - If SOCKS5 is used,
a proxy username/password can be specified.
"""
with self.lock:
self.current_nick = nick
if tls:
if not port:
port = 6697
self._connect(server, port, tls, tls_verify, proxy, \
proxy_type, proxy_server, proxy_port, \
proxy_username, proxy_password)
else:
if not port:
port = 6667
self._connect(server, port, tls, tls_verify, proxy, \
proxy_type, proxy_server, proxy_port, \
proxy_username, proxy_password)
while self.readable(2):
data = self.recv()
if data[0] == 'NOTICE':
self.server = data[1][0]
self.con_msg.append(data)
self._register(nick, user, real_name, password)
while self.readable(timeout=4):
rdata = self.recv()
if rdata[0] == 'UNKNOWN':
data = rdata[1][3].replace(':', '', 1)
ncode = rdata[1][1]
if ncode == '004':
info = data.split()
self.server = info[0]
self.ircd = info[1]
self.umodes = info[2]
self.cmodes = info[3]
elif ncode == '005':
version = rdata[1][3].replace(':are supported' + \
'by this server', '')
version = version.split()
for info in version:
try:
info = info.split('=')
name = info[0]
value = info[1]
self.version[name] = value
if name == 'CHARSET':
self.encoding = value
except IndexError:
self.version[info[0]] = True
elif ncode == '376':
self.con_msg.append(rdata)
break
elif ncode == '422':
self.con_msg.append(rdata)
break
else:
if rdata[0] == 'NOTICE':
self.server = rdata[1][0]
self.con_msg.append(rdata[1])
self.motd = tuple(self.motd)
self.con_msg = tuple(self.con_msg)
self.connected = True
self.keep_going = \
True | Connect and register with the IRC server and -
set server-related information variables.
Required arguments:
* server - Server to connect to.
* nick - Nick to use.
If a tuple/list is specified it will try to use the first,
and if the first is already used -
it will try to use the second and so on.
* user - Username to use.
* real_name - Real name to use.
* password=None - IRC server password.
Optional arguments:
* port - Port to use.
* tls=True - Should we use TLS/SSL?
* tls_verify=True - Verify the TLS certificate?
Only works with Python 3.
* proxy=False - Should we use a proxy?
* proxy_type='SOCKS5' - Proxy type: SOCKS5, SOCKS4 or HTTP
* proxy_server=None - Proxy server's address
* proxy_port=None - Proxy server's port
* proxy_username=None - If SOCKS5 is used,
a proxy username/password can be specified.
* proxy_password=None - If SOCKS5 is used,
a proxy username/password can be specified. | Below is the the instruction that describes the task:
### Input:
Connect and register with the IRC server and -
set server-related information variables.
Required arguments:
* server - Server to connect to.
* nick - Nick to use.
If a tuple/list is specified it will try to use the first,
and if the first is already used -
it will try to use the second and so on.
* user - Username to use.
* real_name - Real name to use.
* password=None - IRC server password.
Optional arguments:
* port - Port to use.
* tls=True - Should we use TLS/SSL?
* tls_verify=True - Verify the TLS certificate?
Only works with Python 3.
* proxy=False - Should we use a proxy?
* proxy_type='SOCKS5' - Proxy type: SOCKS5, SOCKS4 or HTTP
* proxy_server=None - Proxy server's address
* proxy_port=None - Proxy server's port
* proxy_username=None - If SOCKS5 is used,
a proxy username/password can be specified.
* proxy_password=None - If SOCKS5 is used,
a proxy username/password can be specified.
### Response:
def _init(self, server, nick, user, real_name, password, port=None,
tls=True, tls_verify=True,
proxy=False, proxy_type='SOCKS5', proxy_server=None,
proxy_port=None, proxy_username=None, proxy_password=None):
"""
Connect and register with the IRC server and -
set server-related information variables.
Required arguments:
* server - Server to connect to.
* nick - Nick to use.
If a tuple/list is specified it will try to use the first,
and if the first is already used -
it will try to use the second and so on.
* user - Username to use.
* real_name - Real name to use.
* password=None - IRC server password.
Optional arguments:
* port - Port to use.
* tls=True - Should we use TLS/SSL?
* tls_verify=True - Verify the TLS certificate?
Only works with Python 3.
* proxy=False - Should we use a proxy?
* proxy_type='SOCKS5' - Proxy type: SOCKS5, SOCKS4 or HTTP
* proxy_server=None - Proxy server's address
* proxy_port=None - Proxy server's port
* proxy_username=None - If SOCKS5 is used,
a proxy username/password can be specified.
* proxy_password=None - If SOCKS5 is used,
a proxy username/password can be specified.
"""
with self.lock:
self.current_nick = nick
if tls:
if not port:
port = 6697
self._connect(server, port, tls, tls_verify, proxy, \
proxy_type, proxy_server, proxy_port, \
proxy_username, proxy_password)
else:
if not port:
port = 6667
self._connect(server, port, tls, tls_verify, proxy, \
proxy_type, proxy_server, proxy_port, \
proxy_username, proxy_password)
while self.readable(2):
data = self.recv()
if data[0] == 'NOTICE':
self.server = data[1][0]
self.con_msg.append(data)
self._register(nick, user, real_name, password)
while self.readable(timeout=4):
rdata = self.recv()
if rdata[0] == 'UNKNOWN':
data = rdata[1][3].replace(':', '', 1)
ncode = rdata[1][1]
if ncode == '004':
info = data.split()
self.server = info[0]
self.ircd = info[1]
self.umodes = info[2]
self.cmodes = info[3]
elif ncode == '005':
version = rdata[1][3].replace(':are supported' + \
'by this server', '')
version = version.split()
for info in version:
try:
info = info.split('=')
name = info[0]
value = info[1]
self.version[name] = value
if name == 'CHARSET':
self.encoding = value
except IndexError:
self.version[info[0]] = True
elif ncode == '376':
self.con_msg.append(rdata)
break
elif ncode == '422':
self.con_msg.append(rdata)
break
else:
if rdata[0] == 'NOTICE':
self.server = rdata[1][0]
self.con_msg.append(rdata[1])
self.motd = tuple(self.motd)
self.con_msg = tuple(self.con_msg)
self.connected = True
self.keep_going = \
True |
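The 005 (RPL_ISUPPORT) handling is the least obvious part of this method; a minimal standalone sketch of the same key=value parsing, fed a hand-written example payload instead of a live server reply:

# Hypothetical RPL_ISUPPORT tokens; a real server sends several such lines.
tokens = 'CHANTYPES=# PREFIX=(ov)@+ CHARSET=ascii NICKLEN=30 SAFELIST'.split()

version = {}
encoding = None
for info in tokens:
    parts = info.split('=')
    if len(parts) > 1:
        version[parts[0]] = parts[1]
        if parts[0] == 'CHARSET':
            encoding = parts[1]
    else:
        # Flag-style tokens such as SAFELIST carry no value.
        version[parts[0]] = True

print(version)
print(encoding)  # ascii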
def ToArray(value):
"""
    Serialize the given `value` to an array of bytes.
Args:
value (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin.
Returns:
bytes: hex formatted bytes
"""
ms = StreamManager.GetStream()
writer = BinaryWriter(ms)
value.Serialize(writer)
retVal = ms.ToArray()
StreamManager.ReleaseStream(ms)
    return retVal | Serialize the given `value` to an array of bytes.
Args:
value (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin.
Returns:
bytes: hex formatted bytes | Below is the the instruction that describes the task:
### Input:
Serialize the given `value` to an array of bytes.
Args:
value (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin.
Returns:
bytes: hex formatted bytes
### Response:
def ToArray(value):
"""
    Serialize the given `value` to an array of bytes.
Args:
value (neo.IO.Mixins.SerializableMixin): object extending SerializableMixin.
Returns:
bytes: hex formatted bytes
"""
ms = StreamManager.GetStream()
writer = BinaryWriter(ms)
value.Serialize(writer)
retVal = ms.ToArray()
StreamManager.ReleaseStream(ms)
return retVal |
def _crop_img_to(image, slices, copy=True):
"""Crops image to a smaller size
Crop img to size indicated by slices and modify the affine accordingly.
Parameters
----------
image: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Image to be cropped.
slices: list of slices
Defines the range of the crop.
E.g. [slice(20, 200), slice(40, 150), slice(0, 100)]
defines a 3D cube
        If slices has fewer entries than image has dimensions,
the slices will be applied to the first len(slices) dimensions.
copy: boolean
Specifies whether cropped data is to be copied or not.
Default: True
Returns
-------
cropped_img: img-like object
Cropped version of the input image
"""
img = check_img(image)
data = img.get_data()
affine = img.get_affine()
cropped_data = data[slices]
if copy:
cropped_data = cropped_data.copy()
linear_part = affine[:3, :3]
old_origin = affine[:3, 3]
new_origin_voxel = np.array([s.start for s in slices])
new_origin = old_origin + linear_part.dot(new_origin_voxel)
new_affine = np.eye(4)
new_affine[:3, :3] = linear_part
new_affine[:3, 3] = new_origin
new_img = nib.Nifti1Image(cropped_data, new_affine)
return new_img | Crops image to a smaller size
Crop img to size indicated by slices and modify the affine accordingly.
Parameters
----------
image: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Image to be cropped.
slices: list of slices
Defines the range of the crop.
E.g. [slice(20, 200), slice(40, 150), slice(0, 100)]
defines a 3D cube
    If slices has fewer entries than image has dimensions,
the slices will be applied to the first len(slices) dimensions.
copy: boolean
Specifies whether cropped data is to be copied or not.
Default: True
Returns
-------
cropped_img: img-like object
Cropped version of the input image | Below is the the instruction that describes the task:
### Input:
Crops image to a smaller size
Crop img to size indicated by slices and modify the affine accordingly.
Parameters
----------
image: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Image to be cropped.
slices: list of slices
Defines the range of the crop.
E.g. [slice(20, 200), slice(40, 150), slice(0, 100)]
defines a 3D cube
    If slices has fewer entries than image has dimensions,
the slices will be applied to the first len(slices) dimensions.
copy: boolean
Specifies whether cropped data is to be copied or not.
Default: True
Returns
-------
cropped_img: img-like object
Cropped version of the input image
### Response:
def _crop_img_to(image, slices, copy=True):
"""Crops image to a smaller size
Crop img to size indicated by slices and modify the affine accordingly.
Parameters
----------
image: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Image to be cropped.
slices: list of slices
Defines the range of the crop.
E.g. [slice(20, 200), slice(40, 150), slice(0, 100)]
defines a 3D cube
        If slices has fewer entries than image has dimensions,
the slices will be applied to the first len(slices) dimensions.
copy: boolean
Specifies whether cropped data is to be copied or not.
Default: True
Returns
-------
cropped_img: img-like object
Cropped version of the input image
"""
img = check_img(image)
data = img.get_data()
affine = img.get_affine()
cropped_data = data[slices]
if copy:
cropped_data = cropped_data.copy()
linear_part = affine[:3, :3]
old_origin = affine[:3, 3]
new_origin_voxel = np.array([s.start for s in slices])
new_origin = old_origin + linear_part.dot(new_origin_voxel)
new_affine = np.eye(4)
new_affine[:3, :3] = linear_part
new_affine[:3, 3] = new_origin
new_img = nib.Nifti1Image(cropped_data, new_affine)
return new_img |
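The affine bookkeeping is the subtle part of this helper; a standalone numpy sketch of the same origin update, using a made-up identity affine and crop so the shift can be checked by hand:

import numpy as np

affine = np.eye(4)
affine[:3, 3] = [10.0, 20.0, 30.0]                 # old origin
slices = [slice(2, 8), slice(1, 9), slice(0, 5)]   # crop to apply
start_voxel = np.array([s.start for s in slices])
new_origin = affine[:3, 3] + affine[:3, :3].dot(start_voxel)
print(new_origin)  # [12. 21. 30.]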
def make_request(self, request, captcha_response=None):
"""Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response
"""
logger.debug('Prepare API Method request %r', request)
response = self._send_api_request(request=request,
captcha_response=captcha_response)
response.raise_for_status()
response_or_error = json.loads(response.text)
logger.debug('response: %s', response_or_error)
if 'error' in response_or_error:
error_data = response_or_error['error']
vk_error = VkAPIError(error_data)
if vk_error.is_captcha_needed():
captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
if not captcha_key:
raise vk_error
# Retry http request with captcha info attached
captcha_response = {
'sid': vk_error.captcha_sid,
'key': captcha_key,
}
return self.make_request(
request, captcha_response=captcha_response)
elif vk_error.is_access_token_incorrect():
logger.info(
'Authorization failed. Access token will be dropped')
self._access_token = None
return self.make_request(request)
else:
raise vk_error
elif 'execute_errors' in response_or_error:
# can take place while running .execute vk method
# See more: https://vk.com/dev/execute
raise VkAPIError(response_or_error['execute_errors'][0])
elif 'response' in response_or_error:
return response_or_error['response'] | Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response | Below is the the instruction that describes the task:
### Input:
Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response
### Response:
def make_request(self, request, captcha_response=None):
"""Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response
"""
logger.debug('Prepare API Method request %r', request)
response = self._send_api_request(request=request,
captcha_response=captcha_response)
response.raise_for_status()
response_or_error = json.loads(response.text)
logger.debug('response: %s', response_or_error)
if 'error' in response_or_error:
error_data = response_or_error['error']
vk_error = VkAPIError(error_data)
if vk_error.is_captcha_needed():
captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
if not captcha_key:
raise vk_error
# Retry http request with captcha info attached
captcha_response = {
'sid': vk_error.captcha_sid,
'key': captcha_key,
}
return self.make_request(
request, captcha_response=captcha_response)
elif vk_error.is_access_token_incorrect():
logger.info(
'Authorization failed. Access token will be dropped')
self._access_token = None
return self.make_request(request)
else:
raise vk_error
elif 'execute_errors' in response_or_error:
# can take place while running .execute vk method
# See more: https://vk.com/dev/execute
raise VkAPIError(response_or_error['execute_errors'][0])
elif 'response' in response_or_error:
return response_or_error['response'] |
def _get_bandgap_from_bands(energies, nelec):
"""Compute difference in conduction band min and valence band max"""
nelec = int(nelec)
valence = [x[nelec-1] for x in energies]
conduction = [x[nelec] for x in energies]
return max(min(conduction) - max(valence), 0.0) | Compute difference in conduction band min and valence band max | Below is the the instruction that describes the task:
### Input:
Compute difference in conduction band min and valence band max
### Response:
def _get_bandgap_from_bands(energies, nelec):
"""Compute difference in conduction band min and valence band max"""
nelec = int(nelec)
valence = [x[nelec-1] for x in energies]
conduction = [x[nelec] for x in energies]
return max(min(conduction) - max(valence), 0.0) |
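A quick check with hand-made band energies; note the function indexes bands directly by nelec, so pass the number of occupied bands (two k-points, four bands, two occupied here):

energies = [[-5.0, -1.0, 2.0, 4.0],   # k-point 1
            [-4.5, -0.5, 1.5, 3.5]]   # k-point 2
# VBM = max(-1.0, -0.5) = -0.5, CBM = min(2.0, 1.5) = 1.5
print(_get_bandgap_from_bands(energies, nelec=2))  # 2.0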
def normalize_by_center_of_mass(coords, radii):
'''
Given coordinates of circle centers and radii, as two arrays,
returns new coordinates array, computed such that the center of mass of the
three circles is (0, 0).
>>> normalize_by_center_of_mass(np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 3.0]]), np.array([1.0, 1.0, 1.0]))
array([[-1., -1.],
[ 1., -1.],
[ 0., 2.]])
>>> normalize_by_center_of_mass(np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]]), np.array([1.0, 1.0, np.sqrt(2.0)]))
array([[-1., -1.],
[ 1., -1.],
[ 0., 1.]])
'''
# Now find the center of mass.
radii = radii**2
sum_r = np.sum(radii)
if sum_r < tol:
return coords
else:
return coords - np.dot(radii, coords) / np.sum(radii) | Given coordinates of circle centers and radii, as two arrays,
returns new coordinates array, computed such that the center of mass of the
three circles is (0, 0).
>>> normalize_by_center_of_mass(np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 3.0]]), np.array([1.0, 1.0, 1.0]))
array([[-1., -1.],
[ 1., -1.],
[ 0., 2.]])
>>> normalize_by_center_of_mass(np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]]), np.array([1.0, 1.0, np.sqrt(2.0)]))
array([[-1., -1.],
[ 1., -1.],
[ 0., 1.]]) | Below is the the instruction that describes the task:
### Input:
Given coordinates of circle centers and radii, as two arrays,
returns new coordinates array, computed such that the center of mass of the
three circles is (0, 0).
>>> normalize_by_center_of_mass(np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 3.0]]), np.array([1.0, 1.0, 1.0]))
array([[-1., -1.],
[ 1., -1.],
[ 0., 2.]])
>>> normalize_by_center_of_mass(np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]]), np.array([1.0, 1.0, np.sqrt(2.0)]))
array([[-1., -1.],
[ 1., -1.],
[ 0., 1.]])
### Response:
def normalize_by_center_of_mass(coords, radii):
'''
Given coordinates of circle centers and radii, as two arrays,
returns new coordinates array, computed such that the center of mass of the
three circles is (0, 0).
>>> normalize_by_center_of_mass(np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 3.0]]), np.array([1.0, 1.0, 1.0]))
array([[-1., -1.],
[ 1., -1.],
[ 0., 2.]])
>>> normalize_by_center_of_mass(np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]]), np.array([1.0, 1.0, np.sqrt(2.0)]))
array([[-1., -1.],
[ 1., -1.],
[ 0., 1.]])
'''
# Now find the center of mass.
radii = radii**2
sum_r = np.sum(radii)
if sum_r < tol:
return coords
else:
return coords - np.dot(radii, coords) / np.sum(radii) |
def get_netconf_client_capabilities_input_session_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_netconf_client_capabilities = ET.Element("get_netconf_client_capabilities")
config = get_netconf_client_capabilities
input = ET.SubElement(get_netconf_client_capabilities, "input")
session_id = ET.SubElement(input, "session-id")
session_id.text = kwargs.pop('session_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_netconf_client_capabilities_input_session_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_netconf_client_capabilities = ET.Element("get_netconf_client_capabilities")
config = get_netconf_client_capabilities
input = ET.SubElement(get_netconf_client_capabilities, "input")
session_id = ET.SubElement(input, "session-id")
session_id.text = kwargs.pop('session_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
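Outside a device session the method is hard to exercise, but the XML it assembles can be previewed with plain ElementTree; the session id below is made up, and the real method hands the element to its callback instead of printing it:

import xml.etree.ElementTree as ET

root = ET.Element("get_netconf_client_capabilities")
inp = ET.SubElement(root, "input")
ET.SubElement(inp, "session-id").text = "7"
print(ET.tostring(root).decode())
# <get_netconf_client_capabilities><input><session-id>7</session-id></input></get_netconf_client_capabilities>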
def extract_subjects(cert_pem):
"""Extract subjects from a DataONE PEM (Base64) encoded X.509 v3 certificate.
Args:
cert_pem: str or bytes
PEM (Base64) encoded X.509 v3 certificate
Returns:
2-tuple:
- The primary subject string, extracted from the certificate DN.
- A set of equivalent identities, group memberships and inferred symbolic
subjects extracted from the SubjectInfo (if present.)
- All returned subjects are DataONE compliant serializations.
- A copy of the primary subject is always included in the set of equivalent
identities.
"""
primary_str, subject_info_xml = d1_common.cert.x509.extract_subjects(cert_pem)
equivalent_set = {
primary_str,
d1_common.const.SUBJECT_AUTHENTICATED,
d1_common.const.SUBJECT_PUBLIC,
}
if subject_info_xml is not None:
equivalent_set |= d1_common.cert.subject_info.extract_subjects(
subject_info_xml, primary_str
)
return primary_str, equivalent_set | Extract subjects from a DataONE PEM (Base64) encoded X.509 v3 certificate.
Args:
cert_pem: str or bytes
PEM (Base64) encoded X.509 v3 certificate
Returns:
2-tuple:
- The primary subject string, extracted from the certificate DN.
- A set of equivalent identities, group memberships and inferred symbolic
subjects extracted from the SubjectInfo (if present.)
- All returned subjects are DataONE compliant serializations.
- A copy of the primary subject is always included in the set of equivalent
identities. | Below is the the instruction that describes the task:
### Input:
Extract subjects from a DataONE PEM (Base64) encoded X.509 v3 certificate.
Args:
cert_pem: str or bytes
PEM (Base64) encoded X.509 v3 certificate
Returns:
2-tuple:
- The primary subject string, extracted from the certificate DN.
- A set of equivalent identities, group memberships and inferred symbolic
subjects extracted from the SubjectInfo (if present.)
- All returned subjects are DataONE compliant serializations.
- A copy of the primary subject is always included in the set of equivalent
identities.
### Response:
def extract_subjects(cert_pem):
"""Extract subjects from a DataONE PEM (Base64) encoded X.509 v3 certificate.
Args:
cert_pem: str or bytes
PEM (Base64) encoded X.509 v3 certificate
Returns:
2-tuple:
- The primary subject string, extracted from the certificate DN.
- A set of equivalent identities, group memberships and inferred symbolic
subjects extracted from the SubjectInfo (if present.)
- All returned subjects are DataONE compliant serializations.
- A copy of the primary subject is always included in the set of equivalent
identities.
"""
primary_str, subject_info_xml = d1_common.cert.x509.extract_subjects(cert_pem)
equivalent_set = {
primary_str,
d1_common.const.SUBJECT_AUTHENTICATED,
d1_common.const.SUBJECT_PUBLIC,
}
if subject_info_xml is not None:
equivalent_set |= d1_common.cert.subject_info.extract_subjects(
subject_info_xml, primary_str
)
return primary_str, equivalent_set |
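Typical use is against a client certificate captured from a TLS session; the path below is hypothetical, and the function needs the d1_common package importable alongside it:

with open("client_cert.pem", "rb") as f:   # hypothetical certificate path
    primary, equivalents = extract_subjects(f.read())
print(primary)
print(sorted(equivalents))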
def convert_rom(ADDR_WIDTH=8, DATA_WIDTH=8, CONTENT=(4,5,6,7)):
''' Convert ROM'''
addr = Signal(intbv(0)[ADDR_WIDTH:])
dout = Signal(intbv(0)[DATA_WIDTH:])
toVerilog(rom, addr, dout, CONTENT) | Convert ROM | Below is the the instruction that describes the task:
### Input:
Convert ROM
### Response:
def convert_rom(ADDR_WIDTH=8, DATA_WIDTH=8, CONTENT=(4,5,6,7)):
''' Convert ROM'''
addr = Signal(intbv(0)[ADDR_WIDTH:])
dout = Signal(intbv(0)[DATA_WIDTH:])
toVerilog(rom, addr, dout, CONTENT) |
def select_write(self, timeout=None):
"""
Blocks until the socket is ready to be written to, or the timeout is hit
:param timeout:
        A float - the period of time to wait for the socket to be ready to be
written to. None for no time limit.
:return:
A boolean - if the socket is ready for writing. Will only be False
if timeout is not None.
"""
_, write_ready, _ = select.select([], [self._socket], [], timeout)
return len(write_ready) > 0 | Blocks until the socket is ready to be written to, or the timeout is hit
:param timeout:
    A float - the period of time to wait for the socket to be ready to be
written to. None for no time limit.
:return:
A boolean - if the socket is ready for writing. Will only be False
if timeout is not None. | Below is the the instruction that describes the task:
### Input:
Blocks until the socket is ready to be written to, or the timeout is hit
:param timeout:
    A float - the period of time to wait for the socket to be ready to be
written to. None for no time limit.
:return:
A boolean - if the socket is ready for writing. Will only be False
if timeout is not None.
### Response:
def select_write(self, timeout=None):
"""
Blocks until the socket is ready to be written to, or the timeout is hit
:param timeout:
A float - the period of time to wait for the socket to be ready to
written to. None for no time limit.
:return:
A boolean - if the socket is ready for writing. Will only be False
if timeout is not None.
"""
_, write_ready, _ = select.select([], [self._socket], [], timeout)
return len(write_ready) > 0 |
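The same writability test can be seen standalone with a local socket pair; a freshly created socket is normally writable at once, so the one-second timeout is not hit:

import select
import socket

a, b = socket.socketpair()
_, write_ready, _ = select.select([], [a], [], 1.0)
print(len(write_ready) > 0)  # True
a.close()
b.close()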
def issue_section(issue):
"""Returns the section heading for the issue, or None if this issue should be ignored."""
labels = issue.get('labels', [])
for label in labels:
if not label['name'].startswith('type: '):
continue
if label['name'] in LOG_SECTION:
return LOG_SECTION[label['name']]
elif label['name'] in IGNORE_ISSUE_TYPE:
return None
else:
logging.warning('unknown issue type: "{}" for: {}'.format(label['name'], issue_line(issue)))
return None | Returns the section heading for the issue, or None if this issue should be ignored. | Below is the the instruction that describes the task:
### Input:
Returns the section heading for the issue, or None if this issue should be ignored.
### Response:
def issue_section(issue):
"""Returns the section heading for the issue, or None if this issue should be ignored."""
labels = issue.get('labels', [])
for label in labels:
if not label['name'].startswith('type: '):
continue
if label['name'] in LOG_SECTION:
return LOG_SECTION[label['name']]
elif label['name'] in IGNORE_ISSUE_TYPE:
return None
else:
logging.warning('unknown issue type: "{}" for: {}'.format(label['name'], issue_line(issue)))
return None |
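The lookup depends on the module-level LOG_SECTION and IGNORE_ISSUE_TYPE mappings, which are not shown here; with made-up mappings the behaviour looks like this (a standalone restatement, not a call into the module):

LOG_SECTION = {'type: bug': 'Bug fixes', 'type: enhancement': 'Enhancements'}  # assumed shape
IGNORE_ISSUE_TYPE = {'type: question'}                                         # assumed shape

def section_for(issue):
    for label in issue.get('labels', []):
        name = label['name']
        if not name.startswith('type: '):
            continue
        if name in LOG_SECTION:
            return LOG_SECTION[name]
        if name in IGNORE_ISSUE_TYPE:
            return None
    return None

print(section_for({'labels': [{'name': 'type: bug'}]}))       # Bug fixes
print(section_for({'labels': [{'name': 'type: question'}]}))  # None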
def _make_authorization_grant_assertion(self):
"""Create the OAuth 2.0 assertion.
This assertion is used during the OAuth 2.0 grant to acquire an
access token.
Returns:
bytes: The authorization grant assertion.
"""
now = _helpers.utcnow()
lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS)
expiry = now + lifetime
payload = {
'iat': _helpers.datetime_to_secs(now),
'exp': _helpers.datetime_to_secs(expiry),
# The issuer must be the service account email.
'iss': self._service_account_email,
# The audience must be the auth token endpoint's URI
'aud': self._token_uri,
'scope': _helpers.scopes_to_string(self._scopes or ())
}
payload.update(self._additional_claims)
# The subject can be a user email for domain-wide delegation.
if self._subject:
payload.setdefault('sub', self._subject)
token = jwt.encode(self._signer, payload)
return token | Create the OAuth 2.0 assertion.
This assertion is used during the OAuth 2.0 grant to acquire an
access token.
Returns:
bytes: The authorization grant assertion. | Below is the the instruction that describes the task:
### Input:
Create the OAuth 2.0 assertion.
This assertion is used during the OAuth 2.0 grant to acquire an
access token.
Returns:
bytes: The authorization grant assertion.
### Response:
def _make_authorization_grant_assertion(self):
"""Create the OAuth 2.0 assertion.
This assertion is used during the OAuth 2.0 grant to acquire an
access token.
Returns:
bytes: The authorization grant assertion.
"""
now = _helpers.utcnow()
lifetime = datetime.timedelta(seconds=_DEFAULT_TOKEN_LIFETIME_SECS)
expiry = now + lifetime
payload = {
'iat': _helpers.datetime_to_secs(now),
'exp': _helpers.datetime_to_secs(expiry),
# The issuer must be the service account email.
'iss': self._service_account_email,
# The audience must be the auth token endpoint's URI
'aud': self._token_uri,
'scope': _helpers.scopes_to_string(self._scopes or ())
}
payload.update(self._additional_claims)
# The subject can be a user email for domain-wide delegation.
if self._subject:
payload.setdefault('sub', self._subject)
token = jwt.encode(self._signer, payload)
return token |
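This is a private method of google-auth's service-account credentials; in normal use it runs when the credentials are refreshed. A sketch of that flow, assuming a key file exists at the hypothetical path and the token endpoint is reachable:

from google.oauth2 import service_account
from google.auth.transport.requests import Request

creds = service_account.Credentials.from_service_account_file(
    'service-account.json',                                    # hypothetical key file
    scopes=['https://www.googleapis.com/auth/cloud-platform'])
creds.refresh(Request())   # builds the assertion above and exchanges it for a token
print(creds.token is not None)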
def complexity_fd_higushi(signal, k_max):
"""
Computes Higuchi Fractal Dimension of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann.
Parameters
----------
signal : list or array
List or array of values.
k_max : int
The maximal value of k. The point at which the FD plateaus is considered a saturation point and that kmax value should be selected (Gómez, 2009). Some studies use a value of 8 or 16 for ECG signal and other 48 for MEG.
Returns
----------
fd_higushi : float
The Higushi Fractal Dimension as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> fd_higushi = nk.complexity_fd_higushi(signal, 8)
Notes
----------
*Details*
- **Higushi Fractal Dimension**: Higuchi proposed in 1988 an efficient algorithm for measuring the FD of discrete time sequences. As the reconstruction of the attractor phase space is not necessary, this algorithm is simpler and faster than D2 and other classical measures derived from chaos theory. FD can be used to quantify the complexity and self-similarity of a signal. HFD has already been used to analyse the complexity of brain recordings and other biological signals.
*Authors*
- Quentin Geissmann (https://github.com/qgeissmann)
*Dependencies*
- numpy
*See Also*
- pyrem package: https://github.com/gilestrolab/pyrem
References
-----------
- Accardo, A., Affinito, M., Carrozzi, M., & Bouquet, F. (1997). Use of the fractal dimension for the analysis of electroencephalographic time series. Biological cybernetics, 77(5), 339-350.
- Gómez, C., Mediavilla, Á., Hornero, R., Abásolo, D., & Fernández, A. (2009). Use of the Higuchi's fractal dimension for the analysis of MEG recordings from Alzheimer's disease patients. Medical engineering & physics, 31(3), 306-313.
"""
signal = np.array(signal)
L = []
x = []
N = signal.size
km_idxs = np.triu_indices(k_max - 1)
km_idxs = k_max - np.flipud(np.column_stack(km_idxs)) -1
km_idxs[:,1] -= 1
for k in range(1, k_max):
Lk = 0
for m in range(0, k):
#we pregenerate all idxs
idxs = np.arange(1,int(np.floor((N-m)/k)))
Lmk = np.sum(np.abs(signal[m+idxs*k] - signal[m+k*(idxs-1)]))
Lmk = (Lmk*(N - 1)/(((N - m)/ k)* k)) / k
Lk += Lmk
if Lk != 0:
L.append(np.log(Lk/(m+1)))
x.append([np.log(1.0/ k), 1])
(p, r1, r2, s)=np.linalg.lstsq(x, L)
fd_higushi = p[0]
return (fd_higushi) | Computes Higuchi Fractal Dimension of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann.
Parameters
----------
signal : list or array
List or array of values.
k_max : int
The maximal value of k. The point at which the FD plateaus is considered a saturation point and that kmax value should be selected (Gómez, 2009). Some studies use a value of 8 or 16 for ECG signal and other 48 for MEG.
Returns
----------
fd_higushi : float
The Higushi Fractal Dimension as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> fd_higushi = nk.complexity_fd_higushi(signal, 8)
Notes
----------
*Details*
- **Higushi Fractal Dimension**: Higuchi proposed in 1988 an efficient algorithm for measuring the FD of discrete time sequences. As the reconstruction of the attractor phase space is not necessary, this algorithm is simpler and faster than D2 and other classical measures derived from chaos theory. FD can be used to quantify the complexity and self-similarity of a signal. HFD has already been used to analyse the complexity of brain recordings and other biological signals.
*Authors*
- Quentin Geissmann (https://github.com/qgeissmann)
*Dependencies*
- numpy
*See Also*
- pyrem package: https://github.com/gilestrolab/pyrem
References
-----------
- Accardo, A., Affinito, M., Carrozzi, M., & Bouquet, F. (1997). Use of the fractal dimension for the analysis of electroencephalographic time series. Biological cybernetics, 77(5), 339-350.
- Gómez, C., Mediavilla, Á., Hornero, R., Abásolo, D., & Fernández, A. (2009). Use of the Higuchi's fractal dimension for the analysis of MEG recordings from Alzheimer's disease patients. Medical engineering & physics, 31(3), 306-313. | Below is the the instruction that describes the task:
### Input:
Computes Higuchi Fractal Dimension of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann.
Parameters
----------
signal : list or array
List or array of values.
k_max : int
The maximal value of k. The point at which the FD plateaus is considered a saturation point and that kmax value should be selected (Gómez, 2009). Some studies use a value of 8 or 16 for ECG signal and other 48 for MEG.
Returns
----------
fd_higushi : float
The Higushi Fractal Dimension as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> fd_higushi = nk.complexity_fd_higushi(signal, 8)
Notes
----------
*Details*
- **Higushi Fractal Dimension**: Higuchi proposed in 1988 an efficient algorithm for measuring the FD of discrete time sequences. As the reconstruction of the attractor phase space is not necessary, this algorithm is simpler and faster than D2 and other classical measures derived from chaos theory. FD can be used to quantify the complexity and self-similarity of a signal. HFD has already been used to analyse the complexity of brain recordings and other biological signals.
*Authors*
- Quentin Geissmann (https://github.com/qgeissmann)
*Dependencies*
- numpy
*See Also*
- pyrem package: https://github.com/gilestrolab/pyrem
References
-----------
- Accardo, A., Affinito, M., Carrozzi, M., & Bouquet, F. (1997). Use of the fractal dimension for the analysis of electroencephalographic time series. Biological cybernetics, 77(5), 339-350.
- Gómez, C., Mediavilla, Á., Hornero, R., Abásolo, D., & Fernández, A. (2009). Use of the Higuchi's fractal dimension for the analysis of MEG recordings from Alzheimer's disease patients. Medical engineering & physics, 31(3), 306-313.
### Response:
def complexity_fd_higushi(signal, k_max):
"""
Computes Higuchi Fractal Dimension of a signal. Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by Quentin Geissmann.
Parameters
----------
signal : list or array
List or array of values.
k_max : int
The maximal value of k. The point at which the FD plateaus is considered a saturation point and that kmax value should be selected (Gómez, 2009). Some studies use a value of 8 or 16 for ECG signal and other 48 for MEG.
Returns
----------
fd_higushi : float
The Higushi Fractal Dimension as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> fd_higushi = nk.complexity_fd_higushi(signal, 8)
Notes
----------
*Details*
- **Higushi Fractal Dimension**: Higuchi proposed in 1988 an efficient algorithm for measuring the FD of discrete time sequences. As the reconstruction of the attractor phase space is not necessary, this algorithm is simpler and faster than D2 and other classical measures derived from chaos theory. FD can be used to quantify the complexity and self-similarity of a signal. HFD has already been used to analyse the complexity of brain recordings and other biological signals.
*Authors*
- Quentin Geissmann (https://github.com/qgeissmann)
*Dependencies*
- numpy
*See Also*
- pyrem package: https://github.com/gilestrolab/pyrem
References
-----------
- Accardo, A., Affinito, M., Carrozzi, M., & Bouquet, F. (1997). Use of the fractal dimension for the analysis of electroencephalographic time series. Biological cybernetics, 77(5), 339-350.
- Gómez, C., Mediavilla, Á., Hornero, R., Abásolo, D., & Fernández, A. (2009). Use of the Higuchi's fractal dimension for the analysis of MEG recordings from Alzheimer's disease patients. Medical engineering & physics, 31(3), 306-313.
"""
signal = np.array(signal)
L = []
x = []
N = signal.size
km_idxs = np.triu_indices(k_max - 1)
km_idxs = k_max - np.flipud(np.column_stack(km_idxs)) -1
km_idxs[:,1] -= 1
for k in range(1, k_max):
Lk = 0
for m in range(0, k):
#we pregenerate all idxs
idxs = np.arange(1,int(np.floor((N-m)/k)))
Lmk = np.sum(np.abs(signal[m+idxs*k] - signal[m+k*(idxs-1)]))
Lmk = (Lmk*(N - 1)/(((N - m)/ k)* k)) / k
Lk += Lmk
if Lk != 0:
L.append(np.log(Lk/(m+1)))
x.append([np.log(1.0/ k), 1])
(p, r1, r2, s)=np.linalg.lstsq(x, L)
fd_higushi = p[0]
return (fd_higushi) |
def _parse_cookies(cookie_str, dictionary):
"""Tries to parse any key-value pairs of cookies in a string,
    then updates the dictionary with any key-value pairs found.
**Example**::
dictionary = {}
_parse_cookies('my=value', dictionary)
# Now the following is True
dictionary['my'] == 'value'
:param cookie_str: A string containing "key=value" pairs from an HTTP "Set-Cookie" header.
:type cookie_str: ``str``
:param dictionary: A dictionary to update with any found key-value pairs.
:type dictionary: ``dict``
"""
parsed_cookie = six.moves.http_cookies.SimpleCookie(cookie_str)
for cookie in parsed_cookie.values():
dictionary[cookie.key] = cookie.coded_value | Tries to parse any key-value pairs of cookies in a string,
then updates the dictionary with any key-value pairs found.
**Example**::
dictionary = {}
_parse_cookies('my=value', dictionary)
# Now the following is True
dictionary['my'] == 'value'
:param cookie_str: A string containing "key=value" pairs from an HTTP "Set-Cookie" header.
:type cookie_str: ``str``
:param dictionary: A dictionary to update with any found key-value pairs.
:type dictionary: ``dict`` | Below is the the instruction that describes the task:
### Input:
Tries to parse any key-value pairs of cookies in a string,
then updates the dictionary with any key-value pairs found.
**Example**::
dictionary = {}
_parse_cookies('my=value', dictionary)
# Now the following is True
dictionary['my'] == 'value'
:param cookie_str: A string containing "key=value" pairs from an HTTP "Set-Cookie" header.
:type cookie_str: ``str``
:param dictionary: A dictionary to update with any found key-value pairs.
:type dictionary: ``dict``
### Response:
def _parse_cookies(cookie_str, dictionary):
"""Tries to parse any key-value pairs of cookies in a string,
    then updates the dictionary with any key-value pairs found.
**Example**::
dictionary = {}
_parse_cookies('my=value', dictionary)
# Now the following is True
dictionary['my'] == 'value'
:param cookie_str: A string containing "key=value" pairs from an HTTP "Set-Cookie" header.
:type cookie_str: ``str``
:param dictionary: A dictionary to update with any found key-value pairs.
:type dictionary: ``dict``
"""
parsed_cookie = six.moves.http_cookies.SimpleCookie(cookie_str)
for cookie in parsed_cookie.values():
dictionary[cookie.key] = cookie.coded_value |
def parse_version_string(name: str) -> Tuple[str, str]:
"""Parse a version string (name ID 5) and return (major, minor) strings.
Example of the expected format: 'Version 01.003; Comments'. Version
strings like "Version 1.3" will be post-processed into ("1", "300").
The parsed version numbers will therefore match in spirit, but not
necessarily in string form.
"""
import re
# We assume ";" is the universal delimiter here.
version_entry = name.split(";")[0]
# Catch both "Version 1.234" and "1.234" but not "1x2.34". Note: search()
# will return the first match.
version_string = re.search(r"(?: |^)(\d+\.\d+)", version_entry)
if version_string is None:
raise ValueError("The version string didn't contain a number of the format"
" major.minor.")
major, minor = version_string.group(1).split('.')
major = str(int(major)) # "01.123" -> "1.123"
minor = minor.ljust(3, '0') # "3.0" -> "3.000", but "3.123" -> "3.123"
return major, minor | Parse a version string (name ID 5) and return (major, minor) strings.
Example of the expected format: 'Version 01.003; Comments'. Version
strings like "Version 1.3" will be post-processed into ("1", "300").
The parsed version numbers will therefore match in spirit, but not
necessarily in string form. | Below is the the instruction that describes the task:
### Input:
Parse a version string (name ID 5) and return (major, minor) strings.
Example of the expected format: 'Version 01.003; Comments'. Version
strings like "Version 1.3" will be post-processed into ("1", "300").
The parsed version numbers will therefore match in spirit, but not
necessarily in string form.
### Response:
def parse_version_string(name: str) -> Tuple[str, str]:
"""Parse a version string (name ID 5) and return (major, minor) strings.
Example of the expected format: 'Version 01.003; Comments'. Version
strings like "Version 1.3" will be post-processed into ("1", "300").
The parsed version numbers will therefore match in spirit, but not
necessarily in string form.
"""
import re
# We assume ";" is the universal delimiter here.
version_entry = name.split(";")[0]
# Catch both "Version 1.234" and "1.234" but not "1x2.34". Note: search()
# will return the first match.
version_string = re.search(r"(?: |^)(\d+\.\d+)", version_entry)
if version_string is None:
raise ValueError("The version string didn't contain a number of the format"
" major.minor.")
major, minor = version_string.group(1).split('.')
major = str(int(major)) # "01.123" -> "1.123"
minor = minor.ljust(3, '0') # "3.0" -> "3.000", but "3.123" -> "3.123"
return major, minor |
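Two quick calls showing the post-processing described in the docstring:

print(parse_version_string('Version 01.003; Comments'))  # ('1', '003')
print(parse_version_string('Version 1.3'))               # ('1', '300')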
def start_poll(args):
"""Starts a poll."""
if args.type == 'privmsg':
return "We don't have secret ballots in this benevolent dictatorship!"
if not args.msg:
return "Polls need a question."
ctrlchan = args.config['core']['ctrlchan']
poll = Polls(question=args.msg, submitter=args.nick)
args.session.add(poll)
args.session.flush()
if args.isadmin or not args.config.getboolean('adminrestrict', 'poll'):
poll.accepted = 1
return "Poll #%d created!" % poll.id
else:
args.send("Poll submitted for approval.", target=args.nick)
args.send("New Poll: #%d -- %s, Submitted by %s" % (poll.id, args.msg, args.nick), target=ctrlchan)
return "" | Starts a poll. | Below is the the instruction that describes the task:
### Input:
Starts a poll.
### Response:
def start_poll(args):
"""Starts a poll."""
if args.type == 'privmsg':
return "We don't have secret ballots in this benevolent dictatorship!"
if not args.msg:
return "Polls need a question."
ctrlchan = args.config['core']['ctrlchan']
poll = Polls(question=args.msg, submitter=args.nick)
args.session.add(poll)
args.session.flush()
if args.isadmin or not args.config.getboolean('adminrestrict', 'poll'):
poll.accepted = 1
return "Poll #%d created!" % poll.id
else:
args.send("Poll submitted for approval.", target=args.nick)
args.send("New Poll: #%d -- %s, Submitted by %s" % (poll.id, args.msg, args.nick), target=ctrlchan)
return "" |
def build_recursive_localize_command(destination, inputs, file_provider):
"""Return a multi-line string with a shell script to copy recursively.
Arguments:
destination: Folder where to put the data.
For example /mnt/data
inputs: a list of InputFileParam
file_provider: file provider string used to filter the output params; the
returned command will only apply outputs whose file provider
matches this file filter.
Returns:
a multi-line string with a shell script that copies the inputs
recursively from GCS.
"""
command = _LOCALIZE_COMMAND_MAP[file_provider]
filtered_inputs = [
var for var in inputs
if var.recursive and var.file_provider == file_provider
]
copy_input_dirs = '\n'.join([
textwrap.dedent("""
mkdir -p {data_mount}/{docker_path}
for ((i = 0; i < 3; i++)); do
if {command} {source_uri} {data_mount}/{docker_path}; then
break
elif ((i == 2)); then
2>&1 echo "Recursive localization failed."
exit 1
fi
done
chmod -R o+r {data_mount}/{docker_path}
""").format(
command=command,
source_uri=var.uri,
data_mount=destination.rstrip('/'),
docker_path=var.docker_path) for var in filtered_inputs
])
return copy_input_dirs | Return a multi-line string with a shell script to copy recursively.
Arguments:
destination: Folder where to put the data.
For example /mnt/data
inputs: a list of InputFileParam
file_provider: file provider string used to filter the output params; the
returned command will only apply outputs whose file provider
matches this file filter.
Returns:
a multi-line string with a shell script that copies the inputs
recursively from GCS. | Below is the the instruction that describes the task:
### Input:
Return a multi-line string with a shell script to copy recursively.
Arguments:
destination: Folder where to put the data.
For example /mnt/data
inputs: a list of InputFileParam
file_provider: file provider string used to filter the output params; the
returned command will only apply outputs whose file provider
matches this file filter.
Returns:
a multi-line string with a shell script that copies the inputs
recursively from GCS.
### Response:
def build_recursive_localize_command(destination, inputs, file_provider):
"""Return a multi-line string with a shell script to copy recursively.
Arguments:
destination: Folder where to put the data.
For example /mnt/data
inputs: a list of InputFileParam
file_provider: file provider string used to filter the output params; the
returned command will only apply outputs whose file provider
matches this file filter.
Returns:
a multi-line string with a shell script that copies the inputs
recursively from GCS.
"""
command = _LOCALIZE_COMMAND_MAP[file_provider]
filtered_inputs = [
var for var in inputs
if var.recursive and var.file_provider == file_provider
]
copy_input_dirs = '\n'.join([
textwrap.dedent("""
mkdir -p {data_mount}/{docker_path}
for ((i = 0; i < 3; i++)); do
if {command} {source_uri} {data_mount}/{docker_path}; then
break
elif ((i == 2)); then
2>&1 echo "Recursive localization failed."
exit 1
fi
done
chmod -R o+r {data_mount}/{docker_path}
""").format(
command=command,
source_uri=var.uri,
data_mount=destination.rstrip('/'),
docker_path=var.docker_path) for var in filtered_inputs
])
return copy_input_dirs |
def token_handler_str_default(
token, dispatcher, node, subnode, sourcepath_stack=(None,)):
"""
Standard token handler that will return the value, ignoring any
tokens or strings that have been remapped.
"""
if isinstance(token.pos, int):
_, lineno, colno = node.getpos(subnode, token.pos)
else:
lineno, colno = None, None
yield StreamFragment(subnode, lineno, colno, None, sourcepath_stack[-1]) | Standard token handler that will return the value, ignoring any
tokens or strings that have been remapped. | Below is the the instruction that describes the task:
### Input:
Standard token handler that will return the value, ignoring any
tokens or strings that have been remapped.
### Response:
def token_handler_str_default(
token, dispatcher, node, subnode, sourcepath_stack=(None,)):
"""
Standard token handler that will return the value, ignoring any
tokens or strings that have been remapped.
"""
if isinstance(token.pos, int):
_, lineno, colno = node.getpos(subnode, token.pos)
else:
lineno, colno = None, None
yield StreamFragment(subnode, lineno, colno, None, sourcepath_stack[-1]) |
def intersect_range(self, start=None, stop=None):
"""Intersect with range defined by `start` and `stop` values
**inclusive**.
Parameters
----------
start : int, optional
Start value.
stop : int, optional
Stop value.
Returns
-------
idx : SortedIndex
Examples
--------
>>> import allel
>>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
>>> idx.intersect_range(4, 32)
<SortedIndex shape=(3,) dtype=int64>
[6, 11, 20]
"""
try:
loc = self.locate_range(start=start, stop=stop)
except KeyError:
return self.values[0:0]
else:
return self[loc] | Intersect with range defined by `start` and `stop` values
**inclusive**.
Parameters
----------
start : int, optional
Start value.
stop : int, optional
Stop value.
Returns
-------
idx : SortedIndex
Examples
--------
>>> import allel
>>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
>>> idx.intersect_range(4, 32)
<SortedIndex shape=(3,) dtype=int64>
[6, 11, 20] | Below is the the instruction that describes the task:
### Input:
Intersect with range defined by `start` and `stop` values
**inclusive**.
Parameters
----------
start : int, optional
Start value.
stop : int, optional
Stop value.
Returns
-------
idx : SortedIndex
Examples
--------
>>> import allel
>>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
>>> idx.intersect_range(4, 32)
<SortedIndex shape=(3,) dtype=int64>
[6, 11, 20]
### Response:
def intersect_range(self, start=None, stop=None):
"""Intersect with range defined by `start` and `stop` values
**inclusive**.
Parameters
----------
start : int, optional
Start value.
stop : int, optional
Stop value.
Returns
-------
idx : SortedIndex
Examples
--------
>>> import allel
>>> idx = allel.SortedIndex([3, 6, 11, 20, 35])
>>> idx.intersect_range(4, 32)
<SortedIndex shape=(3,) dtype=int64>
[6, 11, 20]
"""
try:
loc = self.locate_range(start=start, stop=stop)
except KeyError:
return self.values[0:0]
else:
return self[loc] |
def linear(target, m, b, prop):
r"""
Calculates a property as a linear function of a given property
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
m, b : floats
        Slope and intercept of the linear correlation
prop : string
The dictionary key containing the independent variable or phase
property to be used in the correlation.
"""
x = target[prop]
value = m*x + b
return value | r"""
Calculates a property as a linear function of a given property
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
m, b : floats
    Slope and intercept of the linear correlation
prop : string
The dictionary key containing the independent variable or phase
property to be used in the correlation. | Below is the the instruction that describes the task:
### Input:
r"""
Calculates a property as a linear function of a given property
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
m, b : floats
    Slope and intercept of the linear correlation
prop : string
The dictionary key containing the independent variable or phase
property to be used in the correlation.
### Response:
def linear(target, m, b, prop):
r"""
Calculates a property as a linear function of a given property
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
m, b : floats
        Slope and intercept of the linear correlation
prop : string
The dictionary key containing the independent variable or phase
property to be used in the correlation.
"""
x = target[prop]
value = m*x + b
return value |
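In real use target is an OpenPNM object, but since the model only reads target[prop], a plain dict holding a numpy array is enough to illustrate it:

import numpy as np

target = {'pore.temperature': np.array([300.0, 350.0, 400.0])}
print(linear(target, m=0.002, b=0.5, prop='pore.temperature'))
# [1.1 1.2 1.3]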
def parser():
"""Return a parser for setting one or more configuration paths"""
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config_paths', default=[], action='append',
help='path to a configuration directory')
return parser | Return a parser for setting one or more configuration paths | Below is the the instruction that describes the task:
### Input:
Return a parser for setting one or more configuration paths
### Response:
def parser():
"""Return a parser for setting one or more configuration paths"""
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config_paths', default=[], action='append',
help='path to a configuration directory')
return parser |
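Both the short and long flags append to the same list, so repeated use accumulates paths (the directories here are just examples):

args = parser().parse_args(['-c', '/etc/myapp', '--config_paths', '/home/user/.myapp'])
print(args.config_paths)  # ['/etc/myapp', '/home/user/.myapp']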
def attributes_layout(self, EdgeAttribute=None, maxwidth=None, minrad=None, \
network=None, NodeAttribute=None,nodeList=None, radmult=None, \
spacingx=None, spacingy=None, verbose=False):
"""
Execute the Group Attributes Layout on a network
:param EdgeAttribute (string, optional): The name of the edge column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param maxwidth (string, optional): Maximum width of a row, in numeric value
:param minrad (string, optional): Minimum width of a partition, in
numeric value
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param NodeAttribute (string, optional): The name of the node column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The
keywords all, selected, or unselected can be used to specify nodes
by their selection state. The pattern COLUMN:VALUE sets this parameter
to any rows that contain the specified column value; if the COLUMN
prefix is not used, the NAME column is matched by default. A list
of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,...
can be used to match multiple values.
:param radmult (string, optional): Minimum width of a partition, in
numeric value
:param spacingx (string, optional): Horizontal spacing between two
partitions in a row, in numeric value
:param spacingy (string, optional): Vertical spacing between the largest
partitions of two rows, in numeric value
:param verbose: print more
"""
        network=check_network(self,network,verbose=verbose)
        PARAMS=set_param(["EdgeAttribute","maxwidth","minrad","network","NodeAttribute","nodeList","radmult","spacingx","spacingy"],\
            [EdgeAttribute, maxwidth, \
            minrad, network, NodeAttribute,nodeList, radmult, \
            spacingx, spacingy])
        response=api(url=self.__url+"/attributes-layout", PARAMS=PARAMS, method="POST", verbose=verbose)
        return response
:param EdgeAttribute (string, optional): The name of the edge column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param maxwidth (string, optional): Maximum width of a row, in numeric value
:param minrad (string, optional): Minimum width of a partition, in
numeric value
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param NodeAttribute (string, optional): The name of the node column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The
keywords all, selected, or unselected can be used to specify nodes
by their selection state. The pattern COLUMN:VALUE sets this parameter
to any rows that contain the specified column value; if the COLUMN
prefix is not used, the NAME column is matched by default. A list
of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,...
can be used to match multiple values.
:param radmult (string, optional): Minimum width of a partition, in
numeric value
:param spacingx (string, optional): Horizontal spacing between two
partitions in a row, in numeric value
:param spacingy (string, optional): Vertical spacing between the largest
partitions of two rows, in numeric value
:param verbose: print more | Below is the the instruction that describes the task:
### Input:
Execute the Group Attributes Layout on a network
:param EdgeAttribute (string, optional): The name of the edge column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param maxwidth (string, optional): Maximum width of a row, in numeric value
:param minrad (string, optional): Minimum width of a partition, in
numeric value
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param NodeAttribute (string, optional): The name of the node column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The
keywords all, selected, or unselected can be used to specify nodes
by their selection state. The pattern COLUMN:VALUE sets this parameter
to any rows that contain the specified column value; if the COLUMN
prefix is not used, the NAME column is matched by default. A list
of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,...
can be used to match multiple values.
:param radmult (string, optional): Minimum width of a partition, in
numeric value
:param spacingx (string, optional): Horizontal spacing between two
partitions in a row, in numeric value
:param spacingy (string, optional): Vertical spacing between the largest
partitions of two rows, in numeric value
:param verbose: print more
### Response:
def attributes_layout(self, EdgeAttribute=None, maxwidth=None, minrad=None, \
network=None, NodeAttribute=None,nodeList=None, radmult=None, \
spacingx=None, spacingy=None, verbose=False):
"""
Execute the Group Attributes Layout on a network
:param EdgeAttribute (string, optional): The name of the edge column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param maxwidth (string, optional): Maximum width of a row, in numeric value
:param minrad (string, optional): Minimum width of a partition, in
numeric value
:param network (string, optional): Specifies a network by name, or by
SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
value can also be used to specify the current network.
:param NodeAttribute (string, optional): The name of the node column
containing numeric values that will be used as weights in the layout
algorithm. Only columns containing numeric values are shown
:param nodeList (string, optional): Specifies a list of nodes. The
keywords all, selected, or unselected can be used to specify nodes
by their selection state. The pattern COLUMN:VALUE sets this parameter
to any rows that contain the specified column value; if the COLUMN
prefix is not used, the NAME column is matched by default. A list
of COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,...
can be used to match multiple values.
:param radmult (string, optional): Minimum width of a partition, in
numeric value
:param spacingx (string, optional): Horizontal spacing between two
partitions in a row, in numeric value
:param spacingy (string, optional): Vertical spacing between the largest
partitions of two rows, in numeric value
:param verbose: print more
"""
network=check_network(self,network,verbose=verbose)
PARAMS=set_param(["EdgeAttribute","network","NodeAttribute","nodeList","singlePartition","spacing"],\
[EdgeAttribute, maxwidth, \
minrad, network, NodeAttribute,nodeList, radmult, \
spacingx, spacingy])
response=api(url=self.__url+"/attributes-layout", PARAMS=PARAMS, method="POST", verbose=verbose)
return response |
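A minimal usage sketch for the method above; the cyclient constructor, the locally running CyREST endpoint, and the "Degree" column are assumptions for illustration, not part of the original record.

from py2cytoscape import cyrest

cytoscape = cyrest.cyclient()            # assumes Cytoscape with CyREST is running locally
cytoscape.layout.attributes_layout(
    NodeAttribute="Degree",              # assumed numeric node column used to group nodes
    spacingx="40",
    spacingy="80",
    network="CURRENT",
    verbose=True,
)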
def _usage_endpoint(self, endpoint, year=None, month=None):
"""
Common helper for getting usage and billing reports with
optional year and month URL elements.
:param str endpoint: Cloudant usage endpoint.
:param int year: Year to query against. Optional parameter.
Defaults to None. If used, it must be accompanied by ``month``.
:param int month: Month to query against that must be an integer
between 1 and 12. Optional parameter. Defaults to None.
If used, it must be accompanied by ``year``.
"""
err = False
if year is None and month is None:
resp = self.r_session.get(endpoint)
else:
try:
if int(year) > 0 and int(month) in range(1, 13):
resp = self.r_session.get(
'/'.join((endpoint, str(int(year)), str(int(month)))))
else:
err = True
except (ValueError, TypeError):
err = True
if err:
raise CloudantArgumentError(101, year, month)
resp.raise_for_status()
return response_to_json_dict(resp) | Common helper for getting usage and billing reports with
optional year and month URL elements.
:param str endpoint: Cloudant usage endpoint.
:param int year: Year to query against. Optional parameter.
Defaults to None. If used, it must be accompanied by ``month``.
:param int month: Month to query against that must be an integer
between 1 and 12. Optional parameter. Defaults to None.
        If used, it must be accompanied by ``year``. | Below is the instruction that describes the task:
### Input:
Common helper for getting usage and billing reports with
optional year and month URL elements.
:param str endpoint: Cloudant usage endpoint.
:param int year: Year to query against. Optional parameter.
Defaults to None. If used, it must be accompanied by ``month``.
:param int month: Month to query against that must be an integer
between 1 and 12. Optional parameter. Defaults to None.
If used, it must be accompanied by ``year``.
### Response:
def _usage_endpoint(self, endpoint, year=None, month=None):
"""
Common helper for getting usage and billing reports with
optional year and month URL elements.
:param str endpoint: Cloudant usage endpoint.
:param int year: Year to query against. Optional parameter.
Defaults to None. If used, it must be accompanied by ``month``.
:param int month: Month to query against that must be an integer
between 1 and 12. Optional parameter. Defaults to None.
If used, it must be accompanied by ``year``.
"""
err = False
if year is None and month is None:
resp = self.r_session.get(endpoint)
else:
try:
if int(year) > 0 and int(month) in range(1, 13):
resp = self.r_session.get(
'/'.join((endpoint, str(int(year)), str(int(month)))))
else:
err = True
except (ValueError, TypeError):
err = True
if err:
raise CloudantArgumentError(101, year, month)
resp.raise_for_status()
return response_to_json_dict(resp) |
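A sketch of how a public method on the same client class might delegate to this private helper; the endpoint path and the server_url attribute are assumptions for illustration only.

def usage_report(self, year=None, month=None):
    """Return a usage report, optionally restricted to a given year and month."""
    # '_api/v2/usage' is an assumed path segment, not necessarily the library's real URL
    endpoint = '/'.join((self.server_url, '_api', 'v2', 'usage'))
    return self._usage_endpoint(endpoint, year, month)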
def _snort_cmd(self, pcap):
"""
Given a pcap filename, get the commandline to run.
:param pcap: Pcap filename to scan
:returns: list of snort command args to scan supplied pcap file
"""
cmdline = "'{0}' -A console -N -y -c '{1}' {2} -r '{3}'" \
.format(self.conf['path'], self.conf['config'],
self.conf['extra_args'] or '', pcap)
# can't seem to capture stderr from snort on windows
# unless launched via cmd shell
if 'nt' in os.name:
cmdline = "cmd.exe /c " + cmdline
return shlex.split(cmdline) | Given a pcap filename, get the commandline to run.
:param pcap: Pcap filename to scan
        :returns: list of snort command args to scan supplied pcap file | Below is the instruction that describes the task:
### Input:
Given a pcap filename, get the commandline to run.
:param pcap: Pcap filename to scan
:returns: list of snort command args to scan supplied pcap file
### Response:
def _snort_cmd(self, pcap):
"""
Given a pcap filename, get the commandline to run.
:param pcap: Pcap filename to scan
:returns: list of snort command args to scan supplied pcap file
"""
cmdline = "'{0}' -A console -N -y -c '{1}' {2} -r '{3}'" \
.format(self.conf['path'], self.conf['config'],
self.conf['extra_args'] or '', pcap)
# can't seem to capture stderr from snort on windows
# unless launched via cmd shell
if 'nt' in os.name:
cmdline = "cmd.exe /c " + cmdline
return shlex.split(cmdline) |
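A standalone sketch of the argument list this helper produces, using made-up configuration values.

import shlex

conf = {'path': '/usr/sbin/snort',             # illustrative values only
        'config': '/etc/snort/snort.conf',
        'extra_args': None}
pcap = '/tmp/capture.pcap'
cmdline = "'{0}' -A console -N -y -c '{1}' {2} -r '{3}'".format(
    conf['path'], conf['config'], conf['extra_args'] or '', pcap)
print(shlex.split(cmdline))
# ['/usr/sbin/snort', '-A', 'console', '-N', '-y', '-c',
#  '/etc/snort/snort.conf', '-r', '/tmp/capture.pcap']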
def extract_number_oscillations(osc_dyn, index = 0, amplitude_threshold = 1.0):
"""!
@brief Extracts number of oscillations of specified oscillator.
@param[in] osc_dyn (list): Dynamic of oscillators.
@param[in] index (uint): Index of oscillator in dynamic.
@param[in] amplitude_threshold (double): Amplitude threshold when oscillation is taken into account, for example,
when oscillator amplitude is greater than threshold then oscillation is incremented.
@return (uint) Number of oscillations of specified oscillator.
"""
number_oscillations = 0;
waiting_differential = False;
threshold_passed = False;
high_level_trigger = True if (osc_dyn[0][index] > amplitude_threshold) else False;
for values in osc_dyn:
if ( (values[index] >= amplitude_threshold) and (high_level_trigger is False) ):
high_level_trigger = True;
threshold_passed = True;
elif ( (values[index] < amplitude_threshold) and (high_level_trigger is True) ):
high_level_trigger = False;
threshold_passed = True;
if (threshold_passed is True):
threshold_passed = False;
if (waiting_differential is True and high_level_trigger is False):
number_oscillations += 1;
waiting_differential = False;
else:
waiting_differential = True;
return number_oscillations; | !
@brief Extracts number of oscillations of specified oscillator.
@param[in] osc_dyn (list): Dynamic of oscillators.
@param[in] index (uint): Index of oscillator in dynamic.
@param[in] amplitude_threshold (double): Amplitude threshold when oscillation is taken into account, for example,
when oscillator amplitude is greater than threshold then oscillation is incremented.
    @return (uint) Number of oscillations of specified oscillator. | Below is the instruction that describes the task:
### Input:
!
@brief Extracts number of oscillations of specified oscillator.
@param[in] osc_dyn (list): Dynamic of oscillators.
@param[in] index (uint): Index of oscillator in dynamic.
@param[in] amplitude_threshold (double): Amplitude threshold when oscillation is taken into account, for example,
when oscillator amplitude is greater than threshold then oscillation is incremented.
@return (uint) Number of oscillations of specified oscillator.
### Response:
def extract_number_oscillations(osc_dyn, index = 0, amplitude_threshold = 1.0):
"""!
@brief Extracts number of oscillations of specified oscillator.
@param[in] osc_dyn (list): Dynamic of oscillators.
@param[in] index (uint): Index of oscillator in dynamic.
@param[in] amplitude_threshold (double): Amplitude threshold when oscillation is taken into account, for example,
when oscillator amplitude is greater than threshold then oscillation is incremented.
@return (uint) Number of oscillations of specified oscillator.
"""
number_oscillations = 0;
waiting_differential = False;
threshold_passed = False;
high_level_trigger = True if (osc_dyn[0][index] > amplitude_threshold) else False;
for values in osc_dyn:
if ( (values[index] >= amplitude_threshold) and (high_level_trigger is False) ):
high_level_trigger = True;
threshold_passed = True;
elif ( (values[index] < amplitude_threshold) and (high_level_trigger is True) ):
high_level_trigger = False;
threshold_passed = True;
if (threshold_passed is True):
threshold_passed = False;
if (waiting_differential is True and high_level_trigger is False):
number_oscillations += 1;
waiting_differential = False;
else:
waiting_differential = True;
return number_oscillations; |
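A small self-check with a synthetic dynamic; the helper counts one oscillation per falling crossing of the threshold, so a sine wave sampled over five periods should yield 5.

import math

# 100 samples of a sine wave with a 20-sample period -> 5 full cycles
osc_dyn = [[math.sin(2.0 * math.pi * t / 20.0)] for t in range(100)]
print(extract_number_oscillations(osc_dyn, index=0, amplitude_threshold=0.0))   # 5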
def add_virtual_columns_proper_motion_gal2eq(self, long_in="ra", lat_in="dec", pm_long="pm_l", pm_lat="pm_b", pm_long_out="pm_ra", pm_lat_out="pm_dec",
name_prefix="__proper_motion_gal2eq",
right_ascension_galactic_pole=192.85,
declination_galactic_pole=27.12,
propagate_uncertainties=False,
radians=False):
"""Transform/rotate proper motions from galactic to equatorial coordinates.
Inverse of :py:`add_virtual_columns_proper_motion_eq2gal`
"""
kwargs = dict(**locals())
kwargs.pop('self')
kwargs['inverse'] = True
self.add_virtual_columns_proper_motion_eq2gal(**kwargs) | Transform/rotate proper motions from galactic to equatorial coordinates.
        Inverse of :py:`add_virtual_columns_proper_motion_eq2gal` | Below is the instruction that describes the task:
### Input:
Transform/rotate proper motions from galactic to equatorial coordinates.
Inverse of :py:`add_virtual_columns_proper_motion_eq2gal`
### Response:
def add_virtual_columns_proper_motion_gal2eq(self, long_in="ra", lat_in="dec", pm_long="pm_l", pm_lat="pm_b", pm_long_out="pm_ra", pm_lat_out="pm_dec",
name_prefix="__proper_motion_gal2eq",
right_ascension_galactic_pole=192.85,
declination_galactic_pole=27.12,
propagate_uncertainties=False,
radians=False):
"""Transform/rotate proper motions from galactic to equatorial coordinates.
Inverse of :py:`add_virtual_columns_proper_motion_eq2gal`
"""
kwargs = dict(**locals())
kwargs.pop('self')
kwargs['inverse'] = True
self.add_virtual_columns_proper_motion_eq2gal(**kwargs) |
def assemble(cls):
"""Assemble a single document using the blueprint"""
document = {}
for field_name, maker in cls._instructions.items():
with maker.target(document):
document[field_name] = maker()
        return document | Assemble a single document using the blueprint | Below is the instruction that describes the task:
### Input:
Assemble a single document using the blueprint
### Response:
def assemble(cls):
"""Assemble a single document using the blueprint"""
document = {}
for field_name, maker in cls._instructions.items():
with maker.target(document):
document[field_name] = maker()
return document |
def run():
''' This client calls a bunch of help commands from workbench '''
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect('tcp://'+args['server']+':'+args['port'])
# Call help methods
print workbench.help()
print workbench.help('basic')
print workbench.help('commands')
print workbench.help('store_sample')
print workbench.help('workers')
print workbench.help('meta')
# Call a test worker
    print workbench.test_worker('meta') | This client calls a bunch of help commands from workbench | Below is the instruction that describes the task:
### Input:
This client calls a bunch of help commands from workbench
### Response:
def run():
''' This client calls a bunch of help commands from workbench '''
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect('tcp://'+args['server']+':'+args['port'])
# Call help methods
print workbench.help()
print workbench.help('basic')
print workbench.help('commands')
print workbench.help('store_sample')
print workbench.help('workers')
print workbench.help('meta')
# Call a test worker
print workbench.test_worker('meta') |
def make(env_name, *args, **kwargs):
"""Try to get the equivalent functionality of gym.make in a sloppy way."""
if env_name not in REGISTERED_ENVS:
raise Exception(
"Environment {} not found. Make sure it is a registered environment among: {}".format(
env_name, ", ".join(REGISTERED_ENVS)
)
)
    return REGISTERED_ENVS[env_name](*args, **kwargs) | Try to get the equivalent functionality of gym.make in a sloppy way. | Below is the instruction that describes the task:
### Input:
Try to get the equivalent functionality of gym.make in a sloppy way.
### Response:
def make(env_name, *args, **kwargs):
"""Try to get the equivalent functionality of gym.make in a sloppy way."""
if env_name not in REGISTERED_ENVS:
raise Exception(
"Environment {} not found. Make sure it is a registered environment among: {}".format(
env_name, ", ".join(REGISTERED_ENVS)
)
)
return REGISTERED_ENVS[env_name](*args, **kwargs) |
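A stand-in sketch of the registry pattern the helper relies on; DummyEnv is illustrative, and the registration line assumes it runs in the same module as make() so that REGISTERED_ENVS is the dict the function reads.

class DummyEnv(object):
    def __init__(self, horizon=100):
        self.horizon = horizon

REGISTERED_ENVS['DummyEnv'] = DummyEnv       # manual registration for this sketch only

env = make('DummyEnv', horizon=500)          # -> DummyEnv instance with horizon == 500
# make('Missing') raises the Exception above, listing all registered names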
def spiral(Di, rmax, rmin, pitch, fd):
r'''Returns loss coefficient for any size constant-pitch spiral
as shown in [1]_. Has applications in immersed coils in tanks.
.. math::
K = \frac{r_{max} - r_{min}}{p} \left[ f\pi\left(\frac{r_{max}
+r_{min}}{d}\right) + 0.20 + 4.8f\right]
+ \frac{13.2f}{(r_{min}/d)^2}
Parameters
----------
Di : float
Inside diameter of pipe, [m]
rmax : float
Radius of spiral at extremity, [m]
rmin : float
Radius of spiral at end near center, [m]
pitch : float
Distance between two subsequent coil centers, [m]
fd : float
Darcy friction factor [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
Source not compared against others.
Examples
--------
>>> spiral(Di=0.01, rmax=.1, rmin=.02, pitch=.01, fd=0.0185)
7.950918552775473
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
'''
return (rmax-rmin)/pitch*(fd*pi*(rmax+rmin)/Di + 0.20 + 4.8*fd) + 13.2*fd/(rmin/Di)**2 | r'''Returns loss coefficient for any size constant-pitch spiral
as shown in [1]_. Has applications in immersed coils in tanks.
.. math::
K = \frac{r_{max} - r_{min}}{p} \left[ f\pi\left(\frac{r_{max}
+r_{min}}{d}\right) + 0.20 + 4.8f\right]
+ \frac{13.2f}{(r_{min}/d)^2}
Parameters
----------
Di : float
Inside diameter of pipe, [m]
rmax : float
Radius of spiral at extremity, [m]
rmin : float
Radius of spiral at end near center, [m]
pitch : float
Distance between two subsequent coil centers, [m]
fd : float
Darcy friction factor [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
Source not compared against others.
Examples
--------
>>> spiral(Di=0.01, rmax=.1, rmin=.02, pitch=.01, fd=0.0185)
7.950918552775473
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
       and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012. | Below is the instruction that describes the task:
### Input:
r'''Returns loss coefficient for any size constant-pitch spiral
as shown in [1]_. Has applications in immersed coils in tanks.
.. math::
K = \frac{r_{max} - r_{min}}{p} \left[ f\pi\left(\frac{r_{max}
+r_{min}}{d}\right) + 0.20 + 4.8f\right]
+ \frac{13.2f}{(r_{min}/d)^2}
Parameters
----------
Di : float
Inside diameter of pipe, [m]
rmax : float
Radius of spiral at extremity, [m]
rmin : float
Radius of spiral at end near center, [m]
pitch : float
Distance between two subsequent coil centers, [m]
fd : float
Darcy friction factor [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
Source not compared against others.
Examples
--------
>>> spiral(Di=0.01, rmax=.1, rmin=.02, pitch=.01, fd=0.0185)
7.950918552775473
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
### Response:
def spiral(Di, rmax, rmin, pitch, fd):
r'''Returns loss coefficient for any size constant-pitch spiral
as shown in [1]_. Has applications in immersed coils in tanks.
.. math::
K = \frac{r_{max} - r_{min}}{p} \left[ f\pi\left(\frac{r_{max}
+r_{min}}{d}\right) + 0.20 + 4.8f\right]
+ \frac{13.2f}{(r_{min}/d)^2}
Parameters
----------
Di : float
Inside diameter of pipe, [m]
rmax : float
Radius of spiral at extremity, [m]
rmin : float
Radius of spiral at end near center, [m]
pitch : float
Distance between two subsequent coil centers, [m]
fd : float
Darcy friction factor [-]
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
Source not compared against others.
Examples
--------
>>> spiral(Di=0.01, rmax=.1, rmin=.02, pitch=.01, fd=0.0185)
7.950918552775473
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
'''
return (rmax-rmin)/pitch*(fd*pi*(rmax+rmin)/Di + 0.20 + 4.8*fd) + 13.2*fd/(rmin/Di)**2 |
def create_comment(self, body, commit_id, path, position):
"""
:calls: `POST /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
:param body: string
:param commit_id: :class:`github.Commit.Commit`
:param path: string
:param position: integer
:rtype: :class:`github.PullRequestComment.PullRequestComment`
"""
return self.create_review_comment(body, commit_id, path, position) | :calls: `POST /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
:param body: string
:param commit_id: :class:`github.Commit.Commit`
:param path: string
:param position: integer
        :rtype: :class:`github.PullRequestComment.PullRequestComment` | Below is the instruction that describes the task:
### Input:
:calls: `POST /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
:param body: string
:param commit_id: :class:`github.Commit.Commit`
:param path: string
:param position: integer
:rtype: :class:`github.PullRequestComment.PullRequestComment`
### Response:
def create_comment(self, body, commit_id, path, position):
"""
:calls: `POST /repos/:owner/:repo/pulls/:number/comments <http://developer.github.com/v3/pulls/comments>`_
:param body: string
:param commit_id: :class:`github.Commit.Commit`
:param path: string
:param position: integer
:rtype: :class:`github.PullRequestComment.PullRequestComment`
"""
return self.create_review_comment(body, commit_id, path, position) |
def get_requires(self, build_requires=False, private_build_requires=False):
"""Get the requirements of the variant.
Args:
build_requires (bool): If True, include build requirements.
private_build_requires (bool): If True, include private build
requirements.
Returns:
List of `Requirement` objects.
"""
requires = self.requires or []
if build_requires:
requires = requires + (self.build_requires or [])
if private_build_requires:
requires = requires + (self.private_build_requires or [])
return requires | Get the requirements of the variant.
Args:
build_requires (bool): If True, include build requirements.
private_build_requires (bool): If True, include private build
requirements.
Returns:
            List of `Requirement` objects. | Below is the instruction that describes the task:
### Input:
Get the requirements of the variant.
Args:
build_requires (bool): If True, include build requirements.
private_build_requires (bool): If True, include private build
requirements.
Returns:
List of `Requirement` objects.
### Response:
def get_requires(self, build_requires=False, private_build_requires=False):
"""Get the requirements of the variant.
Args:
build_requires (bool): If True, include build requirements.
private_build_requires (bool): If True, include private build
requirements.
Returns:
List of `Requirement` objects.
"""
requires = self.requires or []
if build_requires:
requires = requires + (self.build_requires or [])
if private_build_requires:
requires = requires + (self.private_build_requires or [])
return requires |
def connect(remote_host):
""" Connect to remote host and show our status """
if remote_host in ('master', 'server'):
remote_host = nago.settings.get_option('server')
node = nago.core.get_node(remote_host)
if not node:
try:
address = socket.gethostbyname(remote_host)
node = nago.core.Node()
node['host_name'] = remote_host
node['address'] = address
node['access'] = 'node'
            if remote_host == nago.settings.get_option('server'):
node['access'] = 'master'
node.save()
except Exception:
raise Exception("'%s' was not found in list of known hosts, and does not resolve to a valid address" % remote_host)
ping_result = node.send_command('nodes', 'ping')
if 'Pong' in ping_result.get('result', ''):
return "Connection with %s ok" % remote_host
else:
        return ping_result.get('result', ping_result) | Connect to remote host and show our status | Below is the instruction that describes the task:
### Input:
Connect to remote host and show our status
### Response:
def connect(remote_host):
""" Connect to remote host and show our status """
if remote_host in ('master', 'server'):
remote_host = nago.settings.get_option('server')
node = nago.core.get_node(remote_host)
if not node:
try:
address = socket.gethostbyname(remote_host)
node = nago.core.Node()
node['host_name'] = remote_host
node['address'] = address
node['access'] = 'node'
            if remote_host == nago.settings.get_option('server'):
node['access'] = 'master'
node.save()
except Exception:
raise Exception("'%s' was not found in list of known hosts, and does not resolve to a valid address" % remote_host)
ping_result = node.send_command('nodes', 'ping')
if 'Pong' in ping_result.get('result', ''):
return "Connection with %s ok" % remote_host
else:
return ping_result.get('result', ping_result) |
def is_container_system_config_file(file):
"""Determine whether a given file is one of the files created by setup_container_system_config().
@param file: Absolute file path as string.
"""
if not file.startswith("/etc/"):
return False
return file in [os.path.join("/etc", f.decode()) for f in CONTAINER_ETC_FILE_OVERRIDE] | Determine whether a given file is one of the files created by setup_container_system_config().
    @param file: Absolute file path as string. | Below is the instruction that describes the task:
### Input:
Determine whether a given file is one of the files created by setup_container_system_config().
@param file: Absolute file path as string.
### Response:
def is_container_system_config_file(file):
"""Determine whether a given file is one of the files created by setup_container_system_config().
@param file: Absolute file path as string.
"""
if not file.startswith("/etc/"):
return False
return file in [os.path.join("/etc", f.decode()) for f in CONTAINER_ETC_FILE_OVERRIDE] |
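Illustrative calls; CONTAINER_ETC_FILE_OVERRIDE is a module-level list of byte-string file names defined elsewhere, so the exact results depend on its contents.

print(is_container_system_config_file("/etc/hostname"))     # True if b"hostname" is in the override list
print(is_container_system_config_file("/etc/passwd"))       # False unless b"passwd" is overridden
print(is_container_system_config_file("/home/user/notes"))  # False: not under /etc/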
def buttonbox(msg="",title=" "
,choices=("Button1", "Button2", "Button3")
, image=None
, root=None
):
"""
Display a msg, a title, and a set of buttons.
The buttons are defined by the members of the choices list.
Return the text of the button that the user selected.
@arg msg: the msg to be displayed.
@arg title: the window title
@arg choices: a list or tuple of the choices to be displayed
"""
if sys.platform == 'darwin':
_bring_to_front()
global boxRoot, __replyButtonText, __widgetTexts, buttonsFrame
# Initialize __replyButtonText to the first choice.
# This is what will be used if the window is closed by the close button.
__replyButtonText = choices[0]
if root:
root.withdraw()
boxRoot = Toplevel(master=root)
boxRoot.withdraw()
else:
boxRoot = Tk()
boxRoot.withdraw()
boxRoot.protocol('WM_DELETE_WINDOW', denyWindowManagerClose )
boxRoot.title(title)
boxRoot.iconname('Dialog')
boxRoot.geometry(rootWindowPosition)
boxRoot.minsize(400, 100)
# ------------- define the messageFrame ---------------------------------
messageFrame = Frame(master=boxRoot)
messageFrame.pack(side=TOP, fill=BOTH)
# ------------- define the imageFrame ---------------------------------
tk_Image = None
if image:
imageFilename = os.path.normpath(image)
junk,ext = os.path.splitext(imageFilename)
if os.path.exists(imageFilename):
if ext.lower() in [".gif", ".pgm", ".ppm"]:
tk_Image = PhotoImage(master=boxRoot, file=imageFilename)
else:
if PILisLoaded:
try:
pil_Image = PILImage.open(imageFilename)
tk_Image = PILImageTk.PhotoImage(pil_Image, master=boxRoot)
except:
msg += ImageErrorMsg % (imageFilename,
"\nThe Python Imaging Library (PIL) could not convert this file to a displayable image."
"\n\nPIL reports:\n" + exception_format())
else: # PIL is not loaded
msg += ImageErrorMsg % (imageFilename,
"\nI could not import the Python Imaging Library (PIL) to display the image.\n\n"
"You may need to install PIL\n"
"(http://www.pythonware.com/products/pil/)\n"
"to display " + ext + " image files.")
else:
msg += ImageErrorMsg % (imageFilename, "\nImage file not found.")
if tk_Image:
imageFrame = Frame(master=boxRoot)
imageFrame.pack(side=TOP, fill=BOTH)
label = Label(imageFrame,image=tk_Image)
label.image = tk_Image # keep a reference!
label.pack(side=TOP, expand=YES, fill=X, padx='1m', pady='1m')
# ------------- define the buttonsFrame ---------------------------------
buttonsFrame = Frame(master=boxRoot)
buttonsFrame.pack(side=TOP, fill=BOTH)
# -------------------- place the widgets in the frames -----------------------
messageWidget = Message(messageFrame, text=msg, width=400)
messageWidget.configure(font=(PROPORTIONAL_FONT_FAMILY,PROPORTIONAL_FONT_SIZE))
messageWidget.pack(side=TOP, expand=YES, fill=X, padx='3m', pady='3m')
__put_buttons_in_buttonframe(choices)
# -------------- the action begins -----------
# put the focus on the first button
__firstWidget.focus_force()
boxRoot.deiconify()
boxRoot.mainloop()
boxRoot.destroy()
if root: root.deiconify()
return __replyButtonText | Display a msg, a title, and a set of buttons.
The buttons are defined by the members of the choices list.
Return the text of the button that the user selected.
@arg msg: the msg to be displayed.
@arg title: the window title
    @arg choices: a list or tuple of the choices to be displayed | Below is the instruction that describes the task:
### Input:
Display a msg, a title, and a set of buttons.
The buttons are defined by the members of the choices list.
Return the text of the button that the user selected.
@arg msg: the msg to be displayed.
@arg title: the window title
@arg choices: a list or tuple of the choices to be displayed
### Response:
def buttonbox(msg="",title=" "
,choices=("Button1", "Button2", "Button3")
, image=None
, root=None
):
"""
Display a msg, a title, and a set of buttons.
The buttons are defined by the members of the choices list.
Return the text of the button that the user selected.
@arg msg: the msg to be displayed.
@arg title: the window title
@arg choices: a list or tuple of the choices to be displayed
"""
if sys.platform == 'darwin':
_bring_to_front()
global boxRoot, __replyButtonText, __widgetTexts, buttonsFrame
# Initialize __replyButtonText to the first choice.
# This is what will be used if the window is closed by the close button.
__replyButtonText = choices[0]
if root:
root.withdraw()
boxRoot = Toplevel(master=root)
boxRoot.withdraw()
else:
boxRoot = Tk()
boxRoot.withdraw()
boxRoot.protocol('WM_DELETE_WINDOW', denyWindowManagerClose )
boxRoot.title(title)
boxRoot.iconname('Dialog')
boxRoot.geometry(rootWindowPosition)
boxRoot.minsize(400, 100)
# ------------- define the messageFrame ---------------------------------
messageFrame = Frame(master=boxRoot)
messageFrame.pack(side=TOP, fill=BOTH)
# ------------- define the imageFrame ---------------------------------
tk_Image = None
if image:
imageFilename = os.path.normpath(image)
junk,ext = os.path.splitext(imageFilename)
if os.path.exists(imageFilename):
if ext.lower() in [".gif", ".pgm", ".ppm"]:
tk_Image = PhotoImage(master=boxRoot, file=imageFilename)
else:
if PILisLoaded:
try:
pil_Image = PILImage.open(imageFilename)
tk_Image = PILImageTk.PhotoImage(pil_Image, master=boxRoot)
except:
msg += ImageErrorMsg % (imageFilename,
"\nThe Python Imaging Library (PIL) could not convert this file to a displayable image."
"\n\nPIL reports:\n" + exception_format())
else: # PIL is not loaded
msg += ImageErrorMsg % (imageFilename,
"\nI could not import the Python Imaging Library (PIL) to display the image.\n\n"
"You may need to install PIL\n"
"(http://www.pythonware.com/products/pil/)\n"
"to display " + ext + " image files.")
else:
msg += ImageErrorMsg % (imageFilename, "\nImage file not found.")
if tk_Image:
imageFrame = Frame(master=boxRoot)
imageFrame.pack(side=TOP, fill=BOTH)
label = Label(imageFrame,image=tk_Image)
label.image = tk_Image # keep a reference!
label.pack(side=TOP, expand=YES, fill=X, padx='1m', pady='1m')
# ------------- define the buttonsFrame ---------------------------------
buttonsFrame = Frame(master=boxRoot)
buttonsFrame.pack(side=TOP, fill=BOTH)
# -------------------- place the widgets in the frames -----------------------
messageWidget = Message(messageFrame, text=msg, width=400)
messageWidget.configure(font=(PROPORTIONAL_FONT_FAMILY,PROPORTIONAL_FONT_SIZE))
messageWidget.pack(side=TOP, expand=YES, fill=X, padx='3m', pady='3m')
__put_buttons_in_buttonframe(choices)
# -------------- the action begins -----------
# put the focus on the first button
__firstWidget.focus_force()
boxRoot.deiconify()
boxRoot.mainloop()
boxRoot.destroy()
if root: root.deiconify()
return __replyButtonText |
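A minimal call of the function above; the message, title and button labels are illustrative.

reply = buttonbox(msg="Save changes before closing?",
                  title="Confirm",
                  choices=("Save", "Discard", "Cancel"))
if reply == "Save":
    pass   # save and close
elif reply == "Discard":
    pass   # close without saving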
def build_new_offsets(cls, client, topics_offset_data, topic_partitions, current_offsets):
"""Build complete consumer offsets from parsed current consumer-offsets
and lowmarks and highmarks from current-offsets for.
"""
new_offsets = defaultdict(dict)
try:
for topic, partitions in six.iteritems(topic_partitions):
# Validate current offsets in range of low and highmarks
# Currently we only validate for positive offsets and warn
# if out of range of low and highmarks
valid_partitions = set()
for topic_partition_offsets in current_offsets[topic]:
partition = topic_partition_offsets.partition
valid_partitions.add(partition)
# Skip the partition not present in list
if partition not in topic_partitions[topic]:
continue
lowmark = topic_partition_offsets.lowmark
highmark = topic_partition_offsets.highmark
new_offset = topics_offset_data[topic][partition]
if new_offset < 0:
print(
"Error: Given offset: {offset} is negative"
.format(offset=new_offset),
file=sys.stderr,
)
sys.exit(1)
if new_offset < lowmark or new_offset > highmark:
print(
"Warning: Given offset {offset} for topic-partition "
"{topic}:{partition} is outside the range of lowmark "
"{lowmark} and highmark {highmark}".format(
offset=new_offset,
topic=topic,
partition=partition,
lowmark=lowmark,
highmark=highmark,
)
)
new_offsets[topic][partition] = new_offset
if not set(partitions).issubset(valid_partitions):
print(
"Error: Some invalid partitions {partitions} for topic "
"{topic} found. Valid partition-list {valid_partitions}. "
"Exiting...".format(
partitions=', '.join([str(p) for p in partitions]),
valid_partitions=', '.join([str(p) for p in valid_partitions]),
topic=topic,
),
file=sys.stderr,
)
sys.exit(1)
except KeyError as ex:
print(
"Error: Possible invalid topic or partition. Error msg: {ex}. "
"Exiting...".format(ex=ex),
)
sys.exit(1)
return new_offsets | Build complete consumer offsets from parsed current consumer-offsets
        and lowmarks and highmarks from current-offsets for. | Below is the instruction that describes the task:
### Input:
Build complete consumer offsets from parsed current consumer-offsets
and lowmarks and highmarks from current-offsets for.
### Response:
def build_new_offsets(cls, client, topics_offset_data, topic_partitions, current_offsets):
"""Build complete consumer offsets from parsed current consumer-offsets
and lowmarks and highmarks from current-offsets for.
"""
new_offsets = defaultdict(dict)
try:
for topic, partitions in six.iteritems(topic_partitions):
# Validate current offsets in range of low and highmarks
# Currently we only validate for positive offsets and warn
# if out of range of low and highmarks
valid_partitions = set()
for topic_partition_offsets in current_offsets[topic]:
partition = topic_partition_offsets.partition
valid_partitions.add(partition)
# Skip the partition not present in list
if partition not in topic_partitions[topic]:
continue
lowmark = topic_partition_offsets.lowmark
highmark = topic_partition_offsets.highmark
new_offset = topics_offset_data[topic][partition]
if new_offset < 0:
print(
"Error: Given offset: {offset} is negative"
.format(offset=new_offset),
file=sys.stderr,
)
sys.exit(1)
if new_offset < lowmark or new_offset > highmark:
print(
"Warning: Given offset {offset} for topic-partition "
"{topic}:{partition} is outside the range of lowmark "
"{lowmark} and highmark {highmark}".format(
offset=new_offset,
topic=topic,
partition=partition,
lowmark=lowmark,
highmark=highmark,
)
)
new_offsets[topic][partition] = new_offset
if not set(partitions).issubset(valid_partitions):
print(
"Error: Some invalid partitions {partitions} for topic "
"{topic} found. Valid partition-list {valid_partitions}. "
"Exiting...".format(
partitions=', '.join([str(p) for p in partitions]),
valid_partitions=', '.join([str(p) for p in valid_partitions]),
topic=topic,
),
file=sys.stderr,
)
sys.exit(1)
except KeyError as ex:
print(
"Error: Possible invalid topic or partition. Error msg: {ex}. "
"Exiting...".format(ex=ex),
)
sys.exit(1)
return new_offsets |
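An illustrative sketch of the input shapes the classmethod expects; all values are made up, and PartitionOffsets stands in for whatever offset objects the surrounding tooling supplies (anything with partition, lowmark and highmark attributes works).

from collections import namedtuple

PartitionOffsets = namedtuple('PartitionOffsets', ['partition', 'lowmark', 'highmark'])

topics_offset_data = {'my-topic': {0: 150, 1: 200}}        # desired new offsets
topic_partitions = {'my-topic': [0, 1]}                    # partitions being reset
current_offsets = {'my-topic': [PartitionOffsets(0, 100, 300),
                                PartitionOffsets(1, 100, 300)]}
# With inputs shaped like this the method returns {'my-topic': {0: 150, 1: 200}},
# printing a warning (rather than exiting) when a new offset falls outside
# the [lowmark, highmark] range.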
def compressedpubkey(self):
""" Derive uncompressed public key """
secret = unhexlify(repr(self._wif))
order = ecdsa.SigningKey.from_string(
secret, curve=ecdsa.SECP256k1).curve.generator.order()
p = ecdsa.SigningKey.from_string(
secret, curve=ecdsa.SECP256k1).verifying_key.pubkey.point
x_str = ecdsa.util.number_to_string(p.x(), order)
y_str = ecdsa.util.number_to_string(p.y(), order)
compressed = hexlify(
chr(2 + (p.y() & 1)).encode('ascii') + x_str
).decode('ascii')
uncompressed = hexlify(
chr(4).encode('ascii') + x_str + y_str).decode(
'ascii')
        return [compressed, uncompressed] | Derive uncompressed public key | Below is the instruction that describes the task:
### Input:
Derive uncompressed public key
### Response:
def compressedpubkey(self):
""" Derive uncompressed public key """
secret = unhexlify(repr(self._wif))
order = ecdsa.SigningKey.from_string(
secret, curve=ecdsa.SECP256k1).curve.generator.order()
p = ecdsa.SigningKey.from_string(
secret, curve=ecdsa.SECP256k1).verifying_key.pubkey.point
x_str = ecdsa.util.number_to_string(p.x(), order)
y_str = ecdsa.util.number_to_string(p.y(), order)
compressed = hexlify(
chr(2 + (p.y() & 1)).encode('ascii') + x_str
).decode('ascii')
uncompressed = hexlify(
chr(4).encode('ascii') + x_str + y_str).decode(
'ascii')
return [compressed, uncompressed] |
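A standalone sketch of the same 02/03-prefix encoding rule using the ecdsa package directly; the 32-byte secret is a throwaway value for illustration only.

import ecdsa
from binascii import hexlify

secret = bytes(range(1, 33))                         # illustrative private key, never reuse
sk = ecdsa.SigningKey.from_string(secret, curve=ecdsa.SECP256k1)
point = sk.verifying_key.pubkey.point
order = sk.curve.generator.order()
x_str = ecdsa.util.number_to_string(point.x(), order)
prefix = b'\x03' if point.y() & 1 else b'\x02'       # y parity selects the 02/03 prefix
print(hexlify(prefix + x_str).decode('ascii'))       # 33-byte compressed public key, hex-encoded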
def split_unbalanced(chunks):
"""Return (unbalanced_start, balanced, unbalanced_end), where each is
a list of text and tag chunks.
unbalanced_start is a list of all the tags that are opened, but
not closed in this span. Similarly, unbalanced_end is a list of
tags that are closed but were not opened. Extracting these might
mean some reordering of the chunks."""
start = []
end = []
tag_stack = []
balanced = []
for chunk in chunks:
if not chunk.startswith('<'):
balanced.append(chunk)
continue
endtag = chunk[1] == '/'
name = chunk.split()[0].strip('<>/')
if name in empty_tags:
balanced.append(chunk)
continue
if endtag:
if tag_stack and tag_stack[-1][0] == name:
balanced.append(chunk)
name, pos, tag = tag_stack.pop()
balanced[pos] = tag
elif tag_stack:
start.extend([tag for name, pos, tag in tag_stack])
tag_stack = []
end.append(chunk)
else:
end.append(chunk)
else:
tag_stack.append((name, len(balanced), chunk))
balanced.append(None)
start.extend(
[chunk for name, pos, chunk in tag_stack])
balanced = [chunk for chunk in balanced if chunk is not None]
return start, balanced, end | Return (unbalanced_start, balanced, unbalanced_end), where each is
a list of text and tag chunks.
unbalanced_start is a list of all the tags that are opened, but
not closed in this span. Similarly, unbalanced_end is a list of
tags that are closed but were not opened. Extracting these might
    mean some reordering of the chunks. | Below is the instruction that describes the task:
### Input:
Return (unbalanced_start, balanced, unbalanced_end), where each is
a list of text and tag chunks.
unbalanced_start is a list of all the tags that are opened, but
not closed in this span. Similarly, unbalanced_end is a list of
tags that are closed but were not opened. Extracting these might
mean some reordering of the chunks.
### Response:
def split_unbalanced(chunks):
"""Return (unbalanced_start, balanced, unbalanced_end), where each is
a list of text and tag chunks.
unbalanced_start is a list of all the tags that are opened, but
not closed in this span. Similarly, unbalanced_end is a list of
tags that are closed but were not opened. Extracting these might
mean some reordering of the chunks."""
start = []
end = []
tag_stack = []
balanced = []
for chunk in chunks:
if not chunk.startswith('<'):
balanced.append(chunk)
continue
endtag = chunk[1] == '/'
name = chunk.split()[0].strip('<>/')
if name in empty_tags:
balanced.append(chunk)
continue
if endtag:
if tag_stack and tag_stack[-1][0] == name:
balanced.append(chunk)
name, pos, tag = tag_stack.pop()
balanced[pos] = tag
elif tag_stack:
start.extend([tag for name, pos, tag in tag_stack])
tag_stack = []
end.append(chunk)
else:
end.append(chunk)
else:
tag_stack.append((name, len(balanced), chunk))
balanced.append(None)
start.extend(
[chunk for name, pos, chunk in tag_stack])
balanced = [chunk for chunk in balanced if chunk is not None]
return start, balanced, end |
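An illustrative call (empty_tags is a module-level set of void elements such as br and img, defined elsewhere in the same module):

chunks = ['<div>', '<b>', 'hello', '</b>', '</span>', ' world']
start, balanced, end = split_unbalanced(chunks)
print(start)      # ['<div>']                              opened but never closed here
print(balanced)   # ['<b>', 'hello', '</b>', ' world']
print(end)        # ['</span>']                            closed but never opened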
def _split_path(self, path):
"""
Splits a Registry path and returns the hive and key.
@type path: str
@param path: Registry path.
@rtype: tuple( int, str )
@return: Tuple containing the hive handle and the subkey path.
The hive handle is always one of the following integer constants:
- L{win32.HKEY_CLASSES_ROOT}
- L{win32.HKEY_CURRENT_USER}
- L{win32.HKEY_LOCAL_MACHINE}
- L{win32.HKEY_USERS}
- L{win32.HKEY_PERFORMANCE_DATA}
- L{win32.HKEY_CURRENT_CONFIG}
"""
if '\\' in path:
p = path.find('\\')
hive = path[:p]
path = path[p+1:]
else:
hive = path
path = None
handle = self._hives_by_name[ hive.upper() ]
return handle, path | Splits a Registry path and returns the hive and key.
@type path: str
@param path: Registry path.
@rtype: tuple( int, str )
@return: Tuple containing the hive handle and the subkey path.
The hive handle is always one of the following integer constants:
- L{win32.HKEY_CLASSES_ROOT}
- L{win32.HKEY_CURRENT_USER}
- L{win32.HKEY_LOCAL_MACHINE}
- L{win32.HKEY_USERS}
- L{win32.HKEY_PERFORMANCE_DATA}
            - L{win32.HKEY_CURRENT_CONFIG} | Below is the instruction that describes the task:
### Input:
Splits a Registry path and returns the hive and key.
@type path: str
@param path: Registry path.
@rtype: tuple( int, str )
@return: Tuple containing the hive handle and the subkey path.
The hive handle is always one of the following integer constants:
- L{win32.HKEY_CLASSES_ROOT}
- L{win32.HKEY_CURRENT_USER}
- L{win32.HKEY_LOCAL_MACHINE}
- L{win32.HKEY_USERS}
- L{win32.HKEY_PERFORMANCE_DATA}
- L{win32.HKEY_CURRENT_CONFIG}
### Response:
def _split_path(self, path):
"""
Splits a Registry path and returns the hive and key.
@type path: str
@param path: Registry path.
@rtype: tuple( int, str )
@return: Tuple containing the hive handle and the subkey path.
The hive handle is always one of the following integer constants:
- L{win32.HKEY_CLASSES_ROOT}
- L{win32.HKEY_CURRENT_USER}
- L{win32.HKEY_LOCAL_MACHINE}
- L{win32.HKEY_USERS}
- L{win32.HKEY_PERFORMANCE_DATA}
- L{win32.HKEY_CURRENT_CONFIG}
"""
if '\\' in path:
p = path.find('\\')
hive = path[:p]
path = path[p+1:]
else:
hive = path
path = None
handle = self._hives_by_name[ hive.upper() ]
return handle, path |
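Illustrative calls on an instance whose _hives_by_name maps hive names to the win32 constants; the instance name reg is an assumption.

handle, subkey = reg._split_path(r'HKEY_LOCAL_MACHINE\Software\Microsoft')
# handle == win32.HKEY_LOCAL_MACHINE, subkey == r'Software\Microsoft'
handle, subkey = reg._split_path('HKEY_CURRENT_USER')
# handle == win32.HKEY_CURRENT_USER, subkey is None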
def calc_freefree_eta(ne, t, hz):
"""Dulk (1985) equations 7 and 20, assuming pure hydrogen."""
kappa = calc_freefree_kappa(ne, t, hz)
    return kappa * cgs.k * t * hz**2 / cgs.c**2 | Dulk (1985) equations 7 and 20, assuming pure hydrogen. | Below is the instruction that describes the task:
### Input:
Dulk (1985) equations 7 and 20, assuming pure hydrogen.
### Response:
def calc_freefree_eta(ne, t, hz):
"""Dulk (1985) equations 7 and 20, assuming pure hydrogen."""
kappa = calc_freefree_kappa(ne, t, hz)
return kappa * cgs.k * t * hz**2 / cgs.c**2 |
def _averageFromHeader(self, header, keyword):
""" Averages out values taken from header. The keywords where
to read values from are passed as a comma-separated list.
"""
_list = ''
for _kw in keyword.split(','):
if _kw in header:
_list = _list + ',' + str(header[_kw])
else:
return None
return self._averageFromList(_list) | Averages out values taken from header. The keywords where
        to read values from are passed as a comma-separated list. | Below is the instruction that describes the task:
### Input:
Averages out values taken from header. The keywords where
to read values from are passed as a comma-separated list.
### Response:
def _averageFromHeader(self, header, keyword):
""" Averages out values taken from header. The keywords where
to read values from are passed as a comma-separated list.
"""
_list = ''
for _kw in keyword.split(','):
if _kw in header:
_list = _list + ',' + str(header[_kw])
else:
return None
return self._averageFromList(_list) |
def identifier(self, text):
"""identifier = alpha_character | "_" . {alpha_character | "_" | digit} ;"""
self._attempting(text)
return concatenation([
alternation([
self.alpha_character,
"_"
]),
zero_or_more(
alternation([
self.alpha_character,
"_",
self.digit
])
)
        ], ignore_whitespace=False)(text).compressed(TokenType.identifier) | identifier = alpha_character | "_" . {alpha_character | "_" | digit} ; | Below is the instruction that describes the task:
### Input:
identifier = alpha_character | "_" . {alpha_character | "_" | digit} ;
### Response:
def identifier(self, text):
"""identifier = alpha_character | "_" . {alpha_character | "_" | digit} ;"""
self._attempting(text)
return concatenation([
alternation([
self.alpha_character,
"_"
]),
zero_or_more(
alternation([
self.alpha_character,
"_",
self.digit
])
)
], ignore_whitespace=False)(text).compressed(TokenType.identifier) |