code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k)
---|---|---|
def lat_bounds(self):
"""Latitude of grid interfaces (degrees North)
:getter: Returns the bounds of axis ``'lat'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'lat'`` axis can be found.
"""
try:
for domname, dom in self.domains.items():
try:
thislat = dom.axes['lat'].bounds
except:
pass
return thislat
except:
raise ValueError('Can\'t resolve a lat axis.') | Latitude of grid interfaces (degrees North)
:getter: Returns the bounds of axis ``'lat'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'lat'`` axis can be found. | Below is the instruction that describes the task:
### Input:
Latitude of grid interfaces (degrees North)
:getter: Returns the bounds of axis ``'lat'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'lat'`` axis can be found.
### Response:
def lat_bounds(self):
"""Latitude of grid interfaces (degrees North)
:getter: Returns the bounds of axis ``'lat'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'lat'`` axis can be found.
"""
try:
for domname, dom in self.domains.items():
try:
thislat = dom.axes['lat'].bounds
except:
pass
return thislat
except:
raise ValueError('Can\'t resolve a lat axis.') |
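The `lat_bounds` accessor above leans on bare `except:` clauses and on `thislat` being left unbound when no domain has a `'lat'` axis. A minimal, self-contained sketch of the same lookup with explicit exception handling is shown below; the `SimpleNamespace` domains are stand-ins for the real process domain objects, not the library's API.

```python
# Sketch only: explicit exceptions instead of bare `except:`; the domain/axis
# objects here are fakes built with SimpleNamespace.
from types import SimpleNamespace

def lat_bounds(domains):
    """Return the bounds of the first 'lat' axis found in `domains`."""
    for dom in domains.values():
        try:
            return dom.axes['lat'].bounds
        except (KeyError, AttributeError):
            continue  # this domain has no usable 'lat' axis; keep looking
    raise ValueError("Can't resolve a lat axis.")

domains = {
    'ocean': SimpleNamespace(axes={}),
    'atm': SimpleNamespace(axes={'lat': SimpleNamespace(bounds=[-90, 0, 90])}),
}
print(lat_bounds(domains))  # [-90, 0, 90]
```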
def patch_namespaced_cron_job(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_cron_job # noqa: E501
partially update the specified CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data | patch_namespaced_cron_job # noqa: E501
partially update the specified CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
patch_namespaced_cron_job # noqa: E501
partially update the specified CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
### Response:
def patch_namespaced_cron_job(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_cron_job # noqa: E501
partially update the specified CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data |
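The generated Kubernetes client method above follows a common sync/async dispatch pattern: delegate to a `*_with_http_info` helper and either return the data directly or hand back a thread-like handle when `async_req=True`. The toy class below illustrates just that pattern with invented names; it does not call the real Kubernetes API.

```python
# Toy illustration of the sync/async dispatch used by generated API clients.
from concurrent.futures import ThreadPoolExecutor

class ToyApi:
    def __init__(self):
        self._pool = ThreadPoolExecutor(max_workers=2)

    def _patch_with_http_info(self, name, namespace, body):
        # Pretend HTTP call; generated clients return (data, status, headers).
        return ({'name': name, 'namespace': namespace, 'patched': body}, 200, {})

    def patch(self, name, namespace, body, async_req=False):
        if async_req:
            # Hand back a Future; the caller uses .result(), analogous to thread.get().
            return self._pool.submit(self._patch_with_http_info, name, namespace, body)
        data, _status, _headers = self._patch_with_http_info(name, namespace, body)
        return data

api = ToyApi()
print(api.patch('nightly', 'default', {'spec': {'suspend': True}}))
future = api.patch('nightly', 'default', {'spec': {'suspend': False}}, async_req=True)
print(future.result()[0])
```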
def current_codes_from_pdb():
""" Get list of all PDB codes currently listed in the PDB.
Returns
-------
pdb_codes : list(str)
List of PDB codes (in lower case).
"""
url = 'http://www.rcsb.org/pdb/rest/getCurrent'
r = requests.get(url)
if r.status_code == 200:
pdb_codes = [x.lower() for x in r.text.split('"') if len(x) == 4]
else:
print('Request for {0} failed with status code {1}'.format(url, r.status_code))
return
return pdb_codes | Get list of all PDB codes currently listed in the PDB.
Returns
-------
pdb_codes : list(str)
List of PDB codes (in lower case). | Below is the instruction that describes the task:
### Input:
Get list of all PDB codes currently listed in the PDB.
Returns
-------
pdb_codes : list(str)
List of PDB codes (in lower case).
### Response:
def current_codes_from_pdb():
""" Get list of all PDB codes currently listed in the PDB.
Returns
-------
pdb_codes : list(str)
List of PDB codes (in lower case).
"""
url = 'http://www.rcsb.org/pdb/rest/getCurrent'
r = requests.get(url)
if r.status_code == 200:
pdb_codes = [x.lower() for x in r.text.split('"') if len(x) == 4]
else:
print('Request for {0} failed with status code {1}'.format(url, r.status_code))
return
return pdb_codes |
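`current_codes_from_pdb` depends on a live HTTP request to a legacy RCSB endpoint. The parsing step can be exercised offline as below; the sample XML is only an assumed shape of that response, not captured output.

```python
# Offline illustration of the parsing step: keep every double-quote-delimited
# token of length 4 and lower-case it.
sample = '<current><PDB structureId="1ABC"/><PDB structureId="2XYZ"/></current>'
pdb_codes = [x.lower() for x in sample.split('"') if len(x) == 4]
print(pdb_codes)  # ['1abc', '2xyz']
```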
def accepts_admin_roles(func):
"""
Decorator that accepts only admin roles
:param func:
:return:
"""
if inspect.isclass(func):
apply_function_to_members(func, accepts_admin_roles)
return func
else:
@functools.wraps(func)
def decorator(*args, **kwargs):
return accepts_roles(*ROLES_ADMIN)(func)(*args, **kwargs)
return decorator | Decorator that accepts only admin roles
:param func:
:return: | Below is the instruction that describes the task:
### Input:
Decorator that accepts only admin roles
:param func:
:return:
### Response:
def accepts_admin_roles(func):
"""
Decorator that accepts only admin roles
:param func:
:return:
"""
if inspect.isclass(func):
apply_function_to_members(func, accepts_admin_roles)
return func
else:
@functools.wraps(func)
def decorator(*args, **kwargs):
return accepts_roles(*ROLES_ADMIN)(func)(*args, **kwargs)
return decorator |
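The decorator above dispatches on whether it received a class or a function, wrapping every method in the class case. A self-contained sketch of that dispatch follows, with a simple module-level role check standing in for `accepts_roles(*ROLES_ADMIN)`.

```python
# Sketch of a "decorate a function or every method of a class" decorator.
import functools
import inspect

CURRENT_ROLE = 'admin'  # hypothetical global, used only for this example

def requires_admin(func):
    if inspect.isclass(func):
        # Decorating a class: wrap each of its public callables in place.
        for name, member in vars(func).copy().items():
            if callable(member) and not name.startswith('_'):
                setattr(func, name, requires_admin(member))
        return func
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        if CURRENT_ROLE != 'admin':
            raise PermissionError('admin role required')
        return func(*args, **kwargs)
    return decorator

@requires_admin
class Reports:
    def export(self):
        return 'ok'

print(Reports().export())  # 'ok' because CURRENT_ROLE is 'admin'
```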
def raw(self, clean=False):
"""Raw identifier.
args:
clean (bool): clean name
returns:
str
"""
if clean:
return ''.join(''.join(p) for p in self.parsed).replace('?', ' ')
return '%'.join('%'.join(p) for p in self.parsed).strip().strip('%') | Raw identifier.
args:
clean (bool): clean name
returns:
str | Below is the instruction that describes the task:
### Input:
Raw identifier.
args:
clean (bool): clean name
returns:
str
### Response:
def raw(self, clean=False):
"""Raw identifier.
args:
clean (bool): clean name
returns:
str
"""
if clean:
return ''.join(''.join(p) for p in self.parsed).replace('?', ' ')
return '%'.join('%'.join(p) for p in self.parsed).strip().strip('%') |
async def issueClaims(self, allClaimRequest: Dict[ID, ClaimRequest]) -> \
Dict[ID, Claims]:
"""
Issue claims for the given users and schemas.
:param allClaimRequest: a map of schema ID to a claim
request containing prover ID and prover-generated values
:return: The claims (both primary and non-revocation)
"""
res = {}
for schemaId, claimReq in allClaimRequest.items():
res[schemaId] = await self.issueClaim(schemaId, claimReq)
return res | Issue claims for the given users and schemas.
:param allClaimRequest: a map of schema ID to a claim
request containing prover ID and prover-generated values
:return: The claims (both primary and non-revocation) | Below is the instruction that describes the task:
### Input:
Issue claims for the given users and schemas.
:param allClaimRequest: a map of schema ID to a claim
request containing prover ID and prover-generated values
:return: The claims (both primary and non-revocation)
### Response:
async def issueClaims(self, allClaimRequest: Dict[ID, ClaimRequest]) -> \
Dict[ID, Claims]:
"""
Issue claims for the given users and schemas.
:param allClaimRequest: a map of schema ID to a claim
request containing prover ID and prover-generated values
:return: The claims (both primary and non-revocation)
"""
res = {}
for schemaId, claimReq in allClaimRequest.items():
res[schemaId] = await self.issueClaim(schemaId, claimReq)
return res |
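`issueClaims` awaits one `issueClaim` call per schema sequentially. The asyncio sketch below reproduces that shape with fake claim payloads; if the per-schema calls were safe to run concurrently, `asyncio.gather` would be the natural variation.

```python
# Minimal asyncio sketch of awaiting one coroutine per schema ID.
import asyncio

async def issue_claim(schema_id, claim_req):
    await asyncio.sleep(0)  # stand-in for the real crypto/IO work
    return {'schema': schema_id, 'claim_for': claim_req}

async def issue_claims(all_claim_requests):
    res = {}
    for schema_id, claim_req in all_claim_requests.items():
        res[schema_id] = await issue_claim(schema_id, claim_req)
    return res

print(asyncio.run(issue_claims({1: 'prover-A', 2: 'prover-B'})))
```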
def noise_from_psd(length, delta_t, psd, seed=None):
""" Create noise with a given psd.
Return noise with a given psd. Note that if unique noise is desired
a unique seed should be provided.
Parameters
----------
length : int
The length of noise to generate in samples.
delta_t : float
The time step of the noise.
psd : FrequencySeries
The noise weighting to color the noise.
seed : {0, int}
The seed to generate the noise.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise colored by the given psd.
"""
noise_ts = TimeSeries(zeros(length), delta_t=delta_t)
if seed is None:
seed = numpy.random.randint(2**32)
randomness = lal.gsl_rng("ranlux", seed)
N = int (1.0 / delta_t / psd.delta_f)
n = N//2+1
stride = N//2
if n > len(psd):
raise ValueError("PSD not compatible with requested delta_t")
psd = (psd[0:n]).lal()
psd.data.data[n-1] = 0
segment = TimeSeries(zeros(N), delta_t=delta_t).lal()
length_generated = 0
SimNoise(segment, 0, psd, randomness)
while (length_generated < length):
if (length_generated + stride) < length:
noise_ts.data[length_generated:length_generated+stride] = segment.data.data[0:stride]
else:
noise_ts.data[length_generated:length] = segment.data.data[0:length-length_generated]
length_generated += stride
SimNoise(segment, stride, psd, randomness)
return noise_ts | Create noise with a given psd.
Return noise with a given psd. Note that if unique noise is desired
a unique seed should be provided.
Parameters
----------
length : int
The length of noise to generate in samples.
delta_t : float
The time step of the noise.
psd : FrequencySeries
The noise weighting to color the noise.
seed : {0, int}
The seed to generate the noise.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise colored by the given psd. | Below is the instruction that describes the task:
### Input:
Create noise with a given psd.
Return noise with a given psd. Note that if unique noise is desired
a unique seed should be provided.
Parameters
----------
length : int
The length of noise to generate in samples.
delta_t : float
The time step of the noise.
psd : FrequencySeries
The noise weighting to color the noise.
seed : {0, int}
The seed to generate the noise.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise colored by the given psd.
### Response:
def noise_from_psd(length, delta_t, psd, seed=None):
""" Create noise with a given psd.
Return noise with a given psd. Note that if unique noise is desired
a unique seed should be provided.
Parameters
----------
length : int
The length of noise to generate in samples.
delta_t : float
The time step of the noise.
psd : FrequencySeries
The noise weighting to color the noise.
seed : {0, int}
The seed to generate the noise.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise colored by the given psd.
"""
noise_ts = TimeSeries(zeros(length), delta_t=delta_t)
if seed is None:
seed = numpy.random.randint(2**32)
randomness = lal.gsl_rng("ranlux", seed)
N = int (1.0 / delta_t / psd.delta_f)
n = N//2+1
stride = N//2
if n > len(psd):
raise ValueError("PSD not compatible with requested delta_t")
psd = (psd[0:n]).lal()
psd.data.data[n-1] = 0
segment = TimeSeries(zeros(N), delta_t=delta_t).lal()
length_generated = 0
SimNoise(segment, 0, psd, randomness)
while (length_generated < length):
if (length_generated + stride) < length:
noise_ts.data[length_generated:length_generated+stride] = segment.data.data[0:stride]
else:
noise_ts.data[length_generated:length] = segment.data.data[0:length-length_generated]
length_generated += stride
SimNoise(segment, stride, psd, randomness)
return noise_ts |
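The function above colours Gaussian noise with lalsimulation's `SimNoise`, generating it in half-segment strides. As a rough numpy-only illustration of the underlying idea (shape white noise by the square root of the PSD in the frequency domain), one might write the sketch below; it is not equivalent to the stride-based generator and ignores normalisation details.

```python
# Assumed, simplified picture of "noise with a given PSD" using numpy only.
import numpy as np

def coloured_noise(length, delta_t, psd_func, seed=None):
    rng = np.random.default_rng(seed)
    freqs = np.fft.rfftfreq(length, d=delta_t)
    white = np.fft.rfft(rng.standard_normal(length))
    # Scale each frequency bin by the square root of the one-sided PSD.
    shaped = white * np.sqrt(psd_func(freqs))
    return np.fft.irfft(shaped, n=length)

noise = coloured_noise(4096, 1.0 / 1024, lambda f: 1.0 / (1.0 + f), seed=0)
print(noise.shape, float(noise.std()))
```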
def get_host_info_dict_from_describe_dict(self, describe_dict):
''' Parses the dictionary returned by the API call into a flat list
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes. '''
# I really don't agree with prefixing everything with 'ec2'
# because EC2, RDS and ElastiCache are different services.
# I'm just following the pattern used until now to not break any
# compatibility.
host_info = {}
for key in describe_dict:
value = describe_dict[key]
key = self.to_safe('ec2_' + self.uncammelize(key))
# Handle complex types
# Target: Memcached Cache Clusters
if key == 'ec2_configuration_endpoint' and value:
host_info['ec2_configuration_endpoint_address'] = value['Address']
host_info['ec2_configuration_endpoint_port'] = value['Port']
# Target: Cache Nodes and Redis Cache Clusters (single node)
if key == 'ec2_endpoint' and value:
host_info['ec2_endpoint_address'] = value['Address']
host_info['ec2_endpoint_port'] = value['Port']
# Target: Redis Replication Groups
if key == 'ec2_node_groups' and value:
host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
replica_count = 0
for node in value[0]['NodeGroupMembers']:
if node['CurrentRole'] == 'primary':
host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
if key == 'ec2_member_clusters' and value:
host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
# Target: All Cache Clusters
elif key == 'ec2_cache_parameter_group':
host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
# Target: Almost everything
elif key == 'ec2_security_groups':
# Skip if SecurityGroups is None
# (it is possible to have the key defined but no value in it).
if value is not None:
sg_ids = []
for sg in value:
sg_ids.append(sg['SecurityGroupId'])
host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
# Target: Everything
# Preserve booleans and integers
elif type(value) in [int, bool]:
host_info[key] = value
# Target: Everything
# Sanitize string values
elif isinstance(value, six.string_types):
host_info[key] = value.strip()
# Target: Everything
# Replace None by an empty string
elif type(value) == type(None):
host_info[key] = ''
else:
# Remove non-processed complex types
pass
return host_info | Parses the dictionary returned by the API call into a flat list
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes. | Below is the instruction that describes the task:
### Input:
Parses the dictionary returned by the API call into a flat list
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes.
### Response:
def get_host_info_dict_from_describe_dict(self, describe_dict):
''' Parses the dictionary returned by the API call into a flat list
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes. '''
# I really don't agree with prefixing everything with 'ec2'
# because EC2, RDS and ElastiCache are different services.
# I'm just following the pattern used until now to not break any
# compatibility.
host_info = {}
for key in describe_dict:
value = describe_dict[key]
key = self.to_safe('ec2_' + self.uncammelize(key))
# Handle complex types
# Target: Memcached Cache Clusters
if key == 'ec2_configuration_endpoint' and value:
host_info['ec2_configuration_endpoint_address'] = value['Address']
host_info['ec2_configuration_endpoint_port'] = value['Port']
# Target: Cache Nodes and Redis Cache Clusters (single node)
if key == 'ec2_endpoint' and value:
host_info['ec2_endpoint_address'] = value['Address']
host_info['ec2_endpoint_port'] = value['Port']
# Target: Redis Replication Groups
if key == 'ec2_node_groups' and value:
host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
replica_count = 0
for node in value[0]['NodeGroupMembers']:
if node['CurrentRole'] == 'primary':
host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
if key == 'ec2_member_clusters' and value:
host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
# Target: All Cache Clusters
elif key == 'ec2_cache_parameter_group':
host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
# Target: Almost everything
elif key == 'ec2_security_groups':
# Skip if SecurityGroups is None
# (it is possible to have the key defined but no value in it).
if value is not None:
sg_ids = []
for sg in value:
sg_ids.append(sg['SecurityGroupId'])
host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
# Target: Everything
# Preserve booleans and integers
elif type(value) in [int, bool]:
host_info[key] = value
# Target: Everything
# Sanitize string values
elif isinstance(value, six.string_types):
host_info[key] = value.strip()
# Target: Everything
# Replace None by an empty string
elif type(value) == type(None):
host_info[key] = ''
else:
# Remove non-processed complex types
pass
return host_info |
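The ElastiCache helper above flattens a nested `describe` payload into `ec2_`-prefixed snake_case keys, special-casing endpoint fields. The stand-alone example below shows the core of that flattening on a made-up payload, with a small regex standing in for `self.uncammelize`.

```python
# Flattening sketch: prefix keys, CamelCase -> snake_case, lift endpoint fields.
import re

def uncamelize(key):
    return re.sub(r'(?<!^)(?=[A-Z])', '_', key).lower()

def flatten_describe(describe_dict):
    host_info = {}
    for key, value in describe_dict.items():
        key = 'ec2_' + uncamelize(key)
        if key == 'ec2_endpoint' and value:
            host_info['ec2_endpoint_address'] = value['Address']
            host_info['ec2_endpoint_port'] = value['Port']
        elif isinstance(value, (int, bool)):
            host_info[key] = value
        elif isinstance(value, str):
            host_info[key] = value.strip()
        elif value is None:
            host_info[key] = ''
    return host_info

sample = {'CacheClusterId': ' redis-1 ', 'NumCacheNodes': 2,
          'Endpoint': {'Address': 'redis-1.example.cache', 'Port': 6379}}
print(flatten_describe(sample))
```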
def move_to(self, location):
"""Changes the location of this medium. Some medium types may support
changing the storage unit location by simply changing the value of the
associated property. In this case the operation is performed
immediately, and @a progress is returning a @c null reference.
Otherwise on success there is a progress object returned, which
signals progress and completion of the operation. This distinction is
necessary because for some formats the operation is very fast, while
for others it can be very slow (moving the image file by copying all
data), and in the former case it'd be a waste of resources to create
a progress object which will immediately signal completion.
When setting a location for a medium which corresponds to a/several
regular file(s) in the host's file system, the given file name may be
either relative to the :py:func:`IVirtualBox.home_folder` VirtualBox
home folder or absolute. Note that if the given location
specification does not contain the file extension part then a proper
default extension will be automatically appended by the implementation
depending on the medium type.
in location of type str
New location.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`OleErrorNotimpl`
The operation is not implemented yet.
raises :class:`VBoxErrorNotSupported`
Medium format does not support changing the location.
"""
if not isinstance(location, basestring):
raise TypeError("location can only be an instance of type basestring")
progress = self._call("moveTo",
in_p=[location])
progress = IProgress(progress)
return progress | Changes the location of this medium. Some medium types may support
changing the storage unit location by simply changing the value of the
associated property. In this case the operation is performed
immediately, and @a progress is returning a @c null reference.
Otherwise on success there is a progress object returned, which
signals progress and completion of the operation. This distinction is
necessary because for some formats the operation is very fast, while
for others it can be very slow (moving the image file by copying all
data), and in the former case it'd be a waste of resources to create
a progress object which will immediately signal completion.
When setting a location for a medium which corresponds to a/several
regular file(s) in the host's file system, the given file name may be
either relative to the :py:func:`IVirtualBox.home_folder` VirtualBox
home folder or absolute. Note that if the given location
specification does not contain the file extension part then a proper
default extension will be automatically appended by the implementation
depending on the medium type.
in location of type str
New location.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`OleErrorNotimpl`
The operation is not implemented yet.
raises :class:`VBoxErrorNotSupported`
Medium format does not support changing the location. | Below is the instruction that describes the task:
### Input:
Changes the location of this medium. Some medium types may support
changing the storage unit location by simply changing the value of the
associated property. In this case the operation is performed
immediately, and @a progress is returning a @c null reference.
Otherwise on success there is a progress object returned, which
signals progress and completion of the operation. This distinction is
necessary because for some formats the operation is very fast, while
for others it can be very slow (moving the image file by copying all
data), and in the former case it'd be a waste of resources to create
a progress object which will immediately signal completion.
When setting a location for a medium which corresponds to a/several
regular file(s) in the host's file system, the given file name may be
either relative to the :py:func:`IVirtualBox.home_folder` VirtualBox
home folder or absolute. Note that if the given location
specification does not contain the file extension part then a proper
default extension will be automatically appended by the implementation
depending on the medium type.
in location of type str
New location.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`OleErrorNotimpl`
The operation is not implemented yet.
raises :class:`VBoxErrorNotSupported`
Medium format does not support changing the location.
### Response:
def move_to(self, location):
"""Changes the location of this medium. Some medium types may support
changing the storage unit location by simply changing the value of the
associated property. In this case the operation is performed
immediately, and @a progress is returning a @c null reference.
Otherwise on success there is a progress object returned, which
signals progress and completion of the operation. This distinction is
necessary because for some formats the operation is very fast, while
for others it can be very slow (moving the image file by copying all
data), and in the former case it'd be a waste of resources to create
a progress object which will immediately signal completion.
When setting a location for a medium which corresponds to a/several
regular file(s) in the host's file system, the given file name may be
either relative to the :py:func:`IVirtualBox.home_folder` VirtualBox
home folder or absolute. Note that if the given location
specification does not contain the file extension part then a proper
default extension will be automatically appended by the implementation
depending on the medium type.
in location of type str
New location.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`OleErrorNotimpl`
The operation is not implemented yet.
raises :class:`VBoxErrorNotSupported`
Medium format does not support changing the location.
"""
if not isinstance(location, basestring):
raise TypeError("location can only be an instance of type basestring")
progress = self._call("moveTo",
in_p=[location])
progress = IProgress(progress)
return progress |
def _ValidateFSM(self):
"""Checks state names and destinations for validity.
Each destination state must exist, be a valid name and
not be a reserved name.
There must be a 'Start' state and if 'EOF' or 'End' states are specified,
they must be empty.
Returns:
True if FSM is valid.
Raises:
TextFSMTemplateError: If any state definitions are invalid.
"""
# Must have 'Start' state.
if 'Start' not in self.states:
raise TextFSMTemplateError("Missing state 'Start'.")
# 'End/EOF' state (if specified) must be empty.
if self.states.get('End'):
raise TextFSMTemplateError("Non-Empty 'End' state.")
if self.states.get('EOF'):
raise TextFSMTemplateError("Non-Empty 'EOF' state.")
# Remove 'End' state.
if 'End' in self.states:
del self.states['End']
self.state_list.remove('End')
# Ensure jump states are all valid.
for state in self.states:
for rule in self.states[state]:
if rule.line_op == 'Error':
continue
if not rule.new_state or rule.new_state in ('End', 'EOF'):
continue
if rule.new_state not in self.states:
raise TextFSMTemplateError(
"State '%s' not found, referenced in state '%s'" %
(rule.new_state, state))
return True | Checks state names and destinations for validity.
Each destination state must exist, be a valid name and
not be a reserved name.
There must be a 'Start' state and if 'EOF' or 'End' states are specified,
they must be empty.
Returns:
True if FSM is valid.
Raises:
TextFSMTemplateError: If any state definitions are invalid. | Below is the instruction that describes the task:
### Input:
Checks state names and destinations for validity.
Each destination state must exist, be a valid name and
not be a reserved name.
There must be a 'Start' state and if 'EOF' or 'End' states are specified,
they must be empty.
Returns:
True if FSM is valid.
Raises:
TextFSMTemplateError: If any state definitions are invalid.
### Response:
def _ValidateFSM(self):
"""Checks state names and destinations for validity.
Each destination state must exist, be a valid name and
not be a reserved name.
There must be a 'Start' state and if 'EOF' or 'End' states are specified,
they must be empty.
Returns:
True if FSM is valid.
Raises:
TextFSMTemplateError: If any state definitions are invalid.
"""
# Must have 'Start' state.
if 'Start' not in self.states:
raise TextFSMTemplateError("Missing state 'Start'.")
# 'End/EOF' state (if specified) must be empty.
if self.states.get('End'):
raise TextFSMTemplateError("Non-Empty 'End' state.")
if self.states.get('EOF'):
raise TextFSMTemplateError("Non-Empty 'EOF' state.")
# Remove 'End' state.
if 'End' in self.states:
del self.states['End']
self.state_list.remove('End')
# Ensure jump states are all valid.
for state in self.states:
for rule in self.states[state]:
if rule.line_op == 'Error':
continue
if not rule.new_state or rule.new_state in ('End', 'EOF'):
continue
if rule.new_state not in self.states:
raise TextFSMTemplateError(
"State '%s' not found, referenced in state '%s'" %
(rule.new_state, state))
return True |
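The same validation rules are easier to follow on a plain dictionary. In the sketch below, rules are reduced to `(line_op, new_state)` tuples instead of the class's rule objects.

```python
# Stand-alone sketch of the TextFSM state-validation rules.
def validate_fsm(states):
    if 'Start' not in states:
        raise ValueError("Missing state 'Start'.")
    for reserved in ('End', 'EOF'):
        if states.get(reserved):
            raise ValueError("Non-Empty '%s' state." % reserved)
    states.pop('End', None)  # 'End' is allowed only when empty, then dropped
    for state, rules in states.items():
        for line_op, new_state in rules:
            if line_op == 'Error' or not new_state or new_state in ('End', 'EOF'):
                continue
            if new_state not in states:
                raise ValueError("State '%s' not found, referenced in state '%s'"
                                 % (new_state, state))
    return True

print(validate_fsm({'Start': [('Next', 'Collect')],
                    'Collect': [('Record', 'EOF')],
                    'End': []}))  # True
```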
def display_value(self, value):
"""Sets `sysparm_display_value`
:param value: Bool or 'all'
"""
if not (isinstance(value, bool) or value == 'all'):
raise InvalidUsage("Display value can be of type bool or value 'all'")
self._sysparms['sysparm_display_value'] = value | Sets `sysparm_display_value`
:param value: Bool or 'all' | Below is the instruction that describes the task:
### Input:
Sets `sysparm_display_value`
:param value: Bool or 'all'
### Response:
def display_value(self, value):
"""Sets `sysparm_display_value`
:param value: Bool or 'all'
"""
if not (isinstance(value, bool) or value == 'all'):
raise InvalidUsage("Display value can be of type bool or value 'all'")
self._sysparms['sysparm_display_value'] = value |
def startIndyPool(**kwargs):
'''Start the indy_pool docker container iff it is not already running. See
<indy-sdk>/ci/indy-pool.dockerfile for details. Idempotent. Simply ensures
that the indy_pool container is up and running.'''
# TODO: Decide if we need a separate docker container for testing and one for
# development. The indy_sdk tests setup and teardown "indy_pool" on
# ports 9701-9708. Perhaps we need an "indy_dev_pool" on 9709-9716? I'm
# not quite sure where all of our dependencies are on port 9701-9708.
# If the test harness (mocks) are hardcoding 9701-9708, then an
# indy_dev_pool on different ports will not work.
print("Starting indy_pool ...")
# Check if indy_pool is running
if containerIsRunning("indy_pool"):
print("... already running")
exit(0)
else:
# If the container already exists and isn't running, force remove it and
# re-add it. This is brute force, but is sufficient and simple.
container = getContainer("indy_pool")
if container:
container.remove(force=True)
# Build and run indy_pool if it is not already running
# Build the indy_pool image from the dockerfile in:
# /vagrant/indy-sdk/ci/indy-pool.dockerfile
#
# In shell using docker cli:
# cd /vagrant/indy-sdk
# sudo docker build -f ci/indy-pool.dockerfile -t indy_pool .
#
# NOTE: https://jira.hyperledger.org/browse/IS-406 prevents indy_pool from
# starting on the `rc` branch. Apply the patch in the Jira issue to
# overcome this problem.
try:
# indy-sdk won't be in /vagrant if the indy-sdk is cloned to a directory outside
# the Vagrant project. Regardless of where indy-sdk is cloned, it will be found
# in /src/indy-sdk in the Vagrant VM.
image = getImage(path="/src/indy-sdk", dockerfile="ci/indy-pool.dockerfile",
tag="indy_pool")
except TypeError as exc:
image = getImage(path="/vagrant/indy-sdk", dockerfile="ci/indy-pool.dockerfile",
tag="indy_pool")
except:
print("Failed to find indy-pool.dockerfile in /vagrant/indy-sdk or /src/indy-sdk")
# Run a container using the image
#
# In shell using docker cli:
# sudo docker run -itd -p 9701-9708:9701-9708 indy_pool
#
# NOTE: {'2222/tcp': 3333} is sufficient. A tuple of (address, port) if you
# want to specify the host interface.
container = runContainer(image, ports={
'9701/tcp': ('0.0.0.0', 9701),
'9702/tcp': ('0.0.0.0', 9702),
'9703/tcp': ('0.0.0.0', 9703),
'9704/tcp': ('0.0.0.0', 9704),
'9705/tcp': ('0.0.0.0', 9705),
'9706/tcp': ('0.0.0.0', 9706),
'9707/tcp': ('0.0.0.0', 9707),
'9708/tcp': ('0.0.0.0', 9708)
}, detach=True, name="indy_pool"
)
print("...started") | Start the indy_pool docker container iff it is not already running. See
<indy-sdk>/ci/indy-pool.dockerfile for details. Idempotent. Simply ensures
that the indy_pool container is up and running. | Below is the instruction that describes the task:
### Input:
Start the indy_pool docker container iff it is not already running. See
<indy-sdk>/ci/indy-pool.dockerfile for details. Idempotent. Simply ensures
that the indy_pool container is up and running.
### Response:
def startIndyPool(**kwargs):
'''Start the indy_pool docker container iff it is not already running. See
<indy-sdk>/ci/indy-pool.dockerfile for details. Idempotent. Simply ensures
that the indy_pool container is up and running.'''
# TODO: Decide if we need a separate docker container for testing and one for
# development. The indy_sdk tests setup and teardown "indy_pool" on
# ports 9701-9708. Perhaps we need an "indy_dev_pool" on 9709-9716? I'm
# not quite sure where all of our dependencies are on port 9701-9708.
# If the test harness (mocks) are hardcoding 9701-9708, then an
# indy_dev_pool on different ports will not work.
print("Starting indy_pool ...")
# Check if indy_pool is running
if containerIsRunning("indy_pool"):
print("... already running")
exit(0)
else:
# If the container already exists and isn't running, force remove it and
# re-add it. This is brute force, but is sufficient and simple.
container = getContainer("indy_pool")
if container:
container.remove(force=True)
# Build and run indy_pool if it is not already running
# Build the indy_pool image from the dockerfile in:
# /vagrant/indy-sdk/ci/indy-pool.dockerfile
#
# In shell using docker cli:
# cd /vagrant/indy-sdk
# sudo docker build -f ci/indy-pool.dockerfile -t indy_pool .
#
# NOTE: https://jira.hyperledger.org/browse/IS-406 prevents indy_pool from
# starting on the `rc` branch. Apply the patch in the Jira issue to
# overcome this problem.
try:
# indy-sdk won't be in /vagrant if the indy-sdk is cloned to a directory outside
# the Vagrant project. Regardless of where indy-sdk is cloned, it will be found
# in /src/indy-sdk in the Vagrant VM.
image = getImage(path="/src/indy-sdk", dockerfile="ci/indy-pool.dockerfile",
tag="indy_pool")
except TypeError as exc:
image = getImage(path="/vagrant/indy-sdk", dockerfile="ci/indy-pool.dockerfile",
tag="indy_pool")
except:
print("Failed to find indy-pool.dockerfile in /vagrant/indy-sdk or /src/indy-sdk")
# Run a container using the image
#
# In shell using docker cli:
# sudo docker run -itd -p 9701-9708:9701-9708 indy_pool
#
# NOTE: {'2222/tcp': 3333} is sufficient. A tuple of (address, port) if you
# want to specify the host interface.
container = runContainer(image, ports={
'9701/tcp': ('0.0.0.0', 9701),
'9702/tcp': ('0.0.0.0', 9702),
'9703/tcp': ('0.0.0.0', 9703),
'9704/tcp': ('0.0.0.0', 9704),
'9705/tcp': ('0.0.0.0', 9705),
'9706/tcp': ('0.0.0.0', 9706),
'9707/tcp': ('0.0.0.0', 9707),
'9708/tcp': ('0.0.0.0', 9708)
}, detach=True, name="indy_pool"
)
print("...started") |
def get_domain_events(self, originator_id, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=True,
page_size=None):
"""
Returns domain events for given entity ID.
""" | Returns domain events for given entity ID. | Below is the the instruction that describes the task:
### Input:
Returns domain events for given entity ID.
### Response:
def get_domain_events(self, originator_id, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=True,
page_size=None):
"""
Returns domain events for given entity ID.
""" |
def url2fs(url):
""" encode a URL to be safe as a filename """
uri, extension = posixpath.splitext(url)
return safe64.dir(uri) + extension | encode a URL to be safe as a filename | Below is the instruction that describes the task:
### Input:
encode a URL to be safe as a filename
### Response:
def url2fs(url):
""" encode a URL to be safe as a filename """
uri, extension = posixpath.splitext(url)
return safe64.dir(uri) + extension |
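`safe64.dir` is a project-local helper that is not shown here. The sketch below keeps the `posixpath.splitext` split but substitutes URL-safe base64 for the encoding step purely to make the example runnable; the real `safe64` encoding may differ.

```python
# Assumed stand-in for safe64.dir using urlsafe base64.
import base64
import posixpath

def url2fs(url):
    uri, extension = posixpath.splitext(url)
    safe = base64.urlsafe_b64encode(uri.encode()).decode().rstrip('=')
    return safe + extension

print(url2fs('http://example.com/tiles/3/4/5.png'))
```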
def _SetTable(self, table):
"""Sets table, with column headers and separators."""
if not isinstance(table, TextTable):
raise TypeError('Not an instance of TextTable.')
self.Reset()
self._table = copy.deepcopy(table._table) # pylint: disable=W0212
# Point parent table of each row back ourselves.
for row in self:
row.table = self | Sets table, with column headers and separators. | Below is the instruction that describes the task:
### Input:
Sets table, with column headers and separators.
### Response:
def _SetTable(self, table):
"""Sets table, with column headers and separators."""
if not isinstance(table, TextTable):
raise TypeError('Not an instance of TextTable.')
self.Reset()
self._table = copy.deepcopy(table._table) # pylint: disable=W0212
# Point parent table of each row back ourselves.
for row in self:
row.table = self |
def dev_from_name(self, name):
"""Return the first pcap device name for a given Windows
device name.
"""
try:
return next(iface for iface in six.itervalues(self)
if (iface.name == name or iface.description == name))
except (StopIteration, RuntimeError):
raise ValueError("Unknown network interface %r" % name) | Return the first pcap device name for a given Windows
device name. | Below is the instruction that describes the task:
### Input:
Return the first pcap device name for a given Windows
device name.
### Response:
def dev_from_name(self, name):
"""Return the first pcap device name for a given Windows
device name.
"""
try:
return next(iface for iface in six.itervalues(self)
if (iface.name == name or iface.description == name))
except (StopIteration, RuntimeError):
raise ValueError("Unknown network interface %r" % name) |
def add_mutations_and_flush(self, table, muts):
"""
Add mutations to a table without the need to create and manage a batch writer.
"""
if not isinstance(muts, list) and not isinstance(muts, tuple):
muts = [muts]
cells = {}
for mut in muts:
cells.setdefault(mut.row, []).extend(mut.updates)
self.client.updateAndFlush(self.login, table, cells) | Add mutations to a table without the need to create and manage a batch writer. | Below is the instruction that describes the task:
### Input:
Add mutations to a table without the need to create and manage a batch writer.
### Response:
def add_mutations_and_flush(self, table, muts):
"""
Add mutations to a table without the need to create and manage a batch writer.
"""
if not isinstance(muts, list) and not isinstance(muts, tuple):
muts = [muts]
cells = {}
for mut in muts:
cells.setdefault(mut.row, []).extend(mut.updates)
self.client.updateAndFlush(self.login, table, cells) |
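The interesting part of `add_mutations_and_flush` is grouping updates by row key before the single `updateAndFlush` call. That grouping step in isolation, with mutation objects faked by a namedtuple:

```python
# Group updates per row key with dict.setdefault before one flush call.
from collections import namedtuple

Mutation = namedtuple('Mutation', ['row', 'updates'])

muts = [Mutation('row1', ['colA=1']), Mutation('row1', ['colB=2']),
        Mutation('row2', ['colA=9'])]
cells = {}
for mut in muts:
    cells.setdefault(mut.row, []).extend(mut.updates)
print(cells)  # {'row1': ['colA=1', 'colB=2'], 'row2': ['colA=9']}
```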
def sjuncChunk(key, chunk):
"""
Parse Super Junction (SJUNC) Chunk Method
"""
schunk = chunk[0].strip().split()
result = {'sjuncNumber': schunk[1],
'groundSurfaceElev': schunk[2],
'invertElev': schunk[3],
'manholeSA': schunk[4],
'inletCode': schunk[5],
'linkOrCellI': schunk[6],
'nodeOrCellJ': schunk[7],
'weirSideLength': schunk[8],
'orificeDiameter': schunk[9]}
return result | Parse Super Junction (SJUNC) Chunk Method | Below is the instruction that describes the task:
### Input:
Parse Super Junction (SJUNC) Chunk Method
### Response:
def sjuncChunk(key, chunk):
"""
Parse Super Junction (SJUNC) Chunk Method
"""
schunk = chunk[0].strip().split()
result = {'sjuncNumber': schunk[1],
'groundSurfaceElev': schunk[2],
'invertElev': schunk[3],
'manholeSA': schunk[4],
'inletCode': schunk[5],
'linkOrCellI': schunk[6],
'nodeOrCellJ': schunk[7],
'weirSideLength': schunk[8],
'orificeDiameter': schunk[9]}
return result |
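A quick way to see the field positions is to feed the parser a single made-up SJUNC card: token 0 is the `SJUNC` keyword itself, so values start at index 1.

```python
# Same parsing logic, exercised on an invented SJUNC card.
def sjunc_chunk(chunk):
    schunk = chunk[0].strip().split()
    return {'sjuncNumber': schunk[1], 'groundSurfaceElev': schunk[2],
            'invertElev': schunk[3], 'manholeSA': schunk[4],
            'inletCode': schunk[5], 'linkOrCellI': schunk[6],
            'nodeOrCellJ': schunk[7], 'weirSideLength': schunk[8],
            'orificeDiameter': schunk[9]}

print(sjunc_chunk(['SJUNC  1  725.0  720.5  12.6  0  10  14  3.0  0.5']))
```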
def _init(self):
"""Read the success byte."""
self._api_version = self._file.read(1)[0]
self._firmware_version = FirmwareVersion(*self._file.read(2)) | Read the success byte. | Below is the instruction that describes the task:
### Input:
Read the success byte.
### Response:
def _init(self):
"""Read the success byte."""
self._api_version = self._file.read(1)[0]
self._firmware_version = FirmwareVersion(*self._file.read(2)) |
def get_instance_by_slug(model, slug, **kwargs):
"""Get an instance by slug.
:param model: a string, model name in rio.models
:param slug: a string used to query by `slug`. This requires there is a
slug field in model definition.
:return: None or a SQLAlchemy Model instance.
"""
try:
model = get_model(model)
except ImportError:
return None
query_params = dict(kwargs)
query_params['slug'] = slug
return model.query.filter_by(**query_params).first() | Get an instance by slug.
:param model: a string, model name in rio.models
:param slug: a string used to query by `slug`. This requires there is a
slug field in model definition.
:return: None or a SQLAlchemy Model instance. | Below is the instruction that describes the task:
### Input:
Get an instance by slug.
:param model: a string, model name in rio.models
:param slug: a string used to query by `slug`. This requires there is a
slug field in model definition.
:return: None or a SQLAlchemy Model instance.
### Response:
def get_instance_by_slug(model, slug, **kwargs):
"""Get an instance by slug.
:param model: a string, model name in rio.models
:param slug: a string used to query by `slug`. This requires there is a
slug field in model definition.
:return: None or a SQLAlchemy Model instance.
"""
try:
model = get_model(model)
except ImportError:
return None
query_params = dict(kwargs)
query_params['slug'] = slug
return model.query.filter_by(**query_params).first() |
def getvariable(name):
"""Get the value of a local variable somewhere in the call stack."""
import inspect
fr = inspect.currentframe()
try:
while fr:
fr = fr.f_back
vars = fr.f_locals
if name in vars:
return vars[name]
except:
pass
return None | Get the value of a local variable somewhere in the call stack. | Below is the instruction that describes the task:
### Input:
Get the value of a local variable somewhere in the call stack.
### Response:
def getvariable(name):
"""Get the value of a local variable somewhere in the call stack."""
import inspect
fr = inspect.currentframe()
try:
while fr:
fr = fr.f_back
vars = fr.f_locals
if name in vars:
return vars[name]
except:
pass
return None |
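A usage sketch of the frame-walking idea: a helper deep in the call stack recovers a local defined by one of its callers. The bare `except:` is narrowed here to `AttributeError`, which is what running past the outermost frame (where `fr` becomes `None`) actually raises in this version.

```python
# Walk caller frames upward until a local with the given name is found.
import inspect

def getvariable(name):
    fr = inspect.currentframe()
    try:
        while fr:
            fr = fr.f_back
            if name in fr.f_locals:
                return fr.f_locals[name]
    except AttributeError:  # fr became None: no caller defined the name
        pass
    return None

def caller():
    secret = 42  # local we want to see from deeper in the stack
    return helper()

def helper():
    return getvariable('secret')

print(caller())  # 42
```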
async def _dump_message_field(self, writer, msg, field, fvalue=None):
"""
Dumps a message field to the writer. Field is defined by the message field specification.
:param writer:
:param msg:
:param field:
:param fvalue:
:return:
"""
fname, ftype, params = field[0], field[1], field[2:]
fvalue = getattr(msg, fname, None) if fvalue is None else fvalue
await self.dump_field(writer, fvalue, ftype, params) | Dumps a message field to the writer. Field is defined by the message field specification.
:param writer:
:param msg:
:param field:
:param fvalue:
:return: | Below is the instruction that describes the task:
### Input:
Dumps a message field to the writer. Field is defined by the message field specification.
:param writer:
:param msg:
:param field:
:param fvalue:
:return:
### Response:
async def _dump_message_field(self, writer, msg, field, fvalue=None):
"""
Dumps a message field to the writer. Field is defined by the message field specification.
:param writer:
:param msg:
:param field:
:param fvalue:
:return:
"""
fname, ftype, params = field[0], field[1], field[2:]
fvalue = getattr(msg, fname, None) if fvalue is None else fvalue
await self.dump_field(writer, fvalue, ftype, params) |
def __msg_curse_sum(self, ret, sep_char='_', mmm=None, args=None):
"""
Build the sum message (only when filter is on) and add it to the ret dict.
* ret: list of string where the message is added
* sep_char: define the line separation char
* mmm: display min, max, mean or current (if mmm=None)
* args: Glances args
"""
ret.append(self.curse_new_line())
if mmm is None:
ret.append(self.curse_add_line(sep_char * 69))
ret.append(self.curse_new_line())
# CPU percent sum
msg = self.layout_stat['cpu'].format(self.__sum_stats('cpu_percent', mmm=mmm))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm)))
# MEM percent sum
msg = self.layout_stat['mem'].format(self.__sum_stats('memory_percent', mmm=mmm))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm)))
# VIRT and RES memory sum
if 'memory_info' in self.stats[0] and self.stats[0]['memory_info'] is not None and self.stats[0]['memory_info'] != '':
# VMS
msg = self.layout_stat['virt'].format(self.auto_unit(self.__sum_stats('memory_info', indice=1, mmm=mmm), low_precision=False))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm),
optional=True))
# RSS
msg = self.layout_stat['res'].format(self.auto_unit(self.__sum_stats('memory_info', indice=0, mmm=mmm), low_precision=False))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm),
optional=True))
else:
msg = self.layout_header['virt'].format('')
ret.append(self.curse_add_line(msg))
msg = self.layout_header['res'].format('')
ret.append(self.curse_add_line(msg))
# PID
msg = self.layout_header['pid'].format('', width=self.__max_pid_size())
ret.append(self.curse_add_line(msg))
# USER
msg = self.layout_header['user'].format('')
ret.append(self.curse_add_line(msg))
# TIME+
msg = self.layout_header['time'].format('')
ret.append(self.curse_add_line(msg, optional=True))
# THREAD
msg = self.layout_header['thread'].format('')
ret.append(self.curse_add_line(msg))
# NICE
msg = self.layout_header['nice'].format('')
ret.append(self.curse_add_line(msg))
# STATUS
msg = self.layout_header['status'].format('')
ret.append(self.curse_add_line(msg))
# IO read/write
if 'io_counters' in self.stats[0] and mmm is None:
# IO read
io_rs = int((self.__sum_stats('io_counters', 0) - self.__sum_stats('io_counters', indice=2, mmm=mmm)) / self.stats[0]['time_since_update'])
if io_rs == 0:
msg = self.layout_stat['ior'].format('0')
else:
msg = self.layout_stat['ior'].format(self.auto_unit(io_rs, low_precision=True))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm),
optional=True, additional=True))
# IO write
io_ws = int((self.__sum_stats('io_counters', 1) - self.__sum_stats('io_counters', indice=3, mmm=mmm)) / self.stats[0]['time_since_update'])
if io_ws == 0:
msg = self.layout_stat['iow'].format('0')
else:
msg = self.layout_stat['iow'].format(self.auto_unit(io_ws, low_precision=True))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm),
optional=True, additional=True))
else:
msg = self.layout_header['ior'].format('')
ret.append(self.curse_add_line(msg, optional=True, additional=True))
msg = self.layout_header['iow'].format('')
ret.append(self.curse_add_line(msg, optional=True, additional=True))
if mmm is None:
msg = ' < {}'.format('current')
ret.append(self.curse_add_line(msg, optional=True))
else:
msg = ' < {}'.format(mmm)
ret.append(self.curse_add_line(msg, optional=True))
msg = ' (\'M\' to reset)'
ret.append(self.curse_add_line(msg, optional=True)) | Build the sum message (only when filter is on) and add it to the ret dict.
* ret: list of string where the message is added
* sep_char: define the line separation char
* mmm: display min, max, mean or current (if mmm=None)
* args: Glances args | Below is the instruction that describes the task:
### Input:
Build the sum message (only when filter is on) and add it to the ret dict.
* ret: list of string where the message is added
* sep_char: define the line separation char
* mmm: display min, max, mean or current (if mmm=None)
* args: Glances args
### Response:
def __msg_curse_sum(self, ret, sep_char='_', mmm=None, args=None):
"""
Build the sum message (only when filter is on) and add it to the ret dict.
* ret: list of string where the message is added
* sep_char: define the line separation char
* mmm: display min, max, mean or current (if mmm=None)
* args: Glances args
"""
ret.append(self.curse_new_line())
if mmm is None:
ret.append(self.curse_add_line(sep_char * 69))
ret.append(self.curse_new_line())
# CPU percent sum
msg = self.layout_stat['cpu'].format(self.__sum_stats('cpu_percent', mmm=mmm))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm)))
# MEM percent sum
msg = self.layout_stat['mem'].format(self.__sum_stats('memory_percent', mmm=mmm))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm)))
# VIRT and RES memory sum
if 'memory_info' in self.stats[0] and self.stats[0]['memory_info'] is not None and self.stats[0]['memory_info'] != '':
# VMS
msg = self.layout_stat['virt'].format(self.auto_unit(self.__sum_stats('memory_info', indice=1, mmm=mmm), low_precision=False))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm),
optional=True))
# RSS
msg = self.layout_stat['res'].format(self.auto_unit(self.__sum_stats('memory_info', indice=0, mmm=mmm), low_precision=False))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm),
optional=True))
else:
msg = self.layout_header['virt'].format('')
ret.append(self.curse_add_line(msg))
msg = self.layout_header['res'].format('')
ret.append(self.curse_add_line(msg))
# PID
msg = self.layout_header['pid'].format('', width=self.__max_pid_size())
ret.append(self.curse_add_line(msg))
# USER
msg = self.layout_header['user'].format('')
ret.append(self.curse_add_line(msg))
# TIME+
msg = self.layout_header['time'].format('')
ret.append(self.curse_add_line(msg, optional=True))
# THREAD
msg = self.layout_header['thread'].format('')
ret.append(self.curse_add_line(msg))
# NICE
msg = self.layout_header['nice'].format('')
ret.append(self.curse_add_line(msg))
# STATUS
msg = self.layout_header['status'].format('')
ret.append(self.curse_add_line(msg))
# IO read/write
if 'io_counters' in self.stats[0] and mmm is None:
# IO read
io_rs = int((self.__sum_stats('io_counters', 0) - self.__sum_stats('io_counters', indice=2, mmm=mmm)) / self.stats[0]['time_since_update'])
if io_rs == 0:
msg = self.layout_stat['ior'].format('0')
else:
msg = self.layout_stat['ior'].format(self.auto_unit(io_rs, low_precision=True))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm),
optional=True, additional=True))
# IO write
io_ws = int((self.__sum_stats('io_counters', 1) - self.__sum_stats('io_counters', indice=3, mmm=mmm)) / self.stats[0]['time_since_update'])
if io_ws == 0:
msg = self.layout_stat['iow'].format('0')
else:
msg = self.layout_stat['iow'].format(self.auto_unit(io_ws, low_precision=True))
ret.append(self.curse_add_line(msg,
decoration=self.__mmm_deco(mmm),
optional=True, additional=True))
else:
msg = self.layout_header['ior'].format('')
ret.append(self.curse_add_line(msg, optional=True, additional=True))
msg = self.layout_header['iow'].format('')
ret.append(self.curse_add_line(msg, optional=True, additional=True))
if mmm is None:
msg = ' < {}'.format('current')
ret.append(self.curse_add_line(msg, optional=True))
else:
msg = ' < {}'.format(mmm)
ret.append(self.curse_add_line(msg, optional=True))
msg = ' (\'M\' to reset)'
ret.append(self.curse_add_line(msg, optional=True)) |
def _iter_axes_subartists(ax):
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*."""
yield from ax.collections
yield from ax.images
yield from ax.lines
yield from ax.patches
yield from ax.texts | r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*. | Below is the instruction that describes the task:
### Input:
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*.
### Response:
def _iter_axes_subartists(ax):
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*."""
yield from ax.collections
yield from ax.images
yield from ax.lines
yield from ax.patches
yield from ax.texts |
def pkill():
"""Kill all of FIO processes"""
if env():
return 1
cmd = ["ps -aux | grep fio | grep -v grep"]
status, _, _ = cij.ssh.command(cmd, shell=True, echo=False)
if not status:
status, _, _ = cij.ssh.command(["pkill -f fio"], shell=True)
if status:
return 1
return 0 | Kill all of FIO processes | Below is the instruction that describes the task:
### Input:
Kill all of FIO processes
### Response:
def pkill():
"""Kill all of FIO processes"""
if env():
return 1
cmd = ["ps -aux | grep fio | grep -v grep"]
status, _, _ = cij.ssh.command(cmd, shell=True, echo=False)
if not status:
status, _, _ = cij.ssh.command(["pkill -f fio"], shell=True)
if status:
return 1
return 0 |
def get_size_in_bytes(self, handle):
"""Return the size in bytes."""
fpath = self._fpath_from_handle(handle)
return os.stat(fpath).st_size | Return the size in bytes. | Below is the instruction that describes the task:
### Input:
Return the size in bytes.
### Response:
def get_size_in_bytes(self, handle):
"""Return the size in bytes."""
fpath = self._fpath_from_handle(handle)
return os.stat(fpath).st_size |
def Runtime_compileScript(self, expression, sourceURL, persistScript, **kwargs
):
"""
Function path: Runtime.compileScript
Domain: Runtime
Method name: compileScript
Parameters:
Required arguments:
'expression' (type: string) -> Expression to compile.
'sourceURL' (type: string) -> Source url to be set for the script.
'persistScript' (type: boolean) -> Specifies whether the compiled script should be persisted.
Optional arguments:
'executionContextId' (type: ExecutionContextId) -> Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page.
Returns:
'scriptId' (type: ScriptId) -> Id of the script.
'exceptionDetails' (type: ExceptionDetails) -> Exception details.
Description: Compiles expression.
"""
assert isinstance(expression, (str,)
), "Argument 'expression' must be of type '['str']'. Received type: '%s'" % type(
expression)
assert isinstance(sourceURL, (str,)
), "Argument 'sourceURL' must be of type '['str']'. Received type: '%s'" % type(
sourceURL)
assert isinstance(persistScript, (bool,)
), "Argument 'persistScript' must be of type '['bool']'. Received type: '%s'" % type(
persistScript)
expected = ['executionContextId']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['executionContextId']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('Runtime.compileScript',
expression=expression, sourceURL=sourceURL, persistScript=
persistScript, **kwargs)
return subdom_funcs | Function path: Runtime.compileScript
Domain: Runtime
Method name: compileScript
Parameters:
Required arguments:
'expression' (type: string) -> Expression to compile.
'sourceURL' (type: string) -> Source url to be set for the script.
'persistScript' (type: boolean) -> Specifies whether the compiled script should be persisted.
Optional arguments:
'executionContextId' (type: ExecutionContextId) -> Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page.
Returns:
'scriptId' (type: ScriptId) -> Id of the script.
'exceptionDetails' (type: ExceptionDetails) -> Exception details.
Description: Compiles expression. | Below is the instruction that describes the task:
### Input:
Function path: Runtime.compileScript
Domain: Runtime
Method name: compileScript
Parameters:
Required arguments:
'expression' (type: string) -> Expression to compile.
'sourceURL' (type: string) -> Source url to be set for the script.
'persistScript' (type: boolean) -> Specifies whether the compiled script should be persisted.
Optional arguments:
'executionContextId' (type: ExecutionContextId) -> Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page.
Returns:
'scriptId' (type: ScriptId) -> Id of the script.
'exceptionDetails' (type: ExceptionDetails) -> Exception details.
Description: Compiles expression.
### Response:
def Runtime_compileScript(self, expression, sourceURL, persistScript, **kwargs
):
"""
Function path: Runtime.compileScript
Domain: Runtime
Method name: compileScript
Parameters:
Required arguments:
'expression' (type: string) -> Expression to compile.
'sourceURL' (type: string) -> Source url to be set for the script.
'persistScript' (type: boolean) -> Specifies whether the compiled script should be persisted.
Optional arguments:
'executionContextId' (type: ExecutionContextId) -> Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page.
Returns:
'scriptId' (type: ScriptId) -> Id of the script.
'exceptionDetails' (type: ExceptionDetails) -> Exception details.
Description: Compiles expression.
"""
assert isinstance(expression, (str,)
), "Argument 'expression' must be of type '['str']'. Received type: '%s'" % type(
expression)
assert isinstance(sourceURL, (str,)
), "Argument 'sourceURL' must be of type '['str']'. Received type: '%s'" % type(
sourceURL)
assert isinstance(persistScript, (bool,)
), "Argument 'persistScript' must be of type '['bool']'. Received type: '%s'" % type(
persistScript)
expected = ['executionContextId']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['executionContextId']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('Runtime.compileScript',
expression=expression, sourceURL=sourceURL, persistScript=
persistScript, **kwargs)
return subdom_funcs |
def get_trigger(self, trigger_id):
"""
Retrieves the named trigger from the Weather Alert API.
:param trigger_id: the ID of the trigger
:type trigger_id: str
:return: a `pyowm.alertapi30.trigger.Trigger` instance
"""
assert isinstance(trigger_id, str), "Value must be a string"
status, data = self.http_client.get_json(
NAMED_TRIGGER_URI % trigger_id,
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
return self.trigger_parser.parse_dict(data) | Retrieves the named trigger from the Weather Alert API.
:param trigger_id: the ID of the trigger
:type trigger_id: str
:return: a `pyowm.alertapi30.trigger.Trigger` instance | Below is the the instruction that describes the task:
### Input:
Retrieves the named trigger from the Weather Alert API.
:param trigger_id: the ID of the trigger
:type trigger_id: str
:return: a `pyowm.alertapi30.trigger.Trigger` instance
### Response:
def get_trigger(self, trigger_id):
"""
Retrieves the named trigger from the Weather Alert API.
:param trigger_id: the ID of the trigger
:type trigger_id: str
:return: a `pyowm.alertapi30.trigger.Trigger` instance
"""
assert isinstance(trigger_id, str), "Value must be a string"
status, data = self.http_client.get_json(
NAMED_TRIGGER_URI % trigger_id,
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
return self.trigger_parser.parse_dict(data) |
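A minimal usage sketch (not part of the row above): `am` stands for an instance of the manager class that defines get_trigger, and the trigger id is a made-up example.

# Hypothetical manager instance exposing the get_trigger method above; the id is illustrative.
trigger = am.get_trigger('5d1b053ad9c2cd00123456ab')
print(trigger)  # a pyowm.alertapi30.trigger.Trigger instance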
def _load_config(self):
"""Read the configuration file and load it into memory."""
self._config = ConfigParser.SafeConfigParser()
self._config.read(self.config_path) | Read the configuration file and load it into memory. | Below is the the instruction that describes the task:
### Input:
Read the configuration file and load it into memory.
### Response:
def _load_config(self):
"""Read the configuration file and load it into memory."""
self._config = ConfigParser.SafeConfigParser()
self._config.read(self.config_path) |
def update_cached_response(self, request, response):
"""On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response.
"""
cache_url = self.cache_url(request.url)
cached_response = self.serializer.loads(request, self.cache.get(cache_url))
if not cached_response:
# we didn't have a cached response
return response
# Lets update our headers with the headers from the new request:
# http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
#
# The server isn't supposed to send headers that would make
# the cached body invalid. But... just in case, we'll be sure
# to strip out ones we know that might be problematic due to
# typical assumptions.
excluded_headers = ["content-length"]
cached_response.headers.update(
dict(
(k, v)
for k, v in response.headers.items()
if k.lower() not in excluded_headers
)
)
# we want a 200 b/c we have content via the cache
cached_response.status = 200
# update our cache
self.cache.set(cache_url, self.serializer.dumps(request, cached_response))
return cached_response | On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response. | Below is the the instruction that describes the task:
### Input:
On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response.
### Response:
def update_cached_response(self, request, response):
"""On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response.
"""
cache_url = self.cache_url(request.url)
cached_response = self.serializer.loads(request, self.cache.get(cache_url))
if not cached_response:
# we didn't have a cached response
return response
# Lets update our headers with the headers from the new request:
# http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
#
# The server isn't supposed to send headers that would make
# the cached body invalid. But... just in case, we'll be sure
# to strip out ones we know that might be problematic due to
# typical assumptions.
excluded_headers = ["content-length"]
cached_response.headers.update(
dict(
(k, v)
for k, v in response.headers.items()
if k.lower() not in excluded_headers
)
)
# we want a 200 b/c we have content via the cache
cached_response.status = 200
# update our cache
self.cache.set(cache_url, self.serializer.dumps(request, cached_response))
return cached_response |
def create(session, course_name):
"""
Create an instance using a session and a course_name.
@param session: Requests session.
@type session: requests.Session
@param course_name: Course name (slug) from course json.
@type course_name: str
@return: Instance of OnDemandCourseMaterialItems
@rtype: OnDemandCourseMaterialItems
"""
dom = get_page(session, OPENCOURSE_ONDEMAND_COURSE_MATERIALS,
json=True,
class_name=course_name)
return OnDemandCourseMaterialItemsV1(
dom['linked']['onDemandCourseMaterialItems.v1']) | Create an instance using a session and a course_name.
@param session: Requests session.
@type session: requests.Session
@param course_name: Course name (slug) from course json.
@type course_name: str
@return: Instance of OnDemandCourseMaterialItems
@rtype: OnDemandCourseMaterialItems | Below is the the instruction that describes the task:
### Input:
Create an instance using a session and a course_name.
@param session: Requests session.
@type session: requests.Session
@param course_name: Course name (slug) from course json.
@type course_name: str
@return: Instance of OnDemandCourseMaterialItems
@rtype: OnDemandCourseMaterialItems
### Response:
def create(session, course_name):
"""
Create an instance using a session and a course_name.
@param session: Requests session.
@type session: requests.Session
@param course_name: Course name (slug) from course json.
@type course_name: str
@return: Instance of OnDemandCourseMaterialItems
@rtype: OnDemandCourseMaterialItems
"""
dom = get_page(session, OPENCOURSE_ONDEMAND_COURSE_MATERIALS,
json=True,
class_name=course_name)
return OnDemandCourseMaterialItemsV1(
dom['linked']['onDemandCourseMaterialItems.v1']) |
def set_schedule_enabled(self, state):
"""
:param state: a boolean True (on) or False (off)
:return: nothing
"""
desired_state = {"schedule_enabled": state}
response = self.api_interface.set_device_state(self, {
"desired_state": desired_state
})
self._update_state_from_response(response) | :param state: a boolean True (on) or False (off)
:return: nothing | Below is the the instruction that describes the task:
### Input:
:param state: a boolean True (on) or False (off)
:return: nothing
### Response:
def set_schedule_enabled(self, state):
"""
:param state: a boolean True (on) or False (off)
:return: nothing
"""
desired_state = {"schedule_enabled": state}
response = self.api_interface.set_device_state(self, {
"desired_state": desired_state
})
self._update_state_from_response(response) |
def login_in_terminal(self, need_captcha=False, use_getpass=True):
"""不使用cookies,在终端中根据提示登陆知乎
:param bool need_captcha: 是否要求输入验证码,如果登录失败请设为 True
:param bool use_getpass: 是否使用安全模式输入密码,默认为 True,
如果在某些 Windows IDE 中无法正常输入密码,请把此参数设置为 False 试试
:return: 如果成功返回cookies字符串
:rtype: str
"""
print('====== zhihu login =====')
email = input('email: ')
if use_getpass:
password = getpass.getpass('password: ')
else:
password = input("password: ")
if need_captcha:
captcha_data = self.get_captcha()
with open('captcha.gif', 'wb') as f:
f.write(captcha_data)
print('please check captcha.gif for captcha')
captcha = input('captcha: ')
os.remove('captcha.gif')
else:
captcha = None
print('====== logging.... =====')
code, msg, cookies = self.login(email, password, captcha)
if code == 0:
print('login successfully')
else:
print('login failed, reason: {0}'.format(msg))
return cookies | Log in to Zhihu from the terminal, following the prompts, without using cookies
:param bool need_captcha: whether a captcha must be entered; set this to True if login fails
:param bool use_getpass: whether to read the password in secure (hidden) mode, defaults to True;
if the password cannot be entered normally in some Windows IDEs, try setting this parameter to False
:return: the cookies string if login succeeds
:rtype: str | Below is the the instruction that describes the task:
### Input:
Log in to Zhihu from the terminal, following the prompts, without using cookies
:param bool need_captcha: whether a captcha must be entered; set this to True if login fails
:param bool use_getpass: whether to read the password in secure (hidden) mode, defaults to True;
if the password cannot be entered normally in some Windows IDEs, try setting this parameter to False
:return: the cookies string if login succeeds
:rtype: str
### Response:
def login_in_terminal(self, need_captcha=False, use_getpass=True):
"""不使用cookies,在终端中根据提示登陆知乎
:param bool need_captcha: 是否要求输入验证码,如果登录失败请设为 True
:param bool use_getpass: 是否使用安全模式输入密码,默认为 True,
如果在某些 Windows IDE 中无法正常输入密码,请把此参数设置为 False 试试
:return: 如果成功返回cookies字符串
:rtype: str
"""
print('====== zhihu login =====')
email = input('email: ')
if use_getpass:
password = getpass.getpass('password: ')
else:
password = input("password: ")
if need_captcha:
captcha_data = self.get_captcha()
with open('captcha.gif', 'wb') as f:
f.write(captcha_data)
print('please check captcha.gif for captcha')
captcha = input('captcha: ')
os.remove('captcha.gif')
else:
captcha = None
print('====== logging.... =====')
code, msg, cookies = self.login(email, password, captcha)
if code == 0:
print('login successfully')
else:
print('login failed, reason: {0}'.format(msg))
return cookies |
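A short usage sketch (assumption: `client` is an instance of the class defining login_in_terminal; writing the cookies to a local file is just one way to reuse them).

# Hypothetical client object exposing the method above.
cookies = client.login_in_terminal(need_captcha=False, use_getpass=True)
if cookies:
    with open('cookies.json', 'w') as f:
        f.write(cookies)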
def read_images(img_list, path='', n_threads=10, printable=True):
"""Returns all images in list by given path and name of each image file.
Parameters
-------------
img_list : list of str
The image file names.
path : str
The image folder path.
n_threads : int
The number of threads to read image.
printable : boolean
Whether to print information when reading images.
Returns
-------
list of numpy.array
The images.
"""
imgs = []
for idx in range(0, len(img_list), n_threads):
b_imgs_list = img_list[idx:idx + n_threads]
b_imgs = tl.prepro.threading_data(b_imgs_list, fn=read_image, path=path)
# tl.logging.info(b_imgs.shape)
imgs.extend(b_imgs)
if printable:
tl.logging.info('read %d from %s' % (len(imgs), path))
return imgs | Returns all images in list by given path and name of each image file.
Parameters
-------------
img_list : list of str
The image file names.
path : str
The image folder path.
n_threads : int
The number of threads to read image.
printable : boolean
Whether to print information when reading images.
Returns
-------
list of numpy.array
The images. | Below is the the instruction that describes the task:
### Input:
Returns all images in list by given path and name of each image file.
Parameters
-------------
img_list : list of str
The image file names.
path : str
The image folder path.
n_threads : int
The number of threads to read image.
printable : boolean
Whether to print information when reading images.
Returns
-------
list of numpy.array
The images.
### Response:
def read_images(img_list, path='', n_threads=10, printable=True):
"""Returns all images in list by given path and name of each image file.
Parameters
-------------
img_list : list of str
The image file names.
path : str
The image folder path.
n_threads : int
The number of threads to read image.
printable : boolean
Whether to print information when reading images.
Returns
-------
list of numpy.array
The images.
"""
imgs = []
for idx in range(0, len(img_list), n_threads):
b_imgs_list = img_list[idx:idx + n_threads]
b_imgs = tl.prepro.threading_data(b_imgs_list, fn=read_image, path=path)
# tl.logging.info(b_imgs.shape)
imgs.extend(b_imgs)
if printable:
tl.logging.info('read %d from %s' % (len(imgs), path))
return imgs |
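A usage sketch with made-up file names (assumes TensorLayer is importable as `tl` and the read_image/read_images helpers above are in scope).

img_list = ['img0001.png', 'img0002.png', 'img0003.png']  # illustrative names
imgs = read_images(img_list, path='data/images/', n_threads=2, printable=True)
print(len(imgs))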
def identical_blocks(self):
"""
:returns: A list of block matches which appear to be identical
"""
identical_blocks = []
for (block_a, block_b) in self._block_matches:
if self.blocks_probably_identical(block_a, block_b):
identical_blocks.append((block_a, block_b))
return identical_blocks | :returns: A list of block matches which appear to be identical | Below is the the instruction that describes the task:
### Input:
:returns: A list of block matches which appear to be identical
### Response:
def identical_blocks(self):
"""
:returns: A list of block matches which appear to be identical
"""
identical_blocks = []
for (block_a, block_b) in self._block_matches:
if self.blocks_probably_identical(block_a, block_b):
identical_blocks.append((block_a, block_b))
return identical_blocks |
def get_fobj(fname, mode='w+'):
"""Obtain a proper file object.
Parameters
----------
fname : string, file object, file descriptor
If a string or file descriptor, then we create a file object. If
*fname* is a file object, then we do nothing and ignore the specified
*mode* parameter.
mode : str
The mode of the file to be opened.
Returns
-------
fobj : file object
The file object.
close : bool
If *fname* was a string, then *close* will be *True* to signify that
the file object should be closed after writing to it. Otherwise,
*close* will be *False* signifying that the user, in essence,
created the file object already and that subsequent operations
should not close it.
"""
if is_string_like(fname):
fobj = open(fname, mode)
close = True
elif hasattr(fname, 'write'):
# fname is a file-like object, perhaps a StringIO (for example)
fobj = fname
close = False
else:
# assume it is a file descriptor
fobj = os.fdopen(fname, mode)
close = False
return fobj, close | Obtain a proper file object.
Parameters
----------
fname : string, file object, file descriptor
If a string or file descriptor, then we create a file object. If
*fname* is a file object, then we do nothing and ignore the specified
*mode* parameter.
mode : str
The mode of the file to be opened.
Returns
-------
fobj : file object
The file object.
close : bool
If *fname* was a string, then *close* will be *True* to signify that
the file object should be closed after writing to it. Otherwise,
*close* will be *False* signifying that the user, in essence,
created the file object already and that subsequent operations
should not close it. | Below is the the instruction that describes the task:
### Input:
Obtain a proper file object.
Parameters
----------
fname : string, file object, file descriptor
If a string or file descriptor, then we create a file object. If
*fname* is a file object, then we do nothing and ignore the specified
*mode* parameter.
mode : str
The mode of the file to be opened.
Returns
-------
fobj : file object
The file object.
close : bool
If *fname* was a string, then *close* will be *True* to signify that
the file object should be closed after writing to it. Otherwise,
*close* will be *False* signifying that the user, in essence,
created the file object already and that subsequent operations
should not close it.
### Response:
def get_fobj(fname, mode='w+'):
"""Obtain a proper file object.
Parameters
----------
fname : string, file object, file descriptor
If a string or file descriptor, then we create a file object. If
*fname* is a file object, then we do nothing and ignore the specified
*mode* parameter.
mode : str
The mode of the file to be opened.
Returns
-------
fobj : file object
The file object.
close : bool
If *fname* was a string, then *close* will be *True* to signify that
the file object should be closed after writing to it. Otherwise,
*close* will be *False* signifying that the user, in essence,
created the file object already and that subsequent operations
should not close it.
"""
if is_string_like(fname):
fobj = open(fname, mode)
close = True
elif hasattr(fname, 'write'):
# fname is a file-like object, perhaps a StringIO (for example)
fobj = fname
close = False
else:
# assume it is a file descriptor
fobj = os.fdopen(fname, mode)
close = False
return fobj, close |
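A small sketch of the two common branches (assumes get_fobj above is in scope; the paths are illustrative).

# 1) A filename: get_fobj opens it and signals that the caller should close it.
fobj, close = get_fobj('/tmp/example_output.txt', mode='w')
fobj.write('hello')
if close:
    fobj.close()

# 2) An existing file-like object: it is passed through unchanged and close is False.
existing = open('/tmp/other_output.txt', 'w')
fobj, close = get_fobj(existing)
fobj.write('world')
existing.close()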
def handle_message(self, msg):
"""Handle a message from the server.
Parameters
----------
msg : Message object
The Message to dispatch to the handler methods.
"""
# log messages received so that no one else has to
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug(
"received from {}: {}"
.format(self.bind_address_string, repr(str(msg))))
if msg.mtype == Message.INFORM:
return self.handle_inform(msg)
elif msg.mtype == Message.REPLY:
return self.handle_reply(msg)
elif msg.mtype == Message.REQUEST:
return self.handle_request(msg)
else:
self._logger.error("Unexpected message type from server ['%s']."
% (msg,)) | Handle a message from the server.
Parameters
----------
msg : Message object
The Message to dispatch to the handler methods. | Below is the the instruction that describes the task:
### Input:
Handle a message from the server.
Parameters
----------
msg : Message object
The Message to dispatch to the handler methods.
### Response:
def handle_message(self, msg):
"""Handle a message from the server.
Parameters
----------
msg : Message object
The Message to dispatch to the handler methods.
"""
# log messages received so that no one else has to
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug(
"received from {}: {}"
.format(self.bind_address_string, repr(str(msg))))
if msg.mtype == Message.INFORM:
return self.handle_inform(msg)
elif msg.mtype == Message.REPLY:
return self.handle_reply(msg)
elif msg.mtype == Message.REQUEST:
return self.handle_request(msg)
else:
self._logger.error("Unexpected message type from server ['%s']."
% (msg,)) |
def get_summary(url, spk=True):
''' simple function to retrieve the header of a BSP file and return SPK object'''
# connect to file at URL
bspurl = urllib2.urlopen(url)
# retrieve the "tip" of a file at URL
bsptip = bspurl.read(10**5) # first 100kB
# save data in fake file object (in-memory)
bspstr = StringIO(bsptip)
# load into DAF object
daf = DAF(bspstr)
# return either SPK or DAF object
if spk:
# make a SPK object
spk = SPK(daf)
# return representation
return spk
else:
# return representation
return daf | simple function to retrieve the header of a BSP file and return SPK object | Below is the the instruction that describes the task:
### Input:
simple function to retrieve the header of a BSP file and return SPK object
### Response:
def get_summary(url, spk=True):
''' simple function to retrieve the header of a BSP file and return SPK object'''
# connect to file at URL
bspurl = urllib2.urlopen(url)
# retrieve the "tip" of a file at URL
bsptip = bspurl.read(10**5) # first 100kB
# save data in fake file object (in-memory)
bspstr = StringIO(bsptip)
# load into DAF object
daf = DAF(bspstr)
# return either SPK or DAF object
if spk:
# make a SPK object
spk = SPK(daf)
# return representation
return spk
else:
# return representation
return daf |
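A usage sketch (the URL points at a publicly hosted NAIF planetary kernel and is only an example; the function downloads just the first ~100 kB of it).

url = 'https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de430.bsp'
spk = get_summary(url)              # SPK summary object built from the header
daf = get_summary(url, spk=False)   # or the lower-level DAF object
print(spk)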
def asset_path(cls, organization, asset):
"""Return a fully-qualified asset string."""
return google.api_core.path_template.expand(
"organizations/{organization}/assets/{asset}",
organization=organization,
asset=asset,
) | Return a fully-qualified asset string. | Below is the the instruction that describes the task:
### Input:
Return a fully-qualified asset string.
### Response:
def asset_path(cls, organization, asset):
"""Return a fully-qualified asset string."""
return google.api_core.path_template.expand(
"organizations/{organization}/assets/{asset}",
organization=organization,
asset=asset,
) |
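The classmethod above is a thin wrapper around google.api_core.path_template.expand; a direct sketch of the same expansion with example values:

from google.api_core import path_template

print(path_template.expand(
    'organizations/{organization}/assets/{asset}',
    organization='example-org', asset='example-asset'))
# -> organizations/example-org/assets/example-asset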
def Extra(self):
"""
Returns any `V`, `P`, `DOI` or `misc` values as a string. These are all the values not returned by [ID()](#metaknowledge.citation.Citation.ID); they are separated by `', '`.
# Returns
`str`
> A string containing the data not in the ID of the `Citation`.
"""
extraTags = ['V', 'P', 'DOI', 'misc']
retVal = ""
for tag in extraTags:
if getattr(self, tag):
retVal += getattr(self, tag) + ', '
if len(retVal) > 2:
return retVal[:-2]
else:
return retVal | Returns any `V`, `P`, `DOI` or `misc` values as a string. These are all the values not returned by [ID()](#metaknowledge.citation.Citation.ID); they are separated by `', '`.
# Returns
`str`
> A string containing the data not in the ID of the `Citation`. | Below is the the instruction that describes the task:
### Input:
Returns any `V`, `P`, `DOI` or `misc` values as a string. These are all the values not returned by [ID()](#metaknowledge.citation.Citation.ID); they are separated by `', '`.
# Returns
`str`
> A string containing the data not in the ID of the `Citation`.
### Response:
def Extra(self):
"""
Returns any `V`, `P`, `DOI` or `misc` values as a string. These are all the values not returned by [ID()](#metaknowledge.citation.Citation.ID); they are separated by `', '`.
# Returns
`str`
> A string containing the data not in the ID of the `Citation`.
"""
extraTags = ['V', 'P', 'DOI', 'misc']
retVal = ""
for tag in extraTags:
if getattr(self, tag):
retVal += getattr(self, tag) + ', '
if len(retVal) > 2:
return retVal[:-2]
else:
return retVal |
def merge_ddb_files(self, delete_source_ddbs=True, only_dfpt_tasks=True,
exclude_tasks=None, include_tasks=None):
"""
This method is called when all the q-points have been computed.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
Args:
delete_source_ddbs: True if input DDB should be removed once final DDB is created.
only_dfpt_tasks: False to merge all DDB files produced by the tasks of the work
Useful e.g. for finite stress corrections in which the stress in the
initial configuration should be merged in the final DDB.
exclude_tasks: List of tasks that should be excluded when merging the partial DDB files.
include_tasks: List of tasks that should be included when merging the partial DDB files.
Mutually exclusive with exclude_tasks.
Returns:
path to the output DDB file
"""
if exclude_tasks:
my_tasks = [task for task in self if task not in exclude_tasks]
elif include_tasks:
my_tasks = [task for task in self if task in include_tasks]
else:
my_tasks = [task for task in self]
if only_dfpt_tasks:
ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks \
if isinstance(task, DfptTask)]))
else:
ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks]))
self.history.info("Will call mrgddb to merge %s DDB files:" % len(ddb_files))
# DDB files are always produced so this should never happen!
if not ddb_files:
raise RuntimeError("Cannot find any DDB file to merge by the task of " % self)
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
if len(ddb_files) == 1:
# Avoid the merge. Just copy the DDB file to the outdir of the work.
shutil.copy(ddb_files[0], out_ddb)
else:
# Call mrgddb
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self[0].manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc,
delete_source_ddbs=delete_source_ddbs)
return out_ddb | This method is called when all the q-points have been computed.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
Args:
delete_source_ddbs: True if input DDB should be removed once final DDB is created.
only_dfpt_tasks: False to merge all DDB files produced by the tasks of the work
Useful e.g. for finite stress corrections in which the stress in the
initial configuration should be merged in the final DDB.
exclude_tasks: List of tasks that should be excluded when merging the partial DDB files.
include_tasks: List of tasks that should be included when merging the partial DDB files.
Mutually exclusive with exclude_tasks.
Returns:
path to the output DDB file | Below is the the instruction that describes the task:
### Input:
This method is called when all the q-points have been computed.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
Args:
delete_source_ddbs: True if input DDB should be removed once final DDB is created.
only_dfpt_tasks: False to merge all DDB files produced by the tasks of the work
Useful e.g. for finite stress corrections in which the stress in the
initial configuration should be merged in the final DDB.
exclude_tasks: List of tasks that should be excluded when merging the partial DDB files.
include_tasks: List of tasks that should be included when merging the partial DDB files.
Mutually exclusive with exclude_tasks.
Returns:
path to the output DDB file
### Response:
def merge_ddb_files(self, delete_source_ddbs=True, only_dfpt_tasks=True,
exclude_tasks=None, include_tasks=None):
"""
This method is called when all the q-points have been computed.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
Args:
delete_source_ddbs: True if input DDB should be removed once final DDB is created.
only_dfpt_tasks: False to merge all DDB files produced by the tasks of the work
Useful e.g. for finite stress corrections in which the stress in the
initial configuration should be merged in the final DDB.
exclude_tasks: List of tasks that should be excluded when merging the partial DDB files.
include_tasks: List of tasks that should be included when merging the partial DDB files.
Mutually exclusive with exclude_tasks.
Returns:
path to the output DDB file
"""
if exclude_tasks:
my_tasks = [task for task in self if task not in exclude_tasks]
elif include_tasks:
my_tasks = [task for task in self if task in include_tasks]
else:
my_tasks = [task for task in self]
if only_dfpt_tasks:
ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks \
if isinstance(task, DfptTask)]))
else:
ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks]))
self.history.info("Will call mrgddb to merge %s DDB files:" % len(ddb_files))
# DDB files are always produced so this should never happen!
if not ddb_files:
raise RuntimeError("Cannot find any DDB file to merge by the task of " % self)
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
if len(ddb_files) == 1:
# Avoid the merge. Just copy the DDB file to the outdir of the work.
shutil.copy(ddb_files[0], out_ddb)
else:
# Call mrgddb
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self[0].manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc,
delete_source_ddbs=delete_source_ddbs)
return out_ddb |
def write(self, basename='/tmp/sitemap.xml'):
"""Write one or a set of sitemap files to disk.
resources is a ResourceContainer that may be an ResourceList or
a ChangeList. This may be a generator so data is read as needed
and length is determined at the end.
basename is used as the name of the single sitemap file or the
sitemapindex for a set of sitemap files.
Uses self.max_sitemap_entries to determine whether the resource_list can
be written as one sitemap. If there are more entries and
self.allow_multifile is set True then a set of sitemap files,
with an sitemapindex, will be written.
"""
# Access resources through iterator only
resources_iter = iter(self.resources)
(chunk, nxt) = self.get_resources_chunk(resources_iter)
s = self.new_sitemap()
if (nxt is not None):
# Have more than self.max_sitemap_entries => sitemapindex
if (not self.allow_multifile):
raise ListBaseIndexError(
"Too many entries for a single sitemap but multifile disabled")
# Work out URI of sitemapindex so that we can link up to
# it from the individual sitemap files
try:
index_uri = self.mapper.dst_to_src(basename)
except MapperError as e:
raise ListBaseIndexError(
"Cannot map sitemapindex filename to URI (%s)" %
str(e))
# Use iterator over all resources and count off sets of
# max_sitemap_entries to go into each sitemap, store the
# names of the sitemaps as we go. Copy md from self into
# the index and use this for all chunks also
index = ListBase(md=self.md.copy(), ln=list(self.ln))
index.capability_name = self.capability_name
index.default_capability()
while (len(chunk) > 0):
file = self.part_name(basename, len(index))
# Check that we can map the filename of this sitemap into
# URI space for the sitemapindex
try:
uri = self.mapper.dst_to_src(file)
except MapperError as e:
raise ListBaseIndexError(
"Cannot map sitemap filename to URI (%s)" % str(e))
self.logger.info("Writing sitemap %s..." % (file))
f = open(file, 'w')
chunk.index = index_uri
chunk.md = index.md
s.resources_as_xml(chunk, fh=f)
f.close()
# Record information about this sitemap for index
r = Resource(uri=uri,
timestamp=os.stat(file).st_mtime,
md5=Hashes(['md5'], file).md5)
index.add(r)
# Get next chunk
(chunk, nxt) = self.get_resources_chunk(resources_iter, nxt)
self.logger.info("Wrote %d sitemaps" % (len(index)))
f = open(basename, 'w')
self.logger.info("Writing sitemapindex %s..." % (basename))
s.resources_as_xml(index, sitemapindex=True, fh=f)
f.close()
self.logger.info("Wrote sitemapindex %s" % (basename))
elif self.sitemapindex:
f = open(basename, 'w')
self.logger.info("Writing sitemapindex %s..." % (basename))
s.resources_as_xml(chunk, sitemapindex=True, fh=f)
f.close()
self.logger.info("Wrote sitemapindex %s" % (basename))
else:
f = open(basename, 'w')
self.logger.info("Writing sitemap %s..." % (basename))
s.resources_as_xml(chunk, fh=f)
f.close()
self.logger.info("Wrote sitemap %s" % (basename)) | Write one or a set of sitemap files to disk.
resources is a ResourceContainer that may be an ResourceList or
a ChangeList. This may be a generator so data is read as needed
and length is determined at the end.
basename is used as the name of the single sitemap file or the
sitemapindex for a set of sitemap files.
Uses self.max_sitemap_entries to determine whether the resource_list can
be written as one sitemap. If there are more entries and
self.allow_multifile is set True then a set of sitemap files,
with an sitemapindex, will be written. | Below is the the instruction that describes the task:
### Input:
Write one or a set of sitemap files to disk.
resources is a ResourceContainer that may be an ResourceList or
a ChangeList. This may be a generator so data is read as needed
and length is determined at the end.
basename is used as the name of the single sitemap file or the
sitemapindex for a set of sitemap files.
Uses self.max_sitemap_entries to determine whether the resource_list can
be written as one sitemap. If there are more entries and
self.allow_multifile is set True then a set of sitemap files,
with an sitemapindex, will be written.
### Response:
def write(self, basename='/tmp/sitemap.xml'):
"""Write one or a set of sitemap files to disk.
resources is a ResourceContainer that may be an ResourceList or
a ChangeList. This may be a generator so data is read as needed
and length is determined at the end.
basename is used as the name of the single sitemap file or the
sitemapindex for a set of sitemap files.
Uses self.max_sitemap_entries to determine whether the resource_list can
be written as one sitemap. If there are more entries and
self.allow_multifile is set True then a set of sitemap files,
with an sitemapindex, will be written.
"""
# Access resources through iterator only
resources_iter = iter(self.resources)
(chunk, nxt) = self.get_resources_chunk(resources_iter)
s = self.new_sitemap()
if (nxt is not None):
# Have more than self.max_sitemap_entries => sitemapindex
if (not self.allow_multifile):
raise ListBaseIndexError(
"Too many entries for a single sitemap but multifile disabled")
# Work out URI of sitemapindex so that we can link up to
# it from the individual sitemap files
try:
index_uri = self.mapper.dst_to_src(basename)
except MapperError as e:
raise ListBaseIndexError(
"Cannot map sitemapindex filename to URI (%s)" %
str(e))
# Use iterator over all resources and count off sets of
# max_sitemap_entries to go into each sitemap, store the
# names of the sitemaps as we go. Copy md from self into
# the index and use this for all chunks also
index = ListBase(md=self.md.copy(), ln=list(self.ln))
index.capability_name = self.capability_name
index.default_capability()
while (len(chunk) > 0):
file = self.part_name(basename, len(index))
# Check that we can map the filename of this sitemap into
# URI space for the sitemapindex
try:
uri = self.mapper.dst_to_src(file)
except MapperError as e:
raise ListBaseIndexError(
"Cannot map sitemap filename to URI (%s)" % str(e))
self.logger.info("Writing sitemap %s..." % (file))
f = open(file, 'w')
chunk.index = index_uri
chunk.md = index.md
s.resources_as_xml(chunk, fh=f)
f.close()
# Record information about this sitemap for index
r = Resource(uri=uri,
timestamp=os.stat(file).st_mtime,
md5=Hashes(['md5'], file).md5)
index.add(r)
# Get next chunk
(chunk, nxt) = self.get_resources_chunk(resources_iter, nxt)
self.logger.info("Wrote %d sitemaps" % (len(index)))
f = open(basename, 'w')
self.logger.info("Writing sitemapindex %s..." % (basename))
s.resources_as_xml(index, sitemapindex=True, fh=f)
f.close()
self.logger.info("Wrote sitemapindex %s" % (basename))
elif self.sitemapindex:
f = open(basename, 'w')
self.logger.info("Writing sitemapindex %s..." % (basename))
s.resources_as_xml(chunk, sitemapindex=True, fh=f)
f.close()
self.logger.info("Wrote sitemapindex %s" % (basename))
else:
f = open(basename, 'w')
self.logger.info("Writing sitemap %s..." % (basename))
s.resources_as_xml(chunk, fh=f)
f.close()
self.logger.info("Wrote sitemap %s" % (basename)) |
def get_function_name(s):
"""
Get the function name from a C-style function declaration string.
:param str s: A C-style function declaration string.
:return: The function name.
:rtype: str
"""
s = s.strip()
if s.startswith("__attribute__"):
# Remove "__attribute__ ((foobar))"
if "))" not in s:
raise ValueError("__attribute__ is present, but I cannot find double-right parenthesis in the function "
"declaration string.")
s = s[s.index("))") + 2 : ].strip()
if '(' not in s:
raise ValueError("Cannot find any left parenthesis in the function declaration string.")
func_name = s[:s.index('(')].strip()
for i, ch in enumerate(reversed(func_name)):
if ch == ' ':
pos = len(func_name) - 1 - i
break
else:
raise ValueError('Cannot find any space in the function declaration string.')
func_name = func_name[pos + 1 : ]
return func_name | Get the function name from a C-style function declaration string.
:param str s: A C-style function declaration string.
:return: The function name.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Get the function name from a C-style function declaration string.
:param str s: A C-style function declaration string.
:return: The function name.
:rtype: str
### Response:
def get_function_name(s):
"""
Get the function name from a C-style function declaration string.
:param str s: A C-style function declaration string.
:return: The function name.
:rtype: str
"""
s = s.strip()
if s.startswith("__attribute__"):
# Remove "__attribute__ ((foobar))"
if "))" not in s:
raise ValueError("__attribute__ is present, but I cannot find double-right parenthesis in the function "
"declaration string.")
s = s[s.index("))") + 2 : ].strip()
if '(' not in s:
raise ValueError("Cannot find any left parenthesis in the function declaration string.")
func_name = s[:s.index('(')].strip()
for i, ch in enumerate(reversed(func_name)):
if ch == ' ':
pos = len(func_name) - 1 - i
break
else:
raise ValueError('Cannot find any space in the function declaration string.')
func_name = func_name[pos + 1 : ]
return func_name |
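Two quick examples (assumes get_function_name above is in scope; the declarations are illustrative):

print(get_function_name('int main(int argc, char **argv)'))
# -> main
print(get_function_name('__attribute__ ((noreturn)) void die(const char *msg)'))
# -> die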
def cancel_base_units(units, to_remove):
"""Given a list of units, remove a specified number of each base unit.
Arguments:
units: an iterable of units
to_remove: a mapping of base_unit => count, such as that returned from
count_base_units
Returns a 2-tuple of (factor, remaining_units).
"""
# Copy the dict since we're about to mutate it
to_remove = to_remove.copy()
remaining_units = []
total_factor = Fraction(1)
for unit in units:
factor, base_unit = get_conversion_factor(unit)
if not to_remove.get(base_unit, 0):
remaining_units.append(unit)
continue
total_factor *= factor
to_remove[base_unit] -= 1
return total_factor, remaining_units | Given a list of units, remove a specified number of each base unit.
Arguments:
units: an iterable of units
to_remove: a mapping of base_unit => count, such as that returned from
count_base_units
Returns a 2-tuple of (factor, remaining_units). | Below is the the instruction that describes the task:
### Input:
Given a list of units, remove a specified number of each base unit.
Arguments:
units: an iterable of units
to_remove: a mapping of base_unit => count, such as that returned from
count_base_units
Returns a 2-tuple of (factor, remaining_units).
### Response:
def cancel_base_units(units, to_remove):
"""Given a list of units, remove a specified number of each base unit.
Arguments:
units: an iterable of units
to_remove: a mapping of base_unit => count, such as that returned from
count_base_units
Returns a 2-tuple of (factor, remaining_units).
"""
# Copy the dict since we're about to mutate it
to_remove = to_remove.copy()
remaining_units = []
total_factor = Fraction(1)
for unit in units:
factor, base_unit = get_conversion_factor(unit)
if not to_remove.get(base_unit, 0):
remaining_units.append(unit)
continue
total_factor *= factor
to_remove[base_unit] -= 1
return total_factor, remaining_units |
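A standalone toy illustration of the cancellation idea (the conversion table and helper below are illustrative stand-ins, not the library's own get_conversion_factor):

from fractions import Fraction

CONVERSIONS = {'kg': (Fraction(1000), 'g'), 'g': (Fraction(1), 'g'), 's': (Fraction(1), 's')}

def toy_cancel(units, to_remove):
    to_remove = dict(to_remove)
    factor, remaining = Fraction(1), []
    for unit in units:
        f, base = CONVERSIONS[unit]
        if to_remove.get(base, 0):
            factor *= f
            to_remove[base] -= 1
        else:
            remaining.append(unit)
    return factor, remaining

print(toy_cancel(['kg', 's'], {'g': 1}))  # (Fraction(1000, 1), ['s'])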
def Element(self, elem, **params):
"""Ensure that the input element is immutable by the transformation. Returns a single element."""
res = self.__call__(deepcopy(elem), **params)
if len(res) > 0:
return res[0]
else:
return None | Ensure that the input element is immutable by the transformation. Returns a single element. | Below is the the instruction that describes the task:
### Input:
Ensure that the input element is immutable by the transformation. Returns a single element.
### Response:
def Element(self, elem, **params):
"""Ensure that the input element is immutable by the transformation. Returns a single element."""
res = self.__call__(deepcopy(elem), **params)
if len(res) > 0:
return res[0]
else:
return None |
def re_install_net_ctrl_paths(self, vrf_table):
"""Re-installs paths from NC with current BGP policy.
Iterates over known paths from NC installed in `vrf4_table` and
adds new path with path attributes as per current VRF configuration.
"""
assert vrf_table
for dest in vrf_table.values():
for path in dest.known_path_list:
if path.source is None:
vrf_table.insert_vrf_path(
nlri=path.nlri,
next_hop=path.nexthop,
gen_lbl=True
)
LOG.debug('Re-installed NC paths with current policy for table %s.',
vrf_table) | Re-installs paths from NC with current BGP policy.
Iterates over known paths from NC installed in `vrf_table` and
adds new path with path attributes as per current VRF configuration. | Below is the the instruction that describes the task:
### Input:
Re-installs paths from NC with current BGP policy.
Iterates over known paths from NC installed in `vrf_table` and
adds new path with path attributes as per current VRF configuration.
### Response:
def re_install_net_ctrl_paths(self, vrf_table):
"""Re-installs paths from NC with current BGP policy.
Iterates over known paths from NC installed in `vrf_table` and
adds new path with path attributes as per current VRF configuration.
"""
assert vrf_table
for dest in vrf_table.values():
for path in dest.known_path_list:
if path.source is None:
vrf_table.insert_vrf_path(
nlri=path.nlri,
next_hop=path.nexthop,
gen_lbl=True
)
LOG.debug('Re-installed NC paths with current policy for table %s.',
vrf_table) |
def setComponentByName(self, name, value=noValue,
verifyConstraints=True,
matchTags=True,
matchConstraints=True):
"""Assign |ASN.1| type component by name.
Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`).
Parameters
----------
name: :class:`str`
|ASN.1| type component name
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component.
verifyConstraints: :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
"""
if self._componentTypeLen:
idx = self.componentType.getPositionByName(name)
else:
try:
idx = self._dynamicNames.getPositionByName(name)
except KeyError:
raise error.PyAsn1Error('Name %s not found' % (name,))
return self.setComponentByPosition(
idx, value, verifyConstraints, matchTags, matchConstraints
) | Assign |ASN.1| type component by name.
Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`).
Parameters
----------
name: :class:`str`
|ASN.1| type component name
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component.
verifyConstraints: :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self | Below is the the instruction that describes the task:
### Input:
Assign |ASN.1| type component by name.
Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`).
Parameters
----------
name: :class:`str`
|ASN.1| type component name
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component.
verifyConstraints: :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
### Response:
def setComponentByName(self, name, value=noValue,
verifyConstraints=True,
matchTags=True,
matchConstraints=True):
"""Assign |ASN.1| type component by name.
Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`).
Parameters
----------
name: :class:`str`
|ASN.1| type component name
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component.
verifyConstraints: :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
"""
if self._componentTypeLen:
idx = self.componentType.getPositionByName(name)
else:
try:
idx = self._dynamicNames.getPositionByName(name)
except KeyError:
raise error.PyAsn1Error('Name %s not found' % (name,))
return self.setComponentByPosition(
idx, value, verifyConstraints, matchTags, matchConstraints
) |
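A small pyasn1 example (the SEQUENCE definition is illustrative):

from pyasn1.type import univ, char, namedtype

class Record(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('id', univ.Integer()),
        namedtype.NamedType('label', char.UTF8String()),
    )

rec = Record()
rec.setComponentByName('id', 42)
rec.setComponentByName('label', 'example')
print(rec.prettyPrint())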
def dataset_suggest(q=None, type=None, keyword=None, owningOrg=None,
publishingOrg=None, hostingOrg=None, publishingCountry=None, decade=None,
limit = 100, offset = None, **kwargs):
'''
Search that returns up to 20 matching datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
registry.dataset_suggest(q="Amazon", type="OCCURRENCE")
# Suggest datasets tagged with keyword "france".
registry.dataset_suggest(keyword="france")
# Suggest datasets owned by the organization with key
# "07f617d0-c688-11d8-bf62-b8a03c50a862" (UK NBN).
registry.dataset_suggest(owningOrg="07f617d0-c688-11d8-bf62-b8a03c50a862")
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_suggest(q="amsterdam")
# Limited search
registry.dataset_suggest(type="OCCURRENCE", limit=2)
registry.dataset_suggest(type="OCCURRENCE", limit=2, offset=10)
# Return just descriptions
registry.dataset_suggest(type="OCCURRENCE", limit = 5, description=True)
# Search by decade
registry.dataset_suggest(decade=1980, limit = 30)
'''
url = gbif_baseurl + 'dataset/suggest'
args = {'q': q, 'type': type, 'keyword': keyword,
'publishingOrg': publishingOrg, 'hostingOrg': hostingOrg,
'owningOrg': owningOrg, 'decade': decade,
'publishingCountry': publishingCountry,
'limit': limit, 'offset': offset}
out = gbif_GET(url, args, **kwargs)
return out | Search that returns up to 20 matching datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
registry.dataset_suggest(q="Amazon", type="OCCURRENCE")
# Suggest datasets tagged with keyword "france".
registry.dataset_suggest(keyword="france")
# Suggest datasets owned by the organization with key
# "07f617d0-c688-11d8-bf62-b8a03c50a862" (UK NBN).
registry.dataset_suggest(owningOrg="07f617d0-c688-11d8-bf62-b8a03c50a862")
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_suggest(q="amsterdam")
# Limited search
registry.dataset_suggest(type="OCCURRENCE", limit=2)
registry.dataset_suggest(type="OCCURRENCE", limit=2, offset=10)
# Return just descriptions
registry.dataset_suggest(type="OCCURRENCE", limit = 5, description=True)
# Search by decade
registry.dataset_suggest(decade=1980, limit = 30) | Below is the the instruction that describes the task:
### Input:
Search that returns up to 20 matching datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
registry.dataset_suggest(q="Amazon", type="OCCURRENCE")
# Suggest datasets tagged with keyword "france".
registry.dataset_suggest(keyword="france")
# Suggest datasets owned by the organization with key
# "07f617d0-c688-11d8-bf62-b8a03c50a862" (UK NBN).
registry.dataset_suggest(owningOrg="07f617d0-c688-11d8-bf62-b8a03c50a862")
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_suggest(q="amsterdam")
# Limited search
registry.dataset_suggest(type="OCCURRENCE", limit=2)
registry.dataset_suggest(type="OCCURRENCE", limit=2, offset=10)
# Return just descriptions
registry.dataset_suggest(type="OCCURRENCE", limit = 5, description=True)
# Search by decade
registry.dataset_suggest(decade=1980, limit = 30)
### Response:
def dataset_suggest(q=None, type=None, keyword=None, owningOrg=None,
publishingOrg=None, hostingOrg=None, publishingCountry=None, decade=None,
limit = 100, offset = None, **kwargs):
'''
Search that returns up to 20 matching datasets. Results are ordered by relevance.
:param q: [str] Query term(s) for full text search. The value for this parameter can be a simple word or a phrase. Wildcards can be added to the simple word parameters only, e.g. ``q=*puma*``
:param type: [str] Type of dataset, options include OCCURRENCE, etc.
:param keyword: [str] Keyword to search by. Datasets can be tagged by keywords, which you can search on. The search is done on the merged collection of tags, the dataset keywordCollections and temporalCoverages. SEEMS TO NOT BE WORKING ANYMORE AS OF 2016-09-02.
:param owningOrg: [str] Owning organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingOrg: [str] Publishing organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param hostingOrg: [str] Hosting organization. A uuid string. See :func:`~pygbif.registry.organizations`
:param publishingCountry: [str] Publishing country.
:param decade: [str] Decade, e.g., 1980. Filters datasets by their temporal coverage broken down to decades. Decades are given as a full year, e.g. 1880, 1960, 2000, etc, and will return datasets wholly contained in the decade as well as those that cover the entire decade or more. Facet by decade to get the break down, e.g. ``/search?facet=DECADE&facet_only=true`` (see example below)
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
:return: A dictionary
References: http://www.gbif.org/developer/registry#datasetSearch
Usage::
from pygbif import registry
registry.dataset_suggest(q="Amazon", type="OCCURRENCE")
# Suggest datasets tagged with keyword "france".
registry.dataset_suggest(keyword="france")
# Suggest datasets owned by the organization with key
# "07f617d0-c688-11d8-bf62-b8a03c50a862" (UK NBN).
registry.dataset_suggest(owningOrg="07f617d0-c688-11d8-bf62-b8a03c50a862")
# Fulltext search for all datasets having the word "amsterdam" somewhere in
# its metadata (title, description, etc).
registry.dataset_suggest(q="amsterdam")
# Limited search
registry.dataset_suggest(type="OCCURRENCE", limit=2)
registry.dataset_suggest(type="OCCURRENCE", limit=2, offset=10)
# Return just descriptions
registry.dataset_suggest(type="OCCURRENCE", limit = 5, description=True)
# Search by decade
registry.dataset_suggest(decade=1980, limit = 30)
'''
url = gbif_baseurl + 'dataset/suggest'
args = {'q': q, 'type': type, 'keyword': keyword,
'publishingOrg': publishingOrg, 'hostingOrg': hostingOrg,
'owningOrg': owningOrg, 'decade': decade,
'publishingCountry': publishingCountry,
'limit': limit, 'offset': offset}
out = gbif_GET(url, args, **kwargs)
return out |
def field2type_and_format(self, field):
"""Return the dictionary of OpenAPI type and format based on the field
type
:param Field field: A marshmallow field.
:rtype: dict
"""
# If this type isn't directly in the field mapping then check the
# hierarchy until we find something that does.
for field_class in type(field).__mro__:
if field_class in self.field_mapping:
type_, fmt = self.field_mapping[field_class]
break
else:
warnings.warn(
"Field of type {} does not inherit from marshmallow.Field.".format(
type(field)
),
UserWarning,
)
type_, fmt = "string", None
ret = {"type": type_}
if fmt:
ret["format"] = fmt
return ret | Return the dictionary of OpenAPI type and format based on the field
type
:param Field field: A marshmallow field.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Return the dictionary of OpenAPI type and format based on the field
type
:param Field field: A marshmallow field.
:rtype: dict
### Response:
def field2type_and_format(self, field):
"""Return the dictionary of OpenAPI type and format based on the field
type
:param Field field: A marshmallow field.
:rtype: dict
"""
# If this type isn't directly in the field mapping then check the
# hierarchy until we find something that does.
for field_class in type(field).__mro__:
if field_class in self.field_mapping:
type_, fmt = self.field_mapping[field_class]
break
else:
warnings.warn(
"Field of type {} does not inherit from marshmallow.Field.".format(
type(field)
),
UserWarning,
)
type_, fmt = "string", None
ret = {"type": type_}
if fmt:
ret["format"] = fmt
return ret |
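A standalone sketch of the same MRO walk (the mapping below is a toy; the real converter's field_mapping is larger):

from marshmallow import fields

field_mapping = {fields.Integer: ('integer', 'int32'), fields.String: ('string', None)}

class SlugField(fields.String):
    pass

for field_class in type(SlugField()).__mro__:
    if field_class in field_mapping:
        print(field_mapping[field_class])  # ('string', None), found via the parent class
        break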
def list_privileges(self, principal_name, principal_type, hiveObject):
"""
Parameters:
- principal_name
- principal_type
- hiveObject
"""
self.send_list_privileges(principal_name, principal_type, hiveObject)
return self.recv_list_privileges() | Parameters:
- principal_name
- principal_type
- hiveObject | Below is the the instruction that describes the task:
### Input:
Parameters:
- principal_name
- principal_type
- hiveObject
### Response:
def list_privileges(self, principal_name, principal_type, hiveObject):
"""
Parameters:
- principal_name
- principal_type
- hiveObject
"""
self.send_list_privileges(principal_name, principal_type, hiveObject)
return self.recv_list_privileges() |
def remove_directory(directory, show_warnings=True):
"""Deletes a directory and its contents.
Returns a list of errors in form (function, path, excinfo)."""
errors = []
def onerror(function, path, excinfo):
if show_warnings:
            print('Cannot delete %s: %s' % (os.path.relpath(directory), excinfo[1]))
errors.append((function, path, excinfo))
if os.path.exists(directory):
if not os.path.isdir(directory):
raise NotADirectoryError(directory)
shutil.rmtree(directory, onerror=onerror)
return errors | Deletes a directory and its contents.
Returns a list of errors in form (function, path, excinfo). | Below is the the instruction that describes the task:
### Input:
Deletes a directory and its contents.
Returns a list of errors in form (function, path, excinfo).
### Response:
def remove_directory(directory, show_warnings=True):
"""Deletes a directory and its contents.
Returns a list of errors in form (function, path, excinfo)."""
errors = []
def onerror(function, path, excinfo):
if show_warnings:
            print('Cannot delete %s: %s' % (os.path.relpath(directory), excinfo[1]))
errors.append((function, path, excinfo))
if os.path.exists(directory):
if not os.path.isdir(directory):
raise NotADirectoryError(directory)
shutil.rmtree(directory, onerror=onerror)
return errors |
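A quick self-contained check of the helper above (a sketch; it assumes the Python 3 variant with the print() call and that remove_directory is importable from wherever it lives):

import os
import tempfile

tmp = tempfile.mkdtemp()
open(os.path.join(tmp, "example.txt"), "w").close()
errors = remove_directory(tmp)
print(errors)                # [] when deletion succeeded
print(os.path.exists(tmp))   # False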
def serialize(obj, **options):
'''
Serialize Python data to a Python string representation (via pprint.format)
:param obj: the data structure to serialize
:param options: options given to pprint.format
'''
    # Round-trip this through JSON to avoid OrderedDict types.
    # There's probably a more performant way to do this...
    # TODO: remove the JSON round-trip once all datasets use
    # serializers
return pprint.pformat(
salt.utils.json.loads(
salt.utils.json.dumps(obj, _json_module=_json),
_json_module=_json
),
**options
) | Serialize Python data to a Python string representation (via pprint.format)
:param obj: the data structure to serialize
:param options: options given to pprint.format | Below is the the instruction that describes the task:
### Input:
Serialize Python data to a Python string representation (via pprint.format)
:param obj: the data structure to serialize
:param options: options given to pprint.format
### Response:
def serialize(obj, **options):
'''
Serialize Python data to a Python string representation (via pprint.format)
:param obj: the data structure to serialize
:param options: options given to pprint.format
'''
    # Round-trip this through JSON to avoid OrderedDict types.
    # There's probably a more performant way to do this...
    # TODO: remove the JSON round-trip once all datasets use
    # serializers
return pprint.pformat(
salt.utils.json.loads(
salt.utils.json.dumps(obj, _json_module=_json),
_json_module=_json
),
**options
) |
def gte(self, key, value, includeMissing=False):
'''Return entries where the key's value is greater or equal (>=).
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).gte("age", 19).returnString()
[
{age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]},
{age: 19, income: 29000, name: 'Bill', wigs: None }
]
.. versionadded:: 0.1.1
:param key:
The dictionary key (or cascading list of keys) that should be the
basis of comparison.
:param value:
The value to compare with.
:param includeMissing:
Defaults to False. If True, then entries missing the key are also
included.
:returns: self
'''
(self.table, self.index_track) = internal.select(self.table, self.index_track, key, self.GREATERorEQUAL, value, includeMissing)
return self | Return entries where the key's value is greater or equal (>=).
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).gte("age", 19).returnString()
[
{age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]},
{age: 19, income: 29000, name: 'Bill', wigs: None }
]
.. versionadded:: 0.1.1
:param key:
The dictionary key (or cascading list of keys) that should be the
basis of comparison.
:param value:
The value to compare with.
:param includeMissing:
Defaults to False. If True, then entries missing the key are also
included.
:returns: self | Below is the the instruction that describes the task:
### Input:
Return entries where the key's value is greater or equal (>=).
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).gte("age", 19).returnString()
[
{age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]},
{age: 19, income: 29000, name: 'Bill', wigs: None }
]
.. versionadded:: 0.1.1
:param key:
The dictionary key (or cascading list of keys) that should be the
basis of comparison.
:param value:
The value to compare with.
:param includeMissing:
Defaults to False. If True, then entries missing the key are also
included.
:returns: self
### Response:
def gte(self, key, value, includeMissing=False):
'''Return entries where the key's value is greater or equal (>=).
Example of use:
>>> test = [
... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 },
... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]},
... {"name": "Joe", "age": 20, "income": 15000, "wigs": [1, 2, 3]},
... {"name": "Bill", "age": 19, "income": 29000 },
... ]
>>> print PLOD(test).gte("age", 19).returnString()
[
{age: 20, income: 15000, name: 'Joe' , wigs: [1, 2, 3]},
{age: 19, income: 29000, name: 'Bill', wigs: None }
]
.. versionadded:: 0.1.1
:param key:
The dictionary key (or cascading list of keys) that should be the
basis of comparison.
:param value:
The value to compare with.
:param includeMissing:
Defaults to False. If True, then entries missing the key are also
included.
:returns: self
'''
(self.table, self.index_track) = internal.select(self.table, self.index_track, key, self.GREATERorEQUAL, value, includeMissing)
return self |
def sync(self):
"""基于账户/密码去sync数据库
"""
if self.wechat_id is not None:
res = self.client.find_one({'wechat_id': self.wechat_id})
else:
res = self.client.find_one(
{
'username': self.username,
'password': self.password
}
)
if res is None:
if self.client.find_one({'username': self.username}) is None:
self.client.insert_one(self.message)
return self
else:
                raise RuntimeError('Username already exists but the password does not match')
else:
self.reload(res)
            return self | Sync with the database based on username/password
### Input:
Sync with the database based on username/password
### Response:
def sync(self):
"""基于账户/密码去sync数据库
"""
if self.wechat_id is not None:
res = self.client.find_one({'wechat_id': self.wechat_id})
else:
res = self.client.find_one(
{
'username': self.username,
'password': self.password
}
)
if res is None:
if self.client.find_one({'username': self.username}) is None:
self.client.insert_one(self.message)
return self
else:
                raise RuntimeError('Username already exists but the password does not match')
else:
self.reload(res)
return self |
def email(self, name, to, from_addr, subject, body, header, owner=None, **kwargs):
"""
Create the Email TI object.
Args:
owner:
to:
from_addr:
name:
subject:
header:
body:
**kwargs:
Return:
"""
return Email(self.tcex, name, to, from_addr, subject, body, header, owner=owner, **kwargs) | Create the Email TI object.
Args:
owner:
to:
from_addr:
name:
subject:
header:
body:
**kwargs:
Return: | Below is the the instruction that describes the task:
### Input:
Create the Email TI object.
Args:
owner:
to:
from_addr:
name:
subject:
header:
body:
**kwargs:
Return:
### Response:
def email(self, name, to, from_addr, subject, body, header, owner=None, **kwargs):
"""
Create the Email TI object.
Args:
owner:
to:
from_addr:
name:
subject:
header:
body:
**kwargs:
Return:
"""
return Email(self.tcex, name, to, from_addr, subject, body, header, owner=owner, **kwargs) |
def get_client(self, initial_timeout=0.1, next_timeout=30):
"""
Wait until a client instance is available
:param float initial_timeout:
how long to wait initially for an existing client to complete
:param float next_timeout:
if the pool could not obtain a client during the initial timeout,
and we have allocated the maximum available number of clients, wait
this long until we can retrieve another one
:return: A connection object
"""
try:
return self._test_client(self._q.get(True, initial_timeout))
except Empty:
try:
self._lock.acquire()
if self._clients_in_use >= self._max_clients:
raise _ClientUnavailableError("Too many clients in use")
return self._test_client(self._make_client())
except NetworkError:
if not self._tolerate_error:
raise
except _ClientUnavailableError as e:
try:
return self._test_client(self._q.get(True, next_timeout))
except Empty:
raise e
finally:
self._lock.release() | Wait until a client instance is available
:param float initial_timeout:
how long to wait initially for an existing client to complete
:param float next_timeout:
if the pool could not obtain a client during the initial timeout,
and we have allocated the maximum available number of clients, wait
this long until we can retrieve another one
:return: A connection object | Below is the the instruction that describes the task:
### Input:
Wait until a client instance is available
:param float initial_timeout:
how long to wait initially for an existing client to complete
:param float next_timeout:
if the pool could not obtain a client during the initial timeout,
and we have allocated the maximum available number of clients, wait
this long until we can retrieve another one
:return: A connection object
### Response:
def get_client(self, initial_timeout=0.1, next_timeout=30):
"""
Wait until a client instance is available
:param float initial_timeout:
how long to wait initially for an existing client to complete
:param float next_timeout:
if the pool could not obtain a client during the initial timeout,
and we have allocated the maximum available number of clients, wait
this long until we can retrieve another one
:return: A connection object
"""
try:
return self._test_client(self._q.get(True, initial_timeout))
except Empty:
try:
self._lock.acquire()
if self._clients_in_use >= self._max_clients:
raise _ClientUnavailableError("Too many clients in use")
return self._test_client(self._make_client())
except NetworkError:
if not self._tolerate_error:
raise
except _ClientUnavailableError as e:
try:
return self._test_client(self._q.get(True, next_timeout))
except Empty:
raise e
finally:
self._lock.release() |
def _output_to_file(self):
""" Save to filepath specified on
init. (Will throw an error if
the document is already open).
"""
f = open(self.filepath, 'wb')
if not f:
raise Exception('Unable to create output file: ', self.filepath)
f.write(self.session.buffer)
f.close() | Save to filepath specified on
init. (Will throw an error if
the document is already open). | Below is the the instruction that describes the task:
### Input:
Save to filepath specified on
init. (Will throw an error if
the document is already open).
### Response:
def _output_to_file(self):
""" Save to filepath specified on
init. (Will throw an error if
the document is already open).
"""
f = open(self.filepath, 'wb')
if not f:
raise Exception('Unable to create output file: ', self.filepath)
f.write(self.session.buffer)
f.close() |
def write_info (self, url_data):
"""Write url_data.info."""
self.write(self.part("info") + self.spaces("info"))
self.writeln(self.wrap(url_data.info, 65), color=self.colorinfo) | Write url_data.info. | Below is the the instruction that describes the task:
### Input:
Write url_data.info.
### Response:
def write_info (self, url_data):
"""Write url_data.info."""
self.write(self.part("info") + self.spaces("info"))
self.writeln(self.wrap(url_data.info, 65), color=self.colorinfo) |
def get_components(self):
""" Returns all the applications from the store """
components = []
for app_id in self.components:
components.append(self.components[app_id])
return components | Returns all the applications from the store | Below is the the instruction that describes the task:
### Input:
Returns all the applications from the store
### Response:
def get_components(self):
""" Returns all the applications from the store """
components = []
for app_id in self.components:
components.append(self.components[app_id])
return components |
def mle(x1, x2, x1err=[], x2err=[], cerr=[], s_int=True,
po=(1,0,0.1), verbose=False, logify=True, full_output=False):
"""
Maximum Likelihood Estimation of best-fit parameters
Parameters
----------
x1, x2 : float arrays
the independent and dependent variables.
x1err, x2err : float arrays (optional)
measurement uncertainties on independent and dependent
variables. Any of the two, or both, can be supplied.
cerr : float array (same size as x1)
covariance on the measurement errors
s_int : boolean (default True)
whether to include intrinsic scatter in the MLE.
po : tuple of floats
initial guess for free parameters. If s_int is True, then
po must have 3 elements; otherwise it can have two (for the
zero point and the slope)
verbose : boolean (default False)
verbose?
logify : boolean (default True)
whether to convert the values to log10's. This is to
calculate the best-fit power law. Note that the result is
given for the equation log(y)=a+b*log(x) -- i.e., the
zero point must be converted to 10**a if logify=True
full_output : boolean (default False)
        scipy.optimize.fmin's full_output argument
Returns
-------
a : float
Maximum Likelihood Estimate of the zero point. Note that
if logify=True, the power-law intercept is 10**a
b : float
Maximum Likelihood Estimate of the slope
s : float (optional, if s_int=True)
Maximum Likelihood Estimate of the intrinsic scatter
"""
from scipy import optimize
n = len(x1)
if len(x2) != n:
raise ValueError('x1 and x2 must have same length')
if len(x1err) == 0:
x1err = numpy.ones(n)
if len(x2err) == 0:
x2err = numpy.ones(n)
if logify:
x1, x2, x1err, x2err = to_log(x1, x2, x1err, x2err)
f = lambda a, b: a + b * x1
if s_int:
w = lambda b, s: numpy.sqrt(b**2 * x1err**2 + x2err**2 + s**2)
loglike = lambda p: 2 * sum(numpy.log(w(p[1],p[2]))) + \
sum(((x2 - f(p[0],p[1])) / w(p[1],p[2])) ** 2) + \
numpy.log(n * numpy.sqrt(2*numpy.pi)) / 2
else:
w = lambda b: numpy.sqrt(b**2 * x1err**2 + x2err**2)
loglike = lambda p: sum(numpy.log(w(p[1]))) + \
sum(((x2 - f(p[0],p[1])) / w(p[1])) ** 2) / 2 + \
numpy.log(n * numpy.sqrt(2*numpy.pi)) / 2
po = po[:2]
out = optimize.fmin(loglike, po, disp=verbose, full_output=full_output)
return out | Maximum Likelihood Estimation of best-fit parameters
Parameters
----------
x1, x2 : float arrays
the independent and dependent variables.
x1err, x2err : float arrays (optional)
measurement uncertainties on independent and dependent
variables. Any of the two, or both, can be supplied.
cerr : float array (same size as x1)
covariance on the measurement errors
s_int : boolean (default True)
whether to include intrinsic scatter in the MLE.
po : tuple of floats
initial guess for free parameters. If s_int is True, then
po must have 3 elements; otherwise it can have two (for the
zero point and the slope)
verbose : boolean (default False)
verbose?
logify : boolean (default True)
whether to convert the values to log10's. This is to
calculate the best-fit power law. Note that the result is
given for the equation log(y)=a+b*log(x) -- i.e., the
zero point must be converted to 10**a if logify=True
full_output : boolean (default False)
        scipy.optimize.fmin's full_output argument
Returns
-------
a : float
Maximum Likelihood Estimate of the zero point. Note that
if logify=True, the power-law intercept is 10**a
b : float
Maximum Likelihood Estimate of the slope
s : float (optional, if s_int=True)
Maximum Likelihood Estimate of the intrinsic scatter | Below is the the instruction that describes the task:
### Input:
Maximum Likelihood Estimation of best-fit parameters
Parameters
----------
x1, x2 : float arrays
the independent and dependent variables.
x1err, x2err : float arrays (optional)
measurement uncertainties on independent and dependent
variables. Any of the two, or both, can be supplied.
cerr : float array (same size as x1)
covariance on the measurement errors
s_int : boolean (default True)
whether to include intrinsic scatter in the MLE.
po : tuple of floats
initial guess for free parameters. If s_int is True, then
po must have 3 elements; otherwise it can have two (for the
zero point and the slope)
verbose : boolean (default False)
verbose?
logify : boolean (default True)
whether to convert the values to log10's. This is to
calculate the best-fit power law. Note that the result is
given for the equation log(y)=a+b*log(x) -- i.e., the
zero point must be converted to 10**a if logify=True
full_output : boolean (default False)
        scipy.optimize.fmin's full_output argument
Returns
-------
a : float
Maximum Likelihood Estimate of the zero point. Note that
if logify=True, the power-law intercept is 10**a
b : float
Maximum Likelihood Estimate of the slope
s : float (optional, if s_int=True)
Maximum Likelihood Estimate of the intrinsic scatter
### Response:
def mle(x1, x2, x1err=[], x2err=[], cerr=[], s_int=True,
po=(1,0,0.1), verbose=False, logify=True, full_output=False):
"""
Maximum Likelihood Estimation of best-fit parameters
Parameters
----------
x1, x2 : float arrays
the independent and dependent variables.
x1err, x2err : float arrays (optional)
measurement uncertainties on independent and dependent
variables. Any of the two, or both, can be supplied.
cerr : float array (same size as x1)
covariance on the measurement errors
s_int : boolean (default True)
whether to include intrinsic scatter in the MLE.
po : tuple of floats
initial guess for free parameters. If s_int is True, then
po must have 3 elements; otherwise it can have two (for the
zero point and the slope)
verbose : boolean (default False)
verbose?
logify : boolean (default True)
whether to convert the values to log10's. This is to
calculate the best-fit power law. Note that the result is
given for the equation log(y)=a+b*log(x) -- i.e., the
zero point must be converted to 10**a if logify=True
full_output : boolean (default False)
        scipy.optimize.fmin's full_output argument
Returns
-------
a : float
Maximum Likelihood Estimate of the zero point. Note that
if logify=True, the power-law intercept is 10**a
b : float
Maximum Likelihood Estimate of the slope
s : float (optional, if s_int=True)
Maximum Likelihood Estimate of the intrinsic scatter
"""
from scipy import optimize
n = len(x1)
if len(x2) != n:
raise ValueError('x1 and x2 must have same length')
if len(x1err) == 0:
x1err = numpy.ones(n)
if len(x2err) == 0:
x2err = numpy.ones(n)
if logify:
x1, x2, x1err, x2err = to_log(x1, x2, x1err, x2err)
f = lambda a, b: a + b * x1
if s_int:
w = lambda b, s: numpy.sqrt(b**2 * x1err**2 + x2err**2 + s**2)
loglike = lambda p: 2 * sum(numpy.log(w(p[1],p[2]))) + \
sum(((x2 - f(p[0],p[1])) / w(p[1],p[2])) ** 2) + \
numpy.log(n * numpy.sqrt(2*numpy.pi)) / 2
else:
w = lambda b: numpy.sqrt(b**2 * x1err**2 + x2err**2)
loglike = lambda p: sum(numpy.log(w(p[1]))) + \
sum(((x2 - f(p[0],p[1])) / w(p[1])) ** 2) / 2 + \
numpy.log(n * numpy.sqrt(2*numpy.pi)) / 2
po = po[:2]
out = optimize.fmin(loglike, po, disp=verbose, full_output=full_output)
return out |
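A synthetic sanity check (a sketch: it assumes mle and its to_log helper are importable and that numpy/scipy are installed; the exact numbers depend on the optimizer and on the default unit measurement errors):

import numpy

numpy.random.seed(0)
x = numpy.linspace(1.0, 10.0, 50)
y = 3.0 * x**1.5 * numpy.exp(0.05 * numpy.random.randn(50))

# With logify=True the fit is log10(y) = a + b*log10(x), so the power-law
# amplitude is 10**a and the slope b should come out near 1.5.
a, b, s = mle(x, y, s_int=True, po=(0.5, 1.5, 0.1), logify=True)
print(10**a, b, s)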
def setup(self, app):
""" Setup the plugin from an application. """
super().setup(app)
if isinstance(self.cfg.template_folders, str):
self.cfg.template_folders = [self.cfg.template_folders]
else:
self.cfg.template_folders = list(self.cfg.template_folders)
self.ctx_provider(lambda: {'app': self.app})
self.env = Environment(debug=app.cfg.DEBUG, **self.cfg) | Setup the plugin from an application. | Below is the the instruction that describes the task:
### Input:
Setup the plugin from an application.
### Response:
def setup(self, app):
""" Setup the plugin from an application. """
super().setup(app)
if isinstance(self.cfg.template_folders, str):
self.cfg.template_folders = [self.cfg.template_folders]
else:
self.cfg.template_folders = list(self.cfg.template_folders)
self.ctx_provider(lambda: {'app': self.app})
self.env = Environment(debug=app.cfg.DEBUG, **self.cfg) |
def _follow_next(self, url):
"""Follow the 'next' link on paginated results."""
response = self._json(self._get(url), 200)
data = response['data']
next_url = self._get_attribute(response, 'links', 'next')
while next_url is not None:
response = self._json(self._get(next_url), 200)
data.extend(response['data'])
next_url = self._get_attribute(response, 'links', 'next')
return data | Follow the 'next' link on paginated results. | Below is the the instruction that describes the task:
### Input:
Follow the 'next' link on paginated results.
### Response:
def _follow_next(self, url):
"""Follow the 'next' link on paginated results."""
response = self._json(self._get(url), 200)
data = response['data']
next_url = self._get_attribute(response, 'links', 'next')
while next_url is not None:
response = self._json(self._get(next_url), 200)
data.extend(response['data'])
next_url = self._get_attribute(response, 'links', 'next')
return data |
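The pagination pattern above, reduced to a standalone illustration with canned responses instead of HTTP calls (the URLs and page contents below are made up for the example):

PAGES = {
    "/v1/items": {"data": [1, 2], "links": {"next": "/v1/items?page=2"}},
    "/v1/items?page=2": {"data": [3, 4], "links": {"next": None}},
}

def follow_next(url):
    response = PAGES[url]
    data = list(response["data"])
    next_url = response["links"].get("next")
    while next_url is not None:
        response = PAGES[next_url]
        data.extend(response["data"])
        next_url = response["links"].get("next")
    return data

print(follow_next("/v1/items"))   # [1, 2, 3, 4]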
def create_app(client_name, scopes=__DEFAULT_SCOPES, redirect_uris=None, website=None, to_file=None,
api_base_url=__DEFAULT_BASE_URL, request_timeout=__DEFAULT_TIMEOUT, session=None):
"""
    Create a new app with given `client_name` and `scopes` (The basic scopes are "read", "write", "follow" and "push"
    - more granular scopes are available, please refer to the Mastodon documentation for which).
Specify `redirect_uris` if you want users to be redirected to a certain page after authenticating in an oauth flow.
You can specify multiple URLs by passing a list. Note that if you wish to use OAuth authentication with redirects,
the redirect URI must be one of the URLs specified here.
Specify `to_file` to persist your apps info to a file so you can use them in the constructor.
Specify `api_base_url` if you want to register an app on an instance different from the flagship one.
Specify `website` to give a website for your app.
    Specify `session` with a requests.Session for it to be used instead of the default.
Presently, app registration is open by default, but this is not guaranteed to be the case for all
future mastodon instances or even the flagship instance in the future.
Returns `client_id` and `client_secret`, both as strings.
"""
api_base_url = Mastodon.__protocolize(api_base_url)
request_data = {
'client_name': client_name,
'scopes': " ".join(scopes)
}
try:
if redirect_uris is not None:
if isinstance(redirect_uris, (list, tuple)):
redirect_uris = "\n".join(list(redirect_uris))
request_data['redirect_uris'] = redirect_uris
else:
request_data['redirect_uris'] = 'urn:ietf:wg:oauth:2.0:oob'
if website is not None:
request_data['website'] = website
if session:
ret = session.post(api_base_url + '/api/v1/apps', data=request_data, timeout=request_timeout)
response = ret.json()
else:
response = requests.post(api_base_url + '/api/v1/apps', data=request_data, timeout=request_timeout)
response = response.json()
except Exception as e:
raise MastodonNetworkError("Could not complete request: %s" % e)
if to_file is not None:
with open(to_file, 'w') as secret_file:
secret_file.write(response['client_id'] + '\n')
secret_file.write(response['client_secret'] + '\n')
    return (response['client_id'], response['client_secret']) | Create a new app with given `client_name` and `scopes` (The basic scopes are "read", "write", "follow" and "push"
    - more granular scopes are available, please refer to the Mastodon documentation for which).
Specify `redirect_uris` if you want users to be redirected to a certain page after authenticating in an oauth flow.
You can specify multiple URLs by passing a list. Note that if you wish to use OAuth authentication with redirects,
the redirect URI must be one of the URLs specified here.
Specify `to_file` to persist your apps info to a file so you can use them in the constructor.
Specify `api_base_url` if you want to register an app on an instance different from the flagship one.
Specify `website` to give a website for your app.
    Specify `session` with a requests.Session for it to be used instead of the default.
Presently, app registration is open by default, but this is not guaranteed to be the case for all
future mastodon instances or even the flagship instance in the future.
Returns `client_id` and `client_secret`, both as strings. | Below is the the instruction that describes the task:
### Input:
    Create a new app with given `client_name` and `scopes` (The basic scopes are "read", "write", "follow" and "push"
    - more granular scopes are available, please refer to the Mastodon documentation for which).
Specify `redirect_uris` if you want users to be redirected to a certain page after authenticating in an oauth flow.
You can specify multiple URLs by passing a list. Note that if you wish to use OAuth authentication with redirects,
the redirect URI must be one of the URLs specified here.
Specify `to_file` to persist your apps info to a file so you can use them in the constructor.
Specify `api_base_url` if you want to register an app on an instance different from the flagship one.
Specify `website` to give a website for your app.
    Specify `session` with a requests.Session for it to be used instead of the default.
Presently, app registration is open by default, but this is not guaranteed to be the case for all
future mastodon instances or even the flagship instance in the future.
Returns `client_id` and `client_secret`, both as strings.
### Response:
def create_app(client_name, scopes=__DEFAULT_SCOPES, redirect_uris=None, website=None, to_file=None,
api_base_url=__DEFAULT_BASE_URL, request_timeout=__DEFAULT_TIMEOUT, session=None):
"""
    Create a new app with given `client_name` and `scopes` (The basic scopes are "read", "write", "follow" and "push"
    - more granular scopes are available, please refer to the Mastodon documentation for which).
Specify `redirect_uris` if you want users to be redirected to a certain page after authenticating in an oauth flow.
You can specify multiple URLs by passing a list. Note that if you wish to use OAuth authentication with redirects,
the redirect URI must be one of the URLs specified here.
Specify `to_file` to persist your apps info to a file so you can use them in the constructor.
Specify `api_base_url` if you want to register an app on an instance different from the flagship one.
Specify `website` to give a website for your app.
    Specify `session` with a requests.Session for it to be used instead of the default.
Presently, app registration is open by default, but this is not guaranteed to be the case for all
future mastodon instances or even the flagship instance in the future.
Returns `client_id` and `client_secret`, both as strings.
"""
api_base_url = Mastodon.__protocolize(api_base_url)
request_data = {
'client_name': client_name,
'scopes': " ".join(scopes)
}
try:
if redirect_uris is not None:
if isinstance(redirect_uris, (list, tuple)):
redirect_uris = "\n".join(list(redirect_uris))
request_data['redirect_uris'] = redirect_uris
else:
request_data['redirect_uris'] = 'urn:ietf:wg:oauth:2.0:oob'
if website is not None:
request_data['website'] = website
if session:
ret = session.post(api_base_url + '/api/v1/apps', data=request_data, timeout=request_timeout)
response = ret.json()
else:
response = requests.post(api_base_url + '/api/v1/apps', data=request_data, timeout=request_timeout)
response = response.json()
except Exception as e:
raise MastodonNetworkError("Could not complete request: %s" % e)
if to_file is not None:
with open(to_file, 'w') as secret_file:
secret_file.write(response['client_id'] + '\n')
secret_file.write(response['client_secret'] + '\n')
return (response['client_id'], response['client_secret']) |
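A typical one-off registration call against an instance (a sketch based on the signature above; the instance URL and secret file name are placeholders):

from mastodon import Mastodon

client_id, client_secret = Mastodon.create_app(
    "my-example-app",
    scopes=["read", "write"],
    api_base_url="https://mastodon.example",        # placeholder instance
    to_file="my_example_app_clientcred.secret",     # optional: persist credentials
)
print(client_id, client_secret)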
def on(self, image):
"""
        Project line strings from one image to a new one.
Parameters
----------
image : ndarray or tuple of int
The new image onto which to project.
Either an image with shape ``(H,W,[C])`` or a tuple denoting
such an image shape.
Returns
-------
line_strings : imgaug.augmentables.lines.LineStrings
Object containing all projected line strings.
"""
shape = normalize_shape(image)
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
line_strings = [ls.project(self.shape, shape)
for ls in self.line_strings]
        return self.deepcopy(line_strings=line_strings, shape=shape) | Project line strings from one image to a new one.
Parameters
----------
image : ndarray or tuple of int
The new image onto which to project.
Either an image with shape ``(H,W,[C])`` or a tuple denoting
such an image shape.
Returns
-------
line_strings : imgaug.augmentables.lines.LineStrings
Object containing all projected line strings. | Below is the the instruction that describes the task:
### Input:
        Project line strings from one image to a new one.
Parameters
----------
image : ndarray or tuple of int
The new image onto which to project.
Either an image with shape ``(H,W,[C])`` or a tuple denoting
such an image shape.
Returns
-------
line_strings : imgaug.augmentables.lines.LineStrings
Object containing all projected line strings.
### Response:
def on(self, image):
"""
        Project line strings from one image to a new one.
Parameters
----------
image : ndarray or tuple of int
The new image onto which to project.
Either an image with shape ``(H,W,[C])`` or a tuple denoting
such an image shape.
Returns
-------
line_strings : imgaug.augmentables.lines.LineStrings
Object containing all projected line strings.
"""
shape = normalize_shape(image)
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
line_strings = [ls.project(self.shape, shape)
for ls in self.line_strings]
return self.deepcopy(line_strings=line_strings, shape=shape) |
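A small projection example (a sketch assuming imgaug is installed; coordinates simply scale with the new image shape):

import numpy as np
from imgaug.augmentables.lines import LineString, LineStringsOnImage

lsoi = LineStringsOnImage(
    [LineString([(0, 0), (50, 80)])],
    shape=(100, 100, 3),
)
# Project onto an image twice as large in both axes.
projected = lsoi.on(np.zeros((200, 200, 3), dtype=np.uint8))
print(projected.line_strings[0].coords)   # approximately [[0, 0], [100, 160]]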
def _guess_name(desc, taken=None):
"""Attempts to guess the menu entry name from the function name."""
taken = taken or []
name = ""
# Try to find the shortest name based on the given description.
for word in desc.split():
c = word[0].lower()
if not c.isalnum():
continue
name += c
if name not in taken:
break
# If name is still taken, add a number postfix.
count = 2
while name in taken:
name = name + str(count)
count += 1
return name | Attempts to guess the menu entry name from the function name. | Below is the the instruction that describes the task:
### Input:
Attempts to guess the menu entry name from the function name.
### Response:
def _guess_name(desc, taken=None):
"""Attempts to guess the menu entry name from the function name."""
taken = taken or []
name = ""
# Try to find the shortest name based on the given description.
for word in desc.split():
c = word[0].lower()
if not c.isalnum():
continue
name += c
if name not in taken:
break
# If name is still taken, add a number postfix.
count = 2
while name in taken:
name = name + str(count)
count += 1
return name |
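How the shortening and numeric postfix behave in practice (a sketch; _guess_name is assumed importable, and the descriptions are made up):

taken = []
for desc in ["Open file", "Order pizza", "Order pasta"]:
    name = _guess_name(desc, taken)
    taken.append(name)
print(taken)   # ['o', 'op', 'op2'] -- the third entry collides and gets a postfix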
def bigger_version(version_string_a, version_string_b):
"""Returns the bigger version of two version strings."""
major_a, minor_a, patch_a = parse_version_string(version_string_a)
major_b, minor_b, patch_b = parse_version_string(version_string_b)
if major_a > major_b:
return version_string_a
elif major_a == major_b and minor_a > minor_b:
return version_string_a
elif major_a == major_b and minor_a == minor_b and patch_a > patch_b:
return version_string_a
return version_string_b | Returns the bigger version of two version strings. | Below is the the instruction that describes the task:
### Input:
Returns the bigger version of two version strings.
### Response:
def bigger_version(version_string_a, version_string_b):
"""Returns the bigger version of two version strings."""
major_a, minor_a, patch_a = parse_version_string(version_string_a)
major_b, minor_b, patch_b = parse_version_string(version_string_b)
if major_a > major_b:
return version_string_a
elif major_a == major_b and minor_a > minor_b:
return version_string_a
elif major_a == major_b and minor_a == minor_b and patch_a > patch_b:
return version_string_a
return version_string_b |
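A quick comparison showing why numeric comparison matters (a sketch; it assumes the companion parse_version_string splits on dots and returns three ints):

print(bigger_version("2.4.10", "2.4.3"))   # '2.4.10' (10 > 3 numerically)
print(bigger_version("1.9.0", "1.10.0"))   # '1.10.0', unlike plain string comparison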
def update(self):
""" Remove items in cart """
subtotal = float(0)
total = float(0)
for product in self.items:
subtotal += float(product["price"])
if subtotal > 0:
total = subtotal + self.extra_amount
self.subtotal = subtotal
self.total = total | Remove items in cart | Below is the the instruction that describes the task:
### Input:
Remove items in cart
### Response:
def update(self):
""" Remove items in cart """
subtotal = float(0)
total = float(0)
for product in self.items:
subtotal += float(product["price"])
if subtotal > 0:
total = subtotal + self.extra_amount
self.subtotal = subtotal
self.total = total |
def flush(self):
"""Flush changes to record."""
files = self.dumps()
# Do not create `_files` when there has not been `_files` field before
# and the record still has no files attached.
if files or '_files' in self.record:
self.record['_files'] = files | Flush changes to record. | Below is the the instruction that describes the task:
### Input:
Flush changes to record.
### Response:
def flush(self):
"""Flush changes to record."""
files = self.dumps()
# Do not create `_files` when there has not been `_files` field before
# and the record still has no files attached.
if files or '_files' in self.record:
self.record['_files'] = files |
def _add_request_entry(self, entry=()):
"""This function record the request with netfn, sequence number and
command, which will be used in parse_ipmi_payload.
:param entry: a set of netfn, sequence number and command.
"""
if not self._lookup_request_entry(entry):
            self.request_entry.append(entry) | This function records the request with netfn, sequence number and
command, which will be used in parse_ipmi_payload.
:param entry: a set of netfn, sequence number and command. | Below is the the instruction that describes the task:
### Input:
This function records the request with netfn, sequence number and
command, which will be used in parse_ipmi_payload.
:param entry: a set of netfn, sequence number and command.
### Response:
def _add_request_entry(self, entry=()):
"""This function record the request with netfn, sequence number and
command, which will be used in parse_ipmi_payload.
:param entry: a set of netfn, sequence number and command.
"""
if not self._lookup_request_entry(entry):
self.request_entry.append(entry) |
def register_warning_code(code, exception_type, domain='core'):
"""Register a new warning code"""
Logger._warning_code_to_exception[code] = (exception_type, domain)
Logger._domain_codes[domain].add(code) | Register a new warning code | Below is the the instruction that describes the task:
### Input:
Register a new warning code
### Response:
def register_warning_code(code, exception_type, domain='core'):
"""Register a new warning code"""
Logger._warning_code_to_exception[code] = (exception_type, domain)
Logger._domain_codes[domain].add(code) |
def validate(self, value, model=None, context=None):
"""
Validate
Perform value validation and return result
:param value: value to check, cast to string
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult
"""
import re
value = str(value)
match = re.match(r'^\d+', value)
if not match or value != match.group():
return Error(self.not_digital)
# success otherwise
return Error() | Validate
Perform value validation and return result
:param value: value to check, cast to string
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult | Below is the the instruction that describes the task:
### Input:
Validate
Perform value validation and return result
:param value: value to check, cast to string
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult
### Response:
def validate(self, value, model=None, context=None):
"""
Validate
Perform value validation and return result
:param value: value to check, cast to string
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.results.SimpleResult
"""
import re
value = str(value)
match = re.match(r'^\d+', value)
if not match or value != match.group():
return Error(self.not_digital)
# success otherwise
return Error() |
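The regex check above, isolated into a tiny predicate to make the accept/reject behaviour explicit (illustrative only; the real validator wraps the outcome in Error objects):

import re

def is_all_digits(value):
    # Accept only if the leading digit run covers the whole string.
    value = str(value)
    match = re.match(r'^\d+', value)
    return bool(match) and value == match.group()

print(is_all_digits("12345"))   # True  -> validator returns an empty Error (success)
print(is_all_digits("12a45"))   # False -> validator returns Error(self.not_digital)
print(is_all_digits("abc"))     # False -> no leading digits at all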
def register(self, *dependencies, default=False, hidden=False, ignore_return_code=False):
"""
Decorates a callable to turn it into a task
"""
def outer(func):
task = Task(func, *dependencies, default=default, hidden=hidden, ignore_return_code=ignore_return_code)
overidden_task = self._tasks.pop(task.name, None)
if overidden_task:
self._overidden_tasks[task.name].append(overidden_task)
self[task.name] = task
return task
return outer | Decorates a callable to turn it into a task | Below is the the instruction that describes the task:
### Input:
Decorates a callable to turn it into a task
### Response:
def register(self, *dependencies, default=False, hidden=False, ignore_return_code=False):
"""
Decorates a callable to turn it into a task
"""
def outer(func):
task = Task(func, *dependencies, default=default, hidden=hidden, ignore_return_code=ignore_return_code)
overidden_task = self._tasks.pop(task.name, None)
if overidden_task:
self._overidden_tasks[task.name].append(overidden_task)
self[task.name] = task
return task
return outer |
def check_error(result, func, cargs):
"Error checking proper value returns"
if result != 0:
msg = 'Error in "%s": %s' % (func.__name__, get_errors(result) )
raise RTreeError(msg)
return | Error checking proper value returns | Below is the the instruction that describes the task:
### Input:
Error checking proper value returns
### Response:
def check_error(result, func, cargs):
"Error checking proper value returns"
if result != 0:
msg = 'Error in "%s": %s' % (func.__name__, get_errors(result) )
raise RTreeError(msg)
return |
def handle_link(self, value):
"""
rdf:link rdf:resource points to the resource described by a record.
"""
for s, p, o in self.graph.triples((value, None, None)):
if p == LINK_ELEM:
return unicode(o).replace('file://', '') | rdf:link rdf:resource points to the resource described by a record. | Below is the the instruction that describes the task:
### Input:
rdf:link rdf:resource points to the resource described by a record.
### Response:
def handle_link(self, value):
"""
rdf:link rdf:resource points to the resource described by a record.
"""
for s, p, o in self.graph.triples((value, None, None)):
if p == LINK_ELEM:
return unicode(o).replace('file://', '') |
def graph2geoff(graph, edge_rel_name, encoder=None):
""" Get the `graph` as Geoff string. The edges between the nodes
have relationship name `edge_rel_name`. The code
below shows a simple example::
# create a graph
import networkx as nx
G = nx.Graph()
G.add_nodes_from([1, 2, 3])
G.add_edge(1, 2)
G.add_edge(2, 3)
# get the geoff string
geoff_string = graph2geoff(G, 'LINKS_TO')
If the properties are not json encodable, please pass a custom JSON encoder
class. See `JSONEncoder
<http://docs.python.org/2/library/json.html#json.JSONEncoder/>`_.
Parameters
----------
graph : Graph or DiGraph
a NetworkX Graph or a DiGraph
edge_rel_name : str
relationship name between the nodes
encoder: JSONEncoder or None
JSONEncoder object. Defaults to JSONEncoder.
Returns
-------
geoff : str
a Geoff string
"""
if encoder is None:
encoder = json.JSONEncoder()
is_digraph = isinstance(graph, nx.DiGraph)
lines = []
lapp = lines.append
for node_name, properties in graph.nodes(data=True):
lapp(node2geoff(node_name, properties, encoder))
for from_node, to_node, properties in graph.edges(data=True):
lapp(edge2geoff(from_node, to_node, properties, edge_rel_name, encoder))
if not is_digraph:
lapp(edge2geoff(to_node, from_node, properties, edge_rel_name,
encoder))
return '\n'.join(lines) | Get the `graph` as Geoff string. The edges between the nodes
have relationship name `edge_rel_name`. The code
below shows a simple example::
# create a graph
import networkx as nx
G = nx.Graph()
G.add_nodes_from([1, 2, 3])
G.add_edge(1, 2)
G.add_edge(2, 3)
# get the geoff string
geoff_string = graph2geoff(G, 'LINKS_TO')
If the properties are not json encodable, please pass a custom JSON encoder
class. See `JSONEncoder
<http://docs.python.org/2/library/json.html#json.JSONEncoder/>`_.
Parameters
----------
graph : Graph or DiGraph
a NetworkX Graph or a DiGraph
edge_rel_name : str
relationship name between the nodes
encoder: JSONEncoder or None
JSONEncoder object. Defaults to JSONEncoder.
Returns
-------
geoff : str
a Geoff string | Below is the the instruction that describes the task:
### Input:
Get the `graph` as Geoff string. The edges between the nodes
have relationship name `edge_rel_name`. The code
below shows a simple example::
# create a graph
import networkx as nx
G = nx.Graph()
G.add_nodes_from([1, 2, 3])
G.add_edge(1, 2)
G.add_edge(2, 3)
# get the geoff string
geoff_string = graph2geoff(G, 'LINKS_TO')
If the properties are not json encodable, please pass a custom JSON encoder
class. See `JSONEncoder
<http://docs.python.org/2/library/json.html#json.JSONEncoder/>`_.
Parameters
----------
graph : Graph or DiGraph
a NetworkX Graph or a DiGraph
edge_rel_name : str
relationship name between the nodes
encoder: JSONEncoder or None
JSONEncoder object. Defaults to JSONEncoder.
Returns
-------
geoff : str
a Geoff string
### Response:
def graph2geoff(graph, edge_rel_name, encoder=None):
""" Get the `graph` as Geoff string. The edges between the nodes
have relationship name `edge_rel_name`. The code
below shows a simple example::
# create a graph
import networkx as nx
G = nx.Graph()
G.add_nodes_from([1, 2, 3])
G.add_edge(1, 2)
G.add_edge(2, 3)
# get the geoff string
geoff_string = graph2geoff(G, 'LINKS_TO')
If the properties are not json encodable, please pass a custom JSON encoder
class. See `JSONEncoder
<http://docs.python.org/2/library/json.html#json.JSONEncoder/>`_.
Parameters
----------
graph : Graph or DiGraph
a NetworkX Graph or a DiGraph
edge_rel_name : str
relationship name between the nodes
encoder: JSONEncoder or None
JSONEncoder object. Defaults to JSONEncoder.
Returns
-------
geoff : str
a Geoff string
"""
if encoder is None:
encoder = json.JSONEncoder()
is_digraph = isinstance(graph, nx.DiGraph)
lines = []
lapp = lines.append
for node_name, properties in graph.nodes(data=True):
lapp(node2geoff(node_name, properties, encoder))
for from_node, to_node, properties in graph.edges(data=True):
lapp(edge2geoff(from_node, to_node, properties, edge_rel_name, encoder))
if not is_digraph:
lapp(edge2geoff(to_node, from_node, properties, edge_rel_name,
encoder))
return '\n'.join(lines) |
def _uneven_utility_transform(systematic_utilities,
alt_IDs,
rows_to_alts,
shape_params,
intercept_params,
intercept_ref_pos=None,
*args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
All elements should be ints, floats, or longs. Should contain the
systematic utilities of each observation per available alternative.
Note that this vector is formed by the dot product of the design matrix
with the vector of utility coefficients.
alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D scipy sparse matrix.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset. All
elements should be zeros or ones.
shape_params : None or 1D ndarray.
If an array, each element should be an int, float, or long. There
should be one value per shape parameter of the model being used.
intercept_params : None or 1D ndarray.
If an array, each element should be an int, float, or long. If J is the
total number of possible alternatives for the dataset being modeled,
there should be J-1 elements in the array.
intercept_ref_pos : int, or None, optional.
Specifies the index of the alternative, in the ordered array of unique
alternatives, that is not having its intercept parameter estimated (in
order to ensure identifiability). Should only be None if
`intercept_params` is None.
Returns
-------
transformed_utilities : 2D ndarray.
Should have shape `(systematic_utilities.shape[0], 1)`. The returned
array contains the transformed utility values for this model. All
elements will be ints, longs, or floats.
"""
# Convert the shape parameters back into their 'natural parametrization'
natural_shapes = np.exp(shape_params)
natural_shapes[np.isposinf(natural_shapes)] = max_comp_value
# Figure out what shape values correspond to each row of the
# systematic utilities
long_natural_shapes = rows_to_alts.dot(natural_shapes)
    # Get the exponentiated negative utilities
exp_neg_utilities = np.exp(-1 * systematic_utilities)
# Get the log of 1 + exponentiated negative utilities
log_1_plus_exp_neg_utilitiles = np.log1p(exp_neg_utilities)
# Guard against overflow. Underflow not a problem since we add one to a
# near zero number and log of one will evaluate to zero
inf_idx = np.isinf(log_1_plus_exp_neg_utilitiles)
log_1_plus_exp_neg_utilitiles[inf_idx] = -1 * systematic_utilities[inf_idx]
# Get the exponentiated (negative utilities times the shape parameter)
exp_neg_shape_utilities = np.exp(-1 *
long_natural_shapes *
systematic_utilities)
    # Get the log of 1 + exponentiated (negative utilities times the shape)
log_1_plus_exp_neg_shape_utilities = np.log1p(exp_neg_shape_utilities)
##########
# Guard against overflow
##########
# Check for any values which have gone off to positive infinity
inf_idx = np.isinf(log_1_plus_exp_neg_shape_utilities)
# Replace those values with an approximation of the true values by ignoring
# the "1." The idea is that 1 + infinity ~ infinity so the effect of the +1
# on the log is minimal.
if np.any(inf_idx):
log_1_plus_exp_neg_shape_utilities[inf_idx] =\
-1 * long_natural_shapes[inf_idx] * systematic_utilities[inf_idx]
# Calculate the transformed utility values
transformed_utilities = (systematic_utilities +
log_1_plus_exp_neg_utilitiles -
log_1_plus_exp_neg_shape_utilities)
# Perform a final guard against numbers that are too large to deal with
transformed_utilities[np.isposinf(transformed_utilities)] = max_comp_value
transformed_utilities[np.isneginf(transformed_utilities)] = -max_comp_value
transformed_utilities[np.isneginf(systematic_utilities)] = -max_comp_value
# Account for the outside intercept parameters if there are any.
if intercept_params is not None and intercept_ref_pos is not None:
# Get a list of all the indices (or row indices) corresponding to the
# alternatives whose intercept parameters are being estimated.
        needed_idxs = list(range(rows_to_alts.shape[1]))
needed_idxs.remove(intercept_ref_pos)
if len(intercept_params.shape) > 1 and intercept_params.shape[1] > 1:
# Get an array of zeros with shape
# (num_possible_alternatives, num_parameter_samples)
all_intercepts = np.zeros((rows_to_alts.shape[1],
intercept_params.shape[1]))
# For alternatives having their intercept estimated, replace the
# zeros with the current value of the estimated intercepts
all_intercepts[needed_idxs, :] = intercept_params
else:
# Get an array of zeros with shape (num_possible_alternatives,)
all_intercepts = np.zeros(rows_to_alts.shape[1])
# For alternatives having their intercept estimated, replace the
# zeros with the current value of the estimated intercepts
all_intercepts[needed_idxs] = intercept_params
# Add the intercept values to f(x, beta, c)
transformed_utilities += rows_to_alts.dot(all_intercepts)
# Be sure to return a 2D array since other functions will be expecting that
if len(transformed_utilities.shape) == 1:
transformed_utilities = transformed_utilities[:, np.newaxis]
return transformed_utilities | Parameters
----------
systematic_utilities : 1D ndarray.
All elements should be ints, floats, or longs. Should contain the
systematic utilities of each observation per available alternative.
Note that this vector is formed by the dot product of the design matrix
with the vector of utility coefficients.
alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D scipy sparse matrix.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset. All
elements should be zeros or ones.
shape_params : None or 1D ndarray.
If an array, each element should be an int, float, or long. There
should be one value per shape parameter of the model being used.
intercept_params : None or 1D ndarray.
If an array, each element should be an int, float, or long. If J is the
total number of possible alternatives for the dataset being modeled,
there should be J-1 elements in the array.
intercept_ref_pos : int, or None, optional.
Specifies the index of the alternative, in the ordered array of unique
alternatives, that is not having its intercept parameter estimated (in
order to ensure identifiability). Should only be None if
`intercept_params` is None.
Returns
-------
transformed_utilities : 2D ndarray.
Should have shape `(systematic_utilities.shape[0], 1)`. The returned
array contains the transformed utility values for this model. All
elements will be ints, longs, or floats. | Below is the the instruction that describes the task:
### Input:
Parameters
----------
systematic_utilities : 1D ndarray.
All elements should be ints, floats, or longs. Should contain the
systematic utilities of each observation per available alternative.
Note that this vector is formed by the dot product of the design matrix
with the vector of utility coefficients.
alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D scipy sparse matrix.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset. All
elements should be zeros or ones.
shape_params : None or 1D ndarray.
If an array, each element should be an int, float, or long. There
should be one value per shape parameter of the model being used.
intercept_params : None or 1D ndarray.
If an array, each element should be an int, float, or long. If J is the
total number of possible alternatives for the dataset being modeled,
there should be J-1 elements in the array.
intercept_ref_pos : int, or None, optional.
Specifies the index of the alternative, in the ordered array of unique
alternatives, that is not having its intercept parameter estimated (in
order to ensure identifiability). Should only be None if
`intercept_params` is None.
Returns
-------
transformed_utilities : 2D ndarray.
Should have shape `(systematic_utilities.shape[0], 1)`. The returned
array contains the transformed utility values for this model. All
elements will be ints, longs, or floats.
### Response:
def _uneven_utility_transform(systematic_utilities,
alt_IDs,
rows_to_alts,
shape_params,
intercept_params,
intercept_ref_pos=None,
*args, **kwargs):
"""
Parameters
----------
systematic_utilities : 1D ndarray.
All elements should be ints, floats, or longs. Should contain the
systematic utilities of each observation per available alternative.
Note that this vector is formed by the dot product of the design matrix
with the vector of utility coefficients.
alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_alts : 2D scipy sparse matrix.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset. All
elements should be zeros or ones.
shape_params : None or 1D ndarray.
If an array, each element should be an int, float, or long. There
should be one value per shape parameter of the model being used.
intercept_params : None or 1D ndarray.
If an array, each element should be an int, float, or long. If J is the
total number of possible alternatives for the dataset being modeled,
there should be J-1 elements in the array.
intercept_ref_pos : int, or None, optional.
Specifies the index of the alternative, in the ordered array of unique
alternatives, that is not having its intercept parameter estimated (in
order to ensure identifiability). Should only be None if
`intercept_params` is None.
Returns
-------
transformed_utilities : 2D ndarray.
Should have shape `(systematic_utilities.shape[0], 1)`. The returned
array contains the transformed utility values for this model. All
elements will be ints, longs, or floats.
"""
# Convert the shape parameters back into their 'natural parametrization'
natural_shapes = np.exp(shape_params)
natural_shapes[np.isposinf(natural_shapes)] = max_comp_value
# Figure out what shape values correspond to each row of the
# systematic utilities
long_natural_shapes = rows_to_alts.dot(natural_shapes)
# Get the exponentiated neative utilities
exp_neg_utilities = np.exp(-1 * systematic_utilities)
# Get the log of 1 + exponentiated negative utilities
log_1_plus_exp_neg_utilitiles = np.log1p(exp_neg_utilities)
# Guard against overflow. Underflow not a problem since we add one to a
# near zero number and log of one will evaluate to zero
inf_idx = np.isinf(log_1_plus_exp_neg_utilitiles)
log_1_plus_exp_neg_utilitiles[inf_idx] = -1 * systematic_utilities[inf_idx]
# Get the exponentiated (negative utilities times the shape parameter)
exp_neg_shape_utilities = np.exp(-1 *
long_natural_shapes *
systematic_utilities)
# Get the log of 1 + exponentiated (negative utiltiies times the shape)
log_1_plus_exp_neg_shape_utilities = np.log1p(exp_neg_shape_utilities)
##########
# Guard against overflow
##########
# Check for any values which have gone off to positive infinity
inf_idx = np.isinf(log_1_plus_exp_neg_shape_utilities)
# Replace those values with an approximation of the true values by ignoring
# the "1." The idea is that 1 + infinity ~ infinity so the effect of the +1
# on the log is minimal.
if np.any(inf_idx):
log_1_plus_exp_neg_shape_utilities[inf_idx] =\
-1 * long_natural_shapes[inf_idx] * systematic_utilities[inf_idx]
# Calculate the transformed utility values
transformed_utilities = (systematic_utilities +
log_1_plus_exp_neg_utilitiles -
log_1_plus_exp_neg_shape_utilities)
# Perform a final guard against numbers that are too large to deal with
transformed_utilities[np.isposinf(transformed_utilities)] = max_comp_value
transformed_utilities[np.isneginf(transformed_utilities)] = -max_comp_value
transformed_utilities[np.isneginf(systematic_utilities)] = -max_comp_value
# Account for the outside intercept parameters if there are any.
if intercept_params is not None and intercept_ref_pos is not None:
# Get a list of all the indices (or row indices) corresponding to the
# alternatives whose intercept parameters are being estimated.
        needed_idxs = list(range(rows_to_alts.shape[1]))
needed_idxs.remove(intercept_ref_pos)
if len(intercept_params.shape) > 1 and intercept_params.shape[1] > 1:
# Get an array of zeros with shape
# (num_possible_alternatives, num_parameter_samples)
all_intercepts = np.zeros((rows_to_alts.shape[1],
intercept_params.shape[1]))
# For alternatives having their intercept estimated, replace the
# zeros with the current value of the estimated intercepts
all_intercepts[needed_idxs, :] = intercept_params
else:
# Get an array of zeros with shape (num_possible_alternatives,)
all_intercepts = np.zeros(rows_to_alts.shape[1])
# For alternatives having their intercept estimated, replace the
# zeros with the current value of the estimated intercepts
all_intercepts[needed_idxs] = intercept_params
# Add the intercept values to f(x, beta, c)
transformed_utilities += rows_to_alts.dot(all_intercepts)
# Be sure to return a 2D array since other functions will be expecting that
if len(transformed_utilities.shape) == 1:
transformed_utilities = transformed_utilities[:, np.newaxis]
return transformed_utilities |
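Per row, the value computed above (before any outside intercepts) is V* = V + log(1 + exp(-V)) - log(1 + exp(-c*V)), with c the natural shape mapped to that row; a tiny numeric restatement of that identity (assuming only numpy):

import numpy as np

V = np.array([-1.0, 0.5, 2.0])   # systematic utilities
c = np.array([0.5, 1.0, 2.0])    # natural shape parameter mapped to each row
V_star = V + np.log1p(np.exp(-V)) - np.log1p(np.exp(-c * V))
print(V_star)   # note that c = 1 leaves the utility unchanged (middle entry)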
def stop_cont(self, cont=True):
"""Send SIGSTOP/SIGCONT to processes called <name>
"""
for proc in psutil.process_iter():
if proc.name() == self.process_name:
sig = psutil.signal.SIGCONT if cont else psutil.signal.SIGSTOP
proc.send_signal(sig)
if self.debug:
sig = 'CONT' if cont else 'STOP'
print("Sent SIG%s to process %d" % (sig, proc.pid)) | Send SIGSTOP/SIGCONT to processes called <name> | Below is the the instruction that describes the task:
### Input:
Send SIGSTOP/SIGCONT to processes called <name>
### Response:
def stop_cont(self, cont=True):
"""Send SIGSTOP/SIGCONT to processes called <name>
"""
for proc in psutil.process_iter():
if proc.name() == self.process_name:
sig = psutil.signal.SIGCONT if cont else psutil.signal.SIGSTOP
proc.send_signal(sig)
if self.debug:
sig = 'CONT' if cont else 'STOP'
print("Sent SIG%s to process %d" % (sig, proc.pid)) |
def load_dict():
"""
Load java messages that can be ignored from a pickle file into a dict structure g_ok_java_messages.
:return: none
"""
global g_load_java_message_filename
global g_ok_java_messages
if os.path.isfile(g_load_java_message_filename):
# only load dict from file if it exists.
with open(g_load_java_message_filename,'rb') as ofile:
g_ok_java_messages = pickle.load(ofile)
else: # no previous java messages to be excluded are found
g_ok_java_messages["general"] = [] | Load java messages that can be ignored pickle file into a dict structure g_ok_java_messages.
:return: none | Below is the the instruction that describes the task:
### Input:
Load java messages that can be ignored from a pickle file into a dict structure g_ok_java_messages.
:return: none
### Response:
def load_dict():
"""
Load java messages that can be ignored from a pickle file into a dict structure g_ok_java_messages.
:return: none
"""
global g_load_java_message_filename
global g_ok_java_messages
if os.path.isfile(g_load_java_message_filename):
# only load dict from file if it exists.
with open(g_load_java_message_filename,'rb') as ofile:
g_ok_java_messages = pickle.load(ofile)
else: # no previous java messages to be excluded are found
g_ok_java_messages["general"] = [] |
def read_git_commit_timestamp_for_file(filepath, repo_path=None, repo=None):
"""Obtain the timestamp for the most recent commit to a given file in a
Git repository.
Parameters
----------
filepath : `str`
Absolute or repository-relative path for a file.
repo_path : `str`, optional
Path to the Git repository. Leave as `None` to use the current working
directory or if a ``repo`` argument is provided.
repo : `git.Repo`, optional
A `git.Repo` instance.
Returns
-------
commit_timestamp : `datetime.datetime`
The datetime of the most recent commit to the given file.
Raises
------
IOError
Raised if the ``filepath`` does not exist in the Git repository.
"""
logger = logging.getLogger(__name__)
if repo is None:
repo = git.repo.base.Repo(path=repo_path,
search_parent_directories=True)
repo_path = repo.working_tree_dir
head_commit = repo.head.commit
# filepath relative to the repo path
logger.debug('Using Git repo at %r', repo_path)
filepath = os.path.relpath(
os.path.abspath(filepath),
start=repo_path)
logger.debug('Repo-relative filepath is %r', filepath)
# Most recent commit datetime of the given file.
# Don't use head_commit.iter_parents because then it skips the
# commit of a file that's added but never modified.
for commit in head_commit.iter_items(repo,
head_commit,
[filepath],
skip=0):
return commit.committed_datetime
# Only get here if git could not find the file path in the history
raise IOError('File {} not found'.format(filepath)) | Obtain the timestamp for the most recent commit to a given file in a
Git repository.
Parameters
----------
filepath : `str`
Absolute or repository-relative path for a file.
repo_path : `str`, optional
Path to the Git repository. Leave as `None` to use the current working
directory or if a ``repo`` argument is provided.
repo : `git.Repo`, optional
A `git.Repo` instance.
Returns
-------
commit_timestamp : `datetime.datetime`
The datetime of the most recent commit to the given file.
Raises
------
IOError
Raised if the ``filepath`` does not exist in the Git repository. | Below is the instruction that describes the task:
### Input:
Obtain the timestamp for the most recent commit to a given file in a
Git repository.
Parameters
----------
filepath : `str`
Absolute or repository-relative path for a file.
repo_path : `str`, optional
Path to the Git repository. Leave as `None` to use the current working
directory or if a ``repo`` argument is provided.
repo : `git.Repo`, optional
A `git.Repo` instance.
Returns
-------
commit_timestamp : `datetime.datetime`
The datetime of the most recent commit to the given file.
Raises
------
IOError
Raised if the ``filepath`` does not exist in the Git repository.
### Response:
def read_git_commit_timestamp_for_file(filepath, repo_path=None, repo=None):
"""Obtain the timestamp for the most recent commit to a given file in a
Git repository.
Parameters
----------
filepath : `str`
Absolute or repository-relative path for a file.
repo_path : `str`, optional
Path to the Git repository. Leave as `None` to use the current working
directory or if a ``repo`` argument is provided.
repo : `git.Repo`, optional
A `git.Repo` instance.
Returns
-------
commit_timestamp : `datetime.datetime`
The datetime of the most recent commit to the given file.
Raises
------
IOError
Raised if the ``filepath`` does not exist in the Git repository.
"""
logger = logging.getLogger(__name__)
if repo is None:
repo = git.repo.base.Repo(path=repo_path,
search_parent_directories=True)
repo_path = repo.working_tree_dir
head_commit = repo.head.commit
# filepath relative to the repo path
logger.debug('Using Git repo at %r', repo_path)
filepath = os.path.relpath(
os.path.abspath(filepath),
start=repo_path)
logger.debug('Repo-relative filepath is %r', filepath)
# Most recent commit datetime of the given file.
# Don't use head_commit.iter_parents because then it skips the
# commit of a file that's added but never modified.
for commit in head_commit.iter_items(repo,
head_commit,
[filepath],
skip=0):
return commit.committed_datetime
# Only get here if git could not find the file path in the history
raise IOError('File {} not found'.format(filepath)) |
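A minimal usage sketch for the row above; the repository path and file name are placeholders, and the function is assumed to be importable from wherever this module lives.

import logging

# Hypothetical call; 'docs/index.rst' must exist in the checkout at /path/to/repo.
logging.basicConfig(level=logging.DEBUG)
timestamp = read_git_commit_timestamp_for_file('docs/index.rst',
                                               repo_path='/path/to/repo')
print(timestamp.isoformat())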
def copy(self, deep=True, data=None):
"""Returns a copy of this array.
If `deep=True`, a deep copy is made of the data array.
Otherwise, a shallow copy is made, so each variable in the new
array's dataset is also a variable in this array's dataset.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array and its coordinates are loaded into memory
and copied onto the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored for all data variables,
and only used for coords.
Returns
-------
object : DataArray
New object with dimensions, attributes, coordinates, name,
encoding, and optionally data copied from original.
Examples
--------
Shallow versus deep copy
>>> array = xr.DataArray([1, 2, 3], dims='x',
... coords={'x': ['a', 'b', 'c']})
>>> array.copy()
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array_0 = array.copy(deep=False)
>>> array_0[0] = 7
>>> array_0
<xarray.DataArray (x: 3)>
array([7, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array
<xarray.DataArray (x: 3)>
array([7, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> array.copy(data=[0.1, 0.2, 0.3])
<xarray.DataArray (x: 3)>
array([ 0.1, 0.2, 0.3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
See also
--------
pandas.DataFrame.copy
"""
variable = self.variable.copy(deep=deep, data=data)
coords = OrderedDict((k, v.copy(deep=deep))
for k, v in self._coords.items())
return self._replace(variable, coords) | Returns a copy of this array.
If `deep=True`, a deep copy is made of the data array.
Otherwise, a shallow copy is made, so each variable in the new
array's dataset is also a variable in this array's dataset.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array and its coordinates are loaded into memory
and copied onto the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored for all data variables,
and only used for coords.
Returns
-------
object : DataArray
New object with dimensions, attributes, coordinates, name,
encoding, and optionally data copied from original.
Examples
--------
Shallow versus deep copy
>>> array = xr.DataArray([1, 2, 3], dims='x',
... coords={'x': ['a', 'b', 'c']})
>>> array.copy()
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array_0 = array.copy(deep=False)
>>> array_0[0] = 7
>>> array_0
<xarray.DataArray (x: 3)>
array([7, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array
<xarray.DataArray (x: 3)>
array([7, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> array.copy(data=[0.1, 0.2, 0.3])
<xarray.DataArray (x: 3)>
array([ 0.1, 0.2, 0.3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
See also
--------
pandas.DataFrame.copy | Below is the instruction that describes the task:
### Input:
Returns a copy of this array.
If `deep=True`, a deep copy is made of the data array.
Otherwise, a shallow copy is made, so each variable in the new
array's dataset is also a variable in this array's dataset.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array and its coordinates are loaded into memory
and copied onto the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored for all data variables,
and only used for coords.
Returns
-------
object : DataArray
New object with dimensions, attributes, coordinates, name,
encoding, and optionally data copied from original.
Examples
--------
Shallow versus deep copy
>>> array = xr.DataArray([1, 2, 3], dims='x',
... coords={'x': ['a', 'b', 'c']})
>>> array.copy()
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array_0 = array.copy(deep=False)
>>> array_0[0] = 7
>>> array_0
<xarray.DataArray (x: 3)>
array([7, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array
<xarray.DataArray (x: 3)>
array([7, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> array.copy(data=[0.1, 0.2, 0.3])
<xarray.DataArray (x: 3)>
array([ 0.1, 0.2, 0.3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
See also
--------
pandas.DataFrame.copy
### Response:
def copy(self, deep=True, data=None):
"""Returns a copy of this array.
If `deep=True`, a deep copy is made of the data array.
Otherwise, a shallow copy is made, so each variable in the new
array's dataset is also a variable in this array's dataset.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array and its coordinates are loaded into memory
and copied onto the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored for all data variables,
and only used for coords.
Returns
-------
object : DataArray
New object with dimensions, attributes, coordinates, name,
encoding, and optionally data copied from original.
Examples
--------
Shallow versus deep copy
>>> array = xr.DataArray([1, 2, 3], dims='x',
... coords={'x': ['a', 'b', 'c']})
>>> array.copy()
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array_0 = array.copy(deep=False)
>>> array_0[0] = 7
>>> array_0
<xarray.DataArray (x: 3)>
array([7, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array
<xarray.DataArray (x: 3)>
array([7, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> array.copy(data=[0.1, 0.2, 0.3])
<xarray.DataArray (x: 3)>
array([ 0.1, 0.2, 0.3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
>>> array
<xarray.DataArray (x: 3)>
array([1, 2, 3])
Coordinates:
* x (x) <U1 'a' 'b' 'c'
See also
--------
pandas.DataFrame.copy
"""
variable = self.variable.copy(deep=deep, data=data)
coords = OrderedDict((k, v.copy(deep=deep))
for k, v in self._coords.items())
return self._replace(variable, coords) |
def get_source(self, source, clean=False, callback=None):
"""
Download a file from a URL and return it wrapped in a row-generating accessor object.
:param spec: A SourceSpec that describes the source to fetch.
:param account_accessor: A callable to return the username and password to use for accessing FTP and S3 URLs.
:param clean: Delete files in cache and re-download.
:param callback: A callback, called while reading files in download. signature is f(read_len, total_len)
:return: a SourceFile object.
"""
from fs.zipfs import ZipOpenError
import os
from ambry_sources.sources import ( GoogleSource, CsvSource, TsvSource, FixedSource,
ExcelSource, PartitionSource, SourceError, DelayedOpen,
DelayedDownload, ShapefileSource, SocrataSource )
from ambry_sources import extract_file_from_zip
spec = source.spec
cache_fs = self.library.download_cache
account_accessor = self.library.account_accessor
# FIXME. urltype should be moved to reftype.
url_type = spec.get_urltype()
def do_download():
from ambry_sources.fetch import download
return download(spec.url, cache_fs, account_accessor, clean=clean,
logger=self.logger, callback=callback)
if url_type == 'file':
from fs.opener import fsopen
syspath = spec.url.replace('file://', '')
cache_path = syspath.strip('/')
cache_fs.makedir(os.path.dirname(cache_path), recursive=True, allow_recreate=True)
if os.path.isabs(syspath):
# FIXME! Probably should not be
with open(syspath) as f:
cache_fs.setcontents(cache_path, f)
else:
cache_fs.setcontents(cache_path, self.source_fs.getcontents(syspath))
elif url_type not in ('gs', 'socrata'): # FIXME. Need to clean up the logic for gs types.
try:
cache_path, download_time = do_download()
spec.download_time = download_time
except Exception as e:
from ambry_sources.exceptions import DownloadError
raise DownloadError("Failed to download {}; {}".format(spec.url, e))
else:
cache_path, download_time = None, None
if url_type == 'zip':
try:
fstor = extract_file_from_zip(cache_fs, cache_path, spec.url, spec.file)
except ZipOpenError:
# Try it again
cache_fs.remove(cache_path)
cache_path, spec.download_time = do_download()
fstor = extract_file_from_zip(cache_fs, cache_path, spec.url, spec.file)
file_type = spec.get_filetype(fstor.path)
elif url_type == 'gs':
fstor = get_gs(spec.url, spec.segment, account_accessor)
file_type = 'gs'
elif url_type == 'socrata':
spec.encoding = 'utf8'
spec.header_lines = [0]
spec.start_line = 1
url = SocrataSource.download_url(spec)
fstor = DelayedDownload(url, cache_fs)
file_type = 'socrata'
else:
fstor = DelayedOpen(cache_fs, cache_path, 'rb')
file_type = spec.get_filetype(fstor.path)
spec.filetype = file_type
TYPE_TO_SOURCE_MAP = {
'gs': GoogleSource,
'csv': CsvSource,
'tsv': TsvSource,
'fixed': FixedSource,
'txt': FixedSource,
'xls': ExcelSource,
'xlsx': ExcelSource,
'partition': PartitionSource,
'shape': ShapefileSource,
'socrata': SocrataSource
}
cls = TYPE_TO_SOURCE_MAP.get(file_type)
if cls is None:
raise SourceError(
"Failed to determine file type for source '{}'; unknown type '{}' "
.format(spec.name, file_type))
return cls(spec, fstor) | Download a file from a URL and return it wrapped in a row-generating accessor object.
:param spec: A SourceSpec that describes the source to fetch.
:param account_accessor: A callable to return the username and password to use for accessing FTP and S3 URLs.
:param clean: Delete files in cache and re-download.
:param callback: A callback, called while reading files in download. signature is f(read_len, total_len)
:return: a SourceFile object. | Below is the instruction that describes the task:
### Input:
Download a file from a URL and return it wrapped in a row-generating accessor object.
:param spec: A SourceSpec that describes the source to fetch.
:param account_accessor: A callable to return the username and password to use for accessing FTP and S3 URLs.
:param clean: Delete files in cache and re-download.
:param callback: A callback, called while reading files in download. signature is f(read_len, total_len)
:return: a SourceFile object.
### Response:
def get_source(self, source, clean=False, callback=None):
"""
Download a file from a URL and return it wrapped in a row-generating accessor object.
:param spec: A SourceSpec that describes the source to fetch.
:param account_accessor: A callable to return the username and password to use for accessing FTP and S3 URLs.
:param clean: Delete files in cache and re-download.
:param callback: A callback, called while reading files in download. signature is f(read_len, total_len)
:return: a SourceFile object.
"""
from fs.zipfs import ZipOpenError
import os
from ambry_sources.sources import ( GoogleSource, CsvSource, TsvSource, FixedSource,
ExcelSource, PartitionSource, SourceError, DelayedOpen,
DelayedDownload, ShapefileSource, SocrataSource )
from ambry_sources import extract_file_from_zip
spec = source.spec
cache_fs = self.library.download_cache
account_accessor = self.library.account_accessor
# FIXME. urltype should be moved to reftype.
url_type = spec.get_urltype()
def do_download():
from ambry_sources.fetch import download
return download(spec.url, cache_fs, account_accessor, clean=clean,
logger=self.logger, callback=callback)
if url_type == 'file':
from fs.opener import fsopen
syspath = spec.url.replace('file://', '')
cache_path = syspath.strip('/')
cache_fs.makedir(os.path.dirname(cache_path), recursive=True, allow_recreate=True)
if os.path.isabs(syspath):
# FIXME! Probably should not be
with open(syspath) as f:
cache_fs.setcontents(cache_path, f)
else:
cache_fs.setcontents(cache_path, self.source_fs.getcontents(syspath))
elif url_type not in ('gs', 'socrata'): # FIXME. Need to clean up the logic for gs types.
try:
cache_path, download_time = do_download()
spec.download_time = download_time
except Exception as e:
from ambry_sources.exceptions import DownloadError
raise DownloadError("Failed to download {}; {}".format(spec.url, e))
else:
cache_path, download_time = None, None
if url_type == 'zip':
try:
fstor = extract_file_from_zip(cache_fs, cache_path, spec.url, spec.file)
except ZipOpenError:
# Try it again
cache_fs.remove(cache_path)
cache_path, spec.download_time = do_download()
fstor = extract_file_from_zip(cache_fs, cache_path, spec.url, spec.file)
file_type = spec.get_filetype(fstor.path)
elif url_type == 'gs':
fstor = get_gs(spec.url, spec.segment, account_accessor)
file_type = 'gs'
elif url_type == 'socrata':
spec.encoding = 'utf8'
spec.header_lines = [0]
spec.start_line = 1
url = SocrataSource.download_url(spec)
fstor = DelayedDownload(url, cache_fs)
file_type = 'socrata'
else:
fstor = DelayedOpen(cache_fs, cache_path, 'rb')
file_type = spec.get_filetype(fstor.path)
spec.filetype = file_type
TYPE_TO_SOURCE_MAP = {
'gs': GoogleSource,
'csv': CsvSource,
'tsv': TsvSource,
'fixed': FixedSource,
'txt': FixedSource,
'xls': ExcelSource,
'xlsx': ExcelSource,
'partition': PartitionSource,
'shape': ShapefileSource,
'socrata': SocrataSource
}
cls = TYPE_TO_SOURCE_MAP.get(file_type)
if cls is None:
raise SourceError(
"Failed to determine file type for source '{}'; unknown type '{}' "
.format(spec.name, file_type))
return cls(spec, fstor) |
def stop_reactor():
"""Stop the reactor and join the reactor thread until it stops.
Call this function in teardown at the module or package level to
reset the twisted system after your tests. You *must* do this if
you mix tests using these tools and tests using twisted.trial.
"""
global _twisted_thread
def stop_reactor():
'''Helper for calling stop from within the thread.'''
reactor.stop()
reactor.callFromThread(stop_reactor)
reactor_thread.join()
for p in reactor.getDelayedCalls():
if p.active():
p.cancel()
_twisted_thread = None | Stop the reactor and join the reactor thread until it stops.
Call this function in teardown at the module or package level to
reset the twisted system after your tests. You *must* do this if
you mix tests using these tools and tests using twisted.trial. | Below is the instruction that describes the task:
### Input:
Stop the reactor and join the reactor thread until it stops.
Call this function in teardown at the module or package level to
reset the twisted system after your tests. You *must* do this if
you mix tests using these tools and tests using twisted.trial.
### Response:
def stop_reactor():
"""Stop the reactor and join the reactor thread until it stops.
Call this function in teardown at the module or package level to
reset the twisted system after your tests. You *must* do this if
you mix tests using these tools and tests using twisted.trial.
"""
global _twisted_thread
def stop_reactor():
'''Helper for calling stop from within the thread.'''
reactor.stop()
reactor.callFromThread(stop_reactor)
reactor_thread.join()
for p in reactor.getDelayedCalls():
if p.active():
p.cancel()
_twisted_thread = None |
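As the docstring says, this helper belongs in a module- or package-level teardown; a minimal sketch follows (the tests themselves are omitted, and the threaded reactor is assumed to have been started earlier by the matching setup helpers).

def teardown_module():
    # Runs once after all tests in this module; stops the background reactor thread.
    stop_reactor()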
def items(self):
"""D.items() -> a set-like object providing a view on D's items"""
keycol = self._keycol
for row in self.__iter__():
yield (row[keycol], dict(row)) | D.items() -> a set-like object providing a view on D's items | Below is the instruction that describes the task:
### Input:
D.items() -> a set-like object providing a view on D's items
### Response:
def items(self):
"""D.items() -> a set-like object providing a view on D's items"""
keycol = self._keycol
for row in self.__iter__():
yield (row[keycol], dict(row)) |
def yaml_force_unicode():
"""
Force pyyaml to return unicode values.
"""
#/
## modified from |http://stackoverflow.com/a/2967461|
if sys.version_info[0] == 2:
def construct_func(self, node):
return self.construct_scalar(node)
yaml.Loader.add_constructor(U('tag:yaml.org,2002:str'), construct_func)
yaml.SafeLoader.add_constructor(U('tag:yaml.org,2002:str'), construct_func) | Force pyyaml to return unicode values. | Below is the instruction that describes the task:
### Input:
Force pyyaml to return unicode values.
### Response:
def yaml_force_unicode():
"""
Force pyyaml to return unicode values.
"""
#/
## modified from |http://stackoverflow.com/a/2967461|
if sys.version_info[0] == 2:
def construct_func(self, node):
return self.construct_scalar(node)
yaml.Loader.add_constructor(U('tag:yaml.org,2002:str'), construct_func)
yaml.SafeLoader.add_constructor(U('tag:yaml.org,2002:str'), construct_func) |
def window_blackman(N, alpha=0.16):
r"""Blackman window
:param N: window length
.. math:: a_0 - a_1 \cos(\frac{2\pi n}{N-1}) +a_2 \cos(\frac{4\pi n }{N-1})
with
.. math::
a_0 = (1-\alpha)/2, a_1=0.5, a_2=\alpha/2 \rm{\;and\; \alpha}=0.16
When :math:`\alpha=0.16`, this is the unqualified Blackman window with
:math:`a_0=0.42` and :math:`a_2=0.08`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'blackman')
.. note:: Although Numpy implements a blackman window for :math:`\alpha=0.16`,
this implementation is valid for any :math:`\alpha`.
.. seealso:: numpy.blackman, :func:`create_window`, :class:`Window`
"""
a0 = (1. - alpha)/2.
a1 = 0.5
a2 = alpha/2.
if (N == 1):
win = array([1.])
else:
k = arange(0, N)/float(N-1.)
win = a0 - a1 * cos (2 * pi * k) + a2 * cos (4 * pi * k)
return win | r"""Blackman window
:param N: window length
.. math:: a_0 - a_1 \cos(\frac{2\pi n}{N-1}) +a_2 \cos(\frac{4\pi n }{N-1})
with
.. math::
a_0 = (1-\alpha)/2, a_1=0.5, a_2=\alpha/2 \rm{\;and\; \alpha}=0.16
When :math:`\alpha=0.16`, this is the unqualified Blackman window with
:math:`a_0=0.42` and :math:`a_2=0.08`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'blackman')
.. note:: Although Numpy implements a blackman window for :math:`\alpha=0.16`,
this implementation is valid for any :math:`\alpha`.
.. seealso:: numpy.blackman, :func:`create_window`, :class:`Window` | Below is the instruction that describes the task:
### Input:
r"""Blackman window
:param N: window length
.. math:: a_0 - a_1 \cos(\frac{2\pi n}{N-1}) +a_2 \cos(\frac{4\pi n }{N-1})
with
.. math::
a_0 = (1-\alpha)/2, a_1=0.5, a_2=\alpha/2 \rm{\;and\; \alpha}=0.16
When :math:`\alpha=0.16`, this is the unqualified Blackman window with
:math:`a_0=0.42` and :math:`a_2=0.08`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'blackman')
.. note:: Although Numpy implements a blackman window for :math:`\alpha=0.16`,
this implementation is valid for any :math:`\alpha`.
.. seealso:: numpy.blackman, :func:`create_window`, :class:`Window`
### Response:
def window_blackman(N, alpha=0.16):
r"""Blackman window
:param N: window length
.. math:: a_0 - a_1 \cos(\frac{2\pi n}{N-1}) +a_2 \cos(\frac{4\pi n }{N-1})
with
.. math::
a_0 = (1-\alpha)/2, a_1=0.5, a_2=\alpha/2 \rm{\;and\; \alpha}=0.16
When :math:`\alpha=0.16`, this is the unqualified Blackman window with
:math:`a_0=0.42` and :math:`a_2=0.08`.
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'blackman')
.. note:: Although Numpy implements a blackman window for :math:`\alpha=0.16`,
this implementation is valid for any :math:`\alpha`.
.. seealso:: numpy.blackman, :func:`create_window`, :class:`Window`
"""
a0 = (1. - alpha)/2.
a1 = 0.5
a2 = alpha/2.
if (N == 1):
win = array([1.])
else:
k = arange(0, N)/float(N-1.)
win = a0 - a1 * cos (2 * pi * k) + a2 * cos (4 * pi * k)
return win |
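A quick numerical check of the docstring's note that the default alpha reproduces NumPy's Blackman window; this assumes window_blackman can be imported from the spectrum package, as the window_visu example in the docstring suggests.

import numpy as np
from spectrum import window_blackman

# alpha=0.16 gives a0=0.42, a1=0.5, a2=0.08 -- exactly numpy.blackman's coefficients.
assert np.allclose(window_blackman(64), np.blackman(64))

# Other alphas produce generalized Blackman windows that numpy.blackman cannot.
w_custom = window_blackman(64, alpha=0.25)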
def clean_cache(self, request):
"""
Remove all MenuItems from Cache.
"""
treenav.delete_cache()
self.message_user(request, _('Cache menuitem cache cleaned successfully.'))
info = self.model._meta.app_label, self.model._meta.model_name
changelist_url = reverse('admin:%s_%s_changelist' % info, current_app=self.admin_site.name)
return redirect(changelist_url) | Remove all MenuItems from Cache. | Below is the instruction that describes the task:
### Input:
Remove all MenuItems from Cache.
### Response:
def clean_cache(self, request):
"""
Remove all MenuItems from Cache.
"""
treenav.delete_cache()
self.message_user(request, _('Cache menuitem cache cleaned successfully.'))
info = self.model._meta.app_label, self.model._meta.model_name
changelist_url = reverse('admin:%s_%s_changelist' % info, current_app=self.admin_site.name)
return redirect(changelist_url) |
def _create_grammar_state(self,
world: WikiTablesWorld,
possible_actions: List[ProductionRule],
linking_scores: torch.Tensor,
entity_types: torch.Tensor) -> LambdaGrammarStatelet:
"""
This method creates the LambdaGrammarStatelet object that's used for decoding. Part of
creating that is creating the `valid_actions` dictionary, which contains embedded
representations of all of the valid actions. So, we create that here as well.
The way we represent the valid expansions is a little complicated: we use a
dictionary of `action types`, where the key is the action type (like "global", "linked", or
whatever your model is expecting), and the value is a tuple representing all actions of
that type. The tuple is (input tensor, output tensor, action id). The input tensor has
the representation that is used when `selecting` actions, for all actions of this type.
The output tensor has the representation that is used when feeding the action to the next
step of the decoder (this could just be the same as the input tensor). The action ids are
a list of indices into the main action list for each batch instance.
The inputs to this method are for a `single instance in the batch`; none of the tensors we
create here are batched. We grab the global action ids from the input
``ProductionRules``, and we use those to embed the valid actions for every
non-terminal type. We use the input ``linking_scores`` for non-global actions.
Parameters
----------
world : ``WikiTablesWorld``
From the input to ``forward`` for a single batch instance.
possible_actions : ``List[ProductionRule]``
From the input to ``forward`` for a single batch instance.
linking_scores : ``torch.Tensor``
Assumed to have shape ``(num_entities, num_question_tokens)`` (i.e., there is no batch
dimension).
entity_types : ``torch.Tensor``
Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension).
"""
# TODO(mattg): Move the "valid_actions" construction to another method.
action_map = {}
for action_index, action in enumerate(possible_actions):
action_string = action[0]
action_map[action_string] = action_index
entity_map = {}
for entity_index, entity in enumerate(world.table_graph.entities):
entity_map[entity] = entity_index
valid_actions = world.get_valid_actions()
translated_valid_actions: Dict[str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]] = {}
for key, action_strings in valid_actions.items():
translated_valid_actions[key] = {}
# `key` here is a non-terminal from the grammar, and `action_strings` are all the valid
# productions of that non-terminal. We'll first split those productions by global vs.
# linked action.
action_indices = [action_map[action_string] for action_string in action_strings]
production_rule_arrays = [(possible_actions[index], index) for index in action_indices]
global_actions = []
linked_actions = []
for production_rule_array, action_index in production_rule_arrays:
if production_rule_array[1]:
global_actions.append((production_rule_array[2], action_index))
else:
linked_actions.append((production_rule_array[0], action_index))
# Then we get the embedded representations of the global actions.
global_action_tensors, global_action_ids = zip(*global_actions)
global_action_tensor = torch.cat(global_action_tensors, dim=0)
global_input_embeddings = self._action_embedder(global_action_tensor)
if self._add_action_bias:
global_action_biases = self._action_biases(global_action_tensor)
global_input_embeddings = torch.cat([global_input_embeddings, global_action_biases], dim=-1)
global_output_embeddings = self._output_action_embedder(global_action_tensor)
translated_valid_actions[key]['global'] = (global_input_embeddings,
global_output_embeddings,
list(global_action_ids))
# Then the representations of the linked actions.
if linked_actions:
linked_rules, linked_action_ids = zip(*linked_actions)
entities = [rule.split(' -> ')[1] for rule in linked_rules]
entity_ids = [entity_map[entity] for entity in entities]
# (num_linked_actions, num_question_tokens)
entity_linking_scores = linking_scores[entity_ids]
# (num_linked_actions,)
entity_type_tensor = entity_types[entity_ids]
# (num_linked_actions, entity_type_embedding_dim)
entity_type_embeddings = self._entity_type_decoder_embedding(entity_type_tensor)
translated_valid_actions[key]['linked'] = (entity_linking_scores,
entity_type_embeddings,
list(linked_action_ids))
# Lastly, we need to also create embedded representations of context-specific actions. In
# this case, those are only variable productions, like "r -> x". Note that our language
# only permits one lambda at a time, so we don't need to worry about how nested lambdas
# might impact this.
context_actions = {}
for action_id, action in enumerate(possible_actions):
if action[0].endswith(" -> x"):
input_embedding = self._action_embedder(action[2])
if self._add_action_bias:
input_bias = self._action_biases(action[2])
input_embedding = torch.cat([input_embedding, input_bias], dim=-1)
output_embedding = self._output_action_embedder(action[2])
context_actions[action[0]] = (input_embedding, output_embedding, action_id)
return LambdaGrammarStatelet([START_SYMBOL],
{},
translated_valid_actions,
context_actions,
type_declaration.is_nonterminal) | This method creates the LambdaGrammarStatelet object that's used for decoding. Part of
creating that is creating the `valid_actions` dictionary, which contains embedded
representations of all of the valid actions. So, we create that here as well.
The way we represent the valid expansions is a little complicated: we use a
dictionary of `action types`, where the key is the action type (like "global", "linked", or
whatever your model is expecting), and the value is a tuple representing all actions of
that type. The tuple is (input tensor, output tensor, action id). The input tensor has
the representation that is used when `selecting` actions, for all actions of this type.
The output tensor has the representation that is used when feeding the action to the next
step of the decoder (this could just be the same as the input tensor). The action ids are
a list of indices into the main action list for each batch instance.
The inputs to this method are for a `single instance in the batch`; none of the tensors we
create here are batched. We grab the global action ids from the input
``ProductionRules``, and we use those to embed the valid actions for every
non-terminal type. We use the input ``linking_scores`` for non-global actions.
Parameters
----------
world : ``WikiTablesWorld``
From the input to ``forward`` for a single batch instance.
possible_actions : ``List[ProductionRule]``
From the input to ``forward`` for a single batch instance.
linking_scores : ``torch.Tensor``
Assumed to have shape ``(num_entities, num_question_tokens)`` (i.e., there is no batch
dimension).
entity_types : ``torch.Tensor``
Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension). | Below is the instruction that describes the task:
### Input:
This method creates the LambdaGrammarStatelet object that's used for decoding. Part of
creating that is creating the `valid_actions` dictionary, which contains embedded
representations of all of the valid actions. So, we create that here as well.
The way we represent the valid expansions is a little complicated: we use a
dictionary of `action types`, where the key is the action type (like "global", "linked", or
whatever your model is expecting), and the value is a tuple representing all actions of
that type. The tuple is (input tensor, output tensor, action id). The input tensor has
the representation that is used when `selecting` actions, for all actions of this type.
The output tensor has the representation that is used when feeding the action to the next
step of the decoder (this could just be the same as the input tensor). The action ids are
a list of indices into the main action list for each batch instance.
The inputs to this method are for a `single instance in the batch`; none of the tensors we
create here are batched. We grab the global action ids from the input
``ProductionRules``, and we use those to embed the valid actions for every
non-terminal type. We use the input ``linking_scores`` for non-global actions.
Parameters
----------
world : ``WikiTablesWorld``
From the input to ``forward`` for a single batch instance.
possible_actions : ``List[ProductionRule]``
From the input to ``forward`` for a single batch instance.
linking_scores : ``torch.Tensor``
Assumed to have shape ``(num_entities, num_question_tokens)`` (i.e., there is no batch
dimension).
entity_types : ``torch.Tensor``
Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension).
### Response:
def _create_grammar_state(self,
world: WikiTablesWorld,
possible_actions: List[ProductionRule],
linking_scores: torch.Tensor,
entity_types: torch.Tensor) -> LambdaGrammarStatelet:
"""
This method creates the LambdaGrammarStatelet object that's used for decoding. Part of
creating that is creating the `valid_actions` dictionary, which contains embedded
representations of all of the valid actions. So, we create that here as well.
The way we represent the valid expansions is a little complicated: we use a
dictionary of `action types`, where the key is the action type (like "global", "linked", or
whatever your model is expecting), and the value is a tuple representing all actions of
that type. The tuple is (input tensor, output tensor, action id). The input tensor has
the representation that is used when `selecting` actions, for all actions of this type.
The output tensor has the representation that is used when feeding the action to the next
step of the decoder (this could just be the same as the input tensor). The action ids are
a list of indices into the main action list for each batch instance.
The inputs to this method are for a `single instance in the batch`; none of the tensors we
create here are batched. We grab the global action ids from the input
``ProductionRules``, and we use those to embed the valid actions for every
non-terminal type. We use the input ``linking_scores`` for non-global actions.
Parameters
----------
world : ``WikiTablesWorld``
From the input to ``forward`` for a single batch instance.
possible_actions : ``List[ProductionRule]``
From the input to ``forward`` for a single batch instance.
linking_scores : ``torch.Tensor``
Assumed to have shape ``(num_entities, num_question_tokens)`` (i.e., there is no batch
dimension).
entity_types : ``torch.Tensor``
Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension).
"""
# TODO(mattg): Move the "valid_actions" construction to another method.
action_map = {}
for action_index, action in enumerate(possible_actions):
action_string = action[0]
action_map[action_string] = action_index
entity_map = {}
for entity_index, entity in enumerate(world.table_graph.entities):
entity_map[entity] = entity_index
valid_actions = world.get_valid_actions()
translated_valid_actions: Dict[str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]] = {}
for key, action_strings in valid_actions.items():
translated_valid_actions[key] = {}
# `key` here is a non-terminal from the grammar, and `action_strings` are all the valid
# productions of that non-terminal. We'll first split those productions by global vs.
# linked action.
action_indices = [action_map[action_string] for action_string in action_strings]
production_rule_arrays = [(possible_actions[index], index) for index in action_indices]
global_actions = []
linked_actions = []
for production_rule_array, action_index in production_rule_arrays:
if production_rule_array[1]:
global_actions.append((production_rule_array[2], action_index))
else:
linked_actions.append((production_rule_array[0], action_index))
# Then we get the embedded representations of the global actions.
global_action_tensors, global_action_ids = zip(*global_actions)
global_action_tensor = torch.cat(global_action_tensors, dim=0)
global_input_embeddings = self._action_embedder(global_action_tensor)
if self._add_action_bias:
global_action_biases = self._action_biases(global_action_tensor)
global_input_embeddings = torch.cat([global_input_embeddings, global_action_biases], dim=-1)
global_output_embeddings = self._output_action_embedder(global_action_tensor)
translated_valid_actions[key]['global'] = (global_input_embeddings,
global_output_embeddings,
list(global_action_ids))
# Then the representations of the linked actions.
if linked_actions:
linked_rules, linked_action_ids = zip(*linked_actions)
entities = [rule.split(' -> ')[1] for rule in linked_rules]
entity_ids = [entity_map[entity] for entity in entities]
# (num_linked_actions, num_question_tokens)
entity_linking_scores = linking_scores[entity_ids]
# (num_linked_actions,)
entity_type_tensor = entity_types[entity_ids]
# (num_linked_actions, entity_type_embedding_dim)
entity_type_embeddings = self._entity_type_decoder_embedding(entity_type_tensor)
translated_valid_actions[key]['linked'] = (entity_linking_scores,
entity_type_embeddings,
list(linked_action_ids))
# Lastly, we need to also create embedded representations of context-specific actions. In
# this case, those are only variable productions, like "r -> x". Note that our language
# only permits one lambda at a time, so we don't need to worry about how nested lambdas
# might impact this.
context_actions = {}
for action_id, action in enumerate(possible_actions):
if action[0].endswith(" -> x"):
input_embedding = self._action_embedder(action[2])
if self._add_action_bias:
input_bias = self._action_biases(action[2])
input_embedding = torch.cat([input_embedding, input_bias], dim=-1)
output_embedding = self._output_action_embedder(action[2])
context_actions[action[0]] = (input_embedding, output_embedding, action_id)
return LambdaGrammarStatelet([START_SYMBOL],
{},
translated_valid_actions,
context_actions,
type_declaration.is_nonterminal) |
def closenessScores(self, expValues, actValues, fractional=True,):
""" See the function description in base.py
kwargs will have the keyword "fractional", which is ignored by this encoder
"""
expValue = expValues[0]
actValue = actValues[0]
if expValue == actValue:
closeness = 1.0
else:
closeness = 0.0
if not fractional:
closeness = 1.0 - closeness
return numpy.array([closeness]) | See the function description in base.py
kwargs will have the keyword "fractional", which is ignored by this encoder | Below is the the instruction that describes the task:
### Input:
See the function description in base.py
kwargs will have the keyword "fractional", which is ignored by this encoder
### Response:
def closenessScores(self, expValues, actValues, fractional=True,):
""" See the function description in base.py
kwargs will have the keyword "fractional", which is ignored by this encoder
"""
expValue = expValues[0]
actValue = actValues[0]
if expValue == actValue:
closeness = 1.0
else:
closeness = 0.0
if not fractional:
closeness = 1.0 - closeness
return numpy.array([closeness]) |
def log_status(self):
'''show download status'''
if self.download_filename is None:
print("No download")
return
dt = time.time() - self.download_start
speed = os.path.getsize(self.download_filename) / (1000.0 * dt)
m = self.entries.get(self.download_lognum, None)
if m is None:
size = 0
else:
size = m.size
highest = max(self.download_set)
diff = set(range(highest)).difference(self.download_set)
print("Downloading %s - %u/%u bytes %.1f kbyte/s (%u retries %u missing)" % (self.download_filename,
os.path.getsize(self.download_filename),
size,
speed,
self.retries,
len(diff))) | show download status | Below is the instruction that describes the task:
### Input:
show download status
### Response:
def log_status(self):
'''show download status'''
if self.download_filename is None:
print("No download")
return
dt = time.time() - self.download_start
speed = os.path.getsize(self.download_filename) / (1000.0 * dt)
m = self.entries.get(self.download_lognum, None)
if m is None:
size = 0
else:
size = m.size
highest = max(self.download_set)
diff = set(range(highest)).difference(self.download_set)
print("Downloading %s - %u/%u bytes %.1f kbyte/s (%u retries %u missing)" % (self.download_filename,
os.path.getsize(self.download_filename),
size,
speed,
self.retries,
len(diff))) |
def metric(self, measurement_name, values, tags=None, timestamp=None):
"""
Append global tags configured for the client to the tags given then
converts the data into InfluxDB Line protocol and sends to to socket
"""
if not measurement_name or values in (None, {}):
# Don't try to send empty data
return
tags = tags or {}
# Do a shallow merge of the metric tags and global tags
all_tags = dict(self.tags, **tags)
# Create a metric line from the input and then send it to socket
line = Line(measurement_name, values, all_tags, timestamp)
self.send(line.to_line_protocol()) | Append global tags configured for the client to the tags given, then
convert the data into InfluxDB Line protocol and send it to the socket | Below is the instruction that describes the task:
### Input:
Append global tags configured for the client to the tags given, then
convert the data into InfluxDB Line protocol and send it to the socket
### Response:
def metric(self, measurement_name, values, tags=None, timestamp=None):
"""
Append global tags configured for the client to the tags given then
converts the data into InfluxDB Line protocol and sends to to socket
"""
if not measurement_name or values in (None, {}):
# Don't try to send empty data
return
tags = tags or {}
# Do a shallow merge of the metric tags and global tags
all_tags = dict(self.tags, **tags)
# Create a metric line from the input and then send it to socket
line = Line(measurement_name, values, all_tags, timestamp)
self.send(line.to_line_protocol()) |
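For readers unfamiliar with the wire format the docstring refers to, here is a hand-rolled illustration of a single InfluxDB line-protocol line; the real Line class handles escaping and type suffixes, which this sketch deliberately ignores.

# <measurement>,<tag_key>=<tag_value>,... <field_key>=<field_value>,... <timestamp>
measurement = 'cpu_load'
tags = {'host': 'server01', 'region': 'eu-west'}
values = {'value': 0.64}
timestamp = 1546300800000000000  # nanoseconds since the epoch

tag_str = ','.join('%s=%s' % (k, v) for k, v in sorted(tags.items()))
field_str = ','.join('%s=%s' % (k, v) for k, v in sorted(values.items()))
line = '%s,%s %s %d' % (measurement, tag_str, field_str, timestamp)
print(line)  # cpu_load,host=server01,region=eu-west value=0.64 1546300800000000000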
def shutdown_server(self):
"""Shut down server if it is alive."""
self.log.debug('shutdown_server: in')
if self.ensime and self.toggle_teardown:
self.ensime.stop() | Shut down server if it is alive. | Below is the instruction that describes the task:
### Input:
Shut down server if it is alive.
### Response:
def shutdown_server(self):
"""Shut down server if it is alive."""
self.log.debug('shutdown_server: in')
if self.ensime and self.toggle_teardown:
self.ensime.stop() |
def make_sequence(content, error=None, version=None, mode=None, mask=None,
encoding=None, boost_error=True, symbol_count=None):
"""\
Creates a sequence of QR Codes.
If the content fits into one QR Code and neither ``version`` nor
``symbol_count`` is provided, this function may return a sequence with
one QR Code which does not use the Structured Append mode. Otherwise a
sequence of 2 .. n (max. n = 16) QR Codes is returned which use the
Structured Append mode.
The Structured Append mode allows to split the content over a number
(max. 16) QR Codes.
The Structured Append mode isn't available for Micro QR Codes, therefore
the returned sequence contains QR Codes only.
Since this function returns an iterable object, it may be used as follows:
.. code-block:: python
for i, qrcode in enumerate(segno.make_sequence(data, symbol_count=2)):
qrcode.save('seq-%d.svg' % i, scale=10, color='darkblue')
The returned number of QR Codes is determined by the `version` or
`symbol_count` parameter
See :py:func:`make` for a description of the other parameters.
:param int symbol_count: Number of symbols.
:rtype: QRCodeSequence
"""
return QRCodeSequence(map(QRCode,
encoder.encode_sequence(content, error=error,
version=version,
mode=mode, mask=mask,
encoding=encoding,
boost_error=boost_error,
symbol_count=symbol_count))) | \
Creates a sequence of QR Codes.
If the content fits into one QR Code and neither ``version`` nor
``symbol_count`` is provided, this function may return a sequence with
one QR Code which does not use the Structured Append mode. Otherwise a
sequence of 2 .. n (max. n = 16) QR Codes is returned which use the
Structured Append mode.
The Structured Append mode allows to split the content over a number
(max. 16) QR Codes.
The Structured Append mode isn't available for Micro QR Codes, therefore
the returned sequence contains QR Codes only.
Since this function returns an iterable object, it may be used as follows:
.. code-block:: python
for i, qrcode in enumerate(segno.make_sequence(data, symbol_count=2)):
qrcode.save('seq-%d.svg' % i, scale=10, color='darkblue')
The returned number of QR Codes is determined by the `version` or
`symbol_count` parameter
See :py:func:`make` for a description of the other parameters.
:param int symbol_count: Number of symbols.
:rtype: QRCodeSequence | Below is the instruction that describes the task:
### Input:
\
Creates a sequence of QR Codes.
If the content fits into one QR Code and neither ``version`` nor
``symbol_count`` is provided, this function may return a sequence with
one QR Code which does not use the Structured Append mode. Otherwise a
sequence of 2 .. n (max. n = 16) QR Codes is returned which use the
Structured Append mode.
The Structured Append mode allows to split the content over a number
(max. 16) QR Codes.
The Structured Append mode isn't available for Micro QR Codes, therefore
the returned sequence contains QR Codes only.
Since this function returns an iterable object, it may be used as follows:
.. code-block:: python
for i, qrcode in enumerate(segno.make_sequence(data, symbol_count=2)):
qrcode.save('seq-%d.svg' % i, scale=10, color='darkblue')
The returned number of QR Codes is determined by the `version` or
`symbol_count` parameter
See :py:func:`make` for a description of the other parameters.
:param int symbol_count: Number of symbols.
:rtype: QRCodeSequence
### Response:
def make_sequence(content, error=None, version=None, mode=None, mask=None,
encoding=None, boost_error=True, symbol_count=None):
"""\
Creates a sequence of QR Codes.
If the content fits into one QR Code and neither ``version`` nor
``symbol_count`` is provided, this function may return a sequence with
one QR Code which does not use the Structured Append mode. Otherwise a
sequence of 2 .. n (max. n = 16) QR Codes is returned which use the
Structured Append mode.
The Structured Append mode allows to split the content over a number
(max. 16) QR Codes.
The Structured Append mode isn't available for Micro QR Codes, therefore
the returned sequence contains QR Codes only.
Since this function returns an iterable object, it may be used as follows:
.. code-block:: python
for i, qrcode in enumerate(segno.make_sequence(data, symbol_count=2)):
qrcode.save('seq-%d.svg' % i, scale=10, color='darkblue')
The returned number of QR Codes is determined by the `version` or
`symbol_count` parameter
See :py:func:`make` for a description of the other parameters.
:param int symbol_count: Number of symbols.
:rtype: QRCodeSequence
"""
return QRCodeSequence(map(QRCode,
encoder.encode_sequence(content, error=error,
version=version,
mode=mode, mask=mask,
encoding=encoding,
boost_error=boost_error,
symbol_count=symbol_count))) |
def adsSyncWriteReqEx(port, address, index_group, index_offset, value, plc_data_type):
# type: (int, AmsAddr, int, int, Any, Type) -> None
"""Send data synchronous to an ADS-device.
:param int port: local AMS port as returned by adsPortOpenEx()
:param pyads.structs.AmsAddr address: local or remote AmsAddr
:param int indexGroup: PLC storage area, according to the INDEXGROUP
constants
:param int index_offset: PLC storage address
:param value: value to write to the storage address of the PLC
:param int plc_data_type: type of the data given to the PLC,
according to PLCTYPE constants
"""
sync_write_request = _adsDLL.AdsSyncWriteReqEx
ams_address_pointer = ctypes.pointer(address.amsAddrStruct())
index_group_c = ctypes.c_ulong(index_group)
index_offset_c = ctypes.c_ulong(index_offset)
if plc_data_type == PLCTYPE_STRING:
data = ctypes.c_char_p(value.encode("utf-8"))
data_pointer = data # type: Union[ctypes.c_char_p, ctypes.pointer]
data_length = len(data_pointer.value) + 1 # type: ignore
else:
if type(plc_data_type).__name__ == "PyCArrayType":
data = plc_data_type(*value)
else:
data = plc_data_type(value)
data_pointer = ctypes.pointer(data)
data_length = ctypes.sizeof(data)
error_code = sync_write_request(
port,
ams_address_pointer,
index_group_c,
index_offset_c,
data_length,
data_pointer,
)
if error_code:
raise ADSError(error_code) | Send data synchronous to an ADS-device.
:param int port: local AMS port as returned by adsPortOpenEx()
:param pyads.structs.AmsAddr address: local or remote AmsAddr
:param int indexGroup: PLC storage area, according to the INDEXGROUP
constants
:param int index_offset: PLC storage address
:param value: value to write to the storage address of the PLC
:param int plc_data_type: type of the data given to the PLC,
according to PLCTYPE constants | Below is the instruction that describes the task:
### Input:
Send data synchronous to an ADS-device.
:param int port: local AMS port as returned by adsPortOpenEx()
:param pyads.structs.AmsAddr address: local or remote AmsAddr
:param int indexGroup: PLC storage area, according to the INDEXGROUP
constants
:param int index_offset: PLC storage address
:param value: value to write to the storage address of the PLC
:param int plc_data_type: type of the data given to the PLC,
according to PLCTYPE constants
### Response:
def adsSyncWriteReqEx(port, address, index_group, index_offset, value, plc_data_type):
# type: (int, AmsAddr, int, int, Any, Type) -> None
"""Send data synchronous to an ADS-device.
:param int port: local AMS port as returned by adsPortOpenEx()
:param pyads.structs.AmsAddr address: local or remote AmsAddr
:param int indexGroup: PLC storage area, according to the INDEXGROUP
constants
:param int index_offset: PLC storage address
:param value: value to write to the storage address of the PLC
:param int plc_data_type: type of the data given to the PLC,
according to PLCTYPE constants
"""
sync_write_request = _adsDLL.AdsSyncWriteReqEx
ams_address_pointer = ctypes.pointer(address.amsAddrStruct())
index_group_c = ctypes.c_ulong(index_group)
index_offset_c = ctypes.c_ulong(index_offset)
if plc_data_type == PLCTYPE_STRING:
data = ctypes.c_char_p(value.encode("utf-8"))
data_pointer = data # type: Union[ctypes.c_char_p, ctypes.pointer]
data_length = len(data_pointer.value) + 1 # type: ignore
else:
if type(plc_data_type).__name__ == "PyCArrayType":
data = plc_data_type(*value)
else:
data = plc_data_type(value)
data_pointer = ctypes.pointer(data)
data_length = ctypes.sizeof(data)
error_code = sync_write_request(
port,
ams_address_pointer,
index_group_c,
index_offset_c,
data_length,
data_pointer,
)
if error_code:
raise ADSError(error_code) |
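A hypothetical end-to-end sketch of calling the function above; the AMS net id, AMS port and index group/offset values are placeholders, and the companion helpers (adsPortCloseEx, AmsAddr, PLCTYPE_INT) are assumed to be importable from the same package as adsPortOpenEx.

port = adsPortOpenEx()
address = AmsAddr('192.168.0.10.1.1', 851)
try:
    # Write the INT value 42 to index group 0x4020, offset 0 on the target PLC.
    adsSyncWriteReqEx(port, address, 0x4020, 0, 42, PLCTYPE_INT)
finally:
    adsPortCloseEx(port)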
def format_usage(program_name, description, commands=None, options=()):
"""
Construct the usage text.
Parameters
----------
program_name : str
Usually the name of the python file that contains the experiment.
description : str
description of this experiment (usually the docstring).
commands : dict[str, func]
Dictionary of supported commands.
Each entry should be a tuple of (name, function).
options : list[sacred.commandline_options.CommandLineOption]
A list of all supported commandline options.
Returns
-------
str
The complete formatted usage text for this experiment.
It adheres to the structure required by ``docopt``.
"""
usage = USAGE_TEMPLATE.format(
program_name=cmd_quote(program_name),
description=description.strip() if description else '',
options=_format_options_usage(options),
arguments=_format_arguments_usage(options),
commands=_format_command_usage(commands)
)
return usage | Construct the usage text.
Parameters
----------
program_name : str
Usually the name of the python file that contains the experiment.
description : str
description of this experiment (usually the docstring).
commands : dict[str, func]
Dictionary of supported commands.
Each entry should be a tuple of (name, function).
options : list[sacred.commandline_options.CommandLineOption]
A list of all supported commandline options.
Returns
-------
str
The complete formatted usage text for this experiment.
It adheres to the structure required by ``docopt``. | Below is the instruction that describes the task:
### Input:
Construct the usage text.
Parameters
----------
program_name : str
Usually the name of the python file that contains the experiment.
description : str
description of this experiment (usually the docstring).
commands : dict[str, func]
Dictionary of supported commands.
Each entry should be a tuple of (name, function).
options : list[sacred.commandline_options.CommandLineOption]
A list of all supported commandline options.
Returns
-------
str
The complete formatted usage text for this experiment.
It adheres to the structure required by ``docopt``.
### Response:
def format_usage(program_name, description, commands=None, options=()):
"""
Construct the usage text.
Parameters
----------
program_name : str
Usually the name of the python file that contains the experiment.
description : str
description of this experiment (usually the docstring).
commands : dict[str, func]
Dictionary of supported commands.
Each entry should be a tuple of (name, function).
options : list[sacred.commandline_options.CommandLineOption]
A list of all supported commandline options.
Returns
-------
str
The complete formatted usage text for this experiment.
It adheres to the structure required by ``docopt``.
"""
usage = USAGE_TEMPLATE.format(
program_name=cmd_quote(program_name),
description=description.strip() if description else '',
options=_format_options_usage(options),
arguments=_format_arguments_usage(options),
commands=_format_command_usage(commands)
)
return usage |
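A minimal sketch of calling format_usage directly; the two command functions are placeholders standing in for real sacred commands, and an empty options sequence keeps the generated usage text short.

def train():
    """Train the model."""

def evaluate():
    """Evaluate a trained model."""

usage_text = format_usage('my_experiment.py',
                          'Demo experiment.',
                          commands={'train': train, 'evaluate': evaluate},
                          options=())
print(usage_text)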
def Copy(self,
old_urn,
new_urn,
age=NEWEST_TIME,
limit=None,
update_timestamps=False):
"""Make a copy of one AFF4 object to a different URN."""
new_urn = rdfvalue.RDFURN(new_urn)
if update_timestamps and age != NEWEST_TIME:
raise ValueError(
"Can't update timestamps unless reading the latest version.")
values = {}
for predicate, value, ts in data_store.DB.ResolvePrefix(
old_urn,
AFF4_PREFIXES,
timestamp=self.ParseAgeSpecification(age),
limit=limit):
if update_timestamps:
values.setdefault(predicate, []).append((value, None))
else:
values.setdefault(predicate, []).append((value, ts))
if values:
with data_store.DB.GetMutationPool() as pool:
pool.MultiSet(new_urn, values, replace=False)
        self._UpdateChildIndex(new_urn, pool) | Make a copy of one AFF4 object to a different URN. | Below is the instruction that describes the task:
### Input:
Make a copy of one AFF4 object to a different URN.
### Response:
def Copy(self,
old_urn,
new_urn,
age=NEWEST_TIME,
limit=None,
update_timestamps=False):
"""Make a copy of one AFF4 object to a different URN."""
new_urn = rdfvalue.RDFURN(new_urn)
if update_timestamps and age != NEWEST_TIME:
raise ValueError(
"Can't update timestamps unless reading the latest version.")
values = {}
for predicate, value, ts in data_store.DB.ResolvePrefix(
old_urn,
AFF4_PREFIXES,
timestamp=self.ParseAgeSpecification(age),
limit=limit):
if update_timestamps:
values.setdefault(predicate, []).append((value, None))
else:
values.setdefault(predicate, []).append((value, ts))
if values:
with data_store.DB.GetMutationPool() as pool:
pool.MultiSet(new_urn, values, replace=False)
self._UpdateChildIndex(new_urn, pool) |
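A hedged usage sketch (illustrative only: factory stands for whatever object exposes the Copy method above, and the URNs are placeholders):

src = rdfvalue.RDFURN("aff4:/tmp/source_object")
factory.Copy(src, "aff4:/tmp/copied_object", update_timestamps=True)
# Copies the newest-version attributes of src under the new URN and refreshes the
# destination's child index; any age other than NEWEST_TIME combined with
# update_timestamps=True raises ValueError, as guarded above.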
def uploader(func):
"""This method only used for CKEditor under version 4.5, in newer version,
you should use ``upload_success()`` and ``upload_fail()`` instead.
Decorated the view function that handle the file upload. The upload
view must return the uploaded image's url. For example::
from flask import send_from_directory
app.config['CKEDITOR_FILE_UPLOADER'] = 'upload' # this value can be endpoint or url
@app.route('/files/<filename>')
def uploaded_files(filename):
path = '/the/uploaded/directory'
return send_from_directory(path, filename)
@app.route('/upload', methods=['POST'])
@ckeditor.uploader
def upload():
f = request.files.get('upload')
f.save(os.path.join('/the/uploaded/directory', f.filename))
url = url_for('uploaded_files', filename=f.filename)
return url
.. versionadded:: 0.3
"""
@wraps(func)
def wrapper(*args, **kwargs):
func_num = request.args.get('CKEditorFuncNum')
# ckeditor = request.args.get('CKEditor')
# language code used for error message, not used yet.
# lang_code = request.args.get('langCode')
# the error message to display when upload failed.
message = current_app.config['CKEDITOR_UPLOAD_ERROR_MESSAGE']
url = func(*args, **kwargs)
return Markup('''<script type="text/javascript">
window.parent.CKEDITOR.tools.callFunction(%s, "%s", "%s");</script>''' % (func_num, url, message))
    return wrapper | This method is only used for CKEditor under version 4.5; in newer versions,
    you should use ``upload_success()`` and ``upload_fail()`` instead.
    Decorate the view function that handles the file upload. The upload
    view must return the uploaded image's URL. For example::
from flask import send_from_directory
app.config['CKEDITOR_FILE_UPLOADER'] = 'upload' # this value can be endpoint or url
@app.route('/files/<filename>')
def uploaded_files(filename):
path = '/the/uploaded/directory'
return send_from_directory(path, filename)
@app.route('/upload', methods=['POST'])
@ckeditor.uploader
def upload():
f = request.files.get('upload')
f.save(os.path.join('/the/uploaded/directory', f.filename))
url = url_for('uploaded_files', filename=f.filename)
return url
    .. versionadded:: 0.3 | Below is the instruction that describes the task:
### Input:
This method is only used for CKEditor under version 4.5; in newer versions,
you should use ``upload_success()`` and ``upload_fail()`` instead.
Decorate the view function that handles the file upload. The upload
view must return the uploaded image's URL. For example::
from flask import send_from_directory
app.config['CKEDITOR_FILE_UPLOADER'] = 'upload' # this value can be endpoint or url
@app.route('/files/<filename>')
def uploaded_files(filename):
path = '/the/uploaded/directory'
return send_from_directory(path, filename)
@app.route('/upload', methods=['POST'])
@ckeditor.uploader
def upload():
f = request.files.get('upload')
f.save(os.path.join('/the/uploaded/directory', f.filename))
url = url_for('uploaded_files', filename=f.filename)
return url
.. versionadded:: 0.3
### Response:
def uploader(func):
"""This method only used for CKEditor under version 4.5, in newer version,
you should use ``upload_success()`` and ``upload_fail()`` instead.
Decorated the view function that handle the file upload. The upload
view must return the uploaded image's url. For example::
from flask import send_from_directory
app.config['CKEDITOR_FILE_UPLOADER'] = 'upload' # this value can be endpoint or url
@app.route('/files/<filename>')
def uploaded_files(filename):
path = '/the/uploaded/directory'
return send_from_directory(path, filename)
@app.route('/upload', methods=['POST'])
@ckeditor.uploader
def upload():
f = request.files.get('upload')
f.save(os.path.join('/the/uploaded/directory', f.filename))
url = url_for('uploaded_files', filename=f.filename)
return url
.. versionadded:: 0.3
"""
@wraps(func)
def wrapper(*args, **kwargs):
func_num = request.args.get('CKEditorFuncNum')
# ckeditor = request.args.get('CKEditor')
# language code used for error message, not used yet.
# lang_code = request.args.get('langCode')
# the error message to display when upload failed.
message = current_app.config['CKEDITOR_UPLOAD_ERROR_MESSAGE']
url = func(*args, **kwargs)
return Markup('''<script type="text/javascript">
window.parent.CKEDITOR.tools.callFunction(%s, "%s", "%s");</script>''' % (func_num, url, message))
return wrapper |
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.iterallitems():
params[key] = value
for key, value in self.forms.iterallitems():
params[key] = value
return params | A :class:`FormsDict` with the combined values of :attr:`query` and
            :attr:`forms`. File uploads are stored in :attr:`files`. | Below is the instruction that describes the task:
### Input:
A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`.
### Response:
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.iterallitems():
params[key] = value
for key, value in self.forms.iterallitems():
params[key] = value
return params |
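A hedged usage sketch in an ordinary Bottle route (the route and field names are made up for illustration); because params merges :attr:`query` and :attr:`forms`, the handler does not care whether the field arrived in the query string or in a POST body:

from bottle import Bottle, request

app = Bottle()

@app.route('/search', method=['GET', 'POST'])
def search():
    # 'q' may come from ?q=... or from a form field named q.
    query = request.params.get('q', '')
    return 'searching for %s' % query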
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
"""Converts a bytes string with python source code to unicode.
Unicode strings are passed through unchanged. Byte strings are checked
for the python source file encoding cookie to determine encoding.
txt can be either a bytes buffer or a string containing the source
code.
"""
if isinstance(txt, six.text_type):
return txt
if isinstance(txt, six.binary_type):
buffer = io.BytesIO(txt)
else:
buffer = txt
try:
encoding, _ = detect_encoding(buffer.readline)
except SyntaxError:
encoding = "ascii"
buffer.seek(0)
newline_decoder = io.IncrementalNewlineDecoder(None, True)
text = io.TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
text.mode = 'r'
if skip_encoding_cookie:
return u"".join(strip_encoding_cookie(text))
else:
return text.read() | Converts a bytes string with python source code to unicode.
Unicode strings are passed through unchanged. Byte strings are checked
for the python source file encoding cookie to determine encoding.
txt can be either a bytes buffer or a string containing the source
    code. | Below is the instruction that describes the task:
### Input:
Converts a bytes string with python source code to unicode.
Unicode strings are passed through unchanged. Byte strings are checked
for the python source file encoding cookie to determine encoding.
txt can be either a bytes buffer or a string containing the source
code.
### Response:
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
"""Converts a bytes string with python source code to unicode.
Unicode strings are passed through unchanged. Byte strings are checked
for the python source file encoding cookie to determine encoding.
txt can be either a bytes buffer or a string containing the source
code.
"""
if isinstance(txt, six.text_type):
return txt
if isinstance(txt, six.binary_type):
buffer = io.BytesIO(txt)
else:
buffer = txt
try:
encoding, _ = detect_encoding(buffer.readline)
except SyntaxError:
encoding = "ascii"
buffer.seek(0)
newline_decoder = io.IncrementalNewlineDecoder(None, True)
text = io.TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
text.mode = 'r'
if skip_encoding_cookie:
return u"".join(strip_encoding_cookie(text))
else:
return text.read() |
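A short, hedged usage sketch (the file name is a placeholder, and it assumes the helper above and its imports are available): read a source file as raw bytes and let the function honour any PEP 263 coding cookie before further processing:

with open('some_module.py', 'rb') as f:
    raw = f.read()

source = source_to_unicode(raw)                       # unicode text, cookie line stripped
code_obj = compile(source, 'some_module.py', 'exec')  # e.g. now safe to compile or display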
def _parse_mode(self, mode, allowed=None, single=False):
r"""
This private method is for checking the \'mode\' used in the calling
method.
Parameters
----------
mode : string or list of strings
The mode(s) to be parsed
allowed : list of strings
A list containing the allowed modes. This list is defined by the
calling method. If any of the received modes are not in the
allowed list an exception is raised.
single : boolean (default is False)
Indicates if only a single mode is allowed. If this argument is
            True then a string is returned rather than a list of strings, which
makes it easier to work with in the caller method.
Returns
-------
A list containing the received modes as strings, checked to ensure they
        are all within the allowed set (if provided). Also, if the ``single``
argument was True, then a string is returned.
"""
if type(mode) is str:
mode = [mode]
for item in mode:
if (allowed is not None) and (item not in allowed):
raise Exception('\'mode\' must be one of the following: ' +
allowed.__str__())
# Remove duplicates, if any
[mode.remove(L) for L in mode if mode.count(L) > 1]
if single:
if len(mode) > 1:
raise Exception('Multiple modes received when only one mode ' +
'allowed')
else:
mode = mode[0]
return mode | r"""
This private method is for checking the \'mode\' used in the calling
method.
Parameters
----------
mode : string or list of strings
The mode(s) to be parsed
allowed : list of strings
A list containing the allowed modes. This list is defined by the
calling method. If any of the received modes are not in the
allowed list an exception is raised.
single : boolean (default is False)
Indicates if only a single mode is allowed. If this argument is
        True then a string is returned rather than a list of strings, which
makes it easier to work with in the caller method.
Returns
-------
A list containing the received modes as strings, checked to ensure they
    are all within the allowed set (if provided). Also, if the ``single``
    argument was True, then a string is returned. | Below is the instruction that describes the task:
### Input:
r"""
This private method is for checking the \'mode\' used in the calling
method.
Parameters
----------
mode : string or list of strings
The mode(s) to be parsed
allowed : list of strings
A list containing the allowed modes. This list is defined by the
calling method. If any of the received modes are not in the
allowed list an exception is raised.
single : boolean (default is False)
Indicates if only a single mode is allowed. If this argument is
        True then a string is returned rather than a list of strings, which
makes it easier to work with in the caller method.
Returns
-------
A list containing the received modes as strings, checked to ensure they
    are all within the allowed set (if provided). Also, if the ``single``
argument was True, then a string is returned.
### Response:
def _parse_mode(self, mode, allowed=None, single=False):
r"""
This private method is for checking the \'mode\' used in the calling
method.
Parameters
----------
mode : string or list of strings
The mode(s) to be parsed
allowed : list of strings
A list containing the allowed modes. This list is defined by the
calling method. If any of the received modes are not in the
allowed list an exception is raised.
single : boolean (default is False)
Indicates if only a single mode is allowed. If this argument is
            True then a string is returned rather than a list of strings, which
makes it easier to work with in the caller method.
Returns
-------
A list containing the received modes as strings, checked to ensure they
        are all within the allowed set (if provided). Also, if the ``single``
argument was True, then a string is returned.
"""
if type(mode) is str:
mode = [mode]
for item in mode:
if (allowed is not None) and (item not in allowed):
raise Exception('\'mode\' must be one of the following: ' +
allowed.__str__())
# Remove duplicates, if any
[mode.remove(L) for L in mode if mode.count(L) > 1]
if single:
if len(mode) > 1:
raise Exception('Multiple modes received when only one mode ' +
'allowed')
else:
mode = mode[0]
return mode |
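A hedged sketch of how a public method in the same class might lean on this helper (the method name, modes, and bodies below are illustrative, not taken from the library):

def drop_locations(self, mode='site'):
    # Validate and normalise the caller-supplied mode before acting on it.
    mode = self._parse_mode(mode, allowed=['site', 'bond'], single=True)
    if mode == 'site':
        pass  # operate on site locations
    else:
        pass  # operate on bond locations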
def _get_response(self, url, **params):
""" Giving a service path and optional specific arguments, returns
the response string.
"""
data = urlencode(params)
url = "%s?%s" % (url, data)
headers = {'User-Agent': self.get_random_agent()}
request = Request(url, headers=headers, method='GET')
def open_request(request, attempts, err=None):
if attempts > self.request_attempts:
raise
attempts += 1
try:
with urlopen(request, timeout=self.timeout) as response:
return response.read()
except HTTPError as err:
if err.getcode() < 500:
raise
print("HTTPError occurred while trying to request the url "
"%s. %s. Trying again in %s seconds..." % (url, err,
self.seconds_between_attempts))
time.sleep(self.seconds_between_attempts)
return open_request(request, attempts, err)
attempts = 0
self.last_response = open_request(request, attempts)
        return self.last_response | Given a service path and optional specific arguments, returns
        the response string. | Below is the instruction that describes the task:
### Input:
Given a service path and optional specific arguments, returns
the response string.
### Response:
def _get_response(self, url, **params):
""" Giving a service path and optional specific arguments, returns
the response string.
"""
data = urlencode(params)
url = "%s?%s" % (url, data)
headers = {'User-Agent': self.get_random_agent()}
request = Request(url, headers=headers, method='GET')
def open_request(request, attempts, err=None):
if attempts > self.request_attempts:
raise
attempts += 1
try:
with urlopen(request, timeout=self.timeout) as response:
return response.read()
except HTTPError as err:
if err.getcode() < 500:
raise
print("HTTPError occurred while trying to request the url "
"%s. %s. Trying again in %s seconds..." % (url, err,
self.seconds_between_attempts))
time.sleep(self.seconds_between_attempts)
return open_request(request, attempts, err)
attempts = 0
self.last_response = open_request(request, attempts)
return self.last_response |
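A hedged usage sketch (the endpoint and parameters are placeholders; client is assumed to be an instance of the class that defines _get_response, with request_attempts, seconds_between_attempts, timeout, and get_random_agent() set):

from urllib.parse import urlencode

# The helper appends urlencode(params) to the service URL before requesting it:
url = "%s?%s" % ('https://example.com/api/search', urlencode({'q': 'python', 'page': 1}))
# -> 'https://example.com/api/search?q=python&page=1'
# client._get_response('https://example.com/api/search', q='python', page=1) builds
# exactly this URL, retries on 5xx responses, and caches the raw bytes on
# client.last_response as well as returning them.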