def get_services_health(self) -> dict:
"""Get the health of all services.
Returns:
            dict mapping service name to health status ("Healthy" or "Unhealthy")
"""
# Initialise
services_health = {}
# Get Service IDs
services_ids = self._get_services()
for service_id in services_ids:
service_name = DC.get_service_name(service_id)
# Check if the current and actual replica levels are the same
if DC.get_replicas(service_id) != \
DC.get_actual_replica(service_id):
services_health[service_name] = "Unhealthy"
else:
services_health[service_name] = "Healthy"
    return services_health
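To make the behaviour concrete, here is a minimal sketch with a hypothetical stub standing in for the real `DC` client (the stub names and replica counts are invented for illustration):

class StubDC:
    """Hypothetical stand-in for the real DC client."""
    _services = {"svc-1": ("web", 3, 3), "svc-2": ("worker", 2, 1)}

    @classmethod
    def get_service_name(cls, service_id):
        return cls._services[service_id][0]

    @classmethod
    def get_replicas(cls, service_id):
        return cls._services[service_id][1]   # desired replica count

    @classmethod
    def get_actual_replica(cls, service_id):
        return cls._services[service_id][2]   # currently running replicas

# Running the loop above with DC = StubDC and service ids ["svc-1", "svc-2"]
# would yield: {"web": "Healthy", "worker": "Unhealthy"}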
def _bulk_history_create(self, model, batch_size):
"""Save a copy of all instances to the historical model.
    :param model: Model whose instances to bulk-create history for
    :param batch_size: number of instances to create per batch.
:return:
"""
instances = []
history = utils.get_history_manager_for_model(model)
if self.verbosity >= 1:
self.stdout.write(
"Starting bulk creating history models for {} instances {}-{}".format(
model, 0, batch_size
)
)
iterator_kwargs = (
{"chunk_size": batch_size} if django.VERSION >= (2, 0, 0) else {}
)
for index, instance in enumerate(
model._default_manager.iterator(**iterator_kwargs)
):
        # Can't just pass batch_size to bulk_create as this can lead to
# Out of Memory Errors as we load too many models into memory after
# creating them. So we only keep batch_size worth of models in
# historical_instances and clear them after we hit batch_size
if index % batch_size == 0:
history.bulk_history_create(instances, batch_size=batch_size)
instances = []
if self.verbosity >= 1:
self.stdout.write(
"Finished bulk creating history models for {} "
"instances {}-{}, starting next {}".format(
model, index - batch_size, index, batch_size
)
)
instances.append(instance)
# create any we didn't get in the last loop
if instances:
        history.bulk_history_create(instances, batch_size=batch_size)
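The modulo-flush pattern above generalises beyond Django; a minimal sketch (a hypothetical helper, not part of django-simple-history) that keeps at most batch_size items in memory:

def flush_in_batches(items, batch_size, flush):
    """Call flush(batch) every batch_size items instead of buffering everything."""
    batch = []
    for index, item in enumerate(items):
        if index % batch_size == 0 and batch:
            flush(batch)
            batch = []
        batch.append(item)
    if batch:  # flush the final partial batch
        flush(batch)

flush_in_batches(range(10), 4, lambda b: print(b))
# prints [0, 1, 2, 3], then [4, 5, 6, 7], then [8, 9]

Note the original flushes once with an empty list on the very first iteration; `bulk_history_create` tolerates that, so the extra `and batch` guard here is just a tidier equivalent.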
def spkopa(filename):
"""
Open an existing SPK file for subsequent write.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkopa_c.html
:param filename: The name of an existing SPK file.
:type filename: str
:return: A handle attached to the SPK file opened to append.
:rtype: int
"""
filename = stypes.stringToCharP(filename)
handle = ctypes.c_int()
libspice.spkopa_c(filename, ctypes.byref(handle))
    return handle.value
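Typical use, as a sketch: this assumes SpiceyPy with the CSPICE toolkit installed and an SPK file that already exists at the given path (the file name is a placeholder):

import spiceypy

handle = spiceypy.spkopa("ephemeris.spk")
try:
    pass  # append segments with the spkw* writer routines here
finally:
    spiceypy.spkcls(handle)  # always close the handle when done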
def modify_access(src, dst='any', port=None, proto=None, action='allow',
index=None):
"""
    Grant access to an address or subnet
    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destination of the connection; if the machine has multiple
                IPs and connections to only one of them should be accepted,
                this field has to be set.
    :param port: destination port
    :param proto: protocol (tcp or udp)
    :param action: `allow` or `delete`
    :param index: if not None, the rule is inserted at the given `index`.
"""
if not is_enabled():
hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
return
if action == 'delete':
cmd = ['ufw', 'delete', 'allow']
elif index is not None:
cmd = ['ufw', 'insert', str(index), action]
else:
cmd = ['ufw', action]
if src is not None:
cmd += ['from', src]
if dst is not None:
cmd += ['to', dst]
if port is not None:
cmd += ['port', str(port)]
if proto is not None:
cmd += ['proto', proto]
hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
    # capture stderr as well so the error branch below logs real output
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
hookenv.log(stdout, level='INFO')
if p.returncode != 0:
hookenv.log(stderr, level='ERROR')
hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
p.returncode),
                    level='ERROR')
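A usage sketch (assumes the charm environment where hookenv is configured and ufw is enabled; addresses and ports are placeholders):

# Allow TCP/22 from a subnet, inserting the rule at the top of the chain:
modify_access('192.168.1.0/24', dst='any', port=22, proto='tcp', index=1)

# Later, remove the equivalent rule again:
modify_access('192.168.1.0/24', dst='any', port=22, proto='tcp',
              action='delete')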
def respond(self,
content=EmptyValue,
content_type=EmptyValue,
always_hash_content=True,
ext=None):
"""
Respond to the request.
This generates the :attr:`mohawk.Receiver.response_header`
attribute.
:param content=EmptyValue: Byte string of response body that will be sent.
:type content=EmptyValue: str
:param content_type=EmptyValue: content-type header value for response.
:type content_type=EmptyValue: str
:param always_hash_content=True:
When True, ``content`` and ``content_type`` must be provided.
Read :ref:`skipping-content-checks` to learn more.
:type always_hash_content=True: bool
:param ext=None:
An external `Hawk`_ string. If not None, this value will be
signed so that the sender can trust it.
:type ext=None: str
.. _`Hawk`: https://github.com/hueniverse/hawk
"""
log.debug('generating response header')
resource = Resource(url=self.resource.url,
credentials=self.resource.credentials,
ext=ext,
app=self.parsed_header.get('app', None),
dlg=self.parsed_header.get('dlg', None),
method=self.resource.method,
content=content,
content_type=content_type,
always_hash_content=always_hash_content,
nonce=self.parsed_header['nonce'],
timestamp=self.parsed_header['ts'])
mac = calculate_mac('response', resource, resource.gen_content_hash())
self.response_header = self._make_header(resource, mac,
additional_keys=['ext'])
    return self.response_header
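In mohawk's flow this is called on a Receiver after it has authorized an incoming request; a sketch (the lookup function, header, and body values are placeholders):

receiver = Receiver(lookup_credentials, request_header,
                    url, method, content=request_body,
                    content_type=request_content_type)
# ... handle the request, then sign the response:
header = receiver.respond(content=response_body,
                          content_type='application/json')
# send `header` back to the client as the Server-Authorization header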
def printdir(self):
"""Print a table of contents for the zip file."""
print "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size) | Print a table of contents for the zip file. | Below is the the instruction that describes the task:
### Input:
Print a table of contents for the zip file.
### Response:
def printdir(self):
"""Print a table of contents for the zip file."""
print "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size) |
def addFeature(self, features,
gdbVersion=None,
rollbackOnFailure=True):
""" Adds a single feature to the service
Inputs:
feature - list of common.Feature object or a single
common.Feature Object, a FeatureSet object, or a
list of dictionary objects
gdbVersion - Geodatabase version to apply the edits
rollbackOnFailure - Optional parameter to specify if the
edits should be applied only if all
submitted edits succeed. If false, the
server will apply the edits that succeed
even if some of the submitted edits fail.
If true, the server will apply the edits
only if all edits succeed. The default
value is true.
Output:
JSON message as dictionary
"""
url = self._url + "/addFeatures"
params = {
"f" : "json"
}
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if isinstance(rollbackOnFailure, bool):
params['rollbackOnFailure'] = rollbackOnFailure
if isinstance(features, list) and \
len(features) > 0:
if isinstance(features[0], Feature):
params['features'] = json.dumps([feature.asDictionary for feature in features],
default=_date_handler)
elif isinstance(features[0], dict):
params['features'] = json.dumps(features,
default=_date_handler)
elif isinstance(features, Feature):
params['features'] = json.dumps([features.asDictionary],
default=_date_handler)
elif isinstance(features, FeatureSet):
params['features'] = json.dumps([feature.asDictionary for feature in features.features],
default=_date_handler)
else:
return None
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
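A usage sketch against an ArcREST feature layer (the layer object, geometry, and attribute names are placeholders):

point = {
    "geometry": {"x": -122.42, "y": 37.78},
    "attributes": {"name": "sample point"},
}
result = layer.addFeature([point])  # `layer` is a FeatureLayer-style object
print(result.get("addResults"))     # per-feature success/failure entries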
def ranked_in_list(self, members, **options):
'''
Retrieve a page of leaders from the leaderboard for a given list of members.
@param members [Array] Member names.
@param options [Hash] Options to be used when retrieving the page from the leaderboard.
@return a page of leaders from the leaderboard for a given list of members.
'''
return self.ranked_in_list_in(
        self.leaderboard_name, members, **options)
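An illustrative session with the leaderboard library (member names and scores are placeholders):

lb = Leaderboard('highscores')
lb.rank_member('alice', 310)
lb.rank_member('bob', 250)
for leader in lb.ranked_in_list(['alice', 'bob']):
    print(leader['member'], leader['rank'], leader['score'])
# alice 1 310.0
# bob 2 250.0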
def _synchronize_controls(self):
"""
Updates the gui based on button configs.
"""
# whether the script is visible
self.grid_script._widget.setVisible(self.button_script.get_value())
# whether we should be able to edit it.
    if self.combo_autoscript.get_index() != 0: self.script.disable()
    else: self.script.enable()
def add_dummy_scores(iterable, score=0):
    """Attach a constant (default zero) PHRED quality score to every sequence"""
    for seq in iterable:
        seq.letter_annotations["phred_quality"] = (score,)*len(seq)
        yield seq
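A sketch of typical use with Biopython, e.g. converting FASTA records (which carry no quality scores) into writable FASTQ (file names are placeholders):

from Bio import SeqIO

records = SeqIO.parse("reads.fasta", "fasta")
SeqIO.write(add_dummy_scores(records, score=40), "reads.fastq", "fastq")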
def _set_tx_queue(self, v, load=False):
"""
Setter method for tx_queue, mapped from YANG variable /qos/tx_queue (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tx_queue is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tx_queue() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=tx_queue.tx_queue, is_container='container', presence=False, yang_name="tx-queue", rest_name="tx-queue", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Eegress Queue', u'callpoint': u'qos_transmit_queue', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tx_queue must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=tx_queue.tx_queue, is_container='container', presence=False, yang_name="tx-queue", rest_name="tx-queue", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Eegress Queue', u'callpoint': u'qos_transmit_queue', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='container', is_config=True)""",
})
self.__tx_queue = t
if hasattr(self, '_set'):
        self._set()
def send_slack_message(message):
"""Send a message to the slack channel #coretools"""
if 'SLACK_WEB_HOOK' not in os.environ:
raise EnvironmentError("Could not find SLACK_WEB_HOOK environment variable")
webhook = os.environ['SLACK_WEB_HOOK']
r = requests.post(webhook, json={'text':message, 'username': 'Release Bot'})
if r.status_code != 200:
        raise RuntimeError("Could not post message to slack channel")
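Self-contained apart from module-level `import os` and `import requests`; a sketch (the webhook URL is a placeholder secret):

import os

os.environ['SLACK_WEB_HOOK'] = 'https://hooks.slack.com/services/T000/B000/XXX'
send_slack_message('Release v1.2.3 published')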
def process_paths(options, candidates=None, error=True):
"""Process files and log errors."""
errors = check_path(options, rootdir=CURDIR, candidates=candidates)
if options.format in ['pycodestyle', 'pep8']:
pattern = "%(filename)s:%(lnum)s:%(col)s: %(text)s"
elif options.format == 'pylint':
pattern = "%(filename)s:%(lnum)s: [%(type)s] %(text)s"
else: # 'parsable'
pattern = "%(filename)s:%(lnum)s:%(col)s: [%(type)s] %(text)s"
for er in errors:
if options.abspath:
er._info['filename'] = op.abspath(er.filename)
LOGGER.warning(pattern, er._info)
if error:
sys.exit(int(bool(errors)))
    return errors
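The three pattern branches produce output shaped like this (an illustrative error record):

info = {'filename': 'pkg/mod.py', 'lnum': 12, 'col': 5,
        'type': 'E', 'text': 'undefined name'}
print("%(filename)s:%(lnum)s:%(col)s: %(text)s" % info)             # pycodestyle/pep8
print("%(filename)s:%(lnum)s: [%(type)s] %(text)s" % info)          # pylint
print("%(filename)s:%(lnum)s:%(col)s: [%(type)s] %(text)s" % info)  # parsable
# pkg/mod.py:12:5: undefined name
# pkg/mod.py:12: [E] undefined name
# pkg/mod.py:12:5: [E] undefined name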
def _add_delta_tdi(self, other):
"""
Add a delta of a TimedeltaIndex
return the i8 result view
"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
# ndarray[timedelta64]; wrap in TimedeltaIndex for op
from pandas import TimedeltaIndex
other = TimedeltaIndex(other)
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(self_i8, other_i8,
arr_mask=self._isnan,
b_mask=other._isnan)
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
    return new_values.view('i8')
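From the public API this is the machinery behind element-wise addition of equal-length datetime-like and timedelta indexes, with NaT propagating through the mask:

import pandas as pd

dti = pd.DatetimeIndex(['2021-01-01', 'NaT'])
tdi = pd.TimedeltaIndex(['1 days', '2 days'])
print(dti + tdi)
# DatetimeIndex(['2021-01-02', 'NaT'], dtype='datetime64[ns]', freq=None)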
def _consfcn(self, x):
""" Evaluates nonlinear constraints and their Jacobian for OPF.
"""
h, g = self._gh(x)
dh, dg = self._dgh(x)
    return h, g, dh, dg
def bump_option_validator(ctx, param, value):
"""In case a value is provided checks that it is a valid version string. If
is not thrown :class:`click.UsageError`.
Return a :class:`~braulio.version.Version` object or **None**.
"""
if value:
try:
value = Version(value)
except ValueError:
ctx.fail(f"{x_mark} {value} is not a valid version string")
    return value
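A sketch of wiring such a validator up as a click option callback (the command and option names are illustrative):

import click

@click.command()
@click.option('--bump', callback=bump_option_validator, default=None)
def release(bump):
    click.echo(f'bumping to {bump}' if bump else 'auto-detecting version')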
def get_days_since_last_modified(filename):
"""
:param filename: Absolute file path
:return: Number of days since filename's last modified time
"""
now = datetime.now()
last_modified = datetime.fromtimestamp(os.path.getmtime(filename))
    return (now - last_modified).days
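Self-contained apart from module-level `import os` and `from datetime import datetime`; a quick sketch:

import os
from datetime import datetime

print(get_days_since_last_modified(__file__))  # e.g. 0 for a file saved today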
def _vmomentdensity(self,R,z,n,m,o,nsigma=None,mc=False,nmc=10000,
_returnmc=False,_vrs=None,_vts=None,_vzs=None,
_rawgausssamples=False,
gl=False,ngl=_DEFAULTNGL,_returngl=False,_glqeval=None,
_return_actions=False,_jr=None,_lz=None,_jz=None,
_return_freqs=False,
_rg=None,_kappa=None,_nu=None,_Omega=None,
_sigmaR1=None,_sigmaz1=None,
**kwargs):
"""Non-physical version of vmomentdensity, otherwise the same"""
if isinstance(R,numpy.ndarray):
return numpy.array([self._vmomentdensity(r,zz,n,m,o,nsigma=nsigma,
mc=mc,nmc=nmc,
gl=gl,ngl=ngl,**kwargs) for r,zz in zip(R,z)])
if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
actionAngle.actionAngleAdiabaticGrid)):
if n % 2 == 1. or o % 2 == 1.:
return 0. #we know this must be the case
    if nsigma is None:
nsigma= _NSIGMA
if _sigmaR1 is None:
sigmaR1= self._sr*numpy.exp((self._refr-R)/self._hsr)
else:
sigmaR1= _sigmaR1
if _sigmaz1 is None:
sigmaz1= self._sz*numpy.exp((self._refr-R)/self._hsz)
else:
sigmaz1= _sigmaz1
thisvc= potential.vcirc(self._pot,R,use_physical=False)
#Use the asymmetric drift equation to estimate va
gamma= numpy.sqrt(0.5)
va= sigmaR1**2./2./thisvc\
*(gamma**2.-1. #Assume close to flat rotation curve, sigphi2/sigR2 =~ 0.5
+R*(1./self._hr+2./self._hsr))
if math.fabs(va) > sigmaR1: va = 0.#To avoid craziness near the center
if gl:
if ngl % 2 == 1:
raise ValueError("ngl must be even")
        if _glqeval is not None and ngl != _glqeval.shape[0]:
_glqeval= None
#Use Gauss-Legendre integration for all
if ngl == _DEFAULTNGL:
glx, glw= self._glxdef, self._glwdef
glx12, glw12= self._glxdef12, self._glwdef12
elif ngl == _DEFAULTNGL2:
glx, glw= self._glxdef2, self._glwdef2
glx12, glw12= self._glxdef, self._glwdef
else:
glx, glw= numpy.polynomial.legendre.leggauss(ngl)
glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
#Evaluate everywhere
if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
actionAngle.actionAngleAdiabaticGrid)):
vRgl= nsigma*sigmaR1/2.*(glx+1.)
vzgl= nsigma*sigmaz1/2.*(glx+1.)
vRglw= glw
vzglw= glw
else:
vRgl= nsigma*sigmaR1/2.*(glx12+1.)
#vRgl= 1.5/2.*(glx12+1.)
vRgl= list(vRgl)
vRgl.extend(-nsigma*sigmaR1/2.*(glx12+1.))
#vRgl.extend(-1.5/2.*(glx12+1.))
vRgl= numpy.array(vRgl)
vzgl= nsigma*sigmaz1/2.*(glx12+1.)
#vzgl= 1.5/2.*(glx12+1.)
vzgl= list(vzgl)
vzgl.extend(-nsigma*sigmaz1/2.*(glx12+1.))
#vzgl.extend(-1.5/2.*(glx12+1.))
vzgl= numpy.array(vzgl)
vRglw= glw12
vRglw= list(vRglw)
vRglw.extend(glw12)
vRglw= numpy.array(vRglw)
vzglw= glw12
vzglw= list(vzglw)
vzglw.extend(glw12)
vzglw= numpy.array(vzglw)
if 'vTmax' in kwargs: vTmax = kwargs['vTmax']
else: vTmax = 1.5
vTgl= vTmax/2.*(glx+1.)
#Tile everything
vTgl= numpy.tile(vTgl,(ngl,ngl,1)).T
vRgl= numpy.tile(numpy.reshape(vRgl,(1,ngl)).T,(ngl,1,ngl))
vzgl= numpy.tile(vzgl,(ngl,ngl,1))
vTglw= numpy.tile(glw,(ngl,ngl,1)).T #also tile weights
vRglw= numpy.tile(numpy.reshape(vRglw,(1,ngl)).T,(ngl,1,ngl))
vzglw= numpy.tile(vzglw,(ngl,ngl,1))
#evaluate
if _glqeval is None and _jr is None:
logqeval, jr, lz, jz, rg, kappa, nu, Omega= self(R+numpy.zeros(ngl*ngl*ngl),
vRgl.flatten(),
vTgl.flatten(),
z+numpy.zeros(ngl*ngl*ngl),
vzgl.flatten(),
log=True,
_return_actions=True,
_return_freqs=True,
use_physical=False)
logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
        elif _jr is not None and _rg is None:
logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
log=True,
_return_actions=True,
_return_freqs=True,
use_physical=False)
logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
        elif _jr is not None and _rg is not None:
logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
rg=_rg,kappa=_kappa,nu=_nu,
Omega=_Omega,
log=True,
_return_actions=True,
_return_freqs=True,
use_physical=False)
logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
else:
logqeval= _glqeval
if _returngl:
return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
*vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2,
logqeval)
elif _return_actions and _return_freqs:
return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
*vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2,
jr,lz,jz,
rg,kappa,nu,Omega)
elif _return_actions:
return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
*vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2,
jr,lz,jz)
else:
return numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
*vTglw*vRglw*vzglw*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2)
elif mc:
mvT= (thisvc-va)/gamma/sigmaR1
if _vrs is None:
vrs= numpy.random.normal(size=nmc)
else:
vrs= _vrs
if _vts is None:
vts= numpy.random.normal(size=nmc)+mvT
else:
if _rawgausssamples:
vts= _vts+mvT
else:
vts= _vts
if _vzs is None:
vzs= numpy.random.normal(size=nmc)
else:
vzs= _vzs
Is= _vmomentsurfaceMCIntegrand(vzs,vrs,vts,numpy.ones(nmc)*R,
numpy.ones(nmc)*z,
self,sigmaR1,gamma,sigmaz1,mvT,
n,m,o)
if _returnmc:
if _rawgausssamples:
return (numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o),
vrs,vts-mvT,vzs)
else:
return (numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o),
vrs,vts,vzs)
else:
return numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o)
else: #pragma: no cover because this is too slow; a warning is shown
warnings.warn("Calculations using direct numerical integration using tplquad is not recommended and extremely slow; it has also not been carefully tested",galpyWarning)
return integrate.tplquad(_vmomentsurfaceIntegrand,
1./gamma*(thisvc-va)/sigmaR1-nsigma,
1./gamma*(thisvc-va)/sigmaR1+nsigma,
lambda x: 0., lambda x: nsigma,
lambda x,y: 0., lambda x,y: nsigma,
(R,z,self,sigmaR1,gamma,sigmaz1,n,m,o),
**kwargs)[0]*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o) | Non-physical version of vmomentdensity, otherwise the same | Below is the the instruction that describes the task:
### Input:
Non-physical version of vmomentdensity, otherwise the same
### Response:
def _vmomentdensity(self,R,z,n,m,o,nsigma=None,mc=False,nmc=10000,
_returnmc=False,_vrs=None,_vts=None,_vzs=None,
_rawgausssamples=False,
gl=False,ngl=_DEFAULTNGL,_returngl=False,_glqeval=None,
_return_actions=False,_jr=None,_lz=None,_jz=None,
_return_freqs=False,
_rg=None,_kappa=None,_nu=None,_Omega=None,
_sigmaR1=None,_sigmaz1=None,
**kwargs):
"""Non-physical version of vmomentdensity, otherwise the same"""
if isinstance(R,numpy.ndarray):
return numpy.array([self._vmomentdensity(r,zz,n,m,o,nsigma=nsigma,
mc=mc,nmc=nmc,
gl=gl,ngl=ngl,**kwargs) for r,zz in zip(R,z)])
if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
actionAngle.actionAngleAdiabaticGrid)):
if n % 2 == 1. or o % 2 == 1.:
return 0. #we know this must be the case
if nsigma == None:
nsigma= _NSIGMA
if _sigmaR1 is None:
sigmaR1= self._sr*numpy.exp((self._refr-R)/self._hsr)
else:
sigmaR1= _sigmaR1
if _sigmaz1 is None:
sigmaz1= self._sz*numpy.exp((self._refr-R)/self._hsz)
else:
sigmaz1= _sigmaz1
thisvc= potential.vcirc(self._pot,R,use_physical=False)
#Use the asymmetric drift equation to estimate va
gamma= numpy.sqrt(0.5)
va= sigmaR1**2./2./thisvc\
*(gamma**2.-1. #Assume close to flat rotation curve, sigphi2/sigR2 =~ 0.5
+R*(1./self._hr+2./self._hsr))
if math.fabs(va) > sigmaR1: va = 0.#To avoid craziness near the center
if gl:
if ngl % 2 == 1:
raise ValueError("ngl must be even")
if not _glqeval is None and ngl != _glqeval.shape[0]:
_glqeval= None
#Use Gauss-Legendre integration for all
if ngl == _DEFAULTNGL:
glx, glw= self._glxdef, self._glwdef
glx12, glw12= self._glxdef12, self._glwdef12
elif ngl == _DEFAULTNGL2:
glx, glw= self._glxdef2, self._glwdef2
glx12, glw12= self._glxdef, self._glwdef
else:
glx, glw= numpy.polynomial.legendre.leggauss(ngl)
glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
#Evaluate everywhere
if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
actionAngle.actionAngleAdiabaticGrid)):
vRgl= nsigma*sigmaR1/2.*(glx+1.)
vzgl= nsigma*sigmaz1/2.*(glx+1.)
vRglw= glw
vzglw= glw
else:
vRgl= nsigma*sigmaR1/2.*(glx12+1.)
#vRgl= 1.5/2.*(glx12+1.)
vRgl= list(vRgl)
vRgl.extend(-nsigma*sigmaR1/2.*(glx12+1.))
#vRgl.extend(-1.5/2.*(glx12+1.))
vRgl= numpy.array(vRgl)
vzgl= nsigma*sigmaz1/2.*(glx12+1.)
#vzgl= 1.5/2.*(glx12+1.)
vzgl= list(vzgl)
vzgl.extend(-nsigma*sigmaz1/2.*(glx12+1.))
#vzgl.extend(-1.5/2.*(glx12+1.))
vzgl= numpy.array(vzgl)
vRglw= glw12
vRglw= list(vRglw)
vRglw.extend(glw12)
vRglw= numpy.array(vRglw)
vzglw= glw12
vzglw= list(vzglw)
vzglw.extend(glw12)
vzglw= numpy.array(vzglw)
if 'vTmax' in kwargs: vTmax = kwargs['vTmax']
else: vTmax = 1.5
vTgl= vTmax/2.*(glx+1.)
#Tile everything
vTgl= numpy.tile(vTgl,(ngl,ngl,1)).T
vRgl= numpy.tile(numpy.reshape(vRgl,(1,ngl)).T,(ngl,1,ngl))
vzgl= numpy.tile(vzgl,(ngl,ngl,1))
vTglw= numpy.tile(glw,(ngl,ngl,1)).T #also tile weights
vRglw= numpy.tile(numpy.reshape(vRglw,(1,ngl)).T,(ngl,1,ngl))
vzglw= numpy.tile(vzglw,(ngl,ngl,1))
#evaluate
if _glqeval is None and _jr is None:
logqeval, jr, lz, jz, rg, kappa, nu, Omega= self(R+numpy.zeros(ngl*ngl*ngl),
vRgl.flatten(),
vTgl.flatten(),
z+numpy.zeros(ngl*ngl*ngl),
vzgl.flatten(),
log=True,
_return_actions=True,
_return_freqs=True,
use_physical=False)
logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
elif not _jr is None and _rg is None:
logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
log=True,
_return_actions=True,
_return_freqs=True,
use_physical=False)
logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
elif not _jr is None and not _rg is None:
logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
rg=_rg,kappa=_kappa,nu=_nu,
Omega=_Omega,
log=True,
_return_actions=True,
_return_freqs=True,
use_physical=False)
logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
else:
logqeval= _glqeval
if _returngl:
return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
*vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2,
logqeval)
elif _return_actions and _return_freqs:
return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
*vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2,
jr,lz,jz,
rg,kappa,nu,Omega)
elif _return_actions:
return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
*vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2,
jr,lz,jz)
else:
return numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
*vTglw*vRglw*vzglw*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2)
elif mc:
mvT= (thisvc-va)/gamma/sigmaR1
if _vrs is None:
vrs= numpy.random.normal(size=nmc)
else:
vrs= _vrs
if _vts is None:
vts= numpy.random.normal(size=nmc)+mvT
else:
if _rawgausssamples:
vts= _vts+mvT
else:
vts= _vts
if _vzs is None:
vzs= numpy.random.normal(size=nmc)
else:
vzs= _vzs
Is= _vmomentsurfaceMCIntegrand(vzs,vrs,vts,numpy.ones(nmc)*R,
numpy.ones(nmc)*z,
self,sigmaR1,gamma,sigmaz1,mvT,
n,m,o)
if _returnmc:
if _rawgausssamples:
return (numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o),
vrs,vts-mvT,vzs)
else:
return (numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o),
vrs,vts,vzs)
else:
return numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o)
else: #pragma: no cover because this is too slow; a warning is shown
warnings.warn("Calculations using direct numerical integration using tplquad is not recommended and extremely slow; it has also not been carefully tested",galpyWarning)
return integrate.tplquad(_vmomentsurfaceIntegrand,
1./gamma*(thisvc-va)/sigmaR1-nsigma,
1./gamma*(thisvc-va)/sigmaR1+nsigma,
lambda x: 0., lambda x: nsigma,
lambda x,y: 0., lambda x,y: nsigma,
(R,z,self,sigmaR1,gamma,sigmaz1,n,m,o),
**kwargs)[0]*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o) |
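In galpy this private helper backs the public `vmomentdensity` method of `quasiisothermaldf`; a sketch of the public call (all parameter values are illustrative):

from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleStaeckel
from galpy.df import quasiisothermaldf

aA = actionAngleStaeckel(pot=MWPotential2014, delta=0.45, c=True)
qdf = quasiisothermaldf(1./3., 0.2, 0.1, 1., 1., pot=MWPotential2014, aA=aA)
# zeroth velocity moment (the density) at R=1, z=0.1, via Gauss-Legendre:
dens = qdf.vmomentdensity(1., 0.1, 0, 0, 0, gl=True)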
def copy(
ctx,
opts,
owner_repo_package,
destination,
skip_errors,
wait_interval,
no_wait_for_sync,
sync_attempts,
):
"""
Copy a package to another repository.
This requires appropriate permissions for both the source
repository/package and the destination repository.
- OWNER/REPO/PACKAGE: Specify the OWNER namespace (i.e. user or org), the
REPO name where the package is stored, and the PACKAGE name (slug) of the
package itself. All separated by a slash.
Example: 'your-org/awesome-repo/better-pkg'.
- DEST: Specify the DEST (destination) repository to copy the package to.
This *must* be in the same namespace as the source repository.
Example: 'other-repo'
Full CLI example:
$ cloudsmith cp your-org/awesome-repo/better-pkg other-repo
"""
owner, source, slug = owner_repo_package
click.echo(
"Copying %(slug)s package from %(source)s to %(dest)s ... "
% {
"slug": click.style(slug, bold=True),
"source": click.style(source, bold=True),
"dest": click.style(destination, bold=True),
},
nl=False,
)
context_msg = "Failed to copy package!"
with handle_api_exceptions(
ctx, opts=opts, context_msg=context_msg, reraise_on_error=skip_errors
):
with maybe_spinner(opts):
_, new_slug = copy_package(
owner=owner, repo=source, identifier=slug, destination=destination
)
click.secho("OK", fg="green")
if no_wait_for_sync:
return
wait_for_package_sync(
ctx=ctx,
opts=opts,
owner=owner,
repo=destination,
slug=new_slug,
wait_interval=wait_interval,
skip_errors=skip_errors,
attempts=sync_attempts,
    )
def selected(self, interrupt=False):
"""This object has been selected."""
    self.ao2.output(self.get_title(), interrupt=interrupt)
def _log_error(self, request, error):
'''Log exceptions during a fetch.'''
_logger.error(
_('Fetching ‘{url}’ encountered an error: {error}'),
url=request.url, error=error
    )
def verify_crl(cert, path, validation_context, use_deltas=True, cert_description=None, end_entity_name_override=None):
"""
Verifies a certificate against a list of CRLs, checking to make sure the
certificate has not been revoked. Uses the algorithm from
https://tools.ietf.org/html/rfc5280#section-6.3 as a basis, but the
implementation differs to allow CRLs from unrecorded locations.
    :param cert:
        An asn1crypto.x509.Certificate object to check for in the CRLs
    :param path:
        A certvalidator.path.ValidationPath object of the cert's validation path
:param validation_context:
A certvalidator.context.ValidationContext object to use for caching
validation information
:param use_deltas:
A boolean indicating if delta CRLs should be used
:param cert_description:
A unicode string containing a description of the certificate to be used
in exception messages
:param end_entity_name_override:
None or a unicode string of the name to use for the end-entity
certificate when including in exception messages
:raises:
certvalidator.errors.CRLNoMatchesError - when none of the CRLs match the certificate
certvalidator.errors.CRLValidationError - when any error occurs trying to verify the CertificateList
certvalidator.errors.RevokedError - when the CRL indicates the certificate has been revoked
"""
if not isinstance(cert, x509.Certificate):
raise TypeError(pretty_message(
'''
cert must be an instance of asn1crypto.x509.Certificate, not %s
''',
type_name(cert)
))
if not isinstance(path, ValidationPath):
raise TypeError(pretty_message(
'''
path must be an instance of certvalidator.path.ValidationPath,
not %s
''',
type_name(path)
))
if not isinstance(validation_context, ValidationContext):
raise TypeError(pretty_message(
'''
validation_context must be an instance of
certvalidator.context.ValidationContext, not %s
''',
type_name(validation_context)
))
if cert_description is None:
cert_description = 'the certificate'
if not isinstance(cert_description, str_cls):
raise TypeError(pretty_message(
'''
cert_description must be a unicode string, not %s
''',
type_name(cert_description)
))
moment = validation_context.moment
certificate_registry = validation_context.certificate_registry
certificate_lists = validation_context.retrieve_crls(cert)
cert_issuer = path.find_issuer(cert)
complete_lists_by_issuer = {}
delta_lists_by_issuer = {}
for certificate_list in certificate_lists:
issuer_hashable = certificate_list.issuer.hashable
if certificate_list.delta_crl_indicator_value is None:
if issuer_hashable not in complete_lists_by_issuer:
complete_lists_by_issuer[issuer_hashable] = []
complete_lists_by_issuer[issuer_hashable].append(certificate_list)
else:
if issuer_hashable not in delta_lists_by_issuer:
delta_lists_by_issuer[issuer_hashable] = []
delta_lists_by_issuer[issuer_hashable].append(certificate_list)
# In the main loop, only complete CRLs are processed, so delta CRLs are
# weeded out of the todo list
crls_to_process = []
for issuer_crls in complete_lists_by_issuer.values():
crls_to_process.extend(issuer_crls)
total_crls = len(crls_to_process)
# Build a lookup table for the Distribution point objects associated with
# an issuer name hashable
distribution_point_map = {}
sources = [cert.crl_distribution_points]
if use_deltas:
sources.extend(cert.delta_crl_distribution_points)
for dp_list in sources:
for distribution_point in dp_list:
if isinstance(distribution_point['crl_issuer'], x509.GeneralNames):
dp_name_hashes = []
for general_name in distribution_point['crl_issuer']:
if general_name.name == 'directory_name':
dp_name_hashes.append(general_name.chosen.hashable)
else:
dp_name_hashes = [cert.issuer.hashable]
for dp_name_hash in dp_name_hashes:
if dp_name_hash not in distribution_point_map:
distribution_point_map[dp_name_hash] = []
distribution_point_map[dp_name_hash].append(distribution_point)
valid_reasons = set([
'key_compromise',
'ca_compromise',
'affiliation_changed',
'superseded',
'cessation_of_operation',
'certificate_hold',
'privilege_withdrawn',
'aa_compromise',
])
known_extensions = set([
'issuer_alt_name',
'crl_number',
'delta_crl_indicator',
'issuing_distribution_point',
'authority_key_identifier',
'freshest_crl',
'authority_information_access',
])
checked_reasons = set()
failures = []
issuer_failures = 0
while len(crls_to_process) > 0:
certificate_list = crls_to_process.pop(0)
crl_idp = certificate_list.issuing_distribution_point_value
delta_certificate_list = None
delta_crl_idp = None
interim_reasons = set()
crl_issuer = None
crl_issuer_name = None
is_indirect = False
if crl_idp and crl_idp['indirect_crl'].native:
is_indirect = True
crl_idp_name = crl_idp['distribution_point']
if crl_idp_name:
if crl_idp_name.name == 'full_name':
crl_issuer_name = crl_idp_name.chosen[0].chosen
else:
crl_issuer_name = cert_issuer.subject.copy().chosen.append(
crl_idp_name.chosen
)
elif certificate_list.authority_key_identifier:
tmp_crl_issuer = certificate_registry.retrieve_by_key_identifier(
certificate_list.authority_key_identifier
)
crl_issuer_name = tmp_crl_issuer.subject
else:
failures.append((
'CRL is marked as an indirect CRL, but provides no '
'mechanism for locating the CRL issuer certificate',
certificate_list
))
continue
else:
crl_issuer_name = certificate_list.issuer
if not crl_issuer:
crl_issuer = validation_context.check_crl_issuer(certificate_list)
if not crl_issuer:
candidate_crl_issuers = certificate_registry.retrieve_by_name(crl_issuer_name, cert_issuer)
candidates_skipped = 0
signatures_failed = 0
unauthorized_certs = 0
if not candidate_crl_issuers and crl_issuer_name != certificate_list.issuer:
candidate_crl_issuers = certificate_registry.retrieve_by_name(certificate_list.issuer, cert_issuer)
for candidate_crl_issuer in candidate_crl_issuers:
direct_issuer = candidate_crl_issuer.subject == cert_issuer.subject
# In some cases an indirect CRL issuer is a certificate issued
# by the certificate issuer. However, we need to ensure that
# the candidate CRL issuer is not the certificate being checked,
# otherwise we may be checking an incorrect CRL and produce
# incorrect results.
indirect_issuer = candidate_crl_issuer.issuer == cert_issuer.subject
indirect_issuer = indirect_issuer and candidate_crl_issuer.sha256 != cert.sha256
if not direct_issuer and not indirect_issuer and not is_indirect:
candidates_skipped += 1
continue
# Step f
candidate_crl_issuer_path = None
if validation_context:
candidate_crl_issuer_path = validation_context.check_validation(candidate_crl_issuer)
if candidate_crl_issuer_path is None:
candidate_crl_issuer_path = path.copy().truncate_to_issuer(candidate_crl_issuer)
candidate_crl_issuer_path.append(candidate_crl_issuer)
try:
# Pre-emptively mark a path as validated to prevent recursion
if validation_context:
validation_context.record_validation(candidate_crl_issuer, candidate_crl_issuer_path)
temp_override = end_entity_name_override
if temp_override is None and candidate_crl_issuer.sha256 != cert_issuer.sha256:
temp_override = cert_description + ' CRL issuer'
_validate_path(
validation_context,
candidate_crl_issuer_path,
end_entity_name_override=temp_override
)
except (PathValidationError) as e:
# If the validation did not work out, clear it
if validation_context:
validation_context.clear_validation(candidate_crl_issuer)
# We let a revoked error fall through since step k will catch
# it with a correct error message
if isinstance(e, RevokedError):
raise
raise CRLValidationError('CRL issuer certificate path could not be validated')
key_usage_value = candidate_crl_issuer.key_usage_value
if key_usage_value and 'crl_sign' not in key_usage_value.native:
unauthorized_certs += 1
continue
try:
# Step g
_verify_signature(certificate_list, candidate_crl_issuer)
crl_issuer = candidate_crl_issuer
break
except (CRLValidationError):
signatures_failed += 1
continue
if crl_issuer is None:
if candidates_skipped == len(candidate_crl_issuers):
issuer_failures += 1
else:
if signatures_failed == len(candidate_crl_issuers):
failures.append((
'CRL signature could not be verified',
certificate_list
))
elif unauthorized_certs == len(candidate_crl_issuers):
failures.append((
'The CRL issuer is not authorized to sign CRLs',
certificate_list
))
else:
failures.append((
'Unable to locate CRL issuer certificate',
certificate_list
))
continue
else:
validation_context.record_crl_issuer(certificate_list, crl_issuer)
# Step b 1
has_dp_crl_issuer = False
dp_match = False
dps = cert.crl_distribution_points_value
if dps:
crl_issuer_general_name = x509.GeneralName(
name='directory_name',
value=crl_issuer.subject
)
for dp in dps:
if dp['crl_issuer']:
has_dp_crl_issuer = True
if crl_issuer_general_name in dp['crl_issuer']:
dp_match = True
same_issuer = crl_issuer.subject == cert_issuer.subject
indirect_match = has_dp_crl_issuer and dp_match and is_indirect
missing_idp = has_dp_crl_issuer and (not dp_match or not is_indirect)
indirect_crl_issuer = crl_issuer.issuer == cert_issuer.subject
if (not same_issuer and not indirect_match and not indirect_crl_issuer) or missing_idp:
issuer_failures += 1
continue
# Check to make sure the CRL is valid for the moment specified
if moment < certificate_list['tbs_cert_list']['this_update'].native:
failures.append((
'CRL is from after the validation time',
certificate_list
))
continue
if moment > certificate_list['tbs_cert_list']['next_update'].native:
failures.append((
'CRL should have been regenerated by the validation time',
certificate_list
))
continue
# Step b 2
if crl_idp is not None:
# Step b 2 i
has_idp_name = False
has_dp_name = False
idp_dp_match = False
idp_general_names = []
idp_dp_name = crl_idp['distribution_point']
if idp_dp_name:
has_idp_name = True
if idp_dp_name.name == 'full_name':
for general_name in idp_dp_name.chosen:
idp_general_names.append(general_name)
else:
inner_extended_issuer_name = crl_issuer.subject.copy()
inner_extended_issuer_name.chosen.append(idp_dp_name.chosen.untag())
idp_general_names.append(x509.GeneralName(
name='directory_name',
value=inner_extended_issuer_name
))
dps = cert.crl_distribution_points_value
if dps:
for dp in dps:
if idp_dp_match:
break
dp_name = dp['distribution_point']
if dp_name:
has_dp_name = True
if dp_name.name == 'full_name':
for general_name in dp_name.chosen:
if general_name in idp_general_names:
idp_dp_match = True
break
else:
inner_extended_issuer_name = crl_issuer.subject.copy()
inner_extended_issuer_name.chosen.append(dp_name.chosen.untag())
dp_extended_issuer_name = x509.GeneralName(
name='directory_name',
value=inner_extended_issuer_name
)
if dp_extended_issuer_name in idp_general_names:
idp_dp_match = True
elif dp['crl_issuer']:
has_dp_name = True
for dp_crl_issuer_name in dp['crl_issuer']:
if dp_crl_issuer_name in idp_general_names:
idp_dp_match = True
break
else:
# If there is no DP, we consider the CRL issuer name to be it
has_dp_name = True
general_name = x509.GeneralName(
name='directory_name',
value=crl_issuer_name
)
if general_name in idp_general_names:
idp_dp_match = True
idp_dp_match_failed = has_idp_name and has_dp_name and not idp_dp_match
if idp_dp_match_failed:
failures.append((
pretty_message(
'''
The CRL issuing distribution point extension does not
share any names with the certificate CRL distribution
point extension
'''
),
certificate_list
))
issuer_failures += 1
continue
# Step b 2 ii
if crl_idp['only_contains_user_certs'].native:
if cert.basic_constraints_value and cert.basic_constraints_value['ca'].native:
failures.append((
pretty_message(
'''
CRL only contains end-entity certificates and
certificate is a CA certificate
'''
),
certificate_list
))
continue
# Step b 2 iii
if crl_idp['only_contains_ca_certs'].native:
if not cert.basic_constraints_value or cert.basic_constraints_value['ca'].native is False:
failures.append((
pretty_message(
'''
CRL only contains CA certificates and certificate
is an end-entity certificate
'''
),
certificate_list
))
continue
# Step b 2 iv
if crl_idp['only_contains_attribute_certs'].native:
failures.append((
'CRL only contains attribute certificates',
certificate_list
))
continue
# Step c
if use_deltas and certificate_list.freshest_crl_value and len(certificate_list.freshest_crl_value) > 0:
for candidate_delta_cl in delta_lists_by_issuer.get(crl_issuer_name.hashable, []):
# Step c 1
if candidate_delta_cl.issuer != crl_issuer_name:
continue
# Step c 2
delta_crl_idp = candidate_delta_cl.issuing_distribution_point_value
if (crl_idp is None and delta_crl_idp is not None) or (crl_idp is not None and delta_crl_idp is None):
continue
if crl_idp and crl_idp.native != delta_crl_idp.native:
continue
# Step c 3
if certificate_list.authority_key_identifier != candidate_delta_cl.authority_key_identifier:
continue
delta_certificate_list = candidate_delta_cl
break
# Step d
idp_reasons = None
if crl_idp and crl_idp['only_some_reasons'].native is not None:
idp_reasons = crl_idp['only_some_reasons'].native
reason_keys = None
if idp_reasons:
reason_keys = idp_reasons
if reason_keys is None:
interim_reasons = valid_reasons.copy()
else:
interim_reasons = reason_keys
# Step e
# We don't skip a CRL if it only contains reasons already checked since
# a certificate issuer can self-issue a new cert that is used for CRLs
if certificate_list.critical_extensions - known_extensions:
failures.append((
'One or more unrecognized critical extensions are present in '
'the CRL',
certificate_list
))
continue
if use_deltas and delta_certificate_list and delta_certificate_list.critical_extensions - known_extensions:
failures.append((
'One or more unrecognized critical extensions are present in '
'the delta CRL',
delta_certificate_list
))
continue
# Step h
if use_deltas and delta_certificate_list:
try:
_verify_signature(delta_certificate_list, crl_issuer)
except (CRLValidationError):
failures.append((
'Delta CRL signature could not be verified',
certificate_list,
delta_certificate_list
))
continue
if moment < delta_certificate_list['tbs_cert_list']['this_update'].native:
failures.append((
'Delta CRL is from after the validation time',
certificate_list,
delta_certificate_list
))
continue
if moment > delta_certificate_list['tbs_cert_list']['next_update'].native:
failures.append((
                    'Delta CRL should have been regenerated by the validation time',
certificate_list,
delta_certificate_list
))
continue
# Step i
revoked_reason = None
revoked_date = None
if use_deltas and delta_certificate_list:
try:
revoked_date, revoked_reason = _find_cert_in_list(cert, cert_issuer, delta_certificate_list, crl_issuer)
except (NotImplementedError):
failures.append((
'One or more critical extensions are present in the CRL '
'entry for the certificate',
delta_certificate_list
))
continue
# Step j
if revoked_reason is None:
try:
revoked_date, revoked_reason = _find_cert_in_list(cert, cert_issuer, certificate_list, crl_issuer)
except (NotImplementedError):
failures.append((
'One or more critical extensions are present in the CRL '
'entry for the certificate',
certificate_list
))
continue
# Step k
if revoked_reason and revoked_reason.native == 'remove_from_crl':
revoked_reason = None
revoked_date = None
if revoked_reason:
reason = revoked_reason.human_friendly
date = revoked_date.native.strftime('%Y-%m-%d')
time = revoked_date.native.strftime('%H:%M:%S')
raise RevokedError(pretty_message(
'''
CRL indicates %s was revoked at %s on %s, due to %s
''',
cert_description,
time,
date,
reason
))
# Step l
checked_reasons |= interim_reasons
# CRLs should not include this value, but at least one of the examples
# from the NIST test suite does
checked_reasons -= set(['unused'])
if checked_reasons != valid_reasons:
if total_crls == issuer_failures:
raise CRLNoMatchesError(pretty_message(
'''
No CRLs were issued by the issuer of %s, or any indirect CRL
issuer
''',
cert_description
))
if not failures:
failures.append((
'The available CRLs do not cover all revocation reasons',
))
raise CRLValidationIndeterminateError(
pretty_message(
'''
Unable to determine if %s is revoked due to insufficient
information from known CRLs
''',
cert_description
),
failures
) | Verifies a certificate against a list of CRLs, checking to make sure the
certificate has not been revoked. Uses the algorithm from
https://tools.ietf.org/html/rfc5280#section-6.3 as a basis, but the
implementation differs to allow CRLs from unrecorded locations.
:param cert:
        An asn1crypto.x509.Certificate object to check for in the CRLs
:param path:
A certvalidator.path.ValidationPath object of the cert's validation path
:param certificate_lists:
A list of asn1crypto.crl.CertificateList objects
:param validation_context:
A certvalidator.context.ValidationContext object to use for caching
validation information
:param use_deltas:
A boolean indicating if delta CRLs should be used
:param cert_description:
A unicode string containing a description of the certificate to be used
in exception messages
:param end_entity_name_override:
None or a unicode string of the name to use for the end-entity
certificate when including in exception messages
:raises:
certvalidator.errors.CRLNoMatchesError - when none of the CRLs match the certificate
certvalidator.errors.CRLValidationError - when any error occurs trying to verify the CertificateList
        certvalidator.errors.RevokedError - when the CRL indicates the certificate has been revoked | Below is the instruction that describes the task:
### Input:
Verifies a certificate against a list of CRLs, checking to make sure the
certificate has not been revoked. Uses the algorithm from
https://tools.ietf.org/html/rfc5280#section-6.3 as a basis, but the
implementation differs to allow CRLs from unrecorded locations.
:param cert:
        An asn1crypto.x509.Certificate object to check for in the CRLs
:param path:
A certvalidator.path.ValidationPath object of the cert's validation path
:param certificate_lists:
A list of asn1crypto.crl.CertificateList objects
:param validation_context:
A certvalidator.context.ValidationContext object to use for caching
validation information
:param use_deltas:
A boolean indicating if delta CRLs should be used
:param cert_description:
A unicode string containing a description of the certificate to be used
in exception messages
:param end_entity_name_override:
None or a unicode string of the name to use for the end-entity
certificate when including in exception messages
:raises:
certvalidator.errors.CRLNoMatchesError - when none of the CRLs match the certificate
certvalidator.errors.CRLValidationError - when any error occurs trying to verify the CertificateList
certvalidator.errors.RevokedError - when the CRL indicates the certificate has been revoked
### Response:
def verify_crl(cert, path, validation_context, use_deltas=True, cert_description=None, end_entity_name_override=None):
"""
Verifies a certificate against a list of CRLs, checking to make sure the
certificate has not been revoked. Uses the algorithm from
https://tools.ietf.org/html/rfc5280#section-6.3 as a basis, but the
implementation differs to allow CRLs from unrecorded locations.
:param cert:
        An asn1crypto.x509.Certificate object to check for in the CRLs
:param path:
A certvalidator.path.ValidationPath object of the cert's validation path
:param certificate_lists:
A list of asn1crypto.crl.CertificateList objects
:param validation_context:
A certvalidator.context.ValidationContext object to use for caching
validation information
:param use_deltas:
A boolean indicating if delta CRLs should be used
:param cert_description:
A unicode string containing a description of the certificate to be used
in exception messages
:param end_entity_name_override:
None or a unicode string of the name to use for the end-entity
certificate when including in exception messages
:raises:
certvalidator.errors.CRLNoMatchesError - when none of the CRLs match the certificate
certvalidator.errors.CRLValidationError - when any error occurs trying to verify the CertificateList
certvalidator.errors.RevokedError - when the CRL indicates the certificate has been revoked
"""
if not isinstance(cert, x509.Certificate):
raise TypeError(pretty_message(
'''
cert must be an instance of asn1crypto.x509.Certificate, not %s
''',
type_name(cert)
))
if not isinstance(path, ValidationPath):
raise TypeError(pretty_message(
'''
path must be an instance of certvalidator.path.ValidationPath,
not %s
''',
type_name(path)
))
if not isinstance(validation_context, ValidationContext):
raise TypeError(pretty_message(
'''
validation_context must be an instance of
certvalidator.context.ValidationContext, not %s
''',
type_name(validation_context)
))
if cert_description is None:
cert_description = 'the certificate'
if not isinstance(cert_description, str_cls):
raise TypeError(pretty_message(
'''
cert_description must be a unicode string, not %s
''',
type_name(cert_description)
))
moment = validation_context.moment
certificate_registry = validation_context.certificate_registry
certificate_lists = validation_context.retrieve_crls(cert)
cert_issuer = path.find_issuer(cert)
complete_lists_by_issuer = {}
delta_lists_by_issuer = {}
for certificate_list in certificate_lists:
issuer_hashable = certificate_list.issuer.hashable
if certificate_list.delta_crl_indicator_value is None:
if issuer_hashable not in complete_lists_by_issuer:
complete_lists_by_issuer[issuer_hashable] = []
complete_lists_by_issuer[issuer_hashable].append(certificate_list)
else:
if issuer_hashable not in delta_lists_by_issuer:
delta_lists_by_issuer[issuer_hashable] = []
delta_lists_by_issuer[issuer_hashable].append(certificate_list)
# In the main loop, only complete CRLs are processed, so delta CRLs are
# weeded out of the todo list
crls_to_process = []
for issuer_crls in complete_lists_by_issuer.values():
crls_to_process.extend(issuer_crls)
total_crls = len(crls_to_process)
# Build a lookup table for the Distribution point objects associated with
# an issuer name hashable
distribution_point_map = {}
sources = [cert.crl_distribution_points]
if use_deltas:
sources.extend(cert.delta_crl_distribution_points)
for dp_list in sources:
for distribution_point in dp_list:
if isinstance(distribution_point['crl_issuer'], x509.GeneralNames):
dp_name_hashes = []
for general_name in distribution_point['crl_issuer']:
if general_name.name == 'directory_name':
dp_name_hashes.append(general_name.chosen.hashable)
else:
dp_name_hashes = [cert.issuer.hashable]
for dp_name_hash in dp_name_hashes:
if dp_name_hash not in distribution_point_map:
distribution_point_map[dp_name_hash] = []
distribution_point_map[dp_name_hash].append(distribution_point)
valid_reasons = set([
'key_compromise',
'ca_compromise',
'affiliation_changed',
'superseded',
'cessation_of_operation',
'certificate_hold',
'privilege_withdrawn',
'aa_compromise',
])
known_extensions = set([
'issuer_alt_name',
'crl_number',
'delta_crl_indicator',
'issuing_distribution_point',
'authority_key_identifier',
'freshest_crl',
'authority_information_access',
])
checked_reasons = set()
failures = []
issuer_failures = 0
while len(crls_to_process) > 0:
certificate_list = crls_to_process.pop(0)
crl_idp = certificate_list.issuing_distribution_point_value
delta_certificate_list = None
delta_crl_idp = None
interim_reasons = set()
crl_issuer = None
crl_issuer_name = None
is_indirect = False
if crl_idp and crl_idp['indirect_crl'].native:
is_indirect = True
crl_idp_name = crl_idp['distribution_point']
if crl_idp_name:
if crl_idp_name.name == 'full_name':
crl_issuer_name = crl_idp_name.chosen[0].chosen
else:
crl_issuer_name = cert_issuer.subject.copy().chosen.append(
crl_idp_name.chosen
)
elif certificate_list.authority_key_identifier:
tmp_crl_issuer = certificate_registry.retrieve_by_key_identifier(
certificate_list.authority_key_identifier
)
crl_issuer_name = tmp_crl_issuer.subject
else:
failures.append((
'CRL is marked as an indirect CRL, but provides no '
'mechanism for locating the CRL issuer certificate',
certificate_list
))
continue
else:
crl_issuer_name = certificate_list.issuer
if not crl_issuer:
crl_issuer = validation_context.check_crl_issuer(certificate_list)
if not crl_issuer:
candidate_crl_issuers = certificate_registry.retrieve_by_name(crl_issuer_name, cert_issuer)
candidates_skipped = 0
signatures_failed = 0
unauthorized_certs = 0
if not candidate_crl_issuers and crl_issuer_name != certificate_list.issuer:
candidate_crl_issuers = certificate_registry.retrieve_by_name(certificate_list.issuer, cert_issuer)
for candidate_crl_issuer in candidate_crl_issuers:
direct_issuer = candidate_crl_issuer.subject == cert_issuer.subject
# In some cases an indirect CRL issuer is a certificate issued
# by the certificate issuer. However, we need to ensure that
# the candidate CRL issuer is not the certificate being checked,
# otherwise we may be checking an incorrect CRL and produce
# incorrect results.
indirect_issuer = candidate_crl_issuer.issuer == cert_issuer.subject
indirect_issuer = indirect_issuer and candidate_crl_issuer.sha256 != cert.sha256
if not direct_issuer and not indirect_issuer and not is_indirect:
candidates_skipped += 1
continue
# Step f
candidate_crl_issuer_path = None
if validation_context:
candidate_crl_issuer_path = validation_context.check_validation(candidate_crl_issuer)
if candidate_crl_issuer_path is None:
candidate_crl_issuer_path = path.copy().truncate_to_issuer(candidate_crl_issuer)
candidate_crl_issuer_path.append(candidate_crl_issuer)
try:
# Pre-emptively mark a path as validated to prevent recursion
if validation_context:
validation_context.record_validation(candidate_crl_issuer, candidate_crl_issuer_path)
temp_override = end_entity_name_override
if temp_override is None and candidate_crl_issuer.sha256 != cert_issuer.sha256:
temp_override = cert_description + ' CRL issuer'
_validate_path(
validation_context,
candidate_crl_issuer_path,
end_entity_name_override=temp_override
)
except (PathValidationError) as e:
# If the validation did not work out, clear it
if validation_context:
validation_context.clear_validation(candidate_crl_issuer)
# We let a revoked error fall through since step k will catch
# it with a correct error message
if isinstance(e, RevokedError):
raise
raise CRLValidationError('CRL issuer certificate path could not be validated')
key_usage_value = candidate_crl_issuer.key_usage_value
if key_usage_value and 'crl_sign' not in key_usage_value.native:
unauthorized_certs += 1
continue
try:
# Step g
_verify_signature(certificate_list, candidate_crl_issuer)
crl_issuer = candidate_crl_issuer
break
except (CRLValidationError):
signatures_failed += 1
continue
if crl_issuer is None:
if candidates_skipped == len(candidate_crl_issuers):
issuer_failures += 1
else:
if signatures_failed == len(candidate_crl_issuers):
failures.append((
'CRL signature could not be verified',
certificate_list
))
elif unauthorized_certs == len(candidate_crl_issuers):
failures.append((
'The CRL issuer is not authorized to sign CRLs',
certificate_list
))
else:
failures.append((
'Unable to locate CRL issuer certificate',
certificate_list
))
continue
else:
validation_context.record_crl_issuer(certificate_list, crl_issuer)
# Step b 1
has_dp_crl_issuer = False
dp_match = False
dps = cert.crl_distribution_points_value
if dps:
crl_issuer_general_name = x509.GeneralName(
name='directory_name',
value=crl_issuer.subject
)
for dp in dps:
if dp['crl_issuer']:
has_dp_crl_issuer = True
if crl_issuer_general_name in dp['crl_issuer']:
dp_match = True
same_issuer = crl_issuer.subject == cert_issuer.subject
indirect_match = has_dp_crl_issuer and dp_match and is_indirect
missing_idp = has_dp_crl_issuer and (not dp_match or not is_indirect)
indirect_crl_issuer = crl_issuer.issuer == cert_issuer.subject
if (not same_issuer and not indirect_match and not indirect_crl_issuer) or missing_idp:
issuer_failures += 1
continue
# Check to make sure the CRL is valid for the moment specified
if moment < certificate_list['tbs_cert_list']['this_update'].native:
failures.append((
'CRL is from after the validation time',
certificate_list
))
continue
if moment > certificate_list['tbs_cert_list']['next_update'].native:
failures.append((
'CRL should have been regenerated by the validation time',
certificate_list
))
continue
# Step b 2
if crl_idp is not None:
# Step b 2 i
has_idp_name = False
has_dp_name = False
idp_dp_match = False
idp_general_names = []
idp_dp_name = crl_idp['distribution_point']
if idp_dp_name:
has_idp_name = True
if idp_dp_name.name == 'full_name':
for general_name in idp_dp_name.chosen:
idp_general_names.append(general_name)
else:
inner_extended_issuer_name = crl_issuer.subject.copy()
inner_extended_issuer_name.chosen.append(idp_dp_name.chosen.untag())
idp_general_names.append(x509.GeneralName(
name='directory_name',
value=inner_extended_issuer_name
))
dps = cert.crl_distribution_points_value
if dps:
for dp in dps:
if idp_dp_match:
break
dp_name = dp['distribution_point']
if dp_name:
has_dp_name = True
if dp_name.name == 'full_name':
for general_name in dp_name.chosen:
if general_name in idp_general_names:
idp_dp_match = True
break
else:
inner_extended_issuer_name = crl_issuer.subject.copy()
inner_extended_issuer_name.chosen.append(dp_name.chosen.untag())
dp_extended_issuer_name = x509.GeneralName(
name='directory_name',
value=inner_extended_issuer_name
)
if dp_extended_issuer_name in idp_general_names:
idp_dp_match = True
elif dp['crl_issuer']:
has_dp_name = True
for dp_crl_issuer_name in dp['crl_issuer']:
if dp_crl_issuer_name in idp_general_names:
idp_dp_match = True
break
else:
# If there is no DP, we consider the CRL issuer name to be it
has_dp_name = True
general_name = x509.GeneralName(
name='directory_name',
value=crl_issuer_name
)
if general_name in idp_general_names:
idp_dp_match = True
idp_dp_match_failed = has_idp_name and has_dp_name and not idp_dp_match
if idp_dp_match_failed:
failures.append((
pretty_message(
'''
The CRL issuing distribution point extension does not
share any names with the certificate CRL distribution
point extension
'''
),
certificate_list
))
issuer_failures += 1
continue
# Step b 2 ii
if crl_idp['only_contains_user_certs'].native:
if cert.basic_constraints_value and cert.basic_constraints_value['ca'].native:
failures.append((
pretty_message(
'''
CRL only contains end-entity certificates and
certificate is a CA certificate
'''
),
certificate_list
))
continue
# Step b 2 iii
if crl_idp['only_contains_ca_certs'].native:
if not cert.basic_constraints_value or cert.basic_constraints_value['ca'].native is False:
failures.append((
pretty_message(
'''
CRL only contains CA certificates and certificate
is an end-entity certificate
'''
),
certificate_list
))
continue
# Step b 2 iv
if crl_idp['only_contains_attribute_certs'].native:
failures.append((
'CRL only contains attribute certificates',
certificate_list
))
continue
# Step c
if use_deltas and certificate_list.freshest_crl_value and len(certificate_list.freshest_crl_value) > 0:
for candidate_delta_cl in delta_lists_by_issuer.get(crl_issuer_name.hashable, []):
# Step c 1
if candidate_delta_cl.issuer != crl_issuer_name:
continue
# Step c 2
delta_crl_idp = candidate_delta_cl.issuing_distribution_point_value
if (crl_idp is None and delta_crl_idp is not None) or (crl_idp is not None and delta_crl_idp is None):
continue
if crl_idp and crl_idp.native != delta_crl_idp.native:
continue
# Step c 3
if certificate_list.authority_key_identifier != candidate_delta_cl.authority_key_identifier:
continue
delta_certificate_list = candidate_delta_cl
break
# Step d
idp_reasons = None
if crl_idp and crl_idp['only_some_reasons'].native is not None:
idp_reasons = crl_idp['only_some_reasons'].native
reason_keys = None
if idp_reasons:
reason_keys = idp_reasons
if reason_keys is None:
interim_reasons = valid_reasons.copy()
else:
interim_reasons = reason_keys
# Step e
# We don't skip a CRL if it only contains reasons already checked since
# a certificate issuer can self-issue a new cert that is used for CRLs
if certificate_list.critical_extensions - known_extensions:
failures.append((
'One or more unrecognized critical extensions are present in '
'the CRL',
certificate_list
))
continue
if use_deltas and delta_certificate_list and delta_certificate_list.critical_extensions - known_extensions:
failures.append((
'One or more unrecognized critical extensions are present in '
'the delta CRL',
delta_certificate_list
))
continue
# Step h
if use_deltas and delta_certificate_list:
try:
_verify_signature(delta_certificate_list, crl_issuer)
except (CRLValidationError):
failures.append((
'Delta CRL signature could not be verified',
certificate_list,
delta_certificate_list
))
continue
if moment < delta_certificate_list['tbs_cert_list']['this_update'].native:
failures.append((
'Delta CRL is from after the validation time',
certificate_list,
delta_certificate_list
))
continue
if moment > delta_certificate_list['tbs_cert_list']['next_update'].native:
failures.append((
                    'Delta CRL should have been regenerated by the validation time',
certificate_list,
delta_certificate_list
))
continue
# Step i
revoked_reason = None
revoked_date = None
if use_deltas and delta_certificate_list:
try:
revoked_date, revoked_reason = _find_cert_in_list(cert, cert_issuer, delta_certificate_list, crl_issuer)
except (NotImplementedError):
failures.append((
'One or more critical extensions are present in the CRL '
'entry for the certificate',
delta_certificate_list
))
continue
# Step j
if revoked_reason is None:
try:
revoked_date, revoked_reason = _find_cert_in_list(cert, cert_issuer, certificate_list, crl_issuer)
except (NotImplementedError):
failures.append((
'One or more critical extensions are present in the CRL '
'entry for the certificate',
certificate_list
))
continue
# Step k
if revoked_reason and revoked_reason.native == 'remove_from_crl':
revoked_reason = None
revoked_date = None
if revoked_reason:
reason = revoked_reason.human_friendly
date = revoked_date.native.strftime('%Y-%m-%d')
time = revoked_date.native.strftime('%H:%M:%S')
raise RevokedError(pretty_message(
'''
CRL indicates %s was revoked at %s on %s, due to %s
''',
cert_description,
time,
date,
reason
))
# Step l
checked_reasons |= interim_reasons
# CRLs should not include this value, but at least one of the examples
# from the NIST test suite does
checked_reasons -= set(['unused'])
if checked_reasons != valid_reasons:
if total_crls == issuer_failures:
raise CRLNoMatchesError(pretty_message(
'''
No CRLs were issued by the issuer of %s, or any indirect CRL
issuer
''',
cert_description
))
if not failures:
failures.append((
'The available CRLs do not cover all revocation reasons',
))
raise CRLValidationIndeterminateError(
pretty_message(
'''
Unable to determine if %s is revoked due to insufficient
information from known CRLs
''',
cert_description
),
failures
) |
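For orientation, a hedged usage sketch of the function above; the wrapper name and the way `cert`, `path` and `context` are obtained are assumptions, not part of the source:
def example_revocation_check(cert, path, context):
    # Illustrative only: `cert` is an asn1crypto.x509.Certificate and `path`
    # a certvalidator ValidationPath produced by an earlier path-building step.
    try:
        verify_crl(cert, path, context, use_deltas=True,
                   cert_description='the end-entity certificate')
        return 'not revoked according to the available CRLs'
    except RevokedError as exc:
        return 'revoked: {0}'.format(exc)
    except (CRLNoMatchesError, CRLValidationIndeterminateError) as exc:
        return 'indeterminate: {0}'.format(exc)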
def zoom_motion(self, event=None):
"""motion event handler for zoom mode"""
try:
x, y = event.x, event.y
except:
return
self.report_motion(event=event)
if self.zoom_ini is None:
return
ini_x, ini_y, ini_xd, ini_yd = self.zoom_ini
if event.xdata is not None:
self.x_lastmove = event.xdata
if event.ydata is not None:
self.y_lastmove = event.ydata
x0 = min(x, ini_x)
ymax = max(y, ini_y)
width = abs(x-ini_x)
height = abs(y-ini_y)
y0 = self.canvas.figure.bbox.height - ymax
zdc = wx.ClientDC(self.canvas)
zdc.SetLogicalFunction(wx.XOR)
zdc.SetBrush(wx.TRANSPARENT_BRUSH)
zdc.SetPen(wx.Pen('White', 2, wx.SOLID))
zdc.ResetBoundingBox()
if not is_wxPhoenix:
zdc.BeginDrawing()
# erase previous box
if self.rbbox is not None:
zdc.DrawRectangle(*self.rbbox)
self.rbbox = (x0, y0, width, height)
zdc.DrawRectangle(*self.rbbox)
if not is_wxPhoenix:
        zdc.EndDrawing() | motion event handler for zoom mode | Below is the instruction that describes the task:
### Input:
motion event handler for zoom mode
### Response:
def zoom_motion(self, event=None):
"""motion event handler for zoom mode"""
try:
x, y = event.x, event.y
except:
return
self.report_motion(event=event)
if self.zoom_ini is None:
return
ini_x, ini_y, ini_xd, ini_yd = self.zoom_ini
if event.xdata is not None:
self.x_lastmove = event.xdata
if event.ydata is not None:
self.y_lastmove = event.ydata
x0 = min(x, ini_x)
ymax = max(y, ini_y)
width = abs(x-ini_x)
height = abs(y-ini_y)
y0 = self.canvas.figure.bbox.height - ymax
zdc = wx.ClientDC(self.canvas)
zdc.SetLogicalFunction(wx.XOR)
zdc.SetBrush(wx.TRANSPARENT_BRUSH)
zdc.SetPen(wx.Pen('White', 2, wx.SOLID))
zdc.ResetBoundingBox()
if not is_wxPhoenix:
zdc.BeginDrawing()
# erase previous box
if self.rbbox is not None:
zdc.DrawRectangle(*self.rbbox)
self.rbbox = (x0, y0, width, height)
zdc.DrawRectangle(*self.rbbox)
if not is_wxPhoenix:
zdc.EndDrawing() |
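A minimal sketch (assumed wiring, not from the source) of the press/release handlers this motion handler expects to seed and clear its drag state:
def zoom_press(self, event):
    # Record pixel and data coordinates at the drag origin; zoom_motion
    # reads them back from self.zoom_ini while drawing the rubber band.
    self.zoom_ini = (event.x, event.y, event.xdata, event.ydata)
    self.rbbox = None
def zoom_release(self, event):
    # The selected data range runs from the stored origin to the last
    # motion position, e.g. (ini_xd, self.x_lastmove) on the x axis.
    ini_x, ini_y, ini_xd, ini_yd = self.zoom_ini
    self.zoom_ini = None
    self.rbbox = None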
def variable(self):
"""
variable : variable
Feature Type Array adds:
variable : variable[expression]
Feature Type Func adds:
variable : variable(arg_list)
"""
var = Var(self.cur_token)
self.eat(TokenTypes.VAR)
if Features.TYPE_ARRAY in self.features:
while self.cur_token.type == TokenTypes.LBRACKET:
self.eat(TokenTypes.LBRACKET)
            # Start past the logical ops.
expr = self.operator_expression(level=2)
self.eat(TokenTypes.RBRACKET)
var = GetArrayItem(left=var, right=expr)
if Features.FUNC in self.features:
if self.cur_token.type == TokenTypes.LPAREN:
self.eat(TokenTypes.LPAREN)
args = self.arg_list()
self.eat(TokenTypes.RPAREN)
var = Call(var, args)
return var | variable : variable
Feature Type Array adds:
variable : variable[expression]
Feature Type Func adds:
        variable : variable(arg_list) | Below is the instruction that describes the task:
### Input:
variable : variable
Feature Type Array adds:
variable : variable[expression]
Feature Type Func adds:
variable : variable(arg_list)
### Response:
def variable(self):
"""
variable : variable
Feature Type Array adds:
variable : variable[expression]
Feature Type Func adds:
variable : variable(arg_list)
"""
var = Var(self.cur_token)
self.eat(TokenTypes.VAR)
if Features.TYPE_ARRAY in self.features:
while self.cur_token.type == TokenTypes.LBRACKET:
self.eat(TokenTypes.LBRACKET)
            # Start past the logical ops.
expr = self.operator_expression(level=2)
self.eat(TokenTypes.RBRACKET)
var = GetArrayItem(left=var, right=expr)
if Features.FUNC in self.features:
if self.cur_token.type == TokenTypes.LPAREN:
self.eat(TokenTypes.LPAREN)
args = self.arg_list()
self.eat(TokenTypes.RPAREN)
var = Call(var, args)
return var |
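Illustrative only (a matching token stream is assumed): with both features enabled, the rule above parses chained subscripts followed by an optional call, e.g.:
# matrix[i][j]       -> GetArrayItem(GetArrayItem(Var('matrix'), <expr i>), <expr j>)
# handlers[0](event) -> Call(GetArrayItem(Var('handlers'), <expr 0>), <args>)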
def merge_params(params, lparams):
"""Merge global ignore/select with linter local params."""
ignore = params.get('ignore', set())
if 'ignore' in lparams:
ignore = ignore | set(lparams['ignore'])
select = params.get('select', set())
if 'select' in lparams:
select = select | set(lparams['select'])
    return ignore, select | Merge global ignore/select with linter local params. | Below is the instruction that describes the task:
### Input:
Merge global ignore/select with linter local params.
### Response:
def merge_params(params, lparams):
"""Merge global ignore/select with linter local params."""
ignore = params.get('ignore', set())
if 'ignore' in lparams:
ignore = ignore | set(lparams['ignore'])
select = params.get('select', set())
if 'select' in lparams:
select = select | set(lparams['select'])
return ignore, select |
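A runnable check of the merge semantics (the rule codes are illustrative):
params = {'ignore': {'E501'}, 'select': {'C901'}}
lparams = {'ignore': ['W0611'], 'select': ['R0201']}
ignore, select = merge_params(params, lparams)
assert ignore == {'E501', 'W0611'}
assert select == {'C901', 'R0201'}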
def createNote(self, title=None, text=None):
"""Create a new managed note. Any changes to the note will be uploaded when :py:meth:`sync` is called.
Args:
title (str): The title of the note.
text (str): The text of the note.
Returns:
            gkeepapi.node.Note: The new note.
"""
node = _node.Note()
if title is not None:
node.title = title
if text is not None:
node.text = text
self.add(node)
return node | Create a new managed note. Any changes to the note will be uploaded when :py:meth:`sync` is called.
Args:
title (str): The title of the note.
text (str): The text of the note.
Returns:
            gkeepapi.node.Note: The new note. | Below is the instruction that describes the task:
### Input:
Create a new managed note. Any changes to the note will be uploaded when :py:meth:`sync` is called.
Args:
title (str): The title of the note.
text (str): The text of the note.
Returns:
            gkeepapi.node.Note: The new note.
### Response:
def createNote(self, title=None, text=None):
"""Create a new managed note. Any changes to the note will be uploaded when :py:meth:`sync` is called.
Args:
title (str): The title of the note.
text (str): The text of the note.
Returns:
            gkeepapi.node.Note: The new note.
"""
node = _node.Note()
if title is not None:
node.title = title
if text is not None:
node.text = text
self.add(node)
return node |
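A hedged usage sketch; the credentials are placeholders and a successful login is assumed:
import gkeepapi
keep = gkeepapi.Keep()
keep.login('user@example.com', 'app-password')  # placeholder credentials
note = keep.createNote('Groceries', 'milk, eggs')
keep.sync()  # uploads the pending change, as the docstring describes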
def complete_irradiance(self, times=None, weather=None):
"""
Determine the missing irradiation columns. Only two of the
following data columns (dni, ghi, dhi) are needed to calculate
the missing data.
This function is not safe at the moment. Results can be too high
or negative. Please contribute and help to improve this function
on https://github.com/pvlib/pvlib-python
Parameters
----------
times : None or DatetimeIndex, default None
Times at which to evaluate the model. Can be None if
attribute `times` is already set.
weather : None or pandas.DataFrame, default None
Table with at least two columns containing one of the
following data sets: dni, dhi, ghi. Can be None if attribute
`weather` is already set.
Returns
-------
self
Assigns attributes: times, weather
Examples
--------
This example does not work until the parameters `my_system`,
    `my_location`, `my_datetime` and `my_weather` are defined
    properly but shows the basic idea of how this method can be used.
>>> from pvlib.modelchain import ModelChain
>>> # my_weather containing 'dhi' and 'ghi'.
>>> mc = ModelChain(my_system, my_location) # doctest: +SKIP
>>> mc.complete_irradiance(my_datetime, my_weather) # doctest: +SKIP
>>> mc.run_model() # doctest: +SKIP
>>> # my_weather containing 'dhi', 'ghi' and 'dni'.
>>> mc = ModelChain(my_system, my_location) # doctest: +SKIP
>>> mc.run_model(my_datetime, my_weather) # doctest: +SKIP
"""
if weather is not None:
self.weather = weather
if times is not None:
self.times = times
self.solar_position = self.location.get_solarposition(
self.times, method=self.solar_position_method)
icolumns = set(self.weather.columns)
wrn_txt = ("This function is not safe at the moment.\n" +
"Results can be too high or negative.\n" +
"Help to improve this function on github:\n" +
"https://github.com/pvlib/pvlib-python \n")
if {'ghi', 'dhi'} <= icolumns and 'dni' not in icolumns:
clearsky = self.location.get_clearsky(
times, solar_position=self.solar_position)
self.weather.loc[:, 'dni'] = pvlib.irradiance.dni(
self.weather.loc[:, 'ghi'], self.weather.loc[:, 'dhi'],
self.solar_position.zenith,
clearsky_dni=clearsky['dni'],
clearsky_tolerance=1.1)
elif {'dni', 'dhi'} <= icolumns and 'ghi' not in icolumns:
warnings.warn(wrn_txt, UserWarning)
self.weather.loc[:, 'ghi'] = (
self.weather.dni * tools.cosd(self.solar_position.zenith) +
self.weather.dhi)
elif {'dni', 'ghi'} <= icolumns and 'dhi' not in icolumns:
warnings.warn(wrn_txt, UserWarning)
self.weather.loc[:, 'dhi'] = (
self.weather.ghi - self.weather.dni *
tools.cosd(self.solar_position.zenith))
return self | Determine the missing irradiation columns. Only two of the
following data columns (dni, ghi, dhi) are needed to calculate
the missing data.
This function is not safe at the moment. Results can be too high
or negative. Please contribute and help to improve this function
on https://github.com/pvlib/pvlib-python
Parameters
----------
times : None or DatetimeIndex, default None
Times at which to evaluate the model. Can be None if
attribute `times` is already set.
weather : None or pandas.DataFrame, default None
Table with at least two columns containing one of the
following data sets: dni, dhi, ghi. Can be None if attribute
`weather` is already set.
Returns
-------
self
Assigns attributes: times, weather
Examples
--------
This example does not work until the parameters `my_system`,
    `my_location`, `my_datetime` and `my_weather` are defined
    properly but shows the basic idea of how this method can be used.
>>> from pvlib.modelchain import ModelChain
>>> # my_weather containing 'dhi' and 'ghi'.
>>> mc = ModelChain(my_system, my_location) # doctest: +SKIP
>>> mc.complete_irradiance(my_datetime, my_weather) # doctest: +SKIP
>>> mc.run_model() # doctest: +SKIP
>>> # my_weather containing 'dhi', 'ghi' and 'dni'.
>>> mc = ModelChain(my_system, my_location) # doctest: +SKIP
    >>> mc.run_model(my_datetime, my_weather) # doctest: +SKIP | Below is the instruction that describes the task:
### Input:
Determine the missing irradiation columns. Only two of the
following data columns (dni, ghi, dhi) are needed to calculate
the missing data.
This function is not safe at the moment. Results can be too high
or negative. Please contribute and help to improve this function
on https://github.com/pvlib/pvlib-python
Parameters
----------
times : None or DatetimeIndex, default None
Times at which to evaluate the model. Can be None if
attribute `times` is already set.
weather : None or pandas.DataFrame, default None
Table with at least two columns containing one of the
following data sets: dni, dhi, ghi. Can be None if attribute
`weather` is already set.
Returns
-------
self
Assigns attributes: times, weather
Examples
--------
This example does not work until the parameters `my_system`,
    `my_location`, `my_datetime` and `my_weather` are defined
    properly but shows the basic idea of how this method can be used.
>>> from pvlib.modelchain import ModelChain
>>> # my_weather containing 'dhi' and 'ghi'.
>>> mc = ModelChain(my_system, my_location) # doctest: +SKIP
>>> mc.complete_irradiance(my_datetime, my_weather) # doctest: +SKIP
>>> mc.run_model() # doctest: +SKIP
>>> # my_weather containing 'dhi', 'ghi' and 'dni'.
>>> mc = ModelChain(my_system, my_location) # doctest: +SKIP
>>> mc.run_model(my_datetime, my_weather) # doctest: +SKIP
### Response:
def complete_irradiance(self, times=None, weather=None):
"""
Determine the missing irradiation columns. Only two of the
following data columns (dni, ghi, dhi) are needed to calculate
the missing data.
This function is not safe at the moment. Results can be too high
or negative. Please contribute and help to improve this function
on https://github.com/pvlib/pvlib-python
Parameters
----------
times : None or DatetimeIndex, default None
Times at which to evaluate the model. Can be None if
attribute `times` is already set.
weather : None or pandas.DataFrame, default None
Table with at least two columns containing one of the
following data sets: dni, dhi, ghi. Can be None if attribute
`weather` is already set.
Returns
-------
self
Assigns attributes: times, weather
Examples
--------
This example does not work until the parameters `my_system`,
    `my_location`, `my_datetime` and `my_weather` are defined
    properly but shows the basic idea of how this method can be used.
>>> from pvlib.modelchain import ModelChain
>>> # my_weather containing 'dhi' and 'ghi'.
>>> mc = ModelChain(my_system, my_location) # doctest: +SKIP
>>> mc.complete_irradiance(my_datetime, my_weather) # doctest: +SKIP
>>> mc.run_model() # doctest: +SKIP
>>> # my_weather containing 'dhi', 'ghi' and 'dni'.
>>> mc = ModelChain(my_system, my_location) # doctest: +SKIP
>>> mc.run_model(my_datetime, my_weather) # doctest: +SKIP
"""
if weather is not None:
self.weather = weather
if times is not None:
self.times = times
self.solar_position = self.location.get_solarposition(
self.times, method=self.solar_position_method)
icolumns = set(self.weather.columns)
wrn_txt = ("This function is not safe at the moment.\n" +
"Results can be too high or negative.\n" +
"Help to improve this function on github:\n" +
"https://github.com/pvlib/pvlib-python \n")
if {'ghi', 'dhi'} <= icolumns and 'dni' not in icolumns:
clearsky = self.location.get_clearsky(
times, solar_position=self.solar_position)
self.weather.loc[:, 'dni'] = pvlib.irradiance.dni(
self.weather.loc[:, 'ghi'], self.weather.loc[:, 'dhi'],
self.solar_position.zenith,
clearsky_dni=clearsky['dni'],
clearsky_tolerance=1.1)
elif {'dni', 'dhi'} <= icolumns and 'ghi' not in icolumns:
warnings.warn(wrn_txt, UserWarning)
self.weather.loc[:, 'ghi'] = (
self.weather.dni * tools.cosd(self.solar_position.zenith) +
self.weather.dhi)
elif {'dni', 'ghi'} <= icolumns and 'dhi' not in icolumns:
warnings.warn(wrn_txt, UserWarning)
self.weather.loc[:, 'dhi'] = (
self.weather.ghi - self.weather.dni *
tools.cosd(self.solar_position.zenith))
return self |
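All three branches rely on the closure relation GHI = DNI * cos(zenith) + DHI; a quick standalone check of that identity (the values are illustrative):
import numpy as np
dni, dhi, zenith = 800.0, 100.0, 30.0  # W/m^2, W/m^2, degrees
ghi = dni * np.cos(np.radians(zenith)) + dhi
print(round(ghi, 1))  # -> 792.8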
def update(self, index, iterable, commit=True):
"""
Updates the index with current data.
:param index: The search_indexes.Index object
:param iterable: The queryset
:param commit: commit to the backend.
"""
parler = False
# setup here because self.existing_mappings are overridden.
if not self.setup_complete:
try:
self.setup()
except elasticsearch.TransportError as e:
if not self.silently_fail:
raise
self.log.error("Failed to add documents to Elasticsearch: %s", e)
return
if hasattr(iterable, 'language') and hasattr(iterable.language, '__call__'):
parler = True # Django-parler
for language in self.languages:
self.index_name = self._index_name_for_language(language)
# self.log.debug('updating index for {0}'.format(language))
if parler:
# workaround for django-parler
for item in iterable:
item.set_current_language(language)
super(ElasticsearchMultilingualSearchBackend, self).update(
index, iterable, commit)
else:
with translation.override(language):
super(ElasticsearchMultilingualSearchBackend, self).update(
index, iterable, commit) | Updates the index with current data.
:param index: The search_indexes.Index object
:param iterable: The queryset
        :param commit: commit to the backend. | Below is the instruction that describes the task:
### Input:
Updates the index with current data.
:param index: The search_indexes.Index object
:param iterable: The queryset
:param commit: commit to the backend.
### Response:
def update(self, index, iterable, commit=True):
"""
Updates the index with current data.
:param index: The search_indexes.Index object
:param iterable: The queryset
:param commit: commit to the backend.
"""
parler = False
# setup here because self.existing_mappings are overridden.
if not self.setup_complete:
try:
self.setup()
except elasticsearch.TransportError as e:
if not self.silently_fail:
raise
self.log.error("Failed to add documents to Elasticsearch: %s", e)
return
if hasattr(iterable, 'language') and hasattr(iterable.language, '__call__'):
parler = True # Django-parler
for language in self.languages:
self.index_name = self._index_name_for_language(language)
# self.log.debug('updating index for {0}'.format(language))
if parler:
# workaround for django-parler
for item in iterable:
item.set_current_language(language)
super(ElasticsearchMultilingualSearchBackend, self).update(
index, iterable, commit)
else:
with translation.override(language):
super(ElasticsearchMultilingualSearchBackend, self).update(
index, iterable, commit) |
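`_index_name_for_language` is called above but not shown; a plausible implementation (an assumption, not the project's actual code) suffixes a stored base name with the language code:
def _index_name_for_language(self, language):
    # `base_index_name` is a hypothetical attribute saved before update()
    # starts overriding self.index_name per language.
    return '{0}-{1}'.format(self.base_index_name, language)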
def get_struct(name):
"""Get a struct by it's name.
Args:
name: The name of the struct
Returns:
The struct's id
Raises:
        exceptions.SarkStructNotFound: if the struct does not exist.
"""
sid = idc.GetStrucIdByName(name)
if sid == idaapi.BADADDR:
raise exceptions.SarkStructNotFound()
    return sid | Get a struct by its name.
Args:
name: The name of the struct
Returns:
The struct's id
Raises:
        exceptions.SarkStructNotFound: if the struct does not exist. | Below is the instruction that describes the task:
### Input:
Get a struct by its name.
Args:
name: The name of the struct
Returns:
The struct's id
Raises:
    exceptions.SarkStructNotFound: if the struct does not exist.
### Response:
def get_struct(name):
"""Get a struct by it's name.
Args:
name: The name of the struct
Returns:
The struct's id
Raises:
        exceptions.SarkStructNotFound: if the struct does not exist.
"""
sid = idc.GetStrucIdByName(name)
if sid == idaapi.BADADDR:
raise exceptions.SarkStructNotFound()
return sid |
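A hedged usage sketch for an IDAPython session (the struct name is illustrative):
try:
    sid = get_struct('UNICODE_STRING')
except exceptions.SarkStructNotFound:
    # Fall back to creating the struct; AddStrucEx is the classic IDC API
    # matching the GetStrucIdByName call used above.
    sid = idc.AddStrucEx(-1, 'UNICODE_STRING', 0)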
def _add_current_codedir(self, path):
"""Adds the directory of the file at the specified path as a base
path to find other files in.
"""
dirpath = self.tramp.dirname(path)
if dirpath not in self.basepaths:
self.basepaths.append(dirpath)
self.rescan() | Adds the directory of the file at the specified path as a base
        path to find other files in. | Below is the instruction that describes the task:
### Input:
Adds the directory of the file at the specified path as a base
path to find other files in.
### Response:
def _add_current_codedir(self, path):
"""Adds the directory of the file at the specified path as a base
path to find other files in.
"""
dirpath = self.tramp.dirname(path)
if dirpath not in self.basepaths:
self.basepaths.append(dirpath)
self.rescan() |
def _on_permission_result(self, code, perms, results):
""" Handles a permission request result by passing it to the
handler with the given code.
"""
#: Get the handler for this request
handler = self._permission_requests.get(code, None)
if handler is not None:
del self._permission_requests[code]
#: Invoke that handler with the permission request response
handler(code, perms, results) | Handles a permission request result by passing it to the
        handler with the given code. | Below is the instruction that describes the task:
### Input:
Handles a permission request result by passing it to the
handler with the given code.
### Response:
def _on_permission_result(self, code, perms, results):
""" Handles a permission request result by passing it to the
handler with the given code.
"""
#: Get the handler for this request
handler = self._permission_requests.get(code, None)
if handler is not None:
del self._permission_requests[code]
#: Invoke that handler with the permission request response
handler(code, perms, results) |
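The request side is not shown; a minimal sketch (the method name and the platform call are assumptions) of registering a handler under a request code first:
def _request_permission(self, code, perms, handler):
    # Store the callback so _on_permission_result can look it up by code.
    self._permission_requests[code] = handler
    # ...then ask the platform for the permissions (bridge call omitted).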
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
    return np.sum(inertia) | Compute inertia with cosine distance using known labels. | Below is the instruction that describes the task:
### Input:
Compute inertia with cosine distance using known labels.
### Response:
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia) |
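An equivalent vectorized form (assumes the rows of X and centers are L2-normalized, as the cosine-distance formula above requires):
import numpy as np
def _inertia_from_labels_vec(X, centers, labels):
    # Row-wise dot product of each example with its assigned center.
    sims = np.einsum('ij,ij->i', X, centers[labels.astype(int)])
    return np.sum(1.0 - sims)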
def stackscripts(self, *filters, **kwargs):
"""
Returns a list of :any:`StackScripts<StackScript>`, both public and
private. You may filter this query to return only
:any:`StackScripts<StackScript>` that match certain criteria. You may
also request only your own private :any:`StackScripts<StackScript>`::
my_stackscripts = client.linode.stackscripts(mine_only=True)
:param filters: Any number of filters to apply to this query.
:param mine_only: If True, returns only private StackScripts
:type mine_only: bool
:returns: A list of StackScripts matching the query.
:rtype: PaginatedList of StackScript
"""
# python2 can't handle *args and a single keyword argument, so this is a workaround
if 'mine_only' in kwargs:
if kwargs['mine_only']:
new_filter = Filter({"mine":True})
if filters:
filters = [ f for f in filters ]
filters[0] = filters[0] & new_filter
else:
filters = [new_filter]
del kwargs['mine_only']
if kwargs:
raise TypeError("stackscripts() got unexpected keyword argument '{}'".format(kwargs.popitem()[0]))
return self.client._get_and_filter(StackScript, *filters) | Returns a list of :any:`StackScripts<StackScript>`, both public and
private. You may filter this query to return only
:any:`StackScripts<StackScript>` that match certain criteria. You may
also request only your own private :any:`StackScripts<StackScript>`::
my_stackscripts = client.linode.stackscripts(mine_only=True)
:param filters: Any number of filters to apply to this query.
:param mine_only: If True, returns only private StackScripts
:type mine_only: bool
:returns: A list of StackScripts matching the query.
        :rtype: PaginatedList of StackScript | Below is the instruction that describes the task:
### Input:
Returns a list of :any:`StackScripts<StackScript>`, both public and
private. You may filter this query to return only
:any:`StackScripts<StackScript>` that match certain criteria. You may
also request only your own private :any:`StackScripts<StackScript>`::
my_stackscripts = client.linode.stackscripts(mine_only=True)
:param filters: Any number of filters to apply to this query.
:param mine_only: If True, returns only private StackScripts
:type mine_only: bool
:returns: A list of StackScripts matching the query.
:rtype: PaginatedList of StackScript
### Response:
def stackscripts(self, *filters, **kwargs):
"""
Returns a list of :any:`StackScripts<StackScript>`, both public and
private. You may filter this query to return only
:any:`StackScripts<StackScript>` that match certain criteria. You may
also request only your own private :any:`StackScripts<StackScript>`::
my_stackscripts = client.linode.stackscripts(mine_only=True)
:param filters: Any number of filters to apply to this query.
:param mine_only: If True, returns only private StackScripts
:type mine_only: bool
:returns: A list of StackScripts matching the query.
:rtype: PaginatedList of StackScript
"""
# python2 can't handle *args and a single keyword argument, so this is a workaround
if 'mine_only' in kwargs:
if kwargs['mine_only']:
new_filter = Filter({"mine":True})
if filters:
filters = [ f for f in filters ]
filters[0] = filters[0] & new_filter
else:
filters = [new_filter]
del kwargs['mine_only']
if kwargs:
raise TypeError("stackscripts() got unexpected keyword argument '{}'".format(kwargs.popitem()[0]))
return self.client._get_and_filter(StackScript, *filters) |
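A hedged usage sketch (the token is a placeholder and the import path is an assumption):
from linode_api4 import LinodeClient
client = LinodeClient('my-api-token')
for stackscript in client.linode.stackscripts(mine_only=True):
    print(stackscript.label)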
def make_compare(key, value, obj):
"Map a key name to a specific comparison function"
if '__' not in key:
# If no __ exists, default to doing an "exact" comparison
key, comp = key, 'exact'
else:
key, comp = key.rsplit('__', 1)
# Check if comp is valid
if hasattr(Compare, comp):
return getattr(Compare, comp)(key, value, obj)
raise AttributeError("No comparison '%s'" % comp) | Map a key name to a specific comparison function | Below is the the instruction that describes the task:
### Input:
Map a key name to a specific comparison function
### Response:
def make_compare(key, value, obj):
"Map a key name to a specific comparison function"
if '__' not in key:
# If no __ exists, default to doing an "exact" comparison
key, comp = key, 'exact'
else:
key, comp = key.rsplit('__', 1)
# Check if comp is valid
if hasattr(Compare, comp):
return getattr(Compare, comp)(key, value, obj)
raise AttributeError("No comparison '%s'" % comp) |
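The key-splitting convention, demonstrated stand-alone (the values are illustrative):
key, comp = 'created__gte'.rsplit('__', 1)
assert (key, comp) == ('created', 'gte')
# A bare key falls through to the default, so make_compare('name', 'bob', obj)
# dispatches to Compare.exact('name', 'bob', obj).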
def add_gateway_to_diagram(self, process_id, gateway_type, gateway_name="", gateway_direction="Unspecified",
node_id=None):
"""
        Adds a gateway element to the BPMN diagram.
        :param process_id: string object. ID of parent process,
        :param gateway_type: string object. Type of gateway to be added.
        :param gateway_name: string object. Name of the gateway,
:param gateway_direction: string object. Accepted values - "Unspecified", "Converging", "Diverging", "Mixed".
Default value - "Unspecified",
:param node_id: string object. ID of node. Default value - None.
:return: a tuple, where first value is gateway ID, second a reference to created object.
"""
gateway_id, gateway = self.add_flow_node_to_diagram(process_id, gateway_type, gateway_name, node_id)
if not (gateway_direction in ("Unspecified", "Converging", "Diverging", "Mixed")):
raise bpmn_exception.BpmnPythonError("Invalid value passed as gatewayDirection parameter. Value passed: "
+ gateway_direction)
self.diagram_graph.node[gateway_id][consts.Consts.gateway_direction] = gateway_direction
        return gateway_id, gateway | Adds a gateway element to the BPMN diagram.
        :param process_id: string object. ID of parent process,
        :param gateway_type: string object. Type of gateway to be added.
        :param gateway_name: string object. Name of the gateway,
:param gateway_direction: string object. Accepted values - "Unspecified", "Converging", "Diverging", "Mixed".
Default value - "Unspecified",
:param node_id: string object. ID of node. Default value - None.
        :return: a tuple, where first value is gateway ID, second a reference to created object. | Below is the instruction that describes the task:
### Input:
        Adds a gateway element to the BPMN diagram.
        :param process_id: string object. ID of parent process,
        :param gateway_type: string object. Type of gateway to be added.
        :param gateway_name: string object. Name of the gateway,
:param gateway_direction: string object. Accepted values - "Unspecified", "Converging", "Diverging", "Mixed".
Default value - "Unspecified",
:param node_id: string object. ID of node. Default value - None.
:return: a tuple, where first value is gateway ID, second a reference to created object.
### Response:
def add_gateway_to_diagram(self, process_id, gateway_type, gateway_name="", gateway_direction="Unspecified",
node_id=None):
"""
        Adds a gateway element to the BPMN diagram.
        :param process_id: string object. ID of parent process,
        :param gateway_type: string object. Type of gateway to be added.
        :param gateway_name: string object. Name of the gateway,
:param gateway_direction: string object. Accepted values - "Unspecified", "Converging", "Diverging", "Mixed".
Default value - "Unspecified",
:param node_id: string object. ID of node. Default value - None.
:return: a tuple, where first value is gateway ID, second a reference to created object.
"""
gateway_id, gateway = self.add_flow_node_to_diagram(process_id, gateway_type, gateway_name, node_id)
if not (gateway_direction in ("Unspecified", "Converging", "Diverging", "Mixed")):
raise bpmn_exception.BpmnPythonError("Invalid value passed as gatewayDirection parameter. Value passed: "
+ gateway_direction)
self.diagram_graph.node[gateway_id][consts.Consts.gateway_direction] = gateway_direction
return gateway_id, gateway |
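A hedged usage sketch; the surrounding bpmn_python setup calls are taken as assumptions here:
import bpmn_python.bpmn_diagram_rep as diagram
graph = diagram.BpmnDiagramGraph()
graph.create_new_diagram_graph(diagram_name='demo')
process_id = graph.add_process_to_diagram()
gw_id, gw = graph.add_gateway_to_diagram(process_id, 'exclusiveGateway',
                                         gateway_name='split decision',
                                         gateway_direction='Diverging')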
def _to_operator(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Operator representation."""
if rep == 'Operator':
return data
if rep == 'Stinespring':
return _stinespring_to_operator(data, input_dim, output_dim)
# Convert via Kraus representation
if rep != 'Kraus':
data = _to_kraus(rep, data, input_dim, output_dim)
    return _kraus_to_operator(data, input_dim, output_dim) | Transform a QuantumChannel to the Operator representation. | Below is the instruction that describes the task:
### Input:
Transform a QuantumChannel to the Operator representation.
### Response:
def _to_operator(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Operator representation."""
if rep == 'Operator':
return data
if rep == 'Stinespring':
return _stinespring_to_operator(data, input_dim, output_dim)
# Convert via Kraus representation
if rep != 'Kraus':
data = _to_kraus(rep, data, input_dim, output_dim)
return _kraus_to_operator(data, input_dim, output_dim) |
def _cli_check_basis(name, data_dir):
'''Checks that a basis set exists and if not, raises a helpful exception'''
if name is None:
return None
name = misc.transform_basis_name(name)
metadata = api.get_metadata(data_dir)
if not name in metadata:
errstr = "Basis set '" + name + "' does not exist.\n"
errstr += "For a complete list of basis sets, use the 'bse list-basis-sets' command"
raise RuntimeError(errstr)
    return name | Checks that a basis set exists and if not, raises a helpful exception | Below is the instruction that describes the task:
### Input:
Checks that a basis set exists and if not, raises a helpful exception
### Response:
def _cli_check_basis(name, data_dir):
'''Checks that a basis set exists and if not, raises a helpful exception'''
if name is None:
return None
name = misc.transform_basis_name(name)
metadata = api.get_metadata(data_dir)
if not name in metadata:
errstr = "Basis set '" + name + "' does not exist.\n"
errstr += "For a complete list of basis sets, use the 'bse list-basis-sets' command"
raise RuntimeError(errstr)
return name |
def discoverPoints(bacnetapp, address, devID):
"""
Discover the BACnet points in a BACnet device.
    :param bacnetapp: The app itself so we can call read
    :param address: address of the device as a string (ex. '2:5')
    :param devID: device ID of the bacnet device as a string (ex. '1001')
    :returns: a tuple with deviceName, pss, objList, df
        * *deviceName* : name of the device
        * *pss* : protocol services supported
* *objList* : list of bacnet object (ex. analogInput, 1)
* *df* : is a dataFrame containing pointType, pointAddress, pointName, description
presentValue and units
If pandas can't be found, df will be a simple array
"""
pss = bacnetapp.read(
"{} device {} protocolServicesSupported".format(address, devID)
)
deviceName = bacnetapp.read("{} device {} objectName".format(address, devID))
# print('Device {}- building points list'.format(deviceName))
    objList = bacnetapp.read("{} device {} objectList".format(address, devID))
newLine = []
result = []
points = []
for pointType, pointAddr in objList:
if "binary" in pointType: # BI/BO/BV
newLine = [pointType, pointAddr]
infos = bacnetapp.readMultiple(
"{} {} {} objectName description presentValue inactiveText activeText".format(
address, pointType, pointAddr
)
)
newLine.extend(infos[:-2])
newLine.extend([infos[-2:]])
newPoint = BooleanPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "multiState" in pointType: # MI/MV/MO
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue stateText".format(
address, pointType, pointAddr
)
)
)
newPoint = EnumPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "analog" in pointType: # AI/AO/AV
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue units".format(
address, pointType, pointAddr
)
)
)
newPoint = NumericPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
else:
continue # skip
result.append(newLine)
points.append(newPoint)
if _PANDA:
df = pd.DataFrame(
result,
columns=[
"pointType",
"pointAddress",
"pointName",
"description",
"presentValue",
"units_state",
],
).set_index(["pointName"])
else:
df = result
# print('Ready!')
return (deviceName, pss, objList, df, points) | Discover the BACnet points in a BACnet device.
:param bacnetapp: The app itself so we can call read
:param address: address of the device as a string (ex. '2:5')
:param devID: device ID of the BACnet device as a string (ex. '1001')
:returns: a tuple with deviceName, pss, objList, df
* *deviceName* : name of the device
* *pss* : protocol services supported
* *objList* : list of BACnet objects (ex. analogInput, 1)
* *df* : is a dataFrame containing pointType, pointAddress, pointName, description
presentValue and units
If pandas can't be found, df will be a simple array | Below is the instruction that describes the task:
### Input:
Discover the BACnet points in a BACnet device.
:param bacnetapp: The app itself so we can call read
:param address: address of the device as a string (ex. '2:5')
:param devID: device ID of the BACnet device as a string (ex. '1001')
:returns: a tuple with deviceName, pss, objList, df
* *deviceName* : name of the device
* *pss* : protocol services supported
* *objList* : list of BACnet objects (ex. analogInput, 1)
* *df* : is a dataFrame containing pointType, pointAddress, pointName, description
presentValue and units
If pandas can't be found, df will be a simple array
### Response:
def discoverPoints(bacnetapp, address, devID):
"""
Discover the BACnet points in a BACnet device.
:param bacnetapp: The app itself so we can call read
:param address: address of the device as a string (ex. '2:5')
:param devID: device ID of the BACnet device as a string (ex. '1001')
:returns: a tuple with deviceName, pss, objList, df
* *deviceName* : name of the device
* *pss* : protocol services supported
* *objList* : list of BACnet objects (ex. analogInput, 1)
* *df* : is a dataFrame containing pointType, pointAddress, pointName, description
presentValue and units
If pandas can't be found, df will be a simple array
"""
pss = bacnetapp.read(
"{} device {} protocolServicesSupported".format(address, devID)
)
deviceName = bacnetapp.read("{} device {} objectName".format(address, devID))
# print('Device {}- building points list'.format(deviceName))
objList = bacnetapp.read("{} device {} objectList".format(address, devID))
newLine = []
result = []
points = []
for pointType, pointAddr in objList:
if "binary" in pointType: # BI/BO/BV
newLine = [pointType, pointAddr]
infos = bacnetapp.readMultiple(
"{} {} {} objectName description presentValue inactiveText activeText".format(
address, pointType, pointAddr
)
)
newLine.extend(infos[:-2])
newLine.extend([infos[-2:]])
newPoint = BooleanPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "multiState" in pointType: # MI/MV/MO
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue stateText".format(
address, pointType, pointAddr
)
)
)
newPoint = EnumPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
elif "analog" in pointType: # AI/AO/AV
newLine = [pointType, pointAddr]
newLine.extend(
bacnetapp.readMultiple(
"{} {} {} objectName description presentValue units".format(
address, pointType, pointAddr
)
)
)
newPoint = NumericPoint(
pointType=newLine[0],
pointAddress=newLine[1],
pointName=newLine[2],
description=newLine[3],
presentValue=newLine[4],
units_state=newLine[5],
)
else:
continue # skip
result.append(newLine)
points.append(newPoint)
if _PANDA:
df = pd.DataFrame(
result,
columns=[
"pointType",
"pointAddress",
"pointName",
"description",
"presentValue",
"units_state",
],
).set_index(["pointName"])
else:
df = result
# print('Ready!')
return (deviceName, pss, objList, df, points) |
def valid(self, cnpj):
"""Check if a CNPJ is valid.
We should avoid sending invalid CNPJ to the web service as we know
it is going to be a waste of bandwidth. Assumes CNPJ is a string.
"""
if len(cnpj) != 14:
return False
tam = 12
nums = cnpj[:tam]
digs = cnpj[tam:]
tot = 0
pos = tam-7
for i in range(tam, 0, -1):
tot = tot + int(nums[tam-i])*pos
pos = pos - 1
if pos < 2:
pos = 9
res = 0 if tot % 11 < 2 else 11 - (tot % 11)
if res != int(digs[0]):
return False
tam = tam + 1
nums = cnpj[:tam]
tot = 0
pos = tam-7
for i in range(tam, 0, -1):
tot = tot + int(nums[tam-i])*pos
pos = pos - 1
if pos < 2:
pos = 9
res = 0 if tot % 11 < 2 else 11 - (tot % 11)
if res != int(digs[1]):
return False
return True | Check if a CNPJ is valid.
We should avoid sending invalid CNPJ to the web service as we know
it is going to be a waste of bandwidth. Assumes CNPJ is a string. | Below is the instruction that describes the task:
### Input:
Check if a CNPJ is valid.
We should avoid sending invalid CNPJ to the web service as we know
it is going to be a waste of bandwidth. Assumes CNPJ is a string.
### Response:
def valid(self, cnpj):
"""Check if a CNPJ is valid.
We should avoid sending invalid CNPJ to the web service as we know
it is going to be a waste of bandwidth. Assumes CNPJ is a string.
"""
if len(cnpj) != 14:
return False
tam = 12
nums = cnpj[:tam]
digs = cnpj[tam:]
tot = 0
pos = tam-7
for i in range(tam, 0, -1):
tot = tot + int(nums[tam-i])*pos
pos = pos - 1
if pos < 2:
pos = 9
res = 0 if tot % 11 < 2 else 11 - (tot % 11)
if res != int(digs[0]):
return False
tam = tam + 1
nums = cnpj[:tam]
tot = 0
pos = tam-7
for i in range(tam, 0, -1):
tot = tot + int(nums[tam-i])*pos
pos = pos - 1
if pos < 2:
pos = 9
res = 0 if tot % 11 < 2 else 11 - (tot % 11)
if res != int(digs[1]):
return False
return True |
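A minimal worked check of the first verifier digit, using the widely circulated sample CNPJ 11.222.333/0001-81 (the number is illustrative; the weight sequence mirrors the pos counter in the loop above):

digits = [int(c) for c in '112223330001']
weights = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
tot = sum(d * w for d, w in zip(digits, weights))   # 102
res = 0 if tot % 11 < 2 else 11 - tot % 11          # 102 % 11 == 3 -> 8
assert res == int('11222333000181'[12])             # first verifier digit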
def set_freq(self, fout, freq):
"""
Sets a new output frequency; required parameters are the actual current output frequency and the new target frequency.
"""
hsdiv_tuple = (4, 5, 6, 7, 9, 11) # possible dividers
n1div_tuple = (1,) + tuple(range(2,129,2)) #
fdco_min = 5670.0 # set maximum as minimum
hsdiv = self.get_hs_div() # read current dividers
n1div = self.get_n1_div() #
if abs((freq-fout)*1e6/fout) > 3500:
# Large change of frequency
fdco = fout * hsdiv * n1div # calculate high frequency oscillator
fxtal = fdco / self.get_rfreq() # should be fxtal = 114.285
for hsdiv_iter in hsdiv_tuple: # find dividers with minimal power consumption
for n1div_iter in n1div_tuple:
fdco_new = freq * hsdiv_iter * n1div_iter
if (fdco_new >= 4850) and (fdco_new <= 5670):
if (fdco_new <= fdco_min):
fdco_min = fdco_new
hsdiv = hsdiv_iter
n1div = n1div_iter
rfreq = fdco_min / fxtal
self.freeze_dco() # write registers
self.set_hs_div(hsdiv)
self.set_n1_div(n1div)
self.set_rfreq(rfreq)
self.unfreeze_dco()
self.new_freq()
else:
# Small change of frequency
rfreq = self.get_rfreq() * (freq/fout)
self.freeze_m() # write registers
self.set_rfreq(rfreq)
self.unfreeze_m() | Sets a new output frequency; required parameters are the actual current output frequency and the new target frequency. | Below is the instruction that describes the task:
### Input:
Sets a new output frequency; required parameters are the actual current output frequency and the new target frequency.
### Response:
def set_freq(self, fout, freq):
"""
Sets a new output frequency; required parameters are the actual current output frequency and the new target frequency.
"""
hsdiv_tuple = (4, 5, 6, 7, 9, 11) # possible dividers
n1div_tuple = (1,) + tuple(range(2,129,2)) #
fdco_min = 5670.0 # set maximum as minimum
hsdiv = self.get_hs_div() # read current dividers
n1div = self.get_n1_div() #
if abs((freq-fout)*1e6/fout) > 3500:
# Large change of frequency
fdco = fout * hsdiv * n1div # calculate high frequency oscillator
fxtal = fdco / self.get_rfreq() # should be fxtal = 114.285
for hsdiv_iter in hsdiv_tuple: # find dividers with minimal power consumption
for n1div_iter in n1div_tuple:
fdco_new = freq * hsdiv_iter * n1div_iter
if (fdco_new >= 4850) and (fdco_new <= 5670):
if (fdco_new <= fdco_min):
fdco_min = fdco_new
hsdiv = hsdiv_iter
n1div = n1div_iter
rfreq = fdco_min / fxtal
self.freeze_dco() # write registers
self.set_hs_div(hsdiv)
self.set_n1_div(n1div)
self.set_rfreq(rfreq)
self.unfreeze_dco()
self.new_freq()
else:
# Small change of frequency
rfreq = self.get_rfreq() * (freq/fout)
self.freeze_m() # write registers
self.set_rfreq(rfreq)
self.unfreeze_m() |
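A worked pass of the divider search above for a hypothetical 156.25 MHz target (the DCO frequency freq * hsdiv * n1div must land in 4850..5670 MHz, and the smallest valid DCO is kept for lowest power):

freq = 156.25
combos = [(hs, n1, freq * hs * n1)
          for hs in (4, 5, 6, 7, 9, 11)
          for n1 in (1,) + tuple(range(2, 129, 2))
          if 4850 <= freq * hs * n1 <= 5670]
hs, n1, fdco = min(combos, key=lambda c: c[2])
# -> hs=4, n1=8, fdco=5000.0 MHz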
def main(argv=None, loop=SharedLoop, max_time=None):
"""Main entry point for iotile-gateway."""
should_raise = argv is not None
if argv is None:
argv = sys.argv[1:]
parser = build_parser()
cmd_args = parser.parse_args(argv)
configure_logging(cmd_args.verbose)
logger = logging.getLogger(__name__)
try:
args = {}
if cmd_args.config is not None:
try:
with open(cmd_args.config, "r") as conf:
args = json.load(conf)
except IOError as exc:
raise ScriptError("Could not open config file %s due to %s"
% (cmd_args.config, str(exc)), 2)
except ValueError as exc:
raise ScriptError("Could not parse JSON from config file %s due to %s"
% (cmd_args.config, str(exc)), 3)
except TypeError as exc:
raise ScriptError("You must pass the path to a json config file", 4)
logger.critical("Starting gateway")
gateway = IOTileGateway(args, loop=loop)
loop.run_coroutine(gateway.start())
logger.critical("Gateway running")
# Run forever until we receive a ctrl-c
# (allow quitting early after max_time seconds for testing)
loop.wait_for_interrupt(max_time=max_time)
loop.run_coroutine(gateway.stop())
except ScriptError as exc:
if should_raise:
raise exc
logger.fatal("Quitting due to error: %s", exc.msg)
return exc.code
except Exception as exc: # pylint: disable=W0703
if should_raise:
raise exc
logger.exception("Fatal error running gateway")
return 1
return 0 | Main entry point for iotile-gateway. | Below is the instruction that describes the task:
### Input:
Main entry point for iotile-gateway.
### Response:
def main(argv=None, loop=SharedLoop, max_time=None):
"""Main entry point for iotile-gateway."""
should_raise = argv is not None
if argv is None:
argv = sys.argv[1:]
parser = build_parser()
cmd_args = parser.parse_args(argv)
configure_logging(cmd_args.verbose)
logger = logging.getLogger(__name__)
try:
args = {}
if cmd_args.config is not None:
try:
with open(cmd_args.config, "r") as conf:
args = json.load(conf)
except IOError as exc:
raise ScriptError("Could not open config file %s due to %s"
% (cmd_args.config, str(exc)), 2)
except ValueError as exc:
raise ScriptError("Could not parse JSON from config file %s due to %s"
% (cmd_args.config, str(exc)), 3)
except TypeError as exc:
raise ScriptError("You must pass the path to a json config file", 4)
logger.critical("Starting gateway")
gateway = IOTileGateway(args, loop=loop)
loop.run_coroutine(gateway.start())
logger.critical("Gateway running")
# Run forever until we receive a ctrl-c
# (allow quitting early after max_time seconds for testing)
loop.wait_for_interrupt(max_time=max_time)
loop.run_coroutine(gateway.stop())
except ScriptError as exc:
if should_raise:
raise exc
logger.fatal("Quitting due to error: %s", exc.msg)
return exc.code
except Exception as exc: # pylint: disable=W0703
if should_raise:
raise exc
logger.exception("Fatal error running gateway")
return 1
return 0 |
def key_sign(rsakey, message, digest):
"""Sign the given message with the RSA key."""
padding = _asymmetric.padding.PKCS1v15()
signature = rsakey.sign(message, padding, digest)
return signature | Sign the given message with the RSA key. | Below is the instruction that describes the task:
### Input:
Sign the given message with the RSA key.
### Response:
def key_sign(rsakey, message, digest):
"""Sign the given message with the RSA key."""
padding = _asymmetric.padding.PKCS1v15()
signature = rsakey.sign(message, padding, digest)
return signature |
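A usage sketch, assuming the `cryptography` package whose RSA signing API this function wraps (key generation here is purely illustrative):

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa

rsakey = rsa.generate_private_key(public_exponent=65537, key_size=2048)
signature = key_sign(rsakey, b"message to sign", hashes.SHA256())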
def hook_point(self, hook_name):
"""Generic function to call modules methods if such method is avalaible
:param hook_name: function name to call
:type hook_name: str
:return:None
"""
self.my_daemon.hook_point(hook_name=hook_name, handle=self) | Generic function to call module methods if such a method is available
:param hook_name: function name to call
:type hook_name: str
:return:None | Below is the instruction that describes the task:
### Input:
Generic function to call module methods if such a method is available
:param hook_name: function name to call
:type hook_name: str
:return:None
### Response:
def hook_point(self, hook_name):
"""Generic function to call modules methods if such method is avalaible
:param hook_name: function name to call
:type hook_name: str
:return:None
"""
self.my_daemon.hook_point(hook_name=hook_name, handle=self) |
def datetime(self):
"""
Returns a datetime object of the month, day, year, and time the game
was played.
"""
date_string = '%s %s' % (self._date, self._year)
date_string = re.sub(r' \(\d+\)', '', date_string)
return datetime.strptime(date_string, '%A, %b %d %Y') | Returns a datetime object of the month, day, year, and time the game
was played. | Below is the instruction that describes the task:
### Input:
Returns a datetime object of the month, day, year, and time the game
was played.
### Response:
def datetime(self):
"""
Returns a datetime object of the month, day, year, and time the game
was played.
"""
date_string = '%s %s' % (self._date, self._year)
date_string = re.sub(r' \(\d+\)', '', date_string)
return datetime.strptime(date_string, '%A, %b %d %Y') |
def exec_command(self, command):
"""
Execute a command on the server. If the server allows it, the channel
will then be directly connected to the stdin, stdout, and stderr of
the command being executed.
When the command finishes executing, the channel will be closed and
can't be reused. You must open a new channel if you wish to execute
another command.
:param str command: a shell command to execute.
:raises:
`.SSHException` -- if the request was rejected or the channel was
closed
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string("exec")
m.add_boolean(True)
m.add_string(command)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event() | Execute a command on the server. If the server allows it, the channel
will then be directly connected to the stdin, stdout, and stderr of
the command being executed.
When the command finishes executing, the channel will be closed and
can't be reused. You must open a new channel if you wish to execute
another command.
:param str command: a shell command to execute.
:raises:
`.SSHException` -- if the request was rejected or the channel was
closed | Below is the instruction that describes the task:
### Input:
Execute a command on the server. If the server allows it, the channel
will then be directly connected to the stdin, stdout, and stderr of
the command being executed.
When the command finishes executing, the channel will be closed and
can't be reused. You must open a new channel if you wish to execute
another command.
:param str command: a shell command to execute.
:raises:
`.SSHException` -- if the request was rejected or the channel was
closed
### Response:
def exec_command(self, command):
"""
Execute a command on the server. If the server allows it, the channel
will then be directly connected to the stdin, stdout, and stderr of
the command being executed.
When the command finishes executing, the channel will be closed and
can't be reused. You must open a new channel if you wish to execute
another command.
:param str command: a shell command to execute.
:raises:
`.SSHException` -- if the request was rejected or the channel was
closed
"""
m = Message()
m.add_byte(cMSG_CHANNEL_REQUEST)
m.add_int(self.remote_chanid)
m.add_string("exec")
m.add_boolean(True)
m.add_string(command)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event() |
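A hypothetical caller-side sketch using the standard paramiko flow (host and credentials are placeholders):

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("example.com", username="user", password="secret")
channel = client.get_transport().open_session()
channel.exec_command("uname -a")  # the method defined above
output = channel.makefile("r").read()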
def _Bound_Ph(P, h):
"""Region definition for input P y h
Parameters
----------
P : float
Pressure, [MPa]
h : float
Specific enthalpy, [kJ/kg]
Returns
-------
region : float
IAPWS-97 region code
References
----------
Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of
Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer,
2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.5
"""
region = None
if Pmin <= P <= Ps_623:
h14 = _Region1(_TSat_P(P), P)["h"]
h24 = _Region2(_TSat_P(P), P)["h"]
h25 = _Region2(1073.15, P)["h"]
hmin = _Region1(273.15, P)["h"]
hmax = _Region5(2273.15, P)["h"]
if hmin <= h <= h14:
region = 1
elif h14 < h < h24:
region = 4
elif h24 <= h <= h25:
region = 2
elif h25 < h <= hmax:
region = 5
elif Ps_623 < P < Pc:
hmin = _Region1(273.15, P)["h"]
h13 = _Region1(623.15, P)["h"]
h32 = _Region2(_t_P(P), P)["h"]
h25 = _Region2(1073.15, P)["h"]
hmax = _Region5(2273.15, P)["h"]
if hmin <= h <= h13:
region = 1
elif h13 < h < h32:
try:
p34 = _PSat_h(h)
except NotImplementedError:
p34 = Pc
if P < p34:
region = 4
else:
region = 3
elif h32 <= h <= h25:
region = 2
elif h25 < h <= hmax:
region = 5
elif Pc <= P <= 100:
hmin = _Region1(273.15, P)["h"]
h13 = _Region1(623.15, P)["h"]
h32 = _Region2(_t_P(P), P)["h"]
h25 = _Region2(1073.15, P)["h"]
hmax = _Region5(2273.15, P)["h"]
if hmin <= h <= h13:
region = 1
elif h13 < h < h32:
region = 3
elif h32 <= h <= h25:
region = 2
elif P <= 50 and h25 <= h <= hmax:
region = 5
return region | Region definition for inputs P and h
Parameters
----------
P : float
Pressure, [MPa]
h : float
Specific enthalpy, [kJ/kg]
Returns
-------
region : float
IAPWS-97 region code
References
----------
Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of
Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer,
2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.5 | Below is the instruction that describes the task:
### Input:
Region definition for inputs P and h
Parameters
----------
P : float
Pressure, [MPa]
h : float
Specific enthalpy, [kJ/kg]
Returns
-------
region : float
IAPWS-97 region code
References
----------
Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of
Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer,
2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.5
### Response:
def _Bound_Ph(P, h):
"""Region definition for input P y h
Parameters
----------
P : float
Pressure, [MPa]
h : float
Specific enthalpy, [kJ/kg]
Returns
-------
region : float
IAPWS-97 region code
References
----------
Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of
Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer,
2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.5
"""
region = None
if Pmin <= P <= Ps_623:
h14 = _Region1(_TSat_P(P), P)["h"]
h24 = _Region2(_TSat_P(P), P)["h"]
h25 = _Region2(1073.15, P)["h"]
hmin = _Region1(273.15, P)["h"]
hmax = _Region5(2273.15, P)["h"]
if hmin <= h <= h14:
region = 1
elif h14 < h < h24:
region = 4
elif h24 <= h <= h25:
region = 2
elif h25 < h <= hmax:
region = 5
elif Ps_623 < P < Pc:
hmin = _Region1(273.15, P)["h"]
h13 = _Region1(623.15, P)["h"]
h32 = _Region2(_t_P(P), P)["h"]
h25 = _Region2(1073.15, P)["h"]
hmax = _Region5(2273.15, P)["h"]
if hmin <= h <= h13:
region = 1
elif h13 < h < h32:
try:
p34 = _PSat_h(h)
except NotImplementedError:
p34 = Pc
if P < p34:
region = 4
else:
region = 3
elif h32 <= h <= h25:
region = 2
elif h25 < h <= hmax:
region = 5
elif Pc <= P <= 100:
hmin = _Region1(273.15, P)["h"]
h13 = _Region1(623.15, P)["h"]
h32 = _Region2(_t_P(P), P)["h"]
h25 = _Region2(1073.15, P)["h"]
hmax = _Region5(2273.15, P)["h"]
if hmin <= h <= h13:
region = 1
elif h13 < h < h32:
region = 3
elif h32 <= h <= h25:
region = 2
elif P <= 50 and h25 <= h <= hmax:
region = 5
return region |
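An illustrative call (a sketch; the exact boundary enthalpies come from the _RegionN helpers, which are defined elsewhere in the module). Superheated steam at 1 MPa with h = 3000 kJ/kg lies between the saturated-vapor enthalpy h24 and the 1073.15 K enthalpy h25, so the expected code is region 2:

region = _Bound_Ph(P=1.0, h=3000.0)
assert region == 2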
def create(self, display_name, content=None):
""" Create a new user list :class:`collection <pypump.models.collection.Collection>`.
:param display_name: List title.
:param content: (optional) List description.
Example:
>>> pump.me.lists.create(display_name='Friends', content='List of friends')
>>> myfriends = pump.me.lists['Friends']
>>> print(myfriends)
Friends
"""
activity = {
"verb": "create",
"object": {
"objectType": "collection",
"objectTypes": [self.membertype],
"displayName": display_name,
"content": content
}
}
if self._post_activity(activity, unserialize=False):
return self[display_name] | Create a new user list :class:`collection <pypump.models.collection.Collection>`.
:param display_name: List title.
:param content: (optional) List description.
Example:
>>> pump.me.lists.create(display_name='Friends', content='List of friends')
>>> myfriends = pump.me.lists['Friends']
>>> print(myfriends)
Friends | Below is the instruction that describes the task:
### Input:
Create a new user list :class:`collection <pypump.models.collection.Collection>`.
:param display_name: List title.
:param content: (optional) List description.
Example:
>>> pump.me.lists.create(display_name='Friends', content='List of friends')
>>> myfriends = pump.me.lists['Friends']
>>> print(myfriends)
Friends
### Response:
def create(self, display_name, content=None):
""" Create a new user list :class:`collection <pypump.models.collection.Collection>`.
:param display_name: List title.
:param content: (optional) List description.
Example:
>>> pump.me.lists.create(display_name='Friends', content='List of friends')
>>> myfriends = pump.me.lists['Friends']
>>> print(myfriends)
Friends
"""
activity = {
"verb": "create",
"object": {
"objectType": "collection",
"objectTypes": [self.membertype],
"displayName": display_name,
"content": content
}
}
if self._post_activity(activity, unserialize=False):
return self[display_name] |
def sections(self):
""" Returns a list of all media sections in this library. Library sections may be any of
:class:`~plexapi.library.MovieSection`, :class:`~plexapi.library.ShowSection`,
:class:`~plexapi.library.MusicSection`, :class:`~plexapi.library.PhotoSection`.
"""
key = '/library/sections'
sections = []
for elem in self._server.query(key):
for cls in (MovieSection, ShowSection, MusicSection, PhotoSection):
if elem.attrib.get('type') == cls.TYPE:
section = cls(self._server, elem, key)
self._sectionsByID[section.key] = section
sections.append(section)
return sections | Returns a list of all media sections in this library. Library sections may be any of
:class:`~plexapi.library.MovieSection`, :class:`~plexapi.library.ShowSection`,
:class:`~plexapi.library.MusicSection`, :class:`~plexapi.library.PhotoSection`. | Below is the instruction that describes the task:
### Input:
Returns a list of all media sections in this library. Library sections may be any of
:class:`~plexapi.library.MovieSection`, :class:`~plexapi.library.ShowSection`,
:class:`~plexapi.library.MusicSection`, :class:`~plexapi.library.PhotoSection`.
### Response:
def sections(self):
""" Returns a list of all media sections in this library. Library sections may be any of
:class:`~plexapi.library.MovieSection`, :class:`~plexapi.library.ShowSection`,
:class:`~plexapi.library.MusicSection`, :class:`~plexapi.library.PhotoSection`.
"""
key = '/library/sections'
sections = []
for elem in self._server.query(key):
for cls in (MovieSection, ShowSection, MusicSection, PhotoSection):
if elem.attrib.get('type') == cls.TYPE:
section = cls(self._server, elem, key)
self._sectionsByID[section.key] = section
sections.append(section)
return sections |
def _bin(self, bin): # type: (str) -> str
"""
Return path to the given executable.
"""
bin_path = (self._bin_dir / bin).with_suffix(".exe" if self._is_windows else "")
if not bin_path.exists():
return bin
return str(bin_path) | Return path to the given executable. | Below is the the instruction that describes the task:
### Input:
Return path to the given executable.
### Response:
def _bin(self, bin): # type: (str) -> str
"""
Return path to the given executable.
"""
bin_path = (self._bin_dir / bin).with_suffix(".exe" if self._is_windows else "")
if not bin_path.exists():
return bin
return str(bin_path) |
def _round_field(values, name, freq):
"""Indirectly access pandas rounding functions by wrapping data
as a Series and calling through `.dt` attribute.
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : str (ceil, floor, round)
Name of rounding function
freq : a freq string indicating the rounding resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
if isinstance(values, dask_array_type):
from dask.array import map_blocks
return map_blocks(_round_series,
values, name, freq=freq, dtype=np.datetime64)
else:
return _round_series(values, name, freq) | Indirectly access pandas rounding functions by wrapping data
as a Series and calling through `.dt` attribute.
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : str (ceil, floor, round)
Name of rounding function
freq : a freq string indicating the rounding resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values | Below is the instruction that describes the task:
### Input:
Indirectly access pandas rounding functions by wrapping data
as a Series and calling through `.dt` attribute.
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : str (ceil, floor, round)
Name of rounding function
freq : a freq string indicating the rounding resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values
### Response:
def _round_field(values, name, freq):
"""Indirectly access pandas rounding functions by wrapping data
as a Series and calling through `.dt` attribute.
Parameters
----------
values : np.ndarray or dask.array-like
Array-like container of datetime-like values
name : str (ceil, floor, round)
Name of rounding function
freq : a freq string indicating the rounding resolution
Returns
-------
rounded timestamps : same type as values
Array-like of datetime fields accessed for each element in values
"""
if isinstance(values, dask_array_type):
from dask.array import map_blocks
return map_blocks(_round_series,
values, name, freq=freq, dtype=np.datetime64)
else:
return _round_series(values, name, freq) |
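The non-dask branch defers to _round_series which, per the docstring, wraps pandas' `.dt` accessor; a small sketch of that underlying behavior:

import numpy as np
import pandas as pd

values = np.array(['2000-01-02T03:34:56'], dtype='datetime64[ns]')
pd.Series(values).dt.round('H').values
# -> array(['2000-01-02T04:00:00.000000000'], dtype='datetime64[ns]')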
def _track_modify(self, cls, name, detail, keep, trace):
"""
Modify settings of a tracked class
"""
self._observers[cls].modify(name, detail, keep, trace) | Modify settings of a tracked class | Below is the instruction that describes the task:
### Input:
Modify settings of a tracked class
### Response:
def _track_modify(self, cls, name, detail, keep, trace):
"""
Modify settings of a tracked class
"""
self._observers[cls].modify(name, detail, keep, trace) |
def pos_branch(incl, chi):
"""
Determines the effective [as defined in Stone, Loeb,
Berger, PRD 87, 084053 (2013)] aligned dimensionless
spin parameter of a NS-BH binary with tilted BH spin.
This means finding the root chi_eff of
ISCO_eq_chi_first(chi_eff, PG_ISSO_solver(chi,incl)).
The result returned by this function belongs to the
branch of the greater solutions, i.e. the greater of
the two possible solutions is returned.
Parameters
-----------
incl: float
the inclination angle between the BH spin and the orbital
angular momentum in radians
chi: float
the BH dimensionless spin parameter
Returns
----------
chi_eff: float
the (greater) effective dimensionless spin parameter solution
"""
if incl == 0:
chi_eff = chi
else:
rISSO = PG_ISSO_solver(chi,incl)
chi_eff = scipy.optimize.fsolve(ISCO_eq_chi_first, 1.0, args=(rISSO))
return chi_eff | Determines the effective [as defined in Stone, Loeb,
Berger, PRD 87, 084053 (2013)] aligned dimensionless
spin parameter of a NS-BH binary with tilted BH spin.
This means finding the root chi_eff of
ISCO_eq_chi_first(chi_eff, PG_ISSO_solver(chi,incl)).
The result returned by this function belongs to the
branch of the greater solutions, i.e. the greater of
the two possible solutions is returned.
Parameters
-----------
incl: float
the inclination angle between the BH spin and the orbital
angular momentum in radians
chi: float
the BH dimensionless spin parameter
Returns
----------
chi_eff: float
the (greater) effective dimensionless spin parameter solution | Below is the instruction that describes the task:
### Input:
Determines the effective [as defined in Stone, Loeb,
Berger, PRD 87, 084053 (2013)] aligned dimensionless
spin parameter of a NS-BH binary with tilted BH spin.
This means finding the root chi_eff of
ISCO_eq_chi_first(chi_eff, PG_ISSO_solver(chi,incl)).
The result returned by this function belongs to the
branch of the greater solutions, i.e. the greater of
the two possible solutions is returned.
Parameters
-----------
incl: float
the inclination angle between the BH spin and the orbital
angular momentum in radians
chi: float
the BH dimensionless spin parameter
Returns
----------
chi_eff: float
the (greater) effective dimensionless spin parameter solution
### Response:
def pos_branch(incl, chi):
"""
Determines the effective [as defined in Stone, Loeb,
Berger, PRD 87, 084053 (2013)] aligned dimensionless
spin parameter of a NS-BH binary with tilted BH spin.
This means finding the root chi_eff of
ISCO_eq_chi_first(chi_eff, PG_ISSO_solver(chi,incl)).
The result returned by this function belongs to the
branch of the greater solutions, i.e. the greater of
the two possible solutions is returned.
Parameters
-----------
incl: float
the inclination angle between the BH spin and the orbital
angular momentum in radians
chi: float
the BH dimensionless spin parameter
Returns
----------
chi_eff: float
the (greater) effective dimensionless spin parameter solution
"""
if incl == 0:
chi_eff = chi
else:
rISSO = PG_ISSO_solver(chi,incl)
chi_eff = scipy.optimize.fsolve(ISCO_eq_chi_first, 1.0, args=(rISSO))
return chi_eff |
def get_observed_magnitude(self, centroid=True):
# NOTE: this import is only here so that we don't load up IRAF
# unnecessarily (ex: for candidates processing).
"""
Get the magnitude at the current pixel x/y location.
:return: Table
"""
max_count = float(self.astrom_header.get("MAXCOUNT", 30000))
(x, y, hdulist_index) = self.pixel_coord
tmp_file = self._hdu_on_disk(hdulist_index)
try:
from ossos import daophot
phot = daophot.phot_mag(tmp_file,
x, y,
aperture=self.apcor.aperture,
sky=self.apcor.sky,
swidth=self.apcor.swidth,
apcor=self.apcor.apcor,
zmag=self.zmag,
maxcount=max_count, extno=1,
centroid=centroid)
if not self.apcor.valid:
phot['PIER'][0] = 1
return phot
except Exception as ex:
print(ex)
raise ex
finally:
self.close() | Get the magnitude at the current pixel x/y location.
:return: Table | Below is the instruction that describes the task:
### Input:
Get the magnitude at the current pixel x/y location.
:return: Table
### Response:
def get_observed_magnitude(self, centroid=True):
# NOTE: this import is only here so that we don't load up IRAF
# unnecessarily (ex: for candidates processing).
"""
Get the magnitude at the current pixel x/y location.
:return: Table
"""
max_count = float(self.astrom_header.get("MAXCOUNT", 30000))
(x, y, hdulist_index) = self.pixel_coord
tmp_file = self._hdu_on_disk(hdulist_index)
try:
from ossos import daophot
phot = daophot.phot_mag(tmp_file,
x, y,
aperture=self.apcor.aperture,
sky=self.apcor.sky,
swidth=self.apcor.swidth,
apcor=self.apcor.apcor,
zmag=self.zmag,
maxcount=max_count, extno=1,
centroid=centroid)
if not self.apcor.valid:
phot['PIER'][0] = 1
return phot
except Exception as ex:
print(ex)
raise ex
finally:
self.close() |
def post_process_images(self, doctree):
"""Pick the best candidate for all image URIs."""
super(AbstractSlideBuilder, self).post_process_images(doctree)
# figure out where this doctree is in relation to the srcdir
relative_base = (
['..'] *
doctree.attributes.get('source')[len(self.srcdir) + 1:].count('/')
)
for node in doctree.traverse(nodes.image):
if node.get('candidates') is None:
node['candidates'] = ('*',)
# fix up images with absolute paths
if node['uri'].startswith(self.outdir):
node['uri'] = '/'.join(
relative_base + [
node['uri'][len(self.outdir) + 1:]
]
) | Pick the best candidate for all image URIs. | Below is the instruction that describes the task:
### Input:
Pick the best candidate for all image URIs.
### Response:
def post_process_images(self, doctree):
"""Pick the best candidate for all image URIs."""
super(AbstractSlideBuilder, self).post_process_images(doctree)
# figure out where this doctree is in relation to the srcdir
relative_base = (
['..'] *
doctree.attributes.get('source')[len(self.srcdir) + 1:].count('/')
)
for node in doctree.traverse(nodes.image):
if node.get('candidates') is None:
node['candidates'] = ('*',)
# fix up images with absolute paths
if node['uri'].startswith(self.outdir):
node['uri'] = '/'.join(
relative_base + [
node['uri'][len(self.outdir) + 1:]
]
) |
def unique(cls, iterable, key=None):
"""
Yields unique items from *iterable* whilst preserving the original order.
"""
if key is None:
key = lambda x: x
def generator():
seen = set()
seen_add = seen.add
for item in iterable:
key_val = key(item)
if key_val not in seen:
seen_add(key_val)
yield item
return cls(generator()) | Yields unique items from *iterable* whilst preserving the original order. | Below is the instruction that describes the task:
### Input:
Yields unique items from *iterable* whilst preserving the original order.
### Response:
def unique(cls, iterable, key=None):
"""
Yields unique items from *iterable* whilst preserving the original order.
"""
if key is None:
key = lambda x: x
def generator():
seen = set()
seen_add = seen.add
for item in iterable:
key_val = key(item)
if key_val not in seen:
seen_add(key_val)
yield item
return cls(generator()) |
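A usage sketch (MyStream is a hypothetical list-like class exposing unique as a classmethod; the key callable dedupes case-insensitively):

records = [('alice', 1), ('ALICE', 2), ('bob', 3)]
deduped = MyStream.unique(records, key=lambda r: r[0].lower())
# keeps ('alice', 1) and ('bob', 3), in their original order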
def values(self, values):
"""The values for insert ,
it can be a dict row or list tuple row.
"""
if isinstance(values, dict):
l = []
for column in self._columns:
l.append(values[column])
self._values.append(tuple(l))
else:
self._values.append(values)
return self | The values for insert;
it can be a dict row or a list/tuple row. | Below is the instruction that describes the task:
### Input:
The values for insert;
it can be a dict row or a list/tuple row.
### Response:
def values(self, values):
"""The values for insert ,
it can be a dict row or list tuple row.
"""
if isinstance(values, dict):
l = []
for column in self._columns:
l.append(values[column])
self._values.append(tuple(l))
else:
self._values.append(values)
return self |
def list_files(dirname, extension=None):
"""
List all files in directory `dirname`, option to filter on file extension
"""
f = []
for (dirpath, dirnames, filenames) in os.walk(dirname):
f.extend(filenames)
break
if extension is not None:
# Filter on extension
filtered = []
for filename in f:
fn, ext = os.path.splitext(filename)
if ext.lower() == '.' + extension.lower():
filtered.append(filename)
f = filtered
return f | List all files in directory `dirname`, option to filter on file extension | Below is the instruction that describes the task:
### Input:
List all files in directory `dirname`, option to filter on file extension
### Response:
def list_files(dirname, extension=None):
"""
List all files in directory `dirname`, option to filter on file extension
"""
f = []
for (dirpath, dirnames, filenames) in os.walk(dirname):
f.extend(filenames)
break
if extension is not None:
# Filter on extension
filtered = []
for filename in f:
fn, ext = os.path.splitext(filename)
if ext.lower() == '.' + extension.lower():
filtered.append(filename)
f = filtered
return f |
def _get_hit_nearest_ref_end(self, hits):
'''Returns the hit nearest to the end of the ref sequence from the input list of hits'''
nearest_to_end = hits[0]
for hit in hits[1:]:
if hit.ref_coords().end > nearest_to_end.ref_coords().end:
nearest_to_end = hit
return nearest_to_end | Returns the hit nearest to the end of the ref sequence from the input list of hits | Below is the instruction that describes the task:
### Input:
Returns the hit nearest to the end of the ref sequence from the input list of hits
### Response:
def _get_hit_nearest_ref_end(self, hits):
'''Returns the hit nearest to the end of the ref sequence from the input list of hits'''
nearest_to_end = hits[0]
for hit in hits[1:]:
if hit.ref_coords().end > nearest_to_end.ref_coords().end:
nearest_to_end = hit
return nearest_to_end |
def randset(self):
""" -> a #set of random integers """
return {
self._map_type(int)
for x in range(self.random.randint(3, 10))} | -> a #set of random integers | Below is the instruction that describes the task:
### Input:
-> a #set of random integers
### Response:
def randset(self):
""" -> a #set of random integers """
return {
self._map_type(int)
for x in range(self.random.randint(3, 10))} |
def cmd_guess_labels(*args):
"""
Arguments: <document id> [-- [--apply]]
Guess the labels that should be set on the document.
Example: paperwork-shell guess_labels -- 20161207_1144_00_8 --apply
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"docid": "xxxx",
"current_labels": ["label_a", "label_b"],
"guessed_labels": ["label_b", "label_c"],
"applied": "yes",
}
"""
args = list(args)
apply_labels = False
if "--apply" in args:
apply_labels = True
args.remove("--apply")
docid = args[0]
dsearch = get_docsearch()
doc = dsearch.get(docid)
if doc is None:
raise Exception(
"Document {} not found. Cannot guess labels".format(
docid
)
)
verbose("Current labels: {}".format(
", ".join([label.name for label in doc.labels])
))
guessed = dsearch.guess_labels(doc)
verbose("Guessed labels: {}".format(
", ".join([label.name for label in guessed])
))
r = {
'docid': doc.docid,
'current_labels': [label.name for label in doc.labels],
'guessed_labels': [label.name for label in guessed],
'applied': "yes" if apply_labels else "no",
}
changed = False
if apply_labels:
for label in guessed:
if label not in doc.labels:
dsearch.add_label(doc, label, update_index=False)
changed = True
for label in doc.labels:
if label not in guessed:
dsearch.remove_label(doc, label, update_index=False)
changed = True
if changed:
index_updater = dsearch.get_index_updater(optimize=False)
index_updater.upd_doc(doc)
index_updater.commit()
verbose("Document {} updated".format(docid))
elif apply_labels:
verbose("Document {} unchanged".format(docid))
reply(r) | Arguments: <document id> [-- [--apply]]
Guess the labels that should be set on the document.
Example: paperwork-shell guess_labels -- 20161207_1144_00_8 --apply
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"docid": "xxxx",
"current_labels": ["label_a", "label_b"],
"guessed_labels": ["label_b", "label_c"],
"applied": "yes",
} | Below is the instruction that describes the task:
### Input:
Arguments: <document id> [-- [--apply]]
Guess the labels that should be set on the document.
Example: paperwork-shell guess_labels -- 20161207_1144_00_8 --apply
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"docid": "xxxx",
"current_labels": ["label_a", "label_b"],
"guessed_labels": ["label_b", "label_c"],
"applied": "yes",
}
### Response:
def cmd_guess_labels(*args):
"""
Arguments: <document id> [-- [--apply]]
Guess the labels that should be set on the document.
Example: paperwork-shell guess_labels -- 20161207_1144_00_8 --apply
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"docid": "xxxx",
"current_labels": ["label_a", "label_b"],
"guessed_labels": ["label_b", "label_c"],
"applied": "yes",
}
"""
args = list(args)
apply_labels = False
if "--apply" in args:
apply_labels = True
args.remove("--apply")
docid = args[0]
dsearch = get_docsearch()
doc = dsearch.get(docid)
if doc is None:
raise Exception(
"Document {} not found. Cannot guess labels".format(
docid
)
)
verbose("Current labels: {}".format(
", ".join([label.name for label in doc.labels])
))
guessed = dsearch.guess_labels(doc)
verbose("Guessed labels: {}".format(
", ".join([label.name for label in guessed])
))
r = {
'docid': doc.docid,
'current_labels': [label.name for label in doc.labels],
'guessed_labels': [label.name for label in guessed],
'applied': "yes" if apply_labels else "no",
}
changed = False
if apply_labels:
for label in guessed:
if label not in doc.labels:
dsearch.add_label(doc, label, update_index=False)
changed = True
for label in doc.labels:
if label not in guessed:
dsearch.remove_label(doc, label, update_index=False)
changed = True
if changed:
index_updater = dsearch.get_index_updater(optimize=False)
index_updater.upd_doc(doc)
index_updater.commit()
verbose("Document {} updated".format(docid))
elif apply_labels:
verbose("Document {} unchanged".format(docid))
reply(r) |
def simxAuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
if (sys.version_info[0] == 3) and (type(txt) is str):
txt=txt.encode('utf-8')
return c_AuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode) | Please have a look at the function description/documentation in the V-REP user manual | Below is the instruction that describes the task:
### Input:
Please have a look at the function description/documentation in the V-REP user manual
### Response:
def simxAuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
if (sys.version_info[0] == 3) and (type(txt) is str):
txt=txt.encode('utf-8')
return c_AuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode) |
def email_address_to_list(email_address):
"""Convert an email address to a list."""
realname, address = email.utils.parseaddr(email_address)
return (
[address, realname] if realname and address else
[email_address, email_address]
) | Convert an email address to a list. | Below is the instruction that describes the task:
### Input:
Convert an email address to a list.
### Response:
def email_address_to_list(email_address):
"""Convert an email address to a list."""
realname, address = email.utils.parseaddr(email_address)
return (
[address, realname] if realname and address else
[email_address, email_address]
) |
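Two behavior checks using the stdlib parser the function relies on:

email_address_to_list('Jane Doe <jane@example.com>')
# -> ['jane@example.com', 'Jane Doe']
email_address_to_list('jane@example.com')
# -> ['jane@example.com', 'jane@example.com']  (no realname part)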
def eval_interval(interval):
""" Evaluate an interval expression """
kwargs = {
"years": 0,
"months": 0,
"weeks": 0,
"days": 0,
"hours": 0,
"minutes": 0,
"seconds": 0,
"microseconds": 0,
}
for section in interval[1:]:
name = section.getName()
if name == "year":
kwargs["years"] += int(section[0])
elif name == "month":
kwargs["months"] += int(section[0])
elif name == "week":
kwargs["weeks"] += int(section[0])
elif name == "day":
kwargs["days"] += int(section[0])
elif name == "hour":
kwargs["hours"] += int(section[0])
elif name == "minute":
kwargs["minutes"] += int(section[0])
elif name == "second":
kwargs["seconds"] += int(section[0])
elif name == "millisecond":
kwargs["microseconds"] += 1000 * int(section[0])
elif name == "microsecond":
kwargs["microseconds"] += int(section[0])
else:
raise SyntaxError("Unrecognized interval type %r: %s" % (name, section))
return relativedelta(**kwargs) | Evaluate an interval expression | Below is the instruction that describes the task:
### Input:
Evaluate an interval expression
### Response:
def eval_interval(interval):
""" Evaluate an interval expression """
kwargs = {
"years": 0,
"months": 0,
"weeks": 0,
"days": 0,
"hours": 0,
"minutes": 0,
"seconds": 0,
"microseconds": 0,
}
for section in interval[1:]:
name = section.getName()
if name == "year":
kwargs["years"] += int(section[0])
elif name == "month":
kwargs["months"] += int(section[0])
elif name == "week":
kwargs["weeks"] += int(section[0])
elif name == "day":
kwargs["days"] += int(section[0])
elif name == "hour":
kwargs["hours"] += int(section[0])
elif name == "minute":
kwargs["minutes"] += int(section[0])
elif name == "second":
kwargs["seconds"] += int(section[0])
elif name == "millisecond":
kwargs["microseconds"] += 1000 * int(section[0])
elif name == "microsecond":
kwargs["microseconds"] += int(section[0])
else:
raise SyntaxError("Unrecognized interval type %r: %s" % (name, section))
return relativedelta(**kwargs) |
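The parse tree comes from a pyparsing-style grammar that is not shown here, so a direct call is hard to reproduce; as a sketch, the delta built for "1 year 2 months 5 milliseconds" would be equivalent to:

from dateutil.relativedelta import relativedelta

relativedelta(years=1, months=2, microseconds=1000 * 5)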
def add_menu(self, name):
"""Add a top-level menu.
The menu manager only allows one menu of the same name. However, it does
not make sure that there are no pre-existing menus of that name.
"""
if name in self._menus:
raise exceptions.MenuAlreadyExists("Menu name {!r} already exists.".format(name))
menu = self._menu.addMenu(name)
self._menus[name] = menu | Add a top-level menu.
The menu manager only allows one menu of the same name. However, it does
not make sure that there are no pre-existing menus of that name. | Below is the instruction that describes the task:
### Input:
Add a top-level menu.
The menu manager only allows one menu of the same name. However, it does
not make sure that there are no pre-existing menus of that name.
### Response:
def add_menu(self, name):
"""Add a top-level menu.
The menu manager only allows one menu of the same name. However, it does
not make sure that there are no pre-existing menus of that name.
"""
if name in self._menus:
raise exceptions.MenuAlreadyExists("Menu name {!r} already exists.".format(name))
menu = self._menu.addMenu(name)
self._menus[name] = menu |
def form_valid(self, form):
"""
The user has provided valid credentials (this was checked in AuthenticationForm.is_valid()). So now we
can check the test cookie stuff and log him in.
"""
self.check_and_delete_test_cookie()
login(self.request, form.get_user())
return super(LoginView, self).form_valid(form) | The user has provided valid credentials (this was checked in AuthenticationForm.is_valid()). So now we
can check the test cookie stuff and log him in. | Below is the instruction that describes the task:
### Input:
The user has provided valid credentials (this was checked in AuthenticationForm.is_valid()). So now we
can check the test cookie stuff and log him in.
### Response:
def form_valid(self, form):
"""
The user has provided valid credentials (this was checked in AuthenticationForm.is_valid()). So now we
can check the test cookie stuff and log him in.
"""
self.check_and_delete_test_cookie()
login(self.request, form.get_user())
return super(LoginView, self).form_valid(form) |
def raised_funds_by_project(df):
"""
Raised funds organized by project.
"""
df['CaptacaoReal'] = df['CaptacaoReal'].apply(
pd.to_numeric
)
return (
df[['Pronac', 'CaptacaoReal']]
.groupby(['Pronac'])
.sum()
) | Raised funds organized by project. | Below is the instruction that describes the task:
### Input:
Raised funds organized by project.
### Response:
def raised_funds_by_project(df):
"""
Raised funds organized by project.
"""
df['CaptacaoReal'] = df['CaptacaoReal'].apply(
pd.to_numeric
)
return (
df[['Pronac', 'CaptacaoReal']]
.groupby(['Pronac'])
.sum()
) |
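A tiny illustrative frame (column names follow the function's schema; values are made up):

import pandas as pd

df = pd.DataFrame({
    'Pronac': ['101', '101', '202'],
    'CaptacaoReal': ['10.5', '4.5', '7.0'],
})
raised_funds_by_project(df)
# -> CaptacaoReal: Pronac 101 -> 15.0, Pronac 202 -> 7.0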
def on_event(self, evt, is_final):
""" this is invoked from in response to COM PumpWaitingMessages - different thread """
for msg in XmlHelper.message_iter(evt):
# Single security element in historical request
node = msg.GetElement('securityData')
if node.HasElement('securityError'):
secid = XmlHelper.get_child_value(node, 'security')
self.security_errors.append(XmlHelper.as_security_error(node.GetElement('securityError'), secid))
else:
self.on_security_data_node(node) | this is invoked in response to COM PumpWaitingMessages - different thread | Below is the instruction that describes the task:
### Input:
this is invoked in response to COM PumpWaitingMessages - different thread
### Response:
def on_event(self, evt, is_final):
""" this is invoked from in response to COM PumpWaitingMessages - different thread """
for msg in XmlHelper.message_iter(evt):
# Single security element in historical request
node = msg.GetElement('securityData')
if node.HasElement('securityError'):
secid = XmlHelper.get_child_value(node, 'security')
self.security_errors.append(XmlHelper.as_security_error(node.GetElement('securityError'), secid))
else:
self.on_security_data_node(node) |
def is_stationary(self):
r""" Whether the MSM is stationary, i.e. whether the initial distribution is the stationary distribution
of the hidden transition matrix. """
# for disconnected matrices, the stationary distribution depends on the estimator, so we can't compute
# it directly. Therefore we test whether the initial distribution is stationary.
return np.allclose(np.dot(self._Pi, self._Tij), self._Pi) | r""" Whether the MSM is stationary, i.e. whether the initial distribution is the stationary distribution
of the hidden transition matrix. | Below is the instruction that describes the task:
### Input:
r""" Whether the MSM is stationary, i.e. whether the initial distribution is the stationary distribution
of the hidden transition matrix.
### Response:
def is_stationary(self):
r""" Whether the MSM is stationary, i.e. whether the initial distribution is the stationary distribution
of the hidden transition matrix. """
# for disconnected matrices, the stationary distribution depends on the estimator, so we can't compute
# it directly. Therefore we test whether the initial distribution is stationary.
return np.allclose(np.dot(self._Pi, self._Tij), self._Pi) |
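The same test spelled out for a two-state chain (numbers are illustrative; the uniform distribution is stationary for this symmetric matrix):

import numpy as np

Pi = np.array([0.5, 0.5])                  # initial distribution
Tij = np.array([[0.9, 0.1], [0.1, 0.9]])   # hidden transition matrix
np.allclose(np.dot(Pi, Tij), Pi)           # True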
def start(self):
'''
Start the actual master.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(SaltAPI, self).start()
if check_user(self.config['user']):
log.info('The salt-api is starting up')
self.api.run() | Start the actual master.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`. | Below is the instruction that describes the task:
### Input:
Start the actual master.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
### Response:
def start(self):
'''
Start the actual master.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(SaltAPI, self).start()
if check_user(self.config['user']):
log.info('The salt-api is starting up')
self.api.run() |
def arcovar_marple(x, order):
r"""Estimate AR model parameters using covariance method
This implementation is based on [Marple]_. This code is far more
complicated and slower than :func:`arcovar` function, which is now the official version.
See :func:`arcovar` for a detailed description of Covariance method.
This function should be used in place of arcovar only if order<=4, for
which :func:`arcovar` does not work.
Fast algorithm for the solution of the covariance least squares normal
equations from Marple.
:param array x: Array of complex data samples
:param int order: Order of linear prediction model
:return:
* AF - Array of complex forward linear prediction coefficients
* PF - Real forward linear prediction variance at order IP
* AB - Array of complex backward linear prediction coefficients
* PB - Real backward linear prediction variance at order IP
* PV - store linear prediction coefficients
.. note:: this code and the original code in Marple diverge for ip>10.
it seems that this is related to single precision used with
complex type in fortran whereas numpy uses double precision for
complex type.
:validation: the AR parameters are the same as those returned by
a completely different function :func:`arcovar`.
:References: [Marple]_
"""
assert len(x) >= order, "X must be dimensioned >=N"
# ----------------------------------------------------- Initialization
x = np.array(x)
N = len(x)
# Equations 8.C.42
r0 = sum(abs(x)**2.)
r1 = abs(x[0])**2
rN = abs(x[N-1])**2
pf = r0 - r1
pb = r0 - rN
delta = 1. - r1 / r0
gamma = 1. - rN / r0
c = np.zeros(N, dtype=complex)
d = np.zeros(N, dtype=complex)
r = np.zeros(N, dtype=complex)
af = np.zeros(N, dtype=complex)
ab = np.zeros(N, dtype=complex)
c[0] = x[N-1].conjugate() / r0
d[0] = x[0].conjugate() / r0
# special case
if order == 0:
pf = r0 / float(N)
pb = pf
return af, pf, ab, pb, 0
# ---------------------------------------------------------- MAIN LOOP
#ip +1 because we want to enter the loop to run the first part of the code.
pbv = []
for m in range(0, order+1):
logging.debug('----------------------------m=%s', m)
logging.debug(c[0:2])
logging.debug(d[0:2])
r1 = 1./pf
r2 = 1./pb
r3 = 1./delta
r4 = 1./gamma
#logging.debug('starting r1r2r3r4=', r1, r2, r3, r4, pf, pb, delta, gamma)
#Order update: AF and AB vectors ; time update: C and D vectors
temp = 0.+0.j
for k in range(m+1, N):
temp = temp + x[k]*x[k-m-1].conjugate()
r[m] = temp.conjugate()
theta = x[0] * c[m]
#print(('theta', theta))
# print(('c=', c[0:2]))
# print(('d=', d[0:2]))
if m == 0:
pass
else:
for k in range(0, m):
theta = theta + x[m-k] * c[k] # Eq. (8.C.39)
r[k] = r[k] - x[N-m-1] * x[N-m+k].conjugate() # Eq. (8.C.32)
temp = temp + af[m-k-1] * r[k].conjugate()
#print 'loop1 k=', k
#print ' theta=',theta, 'r[k]=',r[k], 'temp=', temp
#print ' c=',c[k], 'af=',af[m-k-1]
"""if m > 0:
if debug:
print((m, N-m))
print(('Xk=0',x[m-0],x[N-m-1], x[N-m+0]))
if m > 1:
if debug:
print('Xk=1',x[m-1],x[N-m-1], x[N-m+1])
"""
c1 = -temp * r2
c2 = -r1 * temp.conjugate()
c3 = theta * r3
c4 = r4 *theta.conjugate()
#if debug:
# print('c1 c2 c3 c4 before af=',c1 ,c2 ,c3 ,c4)
af[m] = c1 # ! Eq. (8.C.19)
ab[m] = c2 # ! Eq. (8.C.22)
save = c[m]
c[m] = save + c3*d[m]
d[m] = d[m] + c4*save
#if debug:
# print('res',m,'af[m]=',af[m], ab[m], save, 'temp=',temp)
if m == 0:
pass
else:
#if debug:print('af before', af[0:2])
for k in range(0, m):
save = af[k]
af[k] = save + c1 * ab[m-k-1] # Eq. (8.C.18)
ab[m-k-1] = ab[m-k-1] + c2 * save # Eq. (8.C.21)
save = c[k]
c[k] = save + c3*d[k] # Eq. (8.C.37)
d[k] = d[k] + c4*save # Eq. (8.C.38)
#if debug:
# print('loop2 k=', k)
# print(' af[k]=', af[k])
# print(' ab[m-k-1]=', ab[m-k-1])
# print(' c[k]=', c[k])
# print(' d[k]=', d[k])
#if debug:
# print('af after=', af[0:2])
# print('ab=', ab[0:2])
r5 = temp.real**2 + temp.imag**2
pf = pf - r5*r2 # Eq. (8.C.20)
pb = pb - r5*r1 # Eq. (8.C.23)
r5 = theta.real**2 + theta.imag**2
delta = delta - r5*r4 # Eq. (8.C.39)
gamma = gamma - r5*r3 # Eq. (8.C.40)
#if debug:
# print('r5r2r1deltagamma', r5, r2, r1 , delta, gamma)
# print('pf before norm', pf, pb, N-m-1)
if m != order-1:
pass
else:
pf = pf / float(N-m-1)
pb = pb / float(N-m-1)
#if debug:
# print('ENDING', N-m-1)
break
#if debug:
# print('pf and pb', pf, pb)
if pf > 0 and pb > 0:
pass
else:
ValueError("Negative PF or PB value")
if (delta > 0. and delta <=1 and gamma > 0. and gamma <=1):
pass
else:
ValueError("Invalid delta or gamma value")
#C Time update: AF and AB vectors; order update: C and D vectors
r1 = 1./pf
r2 = 1./pb
r3 = 1./delta
r4 = 1./gamma
#if debug:
# print('--------time update', r1, r2, r3, r4, m+1, N-m-1, x[m+1], x[N-m-2])
ef = x[m+1]
eb = x[(N-1)-m-1]
for k in range(0,m+1):
#print 'k=', k, 'ef=', ef, ' eb=',eb,' af=',af[k], ab[k]
#print x[m-k],x[N-m+k-1]
ef = ef + af[k] * x[m-k] # Eq. (8.C.1)
eb = eb + ab[k] * x[N-m+k-1] # Eq. (8.C.2)
#ef = sum(af)
#if debug:
# print('efweb', ef , eb)
c1 = ef*r3
c2 = eb*r4
c3 = eb.conjugate() * r2
c4 = ef.conjugate() * r1
#if debug:
# print('c1c2c3c4', c1, c2, c3, c4)
# print('af before', af[0:2])
for k in range(m, -1, -1):
save = af[k]
af[k] = save + c1 * d[k] # Eq. (8.C.33)
d[k+1] = d[k] + c4 * save # Eq. (8.C.25)
save = ab[k]
ab[k] = save + c2 * c[m-k] # Eq. (8.C.35)
c[m-k] = c[m-k] + c3 * save # Eq. (8.C.24)
#if debug:
# print('af after', af[0:2])
# print('d', d[0:2])
# print('ab', ab[0:2])
# print('c', c[0:2])
#if debug:print('Pb before', pf, pb)
c[m+1] = c3
d[0] = c4
#r5 = abs(ef)**2
r5 = ef.real**2 + ef.imag**2
pf = pf - r5 * r3 # Eq. (8.C.34)
delta = delta-r5 * r1 # Eq. (8.C.30)
#r5 = abs(eb)**2
r5 = eb.real**2 + eb.imag**2
pb = pb - r5 * r4 # Eq. (8.C.36)
#if debug:
# print('Pb---------------------', m, pb, r5, r4)
gamma = gamma-r5*r2 # Eq. (8.C.31)
pbv.append(pb)
if (pf > 0. and pb > 0.):
pass
else:
ValueError("Negative PF or PB value")
#if debug:
# print(delta, gamma)
if (delta > 0. and delta <= 1.) and (gamma > 0. and gamma <= 1.):
pass
else:
ValueError("Invalid delta or gamma value")
#af=array of forward coeff
#ab=array of backward coeff
#pb=backward variance
#pf=forward variance
return af, pf, ab, pb, pbv | r"""Estimate AR model parameters using covariance method
This implementation is based on [Marple]_. This code is far more
complicated and slower than the :func:`arcovar` function, which is now the official version.
See :func:`arcovar` for a detailed description of Covariance method.
This function should be used in place of arcovar only if order<=4, for
which :func:`arcovar` does not work.
Fast algorithm for the solution of the covariance least squares normal
equations from Marple.
:param array X: Array of complex data samples
:param int order: Order of linear prediction model
:return:
* AF - Array of complex forward linear prediction coefficients
* PF - Real forward linear prediction variance at order IP
* AB - Array of complex backward linear prediction coefficients
* PB - Real backward linear prediction variance at order IP
* PV - store linear prediction coefficients
.. note:: this code and the original code in Marple diverge for ip>10.
it seems that this is related to single precision used with
complex type in fortran whereas numpy uses double precision for
complex type.
:validation: the AR parameters are the same as those returned by
a completely different function :func:`arcovar`.
:References: [Marple]_ | Below is the instruction that describes the task:
### Input:
r"""Estimate AR model parameters using covariance method
This implementation is based on [Marple]_. This code is far more
complicated and slower than the :func:`arcovar` function, which is now the official version.
See :func:`arcovar` for a detailed description of Covariance method.
This function should be used in place of arcovar only if order<=4, for
which :func:`arcovar` does not work.
Fast algorithm for the solution of the covariance least squares normal
equations from Marple.
:param array X: Array of complex data samples
:param int order: Order of linear prediction model
:return:
* AF - Array of complex forward linear prediction coefficients
* PF - Real forward linear prediction variance at order IP
* AB - Array of complex backward linear prediction coefficients
* PB - Real backward linear prediction variance at order IP
* PV - store linear prediction coefficients
.. note:: this code and the original code in Marple diverge for ip>10.
it seems that this is related to single precision used with
complex type in fortran whereas numpy uses double precision for
complex type.
:validation: the AR parameters are the same as those returned by
a completely different function :func:`arcovar`.
:References: [Marple]_
### Response:
def arcovar_marple(x, order):
r"""Estimate AR model parameters using covariance method
This implementation is based on [Marple]_. This code is far more
complicated and slower than the :func:`arcovar` function, which is now the official version.
See :func:`arcovar` for a detailed description of Covariance method.
This function should be used in place of arcovar only if order<=4, for
which :func:`arcovar` does not work.
Fast algorithm for the solution of the covariance least squares normal
equations from Marple.
:param array X: Array of complex data samples
:param int oder: Order of linear prediction model
:return:
* AF - Array of complex forward linear prediction coefficients
* PF - Real forward linear prediction variance at order IP
* AB - Array of complex backward linear prediction coefficients
* PB - Real backward linear prediction variance at order IP
* PV - store linear prediction coefficients
.. note:: this code and the original code in Marple diverge for ip>10.
it seems that this is related to single precision used with
complex type in fortran whereas numpy uses double precision for
complex type.
:validation: the AR parameters are the same as those returned by
a completely different function :func:`arcovar`.
:References: [Marple]_
"""
assert len(x) >= order, "X must be dimensioned >=N"
# ----------------------------------------------------- Initialization
x = np.array(x)
N = len(x)
# Equations 8.C.42
r0 = sum(abs(x)**2.)
r1 = abs(x[0])**2
rN = abs(x[N-1])**2
pf = r0 - r1
pb = r0 - rN
delta = 1. - r1 / r0
gamma = 1. - rN / r0
c = np.zeros(N, dtype=complex)
d = np.zeros(N, dtype=complex)
r = np.zeros(N, dtype=complex)
af = np.zeros(N, dtype=complex)
ab = np.zeros(N, dtype=complex)
c[0] = x[N-1].conjugate() / r0
d[0] = x[0].conjugate() / r0
# special case
if order == 0:
pf = r0 / float(N)
pb = pf
return af, pf, ab, pb, 0
# ---------------------------------------------------------- MAIN LOOP
#ip +1 because we want to enter in the loop to run the first part of the code.
pbv = []
for m in range(0, order+1):
logging.debug('----------------------------m=', m)
logging.debug(c[0:2])
logging.debug(d[0:2])
r1 = 1./pf
r2 = 1./pb
r3 = 1./delta
r4 = 1./gamma
#logging.debug('starting r1r2r3r4=', r1, r2, r3, r4, pf, pb, delta, gamma)
#Order update: AF and AB vectors ; time update: C and D vectors
temp = 0.+0.j
for k in range(m+1, N):
temp = temp + x[k]*x[k-m-1].conjugate()
r[m] = temp.conjugate()
theta = x[0] * c[m]
#print(('theta', theta))
# print(('c=', c[0:2]))
# print(('d=', d[0:2]))
if m == 0:
pass
else:
for k in range(0, m):
theta = theta + x[m-k] * c[k] # Eq. (8.C.39)
r[k] = r[k] - x[N-m-1] * x[N-m+k].conjugate() # Eq. (8.C.32)
temp = temp + af[m-k-1] * r[k].conjugate()
#print 'loop1 k=', k
#print ' theta=',theta, 'r[k]=',r[k], 'temp=', temp
#print ' c=',c[k], 'af=',af[m-k-1]
"""if m > 0:
if debug:
print((m, N-m))
print(('Xk=0',x[m-0],x[N-m-1], x[N-m+0]))
if m > 1:
if debug:
print('Xk=1',x[m-1],x[N-m-1], x[N-m+1])
"""
c1 = -temp * r2
c2 = -r1 * temp.conjugate()
c3 = theta * r3
c4 = r4 *theta.conjugate()
#if debug:
# print('c1 c2 c3 c4 before af=',c1 ,c2 ,c3 ,c4)
af[m] = c1 # ! Eq. (8.C.19)
ab[m] = c2 # ! Eq. (8.C.22)
save = c[m]
c[m] = save + c3*d[m]
d[m] = d[m] + c4*save
#if debug:
# print('res',m,'af[m]=',af[m], ab[m], save, 'temp=',temp)
if m == 0:
pass
else:
#if debug:print('af before', af[0:2])
for k in range(0, m):
save = af[k]
af[k] = save + c1 * ab[m-k-1] # Eq. (8.C.18)
ab[m-k-1] = ab[m-k-1] + c2 * save # Eq. (8.C.21)
save = c[k]
c[k] = save + c3*d[k] # Eq. (8.C.37)
d[k] = d[k] + c4*save # Eq. (8.C.38)
#if debug:
# print('loop2 k=', k)
# print(' af[k]=', af[k])
# print(' ab[m-k-1]=', ab[m-k-1])
# print(' c[k]=', c[k])
# print(' d[k]=', d[k])
#if debug:
# print('af after=', af[0:2])
# print('ab=', ab[0:2])
r5 = temp.real**2 + temp.imag**2
pf = pf - r5*r2 # Eq. (8.C.20)
pb = pb - r5*r1 # Eq. (8.C.23)
r5 = theta.real**2 + theta.imag**2
delta = delta - r5*r4 # Eq. (8.C.39)
gamma = gamma - r5*r3 # Eq. (8.C.40)
#if debug:
# print('r5r2r1deltagamma', r5, r2, r1 , delta, gamma)
# print('pf before norm', pf, pb, N-m-1)
if m != order-1:
pass
else:
pf = pf / float(N-m-1)
pb = pb / float(N-m-1)
#if debug:
# print('ENDING', N-m-1)
break
#if debug:
# print('pf and pb', pf, pb)
if pf > 0 and pb > 0:
pass
else:
ValueError("Negative PF or PB value")
if (delta > 0. and delta <=1 and gamma > 0. and gamma <=1):
pass
else:
ValueError("Invalid delta or gamma value")
#C Time update: AF and AB vectors; order update: C and D vectors
r1 = 1./pf
r2 = 1./pb
r3 = 1./delta
r4 = 1./gamma
#if debug:
# print('--------time update', r1, r2, r3, r4, m+1, N-m-1, x[m+1], x[N-m-2])
ef = x[m+1]
eb = x[(N-1)-m-1]
for k in range(0,m+1):
#print 'k=', k, 'ef=', ef, ' eb=',eb,' af=',af[k], ab[k]
#print x[m-k],x[N-m+k-1]
ef = ef + af[k] * x[m-k] # Eq. (8.C.1)
eb = eb + ab[k] * x[N-m+k-1] # Eq. (8.C.2)
#ef = sum(af)
#if debug:
# print('efweb', ef , eb)
c1 = ef*r3
c2 = eb*r4
c3 = eb.conjugate() * r2
c4 = ef.conjugate() * r1
#if debug:
# print('c1c2c3c4', c1, c2, c3, c4)
# print('af before', af[0:2])
for k in range(m, -1, -1):
save = af[k]
af[k] = save + c1 * d[k] # Eq. (8.C.33)
d[k+1] = d[k] + c4 * save # Eq. (8.C.25)
save = ab[k]
ab[k] = save + c2 * c[m-k] # Eq. (8.C.35)
c[m-k] = c[m-k] + c3 * save # Eq. (8.C.24)
#if debug:
# print('af after', af[0:2])
# print('d', d[0:2])
# print('ab', ab[0:2])
# print('c', c[0:2])
#if debug:print('Pb before', pf, pb)
c[m+1] = c3
d[0] = c4
#r5 = abs(ef)**2
r5 = ef.real**2 + ef.imag**2
pf = pf - r5 * r3 # Eq. (8.C.34)
delta = delta-r5 * r1 # Eq. (8.C.30)
#r5 = abs(eb)**2
r5 = eb.real**2 + eb.imag**2
pb = pb - r5 * r4 # Eq. (8.C.36)
#if debug:
# print('Pb---------------------', m, pb, r5, r4)
gamma = gamma-r5*r2 # Eq. (8.C.31)
pbv.append(pb)
if (pf > 0. and pb > 0.):
pass
else:
ValueError("Negative PF or PB value")
#if debug:
# print(delta, gamma)
if (delta > 0. and delta <= 1.) and (gamma > 0. and gamma <= 1.):
pass
else:
ValueError("Invalid delta or gamma value")
#af=array of forward coeff
#ab=array of backward coeff
#pb=backward variance
#pf=forward variance
return af, pf, ab, pb, pbv |
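A minimal usage sketch for arcovar_marple, assuming the function above and its numpy/logging imports are in scope; the test signal and model order are illustrative, not from the source:
import numpy as np
n = np.arange(64)
# complex exponential at normalized frequency 0.2 plus light complex noise
x = np.exp(2j * np.pi * 0.2 * n) + 0.01 * (np.random.randn(64) + 1j * np.random.randn(64))
af, pf, ab, pb, pbv = arcovar_marple(x, 4)
print(af[:4])   # forward AR coefficients up to the requested order
print(pf, pb)   # forward/backward prediction variances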
def get_body_size(params, boundary):
"""Returns the number of bytes that the multipart/form-data encoding
of ``params`` will be."""
size = sum(p.get_size(boundary) for p in MultipartParam.from_params(params))
return size + len(boundary) + 6 | Returns the number of bytes that the multipart/form-data encoding
of ``params`` will be. | Below is the instruction that describes the task:
### Input:
Returns the number of bytes that the multipart/form-data encoding
of ``params`` will be.
### Response:
def get_body_size(params, boundary):
"""Returns the number of bytes that the multipart/form-data encoding
of ``params`` will be."""
size = sum(p.get_size(boundary) for p in MultipartParam.from_params(params))
return size + len(boundary) + 6 |
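A short usage sketch, assuming MultipartParam from the same poster-style module is in scope; the boundary string is arbitrary:
params = [("field", "value"),
          MultipartParam("attachment", "file contents", filename="a.txt")]
boundary = "XXXXXXXXX"  # arbitrary multipart boundary
print(get_body_size(params, boundary))  # total bytes of the encoded body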
def ParseOptions(cls, options, output_module):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object does not have the
SetCredentials or SetDatabaseName methods.
"""
if not hasattr(output_module, 'SetCredentials'):
raise errors.BadConfigObject('Unable to set username information.')
if not hasattr(output_module, 'SetDatabaseName'):
raise errors.BadConfigObject('Unable to set database information.')
username = cls._ParseStringOption(
options, 'username', default_value=cls._DEFAULT_USERNAME)
password = cls._ParseStringOption(
options, 'password', default_value=cls._DEFAULT_PASSWORD)
name = cls._ParseStringOption(
options, 'db_name', default_value=cls._DEFAULT_NAME)
output_module.SetCredentials(username=username, password=password)
output_module.SetDatabaseName(name)
server_config.ServerArgumentsHelper.ParseOptions(options, output_module) | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object does not have the
SetCredentials or SetDatabaseName methods. | Below is the instruction that describes the task:
### Input:
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object does not have the
SetCredentials or SetDatabaseName methods.
### Response:
def ParseOptions(cls, options, output_module):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object does not have the
SetCredentials or SetDatabaseName methods.
"""
if not hasattr(output_module, 'SetCredentials'):
raise errors.BadConfigObject('Unable to set username information.')
if not hasattr(output_module, 'SetDatabaseName'):
raise errors.BadConfigObject('Unable to set database information.')
username = cls._ParseStringOption(
options, 'username', default_value=cls._DEFAULT_USERNAME)
password = cls._ParseStringOption(
options, 'password', default_value=cls._DEFAULT_PASSWORD)
name = cls._ParseStringOption(
options, 'db_name', default_value=cls._DEFAULT_NAME)
output_module.SetCredentials(username=username, password=password)
output_module.SetDatabaseName(name)
server_config.ServerArgumentsHelper.ParseOptions(options, output_module) |
def _compute_distance(self, rup, dists, C):
"""
equation 3, page 1960:
``c31 * logR + c32 * (R-Rref)``
"""
rref = 1.0
c31 = -1.7
return (c31 * np.log10(dists.rhypo) + C['c32'] * (dists.rhypo - rref)) | equation 3, page 1960:
``c31 * logR + c32 * (R-Rref)`` | Below is the instruction that describes the task:
### Input:
equation 3, page 1960:
``c31 * logR + c32 * (R-Rref)``
### Response:
def _compute_distance(self, rup, dists, C):
"""
equation 3, page 1960:
``c31 * logR + c32 * (R-Rref)``
"""
rref = 1.0
c31 = -1.7
return (c31 * np.log10(dists.rhypo) + C['c32'] * (dists.rhypo - rref)) |
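The distance term can be checked standalone with numpy; the c32 coefficient and distances below are illustrative values, not taken from the GMPE coefficient table:
import numpy as np
rhypo = np.array([10., 50., 100.])   # hypothetical hypocentral distances (km)
c31, c32, rref = -1.7, -0.0022, 1.0  # c32 is an assumed value for illustration
print(c31 * np.log10(rhypo) + c32 * (rhypo - rref))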
def info_player_id(self,name):
'''Get the id of a football player by name'''
number = 0
name=name.title().replace(" ", "+")
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",'Referer': 'http://'+self.domain+'/team_news.phtml',"User-Agent": user_agent}
req = self.session.get('http://stats.comunio.es/search.php?name='+name,headers=headers).content
soup = BeautifulSoup(req)
for i in soup.find_all('a',{'class','nowrap'}):
number = re.search("([0-9]+)-", str(i)).group(1)
break # Only return the first match
return number | Get the id of a football player by name | Below is the instruction that describes the task:
### Input:
Get the id of a football player by name
### Response:
def info_player_id(self,name):
'''Get the id of a football player by name'''
number = 0
name=name.title().replace(" ", "+")
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",'Referer': 'http://'+self.domain+'/team_news.phtml',"User-Agent": user_agent}
req = self.session.get('http://stats.comunio.es/search.php?name='+name,headers=headers).content
soup = BeautifulSoup(req)
for i in soup.find_all('a',{'class','nowrap'}):
number = re.search("([0-9]+)-", str(i)).group(1)
break # Only return the first match
return number |
def fetch_access_token_by_client_credentials(self):
'''
There are three ways to let you start using KKBOX's Open/Partner
API. The first way among them is to generate a client
credential to fetch an access token to let KKBOX identify
you. It allows you to access public data from KKBOX such as
public albums, playlists and so on.
However, you cannot use client credentials to access private
data of a user. You have to let users log in to KKBOX and
grant permissions for you to do so. You cannot use client
credentials to do media playback either, since it requires a
Premium Membership.
:return: an access token
:rtype: :class:`kkbox_sdk.KKBOXAccessToken`
See `https://docs-en.kkbox.codes/docs/appendix-a`.
'''
client_credential_base = '%s:%s' % (self.client_id, self.client_secret)
try:
client_credentials = base64.b64encode(
bytes(client_credential_base, 'utf-8'))
except:
client_credentials = base64.b64encode(client_credential_base)
client_credentials = client_credentials.decode('utf-8')
headers = {'Authorization': 'Basic ' + client_credentials,
'Content-type': 'application/x-www-form-urlencoded'}
post_parameters = {'grant_type': 'client_credentials',
'scope': 'user_profile user_territory'}
json_object = self.http._post_data(KKBOXOAuth.OAUTH_TOKEN_URL, post_parameters,
headers)
self.access_token = KKBOXAccessToken(**json_object)
return self.access_token | There are three ways to let you start using KKBOX's Open/Partner
API. The first way among them is to generate a client
credential to fetch an access token to let KKBOX identify
you. It allows you to access public data from KKBOX such as
public albums, playlists and so on.
However, you cannot use client credentials to access private
data of a user. You have to let users log in to KKBOX and
grant permissions for you to do so. You cannot use client
credentials to do media playback either, since it requires a
Premium Membership.
:return: an access token
:rtype: :class:`kkbox_sdk.KKBOXAccessToken`
See `https://docs-en.kkbox.codes/docs/appendix-a`. | Below is the instruction that describes the task:
### Input:
There are three ways to let you start using KKBOX's Open/Partner
API. The first way among them is to generate a client
credential to fetch an access token to let KKBOX identify
you. It allows you to access public data from KKBOX such as
public albums, playlists and so on.
However, you cannot use client credentials to access private
data of a user. You have to let users log in to KKBOX and
grant permissions for you to do so. You cannot use client
credentials to do media playback either, since it requires a
Premium Membership.
:return: an access token
:rtype: :class:`kkbox_sdk.KKBOXAccessToken`
See `https://docs-en.kkbox.codes/docs/appendix-a`.
### Response:
def fetch_access_token_by_client_credentials(self):
'''
There are three ways to let you start using KKBOX's Open/Partner
API. The first way among them is to generate a client
credential to fetch an access token to let KKBOX identify
you. It allows you to access public data from KKBOX such as
public albums, playlists and so on.
However, you cannot use client credentials to access private
data of a user. You have to let users log in to KKBOX and
grant permissions for you to do so. You cannot use client
credentials to do media playback either, since it requires a
Premium Membership.
:return: an access token
:rtype: :class:`kkbox_sdk.KKBOXAccessToken`
See `https://docs-en.kkbox.codes/docs/appendix-a`.
'''
client_credential_base = '%s:%s' % (self.client_id, self.client_secret)
try:
client_credentials = base64.b64encode(
bytes(client_credential_base, 'utf-8'))
except:
client_credentials = base64.b64encode(client_credential_base)
client_credentials = client_credentials.decode('utf-8')
headers = {'Authorization': 'Basic ' + client_credentials,
'Content-type': 'application/x-www-form-urlencoded'}
post_parameters = {'grant_type': 'client_credentials',
'scope': 'user_profile user_territory'}
json_object = self.http._post_data(KKBOXOAuth.OAUTH_TOKEN_URL, post_parameters,
headers)
self.access_token = KKBOXAccessToken(**json_object)
return self.access_token |
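A usage sketch; the constructor arguments and import path below are assumptions, not confirmed by the source:
from kkbox_developer_sdk.auth_flow import KKBOXOAuth  # assumed module path
auth = KKBOXOAuth("YOUR_CLIENT_ID", "YOUR_CLIENT_SECRET")
token = auth.fetch_access_token_by_client_credentials()
print(token.access_token)  # bearer token for subsequent API calls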
def service(ctx):
"""Install systemd service configuration"""
install_service(ctx.obj['instance'], ctx.obj['dbhost'], ctx.obj['dbname'], ctx.obj['port']) | Install systemd service configuration | Below is the instruction that describes the task:
### Input:
Install systemd service configuration
### Response:
def service(ctx):
"""Install systemd service configuration"""
install_service(ctx.obj['instance'], ctx.obj['dbhost'], ctx.obj['dbname'], ctx.obj['port']) |
def _reset_em(self):
"""Resets self.em and the shared instances."""
self.em = _ExtendedManager(self.addr, self.authkey, mode=self.mode, start=False)
self.em.start()
self._set_shared_instances() | Resets self.em and the shared instances. | Below is the instruction that describes the task:
### Input:
Resets self.em and the shared instances.
### Response:
def _reset_em(self):
"""Resets self.em and the shared instances."""
self.em = _ExtendedManager(self.addr, self.authkey, mode=self.mode, start=False)
self.em.start()
self._set_shared_instances() |
def bruggeman_refractive(m, mix):
"""Bruggeman EMA for the refractive index.
For instructions, see mg_refractive in this module, except this routine
only works for two components.
"""
f1 = mix[0]/sum(mix)
f2 = mix[1]/sum(mix)
e1 = m[0]**2
e2 = m[1]**2
a = -2*(f1+f2)
b = (2*f1*e1 - f1*e2 + 2*f2*e2 - f2*e1)
c = (f1+f2)*e1*e2
e_eff = (-b - np.sqrt(b**2-4*a*c))/(2*a)
return np.sqrt(e_eff) | Bruggeman EMA for the refractive index.
For instructions, see mg_refractive in this module, except this routine
only works for two components. | Below is the instruction that describes the task:
### Input:
Bruggeman EMA for the refractive index.
For instructions, see mg_refractive in this module, except this routine
only works for two components.
### Response:
def bruggeman_refractive(m, mix):
"""Bruggeman EMA for the refractive index.
For instructions, see mg_refractive in this module, except this routine
only works for two components.
"""
f1 = mix[0]/sum(mix)
f2 = mix[1]/sum(mix)
e1 = m[0]**2
e2 = m[1]**2
a = -2*(f1+f2)
b = (2*f1*e1 - f1*e2 + 2*f2*e2 - f2*e1)
c = (f1+f2)*e1*e2
e_eff = (-b - np.sqrt(b**2-4*a*c))/(2*a)
return np.sqrt(e_eff) |
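A quick check of bruggeman_refractive for a two-component mixture, assuming the function above and its numpy import are in scope; the indices below (roughly ice and air) are illustrative:
m = [complex(1.78, 0.003), complex(1.0, 0.0)]  # ~ice, ~air
mix = [0.3, 0.7]                               # volume fractions (any positive weights)
print(bruggeman_refractive(m, mix))  # effective index lies between the two inputs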
def refreshUserMembership(self, users):
"""
This operation iterates over every enterprise group configured in
the portal and determines if the input user accounts belong to any
of the configured enterprise groups. If there is any change in
membership, the database and the indexes are updated for each user
account. While portal automatically refreshes the memberships
during a user login and during a periodic refresh (configured
through the Update Identity Store operation), this operation allows
an administrator to force a refresh.
Parameters:
users - comma-separated list of user names
"""
params = {
"f" : "json",
"users" : users
}
url = self._url + "/users/refreshMembership"
return self._post(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url) | This operation iterates over every enterprise group configured in
the portal and determines if the input user accounts belong to any
of the configured enterprise groups. If there is any change in
membership, the database and the indexes are updated for each user
account. While portal automatically refreshes the memberships
during a user login and during a periodic refresh (configured
through the Update Identity Store operation), this operation allows
an administrator to force a refresh.
Parameters:
users - comma-separated list of user names | Below is the instruction that describes the task:
### Input:
This operation iterates over every enterprise group configured in
the portal and determines if the input user accounts belong to any
of the configured enterprise groups. If there is any change in
membership, the database and the indexes are updated for each user
account. While portal automatically refreshes the memberships
during a user login and during a periodic refresh (configured
through the Update Identity Store operation), this operation allows
an administrator to force a refresh.
Parameters:
users - comma-separated list of user names
### Response:
def refreshUserMembership(self, users):
"""
This operation iterates over every enterprise group configured in
the portal and determines if the input user accounts belong to any
of the configured enterprise groups. If there is any change in
membership, the database and the indexes are updated for each user
account. While portal automatically refreshes the memberships
during a user login and during a periodic refresh (configured
through the Update Identity Store operation), this operation allows
an administrator to force a refresh.
Parameters:
users - comma-separated list of user names
"""
params = {
"f" : "json",
"users" : users
}
url = self._url + "/users/refreshMembership"
return self._post(url=url,
param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url) |
def _unlock(self):
"""Unlock the config DB."""
if self.locked:
try:
self.device.cu.unlock()
self.locked = False
except JnrpUnlockError as jue:
raise UnlockError(jue.message) | Unlock the config DB. | Below is the instruction that describes the task:
### Input:
Unlock the config DB.
### Response:
def _unlock(self):
"""Unlock the config DB."""
if self.locked:
try:
self.device.cu.unlock()
self.locked = False
except JnrpUnlockError as jue:
raise UnlockError(jue.message)
def get_execution_details(self):
"""
Retrieves execution details for a task.
:return: Execution details instance.
"""
extra = {
'resource': self.__class__.__name__,
'query': {'id': self.id}
}
logger.info('Get execution details', extra=extra)
data = self._api.get(
self._URL['execution_details'].format(id=self.id)).json()
return ExecutionDetails(api=self._api, **data) | Retrieves execution details for a task.
:return: Execution details instance. | Below is the instruction that describes the task:
### Input:
Retrieves execution details for a task.
:return: Execution details instance.
### Response:
def get_execution_details(self):
"""
Retrieves execution details for a task.
:return: Execution details instance.
"""
extra = {
'resource': self.__class__.__name__,
'query': {'id': self.id}
}
logger.info('Get execution details', extra=extra)
data = self._api.get(
self._URL['execution_details'].format(id=self.id)).json()
return ExecutionDetails(api=self._api, **data) |
def vqa_attention_base():
"""VQA attention baseline hparams."""
hparams = common_hparams.basic_params1()
hparams.batch_size = 128
hparams.use_fixed_batch_size = True
hparams.optimizer = "adam"
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.999
hparams.optimizer_adam_epsilon = 1e-8
hparams.weight_decay = 0.
hparams.clip_grad_norm = 0.
hparams.initializer = "xavier"
hparams.learning_rate = 0.5
hparams.learning_rate_schedule = "legacy"
hparams.learning_rate_warmup_steps = 0
hparams.learning_rate_decay_scheme = "exp"
hparams.learning_rate_decay_rate = 0.5
hparams.learning_rate_decay_steps = 50000
hparams.dropout = 0.5
hparams.summarize_grads = True
hparams.summarize_vars = True
# not used hparams
hparams.label_smoothing = 0.
hparams.multiply_embedding_mode = ""
# add new hparams
# preprocess
hparams.add_hparam("resize_side", 512)
hparams.add_hparam("height", 448)
hparams.add_hparam("width", 448)
hparams.add_hparam("distort", True)
hparams.add_hparam("train_resnet", False)
hparams.add_hparam("rnn_type", "lstm")
hparams.add_hparam("num_rnn_layers", 1)
hparams.add_hparam("max_question_length", 15)
# lstm hidden size
hparams.hidden_size = 512
hparams.add_hparam("attn_dim", 512)
hparams.add_hparam("num_glimps", 2)
hparams.add_hparam("num_mlp_layers", 1)
hparams.add_hparam("mlp_dim", 1024)
hparams.add_hparam("image_input_type", "image")
hparams.add_hparam("image_model_fn", "resnet_v1_152")
hparams.add_hparam("image_feat_size", 0)
# self attention parts
hparams.norm_type = "layer"
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.3
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
hparams.image_hidden_size = 2048
hparams.add_hparam("num_encoder_layers", 1)
# Attention-related flags.
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("image_filter_size", 1024)
hparams.add_hparam("self_attention_type", "dot_product")
hparams.add_hparam("scale_dotproduct", True)
return hparams | VQA attention baseline hparams. | Below is the instruction that describes the task:
### Input:
VQA attention baseline hparams.
### Response:
def vqa_attention_base():
"""VQA attention baseline hparams."""
hparams = common_hparams.basic_params1()
hparams.batch_size = 128
hparams.use_fixed_batch_size = True
hparams.optimizer = "adam"
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.999
hparams.optimizer_adam_epsilon = 1e-8
hparams.weight_decay = 0.
hparams.clip_grad_norm = 0.
hparams.initializer = "xavier"
hparams.learning_rate = 0.5
hparams.learning_rate_schedule = "legacy"
hparams.learning_rate_warmup_steps = 0
hparams.learning_rate_decay_scheme = "exp"
hparams.learning_rate_decay_rate = 0.5
hparams.learning_rate_decay_steps = 50000
hparams.dropout = 0.5
hparams.summarize_grads = True
hparams.summarize_vars = True
# not used hparams
hparams.label_smoothing = 0.
hparams.multiply_embedding_mode = ""
# add new hparams
# preprocess
hparams.add_hparam("resize_side", 512)
hparams.add_hparam("height", 448)
hparams.add_hparam("width", 448)
hparams.add_hparam("distort", True)
hparams.add_hparam("train_resnet", False)
hparams.add_hparam("rnn_type", "lstm")
hparams.add_hparam("num_rnn_layers", 1)
hparams.add_hparam("max_question_length", 15)
# lstm hidden size
hparams.hidden_size = 512
hparams.add_hparam("attn_dim", 512)
hparams.add_hparam("num_glimps", 2)
hparams.add_hparam("num_mlp_layers", 1)
hparams.add_hparam("mlp_dim", 1024)
hparams.add_hparam("image_input_type", "image")
hparams.add_hparam("image_model_fn", "resnet_v1_152")
hparams.add_hparam("image_feat_size", 0)
# self attention parts
hparams.norm_type = "layer"
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.3
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
hparams.image_hidden_size = 2048
hparams.add_hparam("num_encoder_layers", 1)
# Attention-related flags.
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("image_filter_size", 1024)
hparams.add_hparam("self_attention_type", "dot_product")
hparams.add_hparam("scale_dotproduct", True)
return hparams |
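A sketch of how such an hparams factory is typically used, assuming the tensor2tensor-style common_hparams import seen above resolves:
hparams = vqa_attention_base()
print(hparams.hidden_size, hparams.num_glimps)  # 512 2
hparams.batch_size = 64  # hparams objects are meant to be tweaked per experiment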
def return_dat(self, chan, begsam, endsam):
"""Return the data as 2D numpy.ndarray.
Parameters
----------
chan : int or list
index (indices) of the channels to read
begsam : int
index of the first sample
endsam : int
index of the last sample
Returns
-------
numpy.ndarray
A 2d matrix, with dimension chan X samples
"""
if type(chan) == int: # if single value is provided it needs to be transformed to list to generate a 2d matrix
chan = [chan, ]
if (begsam >= self._n_smp) or (endsam < 0):
dat = empty((len(chan), endsam - begsam))
dat.fill(NaN)
return dat
if begsam < 0:
begpad = -1 * begsam
begsam = 0
else:
begpad = 0
if endsam > self._n_smp:
endpad = endsam - self._n_smp
endsam = self._n_smp
else:
endpad = 0
dshape = (self._n_chan, endsam - begsam)
sig_dtype = 'u' + str(self._n_bytes)
offset = self._bodata + begsam * self._n_bytes * self._n_chan
dat = memmap(str(self.filename), dtype=sig_dtype, order='F', mode='r',
shape=dshape, offset=offset).astype('float')
dat = pad(dat[chan, :], ((0, 0), (begpad, endpad)), mode='constant',
constant_values=NaN)
return (dat - self._offset[chan, None]) * self._factors[chan, None] | Return the data as 2D numpy.ndarray.
Parameters
----------
chan : int or list
index (indices) of the channels to read
begsam : int
index of the first sample
endsam : int
index of the last sample
Returns
-------
numpy.ndarray
A 2d matrix, with dimension chan X samples | Below is the instruction that describes the task:
### Input:
Return the data as 2D numpy.ndarray.
Parameters
----------
chan : int or list
index (indices) of the channels to read
begsam : int
index of the first sample
endsam : int
index of the last sample
Returns
-------
numpy.ndarray
A 2d matrix, with dimension chan X samples
### Response:
def return_dat(self, chan, begsam, endsam):
"""Return the data as 2D numpy.ndarray.
Parameters
----------
chan : int or list
index (indices) of the channels to read
begsam : int
index of the first sample
endsam : int
index of the last sample
Returns
-------
numpy.ndarray
A 2d matrix, with dimension chan X samples
"""
if type(chan) == int: # if single value is provided it needs to be transformed to list to generate a 2d matrix
chan = [chan, ]
if (begsam >= self._n_smp) or (endsam < 0):
dat = empty((len(chan), endsam - begsam))
dat.fill(NaN)
return dat
if begsam < 0:
begpad = -1 * begsam
begsam = 0
else:
begpad = 0
if endsam > self._n_smp:
endpad = endsam - self._n_smp
endsam = self._n_smp
else:
endpad = 0
dshape = (self._n_chan, endsam - begsam)
sig_dtype = 'u' + str(self._n_bytes)
offset = self._bodata + begsam * self._n_bytes * self._n_chan
dat = memmap(str(self.filename), dtype=sig_dtype, order='F', mode='r',
shape=dshape, offset=offset).astype('float')
dat = pad(dat[chan, :], ((0, 0), (begpad, endpad)), mode='constant',
constant_values=NaN)
return (dat - self._offset[chan, None]) * self._factors[chan, None] |
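The begpad/endpad bookkeeping above can be illustrated standalone: out-of-range samples come back as NaN while in-range samples come from the recording (here a fake 10-sample file):
import numpy as np
n_smp = 10
begsam, endsam = -3, 12                        # request overruns both ends
begpad, begsam = max(0, -begsam), max(0, begsam)
endpad, endsam = max(0, endsam - n_smp), min(endsam, n_smp)
dat = np.arange(n_smp, dtype=float)[begsam:endsam]
dat = np.pad(dat, (begpad, endpad), mode='constant', constant_values=np.nan)
print(dat)  # 3 NaNs, samples 0..9, then 2 NaNs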
def setup_requires():
"""
Return required packages
Plus any version tests and warnings
"""
from pkg_resources import parse_version
required = ['cython>=0.24.0']
numpy_requirement = 'numpy>=1.7.1'
try:
import numpy
except Exception:
required.append(numpy_requirement)
else:
if parse_version(numpy.__version__) < parse_version('1.7.1'):
required.append(numpy_requirement)
return required | Return required packages
Plus any version tests and warnings | Below is the instruction that describes the task:
### Input:
Return required packages
Plus any version tests and warnings
### Response:
def setup_requires():
"""
Return required packages
Plus any version tests and warnings
"""
from pkg_resources import parse_version
required = ['cython>=0.24.0']
numpy_requirement = 'numpy>=1.7.1'
try:
import numpy
except Exception:
required.append(numpy_requirement)
else:
if parse_version(numpy.__version__) < parse_version('1.7.1'):
required.append(numpy_requirement)
return required |
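Typically this helper feeds setup() directly; the package metadata below is illustrative:
from setuptools import setup
setup(
    name="example-package",          # illustrative metadata
    version="0.1",
    setup_requires=setup_requires(),
)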
def when_all_players_ready(self):
"""Initializes decisions based on ``player.initial_decision()``.
If :attr:`num_subperiods` is set, starts a timed task to run the
sub-periods.
"""
self.group_decisions = {}
self.subperiod_group_decisions = {}
for player in self.get_players():
self.group_decisions[player.participant.code] = player.initial_decision()
self.subperiod_group_decisions[player.participant.code] = player.initial_decision()
if self.num_subperiods():
emitter = DiscreteEventEmitter(
self.period_length() / self.num_subperiods(),
self.period_length(),
self,
self._subperiod_tick)
emitter.start()
elif self.rate_limit():
def _tick(current_interval, intervals):
self.refresh_from_db()
if self._group_decisions_updated:
self.send('group_decisions', self.group_decisions)
self._group_decisions_updated = False
self.save(update_fields=['_group_decisions_updated'])
update_period = self.rate_limit()
emitter = DiscreteEventEmitter(
update_period,
self.period_length(),
self,
_tick)
emitter.start()
self.save() | Initializes decisions based on ``player.initial_decision()``.
If :attr:`num_subperiods` is set, starts a timed task to run the
sub-periods. | Below is the instruction that describes the task:
### Input:
Initializes decisions based on ``player.initial_decision()``.
If :attr:`num_subperiods` is set, starts a timed task to run the
sub-periods.
### Response:
def when_all_players_ready(self):
"""Initializes decisions based on ``player.initial_decision()``.
If :attr:`num_subperiods` is set, starts a timed task to run the
sub-periods.
"""
self.group_decisions = {}
self.subperiod_group_decisions = {}
for player in self.get_players():
self.group_decisions[player.participant.code] = player.initial_decision()
self.subperiod_group_decisions[player.participant.code] = player.initial_decision()
if self.num_subperiods():
emitter = DiscreteEventEmitter(
self.period_length() / self.num_subperiods(),
self.period_length(),
self,
self._subperiod_tick)
emitter.start()
elif self.rate_limit():
def _tick(current_interval, intervals):
self.refresh_from_db()
if self._group_decisions_updated:
self.send('group_decisions', self.group_decisions)
self._group_decisions_updated = False
self.save(update_fields=['_group_decisions_updated'])
update_period = self.rate_limit()
emitter = DiscreteEventEmitter(
update_period,
self.period_length(),
self,
_tick)
emitter.start()
self.save() |
def _EnsureFileExists(self):
"""Touches a file; returns False on error, True on success."""
if not os.path.exists(self._filename):
old_umask = os.umask(0o177)
try:
open(self._filename, 'a+b').close()
except OSError:
return False
finally:
os.umask(old_umask)
return True | Touches a file; returns False on error, True on success. | Below is the instruction that describes the task:
### Input:
Touches a file; returns False on error, True on success.
### Response:
def _EnsureFileExists(self):
"""Touches a file; returns False on error, True on success."""
if not os.path.exists(self._filename):
old_umask = os.umask(0o177)
try:
open(self._filename, 'a+b').close()
except OSError:
return False
finally:
os.umask(old_umask)
return True |
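The same owner-only touch works standalone; this sketch mirrors the method's umask dance for a hypothetical path:
import os
def touch_private(filename):
    """Create filename with 0600 permissions if missing; True on success."""
    if not os.path.exists(filename):
        old_umask = os.umask(0o177)
        try:
            open(filename, 'a+b').close()
        except OSError:
            return False
        finally:
            os.umask(old_umask)
    return True
print(touch_private('/tmp/credentials_demo'))  # hypothetical path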
def nonzero(self):
""" property decorated method to get a new ObservationEnsemble
of only non-zero weighted observations
Returns
-------
ObservationEnsemble : ObservationEnsemble
"""
df = self.loc[:,self.pst.nnz_obs_names]
return ObservationEnsemble.from_dataframe(df=df,
pst=self.pst.get(obs_names=self.pst.nnz_obs_names)) | property decorated method to get a new ObservationEnsemble
of only non-zero weighted observations
Returns
-------
ObservationEnsemble : ObservationEnsemble | Below is the instruction that describes the task:
### Input:
property decorated method to get a new ObservationEnsemble
of only non-zero weighted observations
Returns
-------
ObservationEnsemble : ObservationEnsemble
### Response:
def nonzero(self):
""" property decorated method to get a new ObservationEnsemble
of only non-zero weighted observations
Returns
-------
ObservationEnsemble : ObservationEnsemble
"""
df = self.loc[:,self.pst.nnz_obs_names]
return ObservationEnsemble.from_dataframe(df=df,
pst=self.pst.get(obs_names=self.pst.nnz_obs_names)) |
def _generate_api_gateway_deployment(self):
"""
Generate the API Gateway Deployment/Stage, and add to self.tf_conf
"""
# finally, the deployment
# this resource MUST come last
dep_on = []
for rtype in sorted(self.tf_conf['resource'].keys()):
for rname in sorted(self.tf_conf['resource'][rtype].keys()):
dep_on.append('%s.%s' % (rtype, rname))
self.tf_conf['resource']['aws_api_gateway_deployment']['depl'] = {
'rest_api_id': '${aws_api_gateway_rest_api.rest_api.id}',
'description': self.description,
'stage_name': self.config.stage_name,
'depends_on': dep_on
}
self.tf_conf['output']['deployment_id'] = {
'value': '${aws_api_gateway_deployment.depl.id}'
} | Generate the API Gateway Deployment/Stage, and add to self.tf_conf | Below is the instruction that describes the task:
### Input:
Generate the API Gateway Deployment/Stage, and add to self.tf_conf
### Response:
def _generate_api_gateway_deployment(self):
"""
Generate the API Gateway Deployment/Stage, and add to self.tf_conf
"""
# finally, the deployment
# this resource MUST come last
dep_on = []
for rtype in sorted(self.tf_conf['resource'].keys()):
for rname in sorted(self.tf_conf['resource'][rtype].keys()):
dep_on.append('%s.%s' % (rtype, rname))
self.tf_conf['resource']['aws_api_gateway_deployment']['depl'] = {
'rest_api_id': '${aws_api_gateway_rest_api.rest_api.id}',
'description': self.description,
'stage_name': self.config.stage_name,
'depends_on': dep_on
}
self.tf_conf['output']['deployment_id'] = {
'value': '${aws_api_gateway_deployment.depl.id}'
} |
def pt_shift(pt=(0.0, 0.0), shift=[0.0, 0.0]):
'''Return given point shifted in N dimensions.
'''
assert isinstance(pt, tuple)
l_pt = len(pt)
assert l_pt > 1
for i in pt:
assert isinstance(i, float)
assert isinstance(shift, list)
l_sh = len(shift)
assert l_sh == l_pt
for i in shift:
assert isinstance(i, float)
return tuple([pt[i] + shift[i] for i in range(l_pt)]) | Return given point shifted in N dimensions. | Below is the instruction that describes the task:
### Input:
Return given point shifted in N dimensions.
### Response:
def pt_shift(pt=(0.0, 0.0), shift=[0.0, 0.0]):
'''Return given point shifted in N dimensions.
'''
assert isinstance(pt, tuple)
l_pt = len(pt)
assert l_pt > 1
for i in pt:
assert isinstance(i, float)
assert isinstance(shift, list)
l_sh = len(shift)
assert l_sh == l_pt
for i in shift:
assert isinstance(i, float)
return tuple([pt[i] + shift[i] for i in range(l_pt)]) |
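pt_shift is pure, so a doctest-style example pins down its behavior:
print(pt_shift((1.0, 2.0), [0.5, -0.5]))            # (1.5, 1.5)
print(pt_shift((0.0, 0.0, 0.0), [1.0, 2.0, 3.0]))   # (1.0, 2.0, 3.0)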
def _freeze(self, final_text, err=False):
"""Stop spinner, compose last frame and 'freeze' it."""
if not final_text:
final_text = ""
target = self.stderr if err else self.stdout
if target.closed:
target = sys.stderr if err else sys.stdout
text = to_text(final_text)
last_frame = self._compose_out(text, mode="last")
self._last_frame = decode_output(last_frame, target_stream=target)
# Should be stopped here, otherwise prints after
# self._freeze call will mess up the spinner
self.stop()
target.write(self._last_frame) | Stop spinner, compose last frame and 'freeze' it. | Below is the instruction that describes the task:
### Input:
Stop spinner, compose last frame and 'freeze' it.
### Response:
def _freeze(self, final_text, err=False):
"""Stop spinner, compose last frame and 'freeze' it."""
if not final_text:
final_text = ""
target = self.stderr if err else self.stdout
if target.closed:
target = sys.stderr if err else sys.stdout
text = to_text(final_text)
last_frame = self._compose_out(text, mode="last")
self._last_frame = decode_output(last_frame, target_stream=target)
# Should be stopped here, otherwise prints after
# self._freeze call will mess up the spinner
self.stop()
target.write(self._last_frame) |
def entry_index(request, limit=0, template='djournal/entry_index.html'):
'''Returns a response of a fixed number of entries; all of them, by default.'''
entries = Entry.public.all()
if limit > 0:
entries = entries[:limit]
context = {
'entries': entries,
}
return render_to_response(
template,
context,
context_instance=RequestContext(request),
) | Returns a response of a fixed number of entries; all of them, by default. | Below is the instruction that describes the task:
### Input:
Returns a response of a fixed number of entries; all of them, by default.
### Response:
def entry_index(request, limit=0, template='djournal/entry_index.html'):
'''Returns a response of a fixed number of entries; all of them, by default.'''
entries = Entry.public.all()
if limit > 0:
entries = entries[:limit]
context = {
'entries': entries,
}
return render_to_response(
template,
context,
context_instance=RequestContext(request),
) |
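A sketch of wiring this view into an old-style Django urlconf, matching the render_to_response idiom above; the import path and URL pattern are assumptions:
from django.conf.urls import url        # Django < 2.0 style
from djournal.views import entry_index  # assumed module path
urlpatterns = [
    url(r'^journal/$', entry_index, {'limit': 10}),  # ten newest entries
]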
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'emotion') and self.emotion is not None:
_dict['emotion'] = self.emotion._to_dict()
return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'emotion') and self.emotion is not None:
_dict['emotion'] = self.emotion._to_dict()
return _dict |
def get_matrix_from_list(self, rows, columns, matrix_list, rowBased=True):
"""Create a new Matrix instance from a matrix_list.
:note: This method is used to create a Matrix instance using cpython.
:param integer rows: The height of the Matrix.
:param integer columns: The width of the Matrix.
:param matrix_list: A one dimensional list containing the
values for Matrix. Depending on the
rowBased parameter, either the rows are
combined or the columns.
:param rowBased Boolean: Only necessary if the oneDimArray is given.
Indicates whether the oneDimArray combines
rows together (rowBased=True) or columns
(rowBased=False).
"""
resultMatrix = Matrix(columns, rows, matrix_list, rowBased)
return resultMatrix | Create a new Matrix instance from a matrix_list.
:note: This method is used to create a Matrix instance using cpython.
:param integer rows: The height of the Matrix.
:param integer columns: The width of the Matrix.
:param matrix_list: A one dimensional list containing the
values for Matrix. Depending on the
rowBased parameter, either the rows are
combined or the columns.
:param rowBased Boolean: Only necessary if the oneDimArray is given.
Indicates whether the oneDimArray combines
rows together (rowBased=True) or columns
(rowBased=False). | Below is the instruction that describes the task:
### Input:
Create a new Matrix instance from a matrix_list.
:note: This method is used to create a Matrix instance using cpython.
:param integer rows: The height of the Matrix.
:param integer columns: The width of the Matrix.
:param matrix_list: A one dimensional list containing the
values for Matrix. Depending on the
rowBased parameter, either the rows are
combined or the columns.
:param rowBased Boolean: Only necessary if the oneDimArray is given.
Indicates whether the oneDimArray combines
rows together (rowBased=True) or columns
(rowBased=False).
### Response:
def get_matrix_from_list(self, rows, columns, matrix_list, rowBased=True):
"""Create a new Matrix instance from a matrix_list.
:note: This method is used to create a Matrix instance using cpython.
:param integer rows: The height of the Matrix.
:param integer columns: The width of the Matrix.
:param matrix_list: A one dimensional list containing the
values for Matrix. Depending on the
rowBased parameter, either the rows are
combined or the columns.
:param rowBased Boolean: Only necessary if the oneDimArray is given.
Indicates whether the oneDimArray combines
rows together (rowBased=True) or columns
(rowBased=False).
"""
resultMatrix = Matrix(columns, rows, matrix_list, rowBased)
return resultMatrix |
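A sketch of the rowBased flattening convention; 'factory' stands in for whatever object exposes this method, and Matrix comes from the same module:
flat = [1, 2, 3, 4, 5, 6]  # 2x3 matrix given row by row: [[1, 2, 3], [4, 5, 6]]
mat = factory.get_matrix_from_list(rows=2, columns=3, matrix_list=flat, rowBased=True)
# column-by-column, the same values would be [1, 4, 2, 5, 3, 6] with rowBased=False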
def _add_ps2q(self, throat, queue):
"""
Helper method to add pores to the cluster queue
"""
net = self.project.network
elem_type = 'pore'
# Find pores connected to newly invaded throat
Ps = net['throat.conns'][throat]
# Remove already invaded pores from Ps
Ps = Ps[self['pore.invasion_sequence'][Ps] <= 0]
if len(Ps) > 0:
self._interface_Ps[Ps] = True
for P in Ps:
data = []
# Pc
data.append(self["pore.entry_pressure"][P])
# Element Index
data.append(P)
# Element Type (Pore or Throat)
data.append(elem_type)
hq.heappush(queue, data) | Helper method to add pores to the cluster queue | Below is the instruction that describes the task:
### Input:
Helper method to add pores to the cluster queue
### Response:
def _add_ps2q(self, throat, queue):
"""
Helper method to add pores to the cluster queue
"""
net = self.project.network
elem_type = 'pore'
# Find pores connected to newly invaded throat
Ps = net['throat.conns'][throat]
# Remove already invaded pores from Ps
Ps = Ps[self['pore.invasion_sequence'][Ps] <= 0]
if len(Ps) > 0:
self._interface_Ps[Ps] = True
for P in Ps:
data = []
# Pc
data.append(self["pore.entry_pressure"][P])
# Element Index
data.append(P)
# Element Type (Pore or Throat)
data.append(elem_type)
hq.heappush(queue, data) |
def container_fs_usage_bytes(self, metric, scraper_config):
"""
Number of bytes that are consumed by the container on this filesystem.
"""
metric_name = scraper_config['namespace'] + '.filesystem.usage'
if metric.type not in METRIC_TYPES:
self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name))
return
self._process_usage_metric(metric_name, metric, self.fs_usage_bytes, scraper_config) | Number of bytes that are consumed by the container on this filesystem. | Below is the instruction that describes the task:
### Input:
Number of bytes that are consumed by the container on this filesystem.
### Response:
def container_fs_usage_bytes(self, metric, scraper_config):
"""
Number of bytes that are consumed by the container on this filesystem.
"""
metric_name = scraper_config['namespace'] + '.filesystem.usage'
if metric.type not in METRIC_TYPES:
self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name))
return
self._process_usage_metric(metric_name, metric, self.fs_usage_bytes, scraper_config) |
def find_train_knns(self, data_activations):
"""
Given a data_activation dictionary that contains a np array with activations for each layer,
find the knns in the training data.
"""
knns_ind = {}
knns_labels = {}
for layer in self.layers:
# Pre-process representations of data to normalize and remove training data mean.
data_activations_layer = copy.copy(data_activations[layer])
nb_data = data_activations_layer.shape[0]
data_activations_layer /= np.linalg.norm(
data_activations_layer, axis=1).reshape(-1, 1)
data_activations_layer -= self.centers[layer]
# Use FALCONN to find indices of nearest neighbors in training data.
knns_ind[layer] = np.zeros(
(data_activations_layer.shape[0], self.neighbors), dtype=np.int32)
knn_errors = 0
for i in range(data_activations_layer.shape[0]):
query_res = self.query_objects[layer].find_k_nearest_neighbors(
data_activations_layer[i], self.neighbors)
try:
knns_ind[layer][i, :] = query_res
except: # pylint: disable-msg=W0702
knns_ind[layer][i, :len(query_res)] = query_res
knn_errors += knns_ind[layer].shape[1] - len(query_res)
# Find labels of neighbors found in the training data.
knns_labels[layer] = np.zeros((nb_data, self.neighbors), dtype=np.int32)
for data_id in range(nb_data):
knns_labels[layer][data_id, :] = self.train_labels[knns_ind[layer][data_id]]
return knns_ind, knns_labels | Given a data_activation dictionary that contains a np array with activations for each layer,
find the knns in the training data. | Below is the instruction that describes the task:
### Input:
Given a data_activation dictionary that contains a np array with activations for each layer,
find the knns in the training data.
### Response:
def find_train_knns(self, data_activations):
"""
Given a data_activation dictionary that contains a np array with activations for each layer,
find the knns in the training data.
"""
knns_ind = {}
knns_labels = {}
for layer in self.layers:
# Pre-process representations of data to normalize and remove training data mean.
data_activations_layer = copy.copy(data_activations[layer])
nb_data = data_activations_layer.shape[0]
data_activations_layer /= np.linalg.norm(
data_activations_layer, axis=1).reshape(-1, 1)
data_activations_layer -= self.centers[layer]
# Use FALCONN to find indices of nearest neighbors in training data.
knns_ind[layer] = np.zeros(
(data_activations_layer.shape[0], self.neighbors), dtype=np.int32)
knn_errors = 0
for i in range(data_activations_layer.shape[0]):
query_res = self.query_objects[layer].find_k_nearest_neighbors(
data_activations_layer[i], self.neighbors)
try:
knns_ind[layer][i, :] = query_res
except: # pylint: disable-msg=W0702
knns_ind[layer][i, :len(query_res)] = query_res
knn_errors += knns_ind[layer].shape[1] - len(query_res)
# Find labels of neighbors found in the training data.
knns_labels[layer] = np.zeros((nb_data, self.neighbors), dtype=np.int32)
for data_id in range(nb_data):
knns_labels[layer][data_id, :] = self.train_labels[knns_ind[layer][data_id]]
return knns_ind, knns_labels |
def generate_proto(source, require = True):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
if not require and not os.path.exists(source):
return
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print("Generating %s..." % output)
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if protoc is None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1) | Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input. | Below is the instruction that describes the task:
### Input:
Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input.
### Response:
def generate_proto(source, require = True):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
if not require and not os.path.exists(source):
return
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print("Generating %s..." % output)
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if protoc is None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1) |
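Typical calls from a protobuf-style setup.py, assuming paths relative to the python/ directory; the second path is illustrative of the require=False case:
generate_proto("../src/google/protobuf/descriptor.proto")               # must exist
generate_proto("google/protobuf/internal/more_messages.proto", False)   # skipped if absent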
def enrich_citation_model(manager, citation, p) -> bool:
"""Enrich a citation model with the information from PubMed.
:param pybel.manager.Manager manager:
:param Citation citation: A citation model
:param dict p: The dictionary from PubMed E-Utils corresponding to d["result"][pmid]
"""
if 'error' in p:
log.warning('Error downloading PubMed')
return False
citation.name = p['fulljournalname']
citation.title = p['title']
citation.volume = p['volume']
citation.issue = p['issue']
citation.pages = p['pages']
citation.first = manager.get_or_create_author(p['sortfirstauthor'])
citation.last = manager.get_or_create_author(p['lastauthor'])
if 'authors' in p:
for author in p['authors']:
author_model = manager.get_or_create_author(author['name'])
if author_model not in citation.authors:
citation.authors.append(author_model)
publication_date = p['pubdate']
sanitized_publication_date = sanitize_date(publication_date)
if sanitized_publication_date:
citation.date = datetime.strptime(sanitized_publication_date, '%Y-%m-%d')
else:
log.info('result had date with strange format: %s', publication_date)
return True | Enrich a citation model with the information from PubMed.
:param pybel.manager.Manager manager:
:param Citation citation: A citation model
:param dict p: The dictionary from PubMed E-Utils corresponding to d["result"][pmid] | Below is the the instruction that describes the task:
### Input:
Enrich a citation model with the information from PubMed.
:param pybel.manager.Manager manager:
:param Citation citation: A citation model
:param dict p: The dictionary from PubMed E-Utils corresponding to d["result"][pmid]
### Response:
def enrich_citation_model(manager, citation, p) -> bool:
"""Enrich a citation model with the information from PubMed.
:param pybel.manager.Manager manager:
:param Citation citation: A citation model
:param dict p: The dictionary from PubMed E-Utils corresponding to d["result"][pmid]
"""
if 'error' in p:
log.warning('Error downloading PubMed')
return False
citation.name = p['fulljournalname']
citation.title = p['title']
citation.volume = p['volume']
citation.issue = p['issue']
citation.pages = p['pages']
citation.first = manager.get_or_create_author(p['sortfirstauthor'])
citation.last = manager.get_or_create_author(p['lastauthor'])
if 'authors' in p:
for author in p['authors']:
author_model = manager.get_or_create_author(author['name'])
if author_model not in citation.authors:
citation.authors.append(author_model)
publication_date = p['pubdate']
sanitized_publication_date = sanitize_date(publication_date)
if sanitized_publication_date:
citation.date = datetime.strptime(sanitized_publication_date, '%Y-%m-%d')
else:
log.info('result had date with strange format: %s', publication_date)
return True |
def remove_system(self, system):
'''
Removes system from world and kills system
'''
if system in self._systems:
self._systems.remove(system)
else:
raise UnmanagedSystemError(system) | Removes system from world and kills system | Below is the instruction that describes the task:
### Input:
Removes system from world and kills system
### Response:
def remove_system(self, system):
'''
Removes system from world and kills system
'''
if system in self._systems:
self._systems.remove(system)
else:
raise UnmanagedSystemError(system) |
def _readline_echo(self, char, echo):
"""Echo a recieved character, move cursor etc..."""
if self._readline_do_echo(echo):
self.write(char) | Echo a received character, move cursor etc... | Below is the instruction that describes the task:
### Input:
Echo a received character, move cursor etc...
### Response:
def _readline_echo(self, char, echo):
"""Echo a recieved character, move cursor etc..."""
if self._readline_do_echo(echo):
self.write(char) |
No community queries yet
The top public SQL queries from the community will appear here once available.