text (stringlengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def _setup_db(cls):
"""
Setup the DB connection if DB_URL is set
"""
uri = cls._app.config.get("DB_URL")
if uri:
db.connect(uri, cls._app) | 0.015544 |
def naive(gold_schemes):
"""find naive baseline (most common scheme of a given length)?"""
scheme_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'schemes.json')
with open(scheme_path, 'r') as f:
dist = json.loads(f.read())
best_schemes = {}
for i in dist.keys():
if not dist[i]:
continue
best_schemes[int(i)] = tuple(int(j) for j in (max(dist[i], key=lambda x: x[1])[0]).split())
naive_schemes = []
for g in gold_schemes:
naive_schemes.append(best_schemes[len(g)])
return naive_schemes | 0.005102 |
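A hypothetical illustration of the structure schemes.json is expected to have, inferred from the max(..., key=...) lookup above; for each length, the scheme string with the highest count wins:
# Hypothetical data shaped like the expected schemes.json contents
dist = {"4": [["1 2 1 2", 350], ["1 1 2 2", 120]], "2": []}
best_schemes = {}
for i in dist.keys():
    if not dist[i]:
        continue
    best_schemes[int(i)] = tuple(int(j) for j in (max(dist[i], key=lambda x: x[1])[0]).split())
assert best_schemes == {4: (1, 2, 1, 2)}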
def key_from_password(password):
"""This method just hashes self.password."""
if isinstance(password, unicode):
password = password.encode('utf-8')
if not isinstance(password, bytes):
raise TypeError("password must be byte string, not %s" % type(password))
sha = SHA256.new()
sha.update(password)
return sha.digest() | 0.00831 |
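A minimal usage sketch; hashlib.sha256 from the standard library is assumed here as a stand-in for the Crypto SHA256 object used above and yields the same 32-byte digest:
import hashlib

def sha256_key_from_password(password):
    # same hashing as above, via the standard library (assumed equivalent)
    if isinstance(password, str):
        password = password.encode('utf-8')
    if not isinstance(password, bytes):
        raise TypeError("password must be a byte string, not %s" % type(password))
    return hashlib.sha256(password).digest()

key = sha256_key_from_password("correct horse battery staple")
assert len(key) == 32  # SHA-256 digests are always 32 bytes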
def rx_filter(header, data):
"""Check if the message in rx_data matches to the information in header.
The following checks are done:
- Header checksum
- Payload checksum
- NetFn matching
- LUN matching
- Command Id matching
header: the header to compare with
data: the received message as bytestring
"""
rsp_header = IpmbHeaderRsp()
rsp_header.decode(data)
data = array('B', data)
checks = [
(checksum(data[0:3]), 0, 'Header checksum failed'),
(checksum(data[3:]), 0, 'payload checksum failed'),
# rsp_header.rq_sa, header.rq_sa, 'slave address mismatch'),
(rsp_header.netfn, header.netfn | 1, 'NetFn mismatch'),
# rsp_header.rs_sa, header.rs_sa, 'target address mismatch'),
# rsp_header.rq_lun, header.rq_lun, 'request LUN mismatch'),
(rsp_header.rs_lun, header.rs_lun, 'responder LUN mismatch'),
(rsp_header.rq_seq, header.rq_seq, 'sequence number mismatch'),
(rsp_header.cmd_id, header.cmd_id, 'command id mismatch'),
]
match = True
for left, right, msg in checks:
if left != right:
log().debug('{:s}: {:d} {:d}'.format(msg, left, right))
match = False
return match | 0.000792 |
def export_pipeline(url, pipeline_id, auth, verify_ssl):
"""Export the config and rules for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
export_result = requests.get(url + '/' + pipeline_id + '/export', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
if export_result.status_code == 404:
logging.error('Pipeline not found: ' + pipeline_id)
export_result.raise_for_status()
return export_result.json() | 0.002774 |
def bids_to_pwl(self, bids):
""" Updates the piece-wise linear total cost function using the given
bid blocks.
Based on off2case.m from MATPOWER by Ray Zimmerman, developed at PSERC
Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info.
"""
assert self.is_load
# Apply only those bids associated with this dispatchable load.
vl_bids = [bid for bid in bids if bid.vLoad == self]
# Filter out zero quantity bids.
gt_zero = [bid for bid in vl_bids if round(bid.quantity, 4) > 0.0]
# Ignore withheld offers.
valid_bids = [bid for bid in gt_zero if not bid.withheld]
p_bids = [v for v in valid_bids if not v.reactive]
q_bids = [v for v in valid_bids if v.reactive]
if p_bids:
self.p_cost = self._offbids_to_points(p_bids, True)
self.pcost_model = PW_LINEAR
self.online = True
else:
self.p_cost = [(0.0, 0.0), (self.p_max, 0.0)]
self.pcost_model = PW_LINEAR
logger.info("No valid active power bids for dispatchable load "
"[%s], shutting down." % self.name)
self.online = False
if q_bids:
self.q_cost = self._offbids_to_points(q_bids, True)
self.qcost_model = PW_LINEAR
self.online = True
else:
self.q_cost = [(self.q_min, 0.0), (0.0, 0.0), (self.q_max, 0.0)]
self.qcost_model = PW_LINEAR
# logger.info("No valid bids for dispatchable load, shutting down.")
# self.online = False
self._adjust_limits() | 0.001207 |
def filter(self, table, group_types, filter_string):
"""Naive case-insensitive search."""
query = filter_string.lower()
return [group_type for group_type in group_types
if query in group_type.name.lower()] | 0.008163 |
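A small usage sketch of the same case-insensitive substring match, with SimpleNamespace objects standing in for the group_type records (hypothetical data):
from types import SimpleNamespace

group_types = [SimpleNamespace(name="Admins"), SimpleNamespace(name="Developers")]
query = "DEV".lower()
matches = [g for g in group_types if query in g.name.lower()]
assert [g.name for g in matches] == ["Developers"]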
def future(self, request, timeout=None, metadata=None, credentials=None):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and a Future. In the event of
RPC completion, the return Call-Future's result value will be the
response message of the RPC. Should the event terminate with non-OK
status, the returned Call-Future's exception value will be an RpcError.
"""
return _utils.wrap_future_call(self._inner.future(request, timeout, metadata, credentials),
self._loop, self._executor) | 0.003261 |
def delete(self):
"""
Remove entries from the table. Often combined with `where`, as it acts
on all records in the table unless restricted.
"""
cmd = "delete from {table} {where_clause}".format(
table=self.table_name,
where_clause=self.where_clause
).rstrip()
Repo.db.execute(cmd, self.where_values) | 0.005277 |
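A hypothetical illustration of the statement this builds, assuming table_name is "users" and where_clause is "where id = ?":
cmd = "delete from {table} {where_clause}".format(
    table="users", where_clause="where id = ?").rstrip()
assert cmd == "delete from users where id = ?"

# With an empty where clause, rstrip() trims the trailing space:
assert "delete from {table} {where_clause}".format(
    table="users", where_clause="").rstrip() == "delete from users"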
def stopObserver(self):
""" Stops this region's observer loop.
If this is running in a subprocess, the subprocess will end automatically.
"""
self._observer.isStopped = True
self._observer.isRunning = False | 0.012146 |
def load_config(self, **kwargs):
"""Load the configuration for the user or seed it with defaults.
:return: Boolean if successful
"""
virgin_config = False
if not os.path.exists(CONFIG_PATH):
virgin_config = True
os.makedirs(CONFIG_PATH)
if not os.path.exists(CONFIG_FILE):
virgin_config = True
if not virgin_config:
self.config = json.load(open(CONFIG_FILE))
else:
self.logger.info('[!] Processing whitelists, this may take a few minutes...')
process_whitelists()
if kwargs:
self.config.update(kwargs)
if virgin_config or kwargs:
self.write_config()
if 'api_key' not in self.config:
sys.stderr.write('configuration missing API key\n')
if 'email' not in self.config:
sys.stderr.write('configuration missing email\n')
if not ('api_key' in self.config and 'email' in self.config):
sys.stderr.write('Errors have been reported. Run blockade-cfg '
'to fix these warnings.\n')
try:
last_update = datetime.strptime(self.config['whitelist_date'],
"%Y-%m-%d")
current = datetime.now()
delta = (current - last_update).days
if delta > 14:
self.logger.info('[!] Refreshing whitelists, this may take a few minutes...')
process_whitelists()
self.config['whitelist_date'] = datetime.now().strftime("%Y-%m-%d")
self.write_config()
except Exception as e:
self.logger.error(str(e))
self.logger.info('[!] Processing whitelists, this may take a few minutes...')
process_whitelists()
self.config['whitelist_date'] = datetime.now().strftime("%Y-%m-%d")
self.write_config()
return True | 0.003049 |
def _allocate(self, dut_configuration): # pylint: disable=too-many-branches
"""
Internal allocation function. Allocates a single resource based on dut_configuration.
:param dut_configuration: ResourceRequirements object which describes a required resource
:return: True
:raises: AllocationError if suitable resource was not found or if the platform was not
allowed to be used.
"""
if dut_configuration["type"] == "hardware":
dut_configuration.set("type", "mbed")
if dut_configuration["type"] == "mbed":
if not self._available_devices:
raise AllocationError("No available devices to allocate from")
dut_reqs = dut_configuration.get_requirements()
platforms = None if 'allowed_platforms' not in dut_reqs else dut_reqs[
'allowed_platforms']
platform_name = None if 'platform_name' not in dut_reqs else dut_reqs[
"platform_name"]
if platform_name is None and platforms:
platform_name = platforms[0]
if platform_name and platforms:
if platform_name not in platforms:
raise AllocationError("Platform name not in allowed platforms.")
# Enumerate through all available devices
for dev in self._available_devices:
if platform_name and dev["platform_name"] != platform_name:
self.logger.debug("Skipping device %s because of mismatching platform. "
"Required %s but device was %s", dev['target_id'],
platform_name, dev['platform_name'])
continue
if dev['state'] == 'allocated':
self.logger.debug("Skipping device %s because it was "
"already allocated", dev['target_id'])
continue
if DutDetection.is_port_usable(dev['serial_port']):
dev['state'] = "allocated"
dut_reqs['allocated'] = dev
self.logger.info("Allocated device %s", dev['target_id'])
return True
else:
self.logger.info("Could not open serial port (%s) of "
"allocated device %s", dev['serial_port'], dev['target_id'])
# Didn't find a matching device to allocate so allocation failed
raise AllocationError("No suitable local device available")
elif dut_configuration["type"] == "serial":
dut_reqs = dut_configuration.get_requirements()
if not dut_reqs.get("serial_port"):
raise AllocationError("Serial port not defined for requirement {}".format(dut_reqs))
if not DutDetection.is_port_usable(dut_reqs['serial_port']):
raise AllocationError("Serial port {} not usable".format(dut_reqs['serial_port']))
# Successful allocation, return True
return True | 0.004202 |
def set(self, target, value):
"""Set the value of this attribute for the passed object.
"""
if not self._set:
return
if self.path is None:
# There is no path defined on this resource.
# We can do no magic to set the value.
self.set = lambda *a: None
return None
if self._segments[target.__class__]:
# Attempt to resolve access to this attribute.
self.get(target)
if self._segments[target.__class__]:
# Attribute is not fully resolved; an interim segment is null.
return
# Resolve access to the parent object.
# For a single-segment path this will effectively be a no-op.
parent_getter = compose(*self._getters[target.__class__][:-1])
target = parent_getter(target)
# Make the setter.
func = self._make_setter(self.path.split('.')[-1], target.__class__)
# Apply the setter now.
func(target, value)
# Replace this function with the constructed setter.
def setter(target, value):
func(parent_getter(target), value)
self.set = setter | 0.001675 |
def dead(cls, reason=None, **kwargs):
"""
Syntax helper to construct a dead message.
"""
kwargs['data'], _ = UTF8_CODEC.encode(reason or u'')
return cls(reply_to=IS_DEAD, **kwargs) | 0.009091 |
def run(options, exit_codeword=None):
"""Actually execute the program.
Calling this method can be done from tests to simulate executing the
application from command line.
Parameters:
options -- `optionparser` from config file.
exit_codeword -- an optional exit_message that will shut down Rewind. Used
for testing.
returns -- exit code for the application. Non-zero for errors.
"""
QUERY_ENDP_OPT = 'query-bind-endpoint'
STREAM_ENDP_OPT = 'streaming-bind-endpoint'
ZMQ_NTHREADS = "zmq-nthreads"
if not options.has_section(config.DEFAULT_SECTION):
msg = "Missing default section, `{0}`."
fmsg = msg.format(config.DEFAULT_SECTION)
raise config.ConfigurationError(fmsg)
if not options.has_option(config.DEFAULT_SECTION, QUERY_ENDP_OPT):
msg = "Missing (query) bind endpoint in option file: {0}:{1}"
fmsg = msg.format(config.DEFAULT_SECTION, QUERY_ENDP_OPT)
raise config.ConfigurationError(fmsg)
queryendp = options.get(config.DEFAULT_SECTION, QUERY_ENDP_OPT).split(",")
streamendp = _get_with_fallback(options, config.DEFAULT_SECTION,
STREAM_ENDP_OPT, '').split(",")
queryendp = filter(lambda x: x.strip(), queryendp)
streamendp = filter(lambda x: x.strip(), streamendp)
try:
eventstore = config.construct_eventstore(options)
except config.ConfigurationError as e:
_logger.exception("Could instantiate event store from config file.")
raise
zmq_nthreads = _get_with_fallback(options, config.DEFAULT_SECTION,
ZMQ_NTHREADS, '3')
try:
zmq_nthreads = int(zmq_nthreads)
except ValueError:
msg = "{0}:{1} must be an integer".format(config.DEFAULT_SECTION,
ZMQ_NTHREADS)
_logger.fatal(msg)
return 1
with _zmq_context_context(zmq_nthreads) as context, \
_zmq_socket_context(context, zmq.REP, queryendp) as querysock, \
_zmq_socket_context(context, zmq.PUB,
streamendp) as streamsock:
# Executing the program in the context of ZeroMQ context as well as
# ZeroMQ sockets. Using with here to make sure we are correctly closing
# things in the correct order, particularly also if we have an
# exception or similar.
runner = _RewindRunner(eventstore, querysock, streamsock,
(exit_codeword.encode()
if exit_codeword
else None))
runner.run()
return 0 | 0.000372 |
def prefix_size(size, base=1024):
'''Return size in B (bytes), kB, MB, GB or TB.'''
if ARGS.prefix == 'None':
for i, prefix in enumerate(['', 'ki', 'Mi', 'Gi', 'Ti']):
if size < pow(base, i + 1):
return '{0} {1}B'.format(round(float(size) / pow(base, i), 1),
prefix)
else:
try:
prefix = {'': 0, 'k': 1, 'M': 2, 'G': 3, 'T': 4}[ARGS.prefix]
except KeyError:
exit('Invalid prefix.')
return '{0} {1}B'.format(round(float(size) / pow(base, prefix), 1),
ARGS.prefix) | 0.001585 |
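A standalone sketch of the same base-1024 prefix arithmetic, without the ARGS dependency (automatic prefix selection assumed):
def human_size(size, base=1024):
    # walk up the prefixes until the value fits below the next power of the base
    for i, prefix in enumerate(['', 'ki', 'Mi', 'Gi', 'Ti']):
        if size < pow(base, i + 1):
            return '{0} {1}B'.format(round(float(size) / pow(base, i), 1), prefix)

assert human_size(512) == '512.0 B'
assert human_size(1536) == '1.5 kiB'
assert human_size(5 * 1024 ** 3) == '5.0 GiB'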
def list_to_str(lst):
"""
Turn a list into a comma- and/or and-separated string.
Parameters
----------
lst : :obj:`list`
A list of strings to join into a single string.
Returns
-------
str_ : :obj:`str`
A string with commas and/or ands separating the elements from ``lst``.
"""
if len(lst) == 1:
str_ = lst[0]
elif len(lst) == 2:
str_ = ' and '.join(lst)
elif len(lst) > 2:
str_ = ', '.join(lst[:-1])
str_ += ', and {0}'.format(lst[-1])
else:
raise ValueError('List of length 0 provided.')
return str_ | 0.001626 |
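A short usage sketch of the three branches:
assert list_to_str(['apples']) == 'apples'
assert list_to_str(['apples', 'pears']) == 'apples and pears'
assert list_to_str(['apples', 'pears', 'plums']) == 'apples, pears, and plums'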
def _filter_in(self, term_list, field_name, field_type, is_not):
"""
Returns a query that matches exactly ANY term in term_list.
Notice that:
A in {B,C} <=> (A = B or A = C)
~(A in {B,C}) <=> ~(A = B or A = C)
Because OP_AND_NOT(C, D) <=> (C and ~D), then D=(A in {B,C}) requires `is_not=False`.
Assumes term is a list.
"""
query_list = [self._filter_exact(term, field_name, field_type, is_not=False)
for term in term_list]
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(),
xapian.Query(xapian.Query.OP_OR, query_list))
else:
return xapian.Query(xapian.Query.OP_OR, query_list) | 0.005175 |
def get_black(self):
"""Return blacklist packages from /etc/slpkg/blacklist
configuration file."""
blacklist = []
for read in self.black_conf.splitlines():
read = read.lstrip()
if not read.startswith("#"):
blacklist.append(read.replace("\n", ""))
return blacklist | 0.005831 |
def process_request_thread(self, mainthread):
"""obtain request from queue instead of directly from server socket"""
life_time = time.time()
nb_requests = 0
while not mainthread.killed():
if self.max_life_time > 0:
if (time.time() - life_time) >= self.max_life_time:
mainthread.add_worker(1)
return
try:
SocketServer.ThreadingTCPServer.process_request_thread(self, *self.requests.get(True, 0.5))
except Queue.Empty:
continue
else:
SocketServer.ThreadingTCPServer.process_request_thread(self, *self.requests.get())
LOG.debug("nb_requests: %d, max_requests: %d", nb_requests, self.max_requests)
nb_requests += 1
if self.max_requests > 0 and nb_requests >= self.max_requests:
mainthread.add_worker(1)
return | 0.006141 |
def find(self, query):
'''Passes the query to the upstream, if it exists'''
if self.upstream:
return self.upstream.find(query)
else:
return False | 0.010363 |
def samples(self):
"""Yield samples as dictionaries, keyed by dimensions."""
names = self.series.dimensions
for n, offset in enumerate(self.series.offsets):
dt = datetime.timedelta(microseconds=offset * 1000)
d = {"ts": self.ts + dt}
for name in names:
d[name] = getattr(self.series, name)[n]
yield d | 0.005155 |
def clear(self, apply_to='all'):
"""
Clear range values, format, fill, border, etc.
:param str apply_to: Optional. Determines the type of clear action.
The possible values are: all, formats, contents.
"""
url = self.build_url(self._endpoints.get('clear_range'))
return bool(self.session.post(url, data={'applyTo': apply_to.capitalize()})) | 0.007595 |
def charges_net_effect(self):
"""
The total effect of the net_affecting charges (note affect vs effect here).
Currently this is single currency only (AMAAS-110).
Cast to Decimal in case the result is zero (no net_affecting charges).
:return:
"""
return Decimal(sum([charge.charge_value for charge in self.charges.values()
if charge.net_affecting])) | 0.009195 |
def security_rule_absent(name, security_group, resource_group, connection_auth=None):
'''
.. versionadded:: 2019.2.0
Ensure a security rule does not exist in the network security group.
:param name:
Name of the security rule.
:param security_group:
The network security group containing the security rule.
:param resource_group:
The resource group assigned to the network security group.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
rule = __salt__['azurearm_network.security_rule_get'](
name,
security_group,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' in rule:
ret['result'] = True
ret['comment'] = 'Security rule {0} was not found.'.format(name)
return ret
elif __opts__['test']:
ret['comment'] = 'Security rule {0} would be deleted.'.format(name)
ret['result'] = None
ret['changes'] = {
'old': rule,
'new': {},
}
return ret
deleted = __salt__['azurearm_network.security_rule_delete'](name, security_group, resource_group, **connection_auth)
if deleted:
ret['result'] = True
ret['comment'] = 'Security rule {0} has been deleted.'.format(name)
ret['changes'] = {
'old': rule,
'new': {}
}
return ret
ret['comment'] = 'Failed to delete security rule {0}!'.format(name)
return ret | 0.002646 |
def close_threads(self, parent):
"""Close threads associated to parent_id"""
logger.debug("Call ThreadManager's 'close_threads'")
if parent is None:
# Closing all threads
self.pending_threads = []
threadlist = []
for threads in list(self.started_threads.values()):
threadlist += threads
else:
parent_id = id(parent)
self.pending_threads = [(_th, _id) for (_th, _id)
in self.pending_threads
if _id != parent_id]
threadlist = self.started_threads.get(parent_id, [])
for thread in threadlist:
logger.debug("Waiting for thread %r to finish" % thread)
while thread.isRunning():
# We can't terminate thread safely, so we simply wait...
QApplication.processEvents() | 0.002116 |
def zpipe(ctx):
"""build inproc pipe for talking to threads
mimic pipe used in czmq zthread_fork.
Returns a pair of PAIRs connected via inproc
"""
a = ctx.socket(zmq.PAIR)
a.linger = 0
b = ctx.socket(zmq.PAIR)
b.linger = 0
socket_set_hwm(a, 1)
socket_set_hwm(b, 1)
iface = "inproc://%s" % binascii.hexlify(os.urandom(8))
a.bind(iface)
b.connect(iface)
return a, b | 0.002375 |
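A minimal usage sketch, assuming pyzmq is installed and the socket_set_hwm, binascii and os names the helper relies on are already imported:
import zmq

ctx = zmq.Context.instance()
a, b = zpipe(ctx)        # two PAIR sockets joined over a random inproc endpoint
a.send(b"ping")
assert b.recv() == b"ping"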
def get_field_mappings(self, field):
"""Converts ES field mappings to .kibana field mappings"""
retdict = {}
retdict['indexed'] = False
retdict['analyzed'] = False
for (key, val) in iteritems(field):
if key in self.mappings:
if (key == 'type' and
(val == "long" or
val == "integer" or
val == "double" or
val == "float")):
val = "number"
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
retdict[key] = val
if key == 'index' and val != "no":
retdict['indexed'] = True
# self.pr_dbg("\t\t\tkey: %s" % key)
# self.pr_dbg("\t\t\t\tval: %s" % val)
if val == "analyzed":
retdict['analyzed'] = True
return retdict | 0.002094 |
def _refresh(self):
"""Refresh the API token using the currently bound credentials.
This is simply a convenience method to be invoked automatically if authentication fails
during normal client use.
"""
# Request and set a new API token.
new_token = self.authenticate(self._username, self._password)
self._token = new_token
logger.info('New API token received: "{}".'.format(new_token))
return self._token | 0.006289 |
def get_observed_mmax_sigma(self, default=None):
"""
:returns: the sigma for the maximum observed magnitude
"""
if not isinstance(self.data['sigmaMagnitude'], np.ndarray):
obsmaxsig = default
else:
obsmaxsig = self.data['sigmaMagnitude'][
np.argmax(self.data['magnitude'])]
return obsmaxsig | 0.005291 |
def _CountClientStatisticByLabel(self, statistic, day_buckets, cursor):
"""Returns client-activity metrics for a given statistic.
Args:
statistic: The name of the statistic, which should also be a column in the
'clients' table.
day_buckets: A set of n-day-active buckets.
cursor: MySQL cursor for executing queries.
"""
day_buckets = sorted(day_buckets)
sum_clauses = []
ping_cast_clauses = []
timestamp_buckets = []
now = rdfvalue.RDFDatetime.Now()
for day_bucket in day_buckets:
column_name = "days_active_{}".format(day_bucket)
sum_clauses.append(
"CAST(SUM({0}) AS UNSIGNED) AS {0}".format(column_name))
ping_cast_clauses.append(
"CAST(c.last_ping > FROM_UNIXTIME(%s) AS UNSIGNED) AS {}".format(
column_name))
timestamp_bucket = now - rdfvalue.Duration.FromDays(day_bucket)
timestamp_buckets.append(
mysql_utils.RDFDatetimeToTimestamp(timestamp_bucket))
query = """
SELECT j.{statistic}, j.label, {sum_clauses}
FROM (
SELECT c.{statistic} AS {statistic}, l.label AS label, {ping_cast_clauses}
FROM clients c
LEFT JOIN client_labels l USING(client_id)
WHERE c.last_ping IS NOT NULL AND l.owner_username = 'GRR'
) AS j
GROUP BY j.{statistic}, j.label
""".format(
statistic=statistic,
sum_clauses=", ".join(sum_clauses),
ping_cast_clauses=", ".join(ping_cast_clauses))
cursor.execute(query, timestamp_buckets)
counts = {}
for response_row in cursor.fetchall():
statistic_value, client_label = response_row[:2]
for i, num_actives in enumerate(response_row[2:]):
if num_actives <= 0:
continue
stats_key = (statistic_value, client_label, day_buckets[i])
counts[stats_key] = num_actives
return counts | 0.005876 |
def post(self, result_id, project_id):
"""POST /api/v1/results/<int:id>/commands."""
result = db.session.query(Result).filter_by(id=result_id).first()
if result is None:
return jsonify({
'result': None,
'message': 'No interface defined for URL.'
}), 404
job_status = CommandsState.job_status(result.path_name)
if job_status != JobStatus.RUNNING:
if job_status == JobStatus.NO_EXTENSION_ERROR:
return jsonify({
'message': '\'CommandsExtension\' is not set or disabled.'
}), 400
elif job_status == JobStatus.INITIALIZED:
return jsonify({
'message': 'The target training job has not run, yet'
}), 400
elif job_status == JobStatus.STOPPED:
return jsonify({
'message': 'The target training job has already stopped'
}), 400
else:
return jsonify({
'message': 'Cannot get the target training job status'
}), 400
request_json = request.get_json()
if request_json is None:
return jsonify({
'message': 'Empty request.'
}), 400
command_name = request_json.get('name', None)
if command_name is None:
return jsonify({
'message': 'Name is required.'
}), 400
schedule = request_json.get('schedule', None)
if not CommandItem.is_valid_schedule(schedule):
return jsonify({
'message': 'Schedule is invalid.'
}), 400
command = CommandItem(
name=command_name,
)
command.set_request(
CommandItem.REQUEST_OPEN,
request_json.get('body', None),
request_json.get('schedule', None)
)
commands = CommandItem.load_commands(result.path_name)
commands.append(command)
CommandItem.dump_commands(commands, result.path_name)
new_result = crawl_result(result, force=True)
new_result_dict = new_result.serialize
return jsonify({'commands': new_result_dict['commands']}) | 0.000873 |
def call_on_commit(self, callback):
"""Call a callback upon successful commit of a transaction.
If not in a transaction, the callback is called immediately.
In a transaction, multiple callbacks may be registered and will be
called once the transaction commits, in the order in which they
were registered. If the transaction fails, the callbacks will not
be called.
If the callback raises an exception, it bubbles up normally. This
means: If the callback is called immediately, any exception it
raises will bubble up immediately. If the call is postponed until
commit, remaining callbacks will be skipped and the exception will
bubble up through the transaction() call. (However, the
transaction is already committed at that point.)
"""
if not self.in_transaction():
callback()
else:
self._on_commit_queue.append(callback) | 0.003333 |
def register_model(self, model_id, properties, parameters, outputs, connector):
"""Create an experiment object for the subject and image group. Objects
are referenced by their identifier. The reference to a functional data
object is optional.
Raises ValueError if no valid experiment name is given in property list.
Parameters
----------
model_id : string
Unique model identifier
properties : Dictionary
Dictionary of model specific properties.
parameters : list(scodata.attribute.AttributeDefinition)
List of attribute definitions for model run parameters
outputs : ModelOutputs
Description of model outputs
connector : dict
Connection information to communicate with model workers
Returns
-------
ModelHandle
Handle for created model object in database
"""
# Create object handle and store it in database before returning it
obj = ModelHandle(model_id, properties, parameters, outputs, connector)
self.insert_object(obj)
return obj | 0.002586 |
def get_minions():
'''
Return a list of minions
'''
query = '''SELECT DISTINCT minion_id
FROM {keyspace}.minions;'''.format(keyspace=_get_keyspace())
ret = []
# cassandra_cql.cql_query may raise a CommandExecutionError
try:
data = __salt__['cassandra_cql.cql_query'](query)
if data:
for row in data:
minion = row.get('minion_id')
if minion:
ret.append(minion)
except CommandExecutionError:
log.critical('Could not get the list of minions.')
raise
except Exception as e:
log.critical(
'Unexpected error while getting list of minions: %s', e)
raise
return ret | 0.001353 |
def cleanup(self):
"""Cleanup all the expired keys"""
keys = self.client.smembers(self.keys_container)
for key in keys:
entry = self.client.get(key)
if entry:
entry = pickle.loads(entry)
if self._is_expired(entry, self.timeout):
self.delete_entry(key) | 0.005698 |
def update_column(self, header, column):
"""Update a column named `header` in the table.
If the length of column is smaller than the number of rows, let's say
`k`, only the first `k` values in the column are updated.
Parameters
----------
header : str
Header of the column
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If length of `column` is shorter than number of rows.
ValueError:
If no column exists with title `header`.
"""
if not isinstance(header, basestring):
raise TypeError("header must be of type str")
index = self.get_column_index(header)
for row, new_item in zip(self._table, column):
row[index] = new_item | 0.002378 |
def keys_to_string(data):
"""
Function to convert all the unicode keys into string keys
"""
if isinstance(data, dict):
for key in list(data.keys()):
if isinstance(key, six.string_types):
value = data[key]
val = keys_to_string(value)
del data[key]
data[key.encode("utf8", "ignore")] = val
return data | 0.002494 |
def parse_directive_definition(lexer: Lexer) -> DirectiveDefinitionNode:
"""InputObjectTypeExtension"""
start = lexer.token
description = parse_description(lexer)
expect_keyword(lexer, "directive")
expect_token(lexer, TokenKind.AT)
name = parse_name(lexer)
args = parse_argument_defs(lexer)
expect_keyword(lexer, "on")
locations = parse_directive_locations(lexer)
return DirectiveDefinitionNode(
description=description,
name=name,
arguments=args,
locations=locations,
loc=loc(lexer, start),
) | 0.001733 |
def close_all_pages(self):
"""Closes all tabs of the states editor"""
states_to_be_closed = []
for state_identifier in self.tabs:
states_to_be_closed.append(state_identifier)
for state_identifier in states_to_be_closed:
self.close_page(state_identifier, delete=False) | 0.006192 |
def self_build(self, field_pos_list=None):
# type: (Any) -> str
"""self_build is overridden because type and len are determined at
build time, based on the "data" field internal type
"""
if self.getfieldval('type') is None:
self.type = 1 if isinstance(self.getfieldval('data'), HPackZString) else 0 # noqa: E501
return super(HPackHdrString, self).self_build(field_pos_list) | 0.006912 |
def fetch(self, country_code=values.unset, type=values.unset,
add_ons=values.unset, add_ons_data=values.unset):
"""
Fetch a PhoneNumberInstance
:param unicode country_code: The ISO country code of the phone number
:param unicode type: The type of information to return
:param unicode add_ons: The unique_name of an Add-on you would like to invoke
:param dict add_ons_data: Data specific to the add-on you would like to invoke
:returns: Fetched PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberInstance
"""
params = values.of({
'CountryCode': country_code,
'Type': serialize.map(type, lambda e: e),
'AddOns': serialize.map(add_ons, lambda e: e),
})
params.update(serialize.prefixed_collapsible_map(add_ons_data, 'AddOns'))
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return PhoneNumberInstance(self._version, payload, phone_number=self._solution['phone_number'], ) | 0.006244 |
def _get_edges(self):
"""Get the edges for the current surface.
If they haven't been computed yet, first compute and store them.
This is provided as a means for internal calls to get the edges
without copying (since :attr:`.edges` copies before giving to
a user to keep the stored data immutable).
Returns:
Tuple[~bezier.curve.Curve, ~bezier.curve.Curve, \
~bezier.curve.Curve]: The edges of
the surface.
"""
if self._edges is None:
self._edges = self._compute_edges()
return self._edges | 0.003247 |
def _closing_bracket_index(self, text, bpair=('(', ')')):
"""Return the index of the closing bracket that matches the opening bracket at the start of the text."""
level = 1
for i, char in enumerate(text[1:]):
if char == bpair[0]:
level += 1
elif char == bpair[1]:
level -= 1
if level == 0:
return i + 1 | 0.007317 |
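A standalone sketch of the same bracket-matching walk (the method above assumes an opening bracket at index 0 of the text):
def closing_bracket_index(text, bpair=('(', ')')):
    level = 1
    for i, char in enumerate(text[1:]):
        if char == bpair[0]:
            level += 1
        elif char == bpair[1]:
            level -= 1
            if level == 0:
                return i + 1

text = "(a (b) c) d"
assert closing_bracket_index(text) == 8 and text[8] == ')'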
def export_to_crtomo_seit_manager(self, grid):
"""Return a ready-initialized seit-manager object from the CRTomo
tools. This function only works if the crtomo_tools are installed.
"""
import crtomo
g = self.data.groupby('frequency')
seit_data = {}
for name, item in g:
print(name, item.shape, item.size)
if item.shape[0] > 0:
seit_data[name] = item[
['a', 'b', 'm', 'n', 'r', 'rpha']
].values
seit = crtomo.eitMan(grid=grid, seit_data=seit_data)
return seit | 0.003306 |
def get_dependencies_from_cache(ireq):
"""Retrieves dependencies for the given install requirement from the dependency cache.
:param ireq: A single InstallRequirement
:type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str) or None
"""
if ireq.editable or not is_pinned_requirement(ireq):
return
if ireq not in DEPENDENCY_CACHE:
return
cached = set(DEPENDENCY_CACHE[ireq])
# Preserving sanity: Run through the cache and make sure every entry is
# valid. If this fails, something is wrong with the cache. Drop it.
try:
broken = False
for line in cached:
dep_ireq = pip_shims.shims.InstallRequirement.from_line(line)
name = canonicalize_name(dep_ireq.name)
if _marker_contains_extra(dep_ireq):
broken = True # The "extra =" marker breaks everything.
elif name == canonicalize_name(ireq.name):
broken = True # A package cannot depend on itself.
if broken:
break
except Exception:
broken = True
if broken:
del DEPENDENCY_CACHE[ireq]
return
return cached | 0.001554 |
def drop_matching_records(self, check):
"""Remove a record from the DB."""
matches = self._match(check)
for m in matches:
del self._records[m['msg_id']] | 0.010638 |
def verbosity(self, *args):
'''
get/set the verbosity level.
The verbosity level filters messages that are output
to the console. Only messages tagged with a verbosity
less-than-or-equal-to the class verbosity are output.
This does not affect output to non-console devices
such as files or remote sockets.
verbosity(): returns the current level
verbosity(<N>): sets the verbosity to <N>
'''
if len(args):
self._verbosity = args[0]
else:
return self._verbosity | 0.006601 |
def select_one_album(albums):
"""Display the albums returned by search api.
:params albums: API['result']['albums']
:return: a Album object.
"""
if len(albums) == 1:
select_i = 0
else:
table = PrettyTable(['Sequence', 'Album Name', 'Artist Name'])
for i, album in enumerate(albums, 1):
table.add_row([i, album['name'], album['artist']['name']])
click.echo(table)
select_i = click.prompt('Select one album', type=int, default=1)
while select_i < 1 or select_i > len(albums):
select_i = click.prompt('Error Select! Select Again', type=int)
album_id = albums[select_i-1]['id']
album_name = albums[select_i-1]['name']
album = Album(album_id, album_name)
return album | 0.002353 |
def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=None):
"""Create a leaf Fake directory + create any non-existent parent dirs.
Args:
dir_name: (str) Name of directory to create.
mode: (int) Mode to create directory (and any necessary parent
directories) with. This argument defaults to 0o777.
The umask is applied to this mode.
exist_ok: (boolean) If exist_ok is False (the default), an OSError
is raised if the target directory already exists.
New in Python 3.2.
Raises:
OSError: if the directory already exists and exist_ok=False, or as
per :py:meth:`FakeFilesystem.create_dir`.
"""
if exist_ok is None:
exist_ok = False
elif sys.version_info < (3, 2):
raise TypeError("makedir() got an unexpected "
"keyword argument 'exist_ok'")
self.filesystem.makedirs(dir_name, mode, exist_ok) | 0.001951 |
def check_messages(msgs, cmd, value=None):
"""Check if specific message is present.
Parameters
----------
cmd : string
Command to check for in bytestring from microscope CAM interface. If
``value`` is falsey, value of received command does not matter.
value : string
Check if ``cmd:value`` is received.
Returns
-------
collections.OrderedDict
Correct message or None if no correct message is found.
"""
for msg in msgs:
if value and msg.get(cmd) == value:
return msg
if not value and msg.get(cmd):
return msg
return None | 0.001563 |
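A small usage sketch with hypothetical messages:
msgs = [{'inf': 'ready'}, {'cmd': 'startscan'}, {'cmd': 'stopscan'}]
assert check_messages(msgs, 'cmd') == {'cmd': 'startscan'}
assert check_messages(msgs, 'cmd', value='stopscan') == {'cmd': 'stopscan'}
assert check_messages(msgs, 'cmd', value='autofocus') is None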
def api_user(request, userPk, key=None, hproPk=None):
"""Return information about an user"""
if not check_api_key(request, key, hproPk):
return HttpResponseForbidden()
if settings.PIAPI_STANDALONE:
if not settings.PIAPI_REALUSERS:
user = generate_user(pk=userPk)
if user is None:
return HttpResponseNotFound()
else:
user = get_object_or_404(DUser, pk=userPk)
hproject = None
else:
from users.models import TechUser
user = get_object_or_404(TechUser, pk=userPk)
(_, _, hproject) = getPlugItObject(hproPk)
user.ebuio_member = hproject.isMemberRead(user)
user.ebuio_admin = hproject.isMemberWrite(user)
user.subscription_labels = _get_subscription_labels(user, hproject)
retour = {}
# Append properties for the user data
for prop in settings.PIAPI_USERDATA:
if hasattr(user, prop):
retour[prop] = getattr(user, prop)
retour['id'] = str(retour['pk'])
# Append the users organisation and access levels
orgas = {}
if user:
limitedOrgas = []
if hproject and hproject.plugItLimitOrgaJoinable:
# Get List of Plugit Available Orgas first
projectOrgaIds = hproject.plugItOrgaJoinable.order_by('name').values_list('pk', flat=True)
for (orga, isAdmin) in user.getOrgas(distinct=True):
if orga.pk in projectOrgaIds:
limitedOrgas.append((orga, isAdmin))
elif hasattr(user, 'getOrgas'):
limitedOrgas = user.getOrgas(distinct=True)
# Create List
orgas = [{'id': orga.pk, 'name': orga.name, 'codops': orga.ebu_codops, 'is_admin': isAdmin} for (orga, isAdmin)
in limitedOrgas]
retour['orgas'] = orgas
return HttpResponse(json.dumps(retour), content_type="application/json") | 0.001571 |
def _build_ref_data_names(self, project, build_system):
'''
We want all reference data names for every task that runs on a specific project.
For example:
* Buildbot - "Windows 8 64-bit mozilla-inbound debug test web-platform-tests-1"
* TaskCluster = "test-linux64/opt-mochitest-webgl-e10s-1"
'''
ignored_jobs = []
ref_data_names = {}
runnable_jobs = list_runnable_jobs(project)
for job in runnable_jobs:
# get testtype e.g. web-platform-tests-4
testtype = parse_testtype(
build_system_type=job['build_system_type'],
job_type_name=job['job_type_name'],
platform_option=job['platform_option'],
ref_data_name=job['ref_data_name']
)
if not valid_platform(job['platform']):
continue
if is_job_blacklisted(testtype):
ignored_jobs.append(job['ref_data_name'])
continue
key = unique_key(testtype=testtype,
buildtype=job['platform_option'],
platform=job['platform'])
if build_system == '*':
ref_data_names[key] = job['ref_data_name']
elif job['build_system_type'] == build_system:
ref_data_names[key] = job['ref_data_name']
for ref_data_name in sorted(ignored_jobs):
logger.info('Ignoring %s', ref_data_name)
return ref_data_names | 0.002591 |
def bitterness(self, ibu_method, early_og, batch_size):
"Calculate bitterness based on chosen method"
if ibu_method == "tinseth":
bitterness = 1.65 * math.pow(0.000125, early_og - 1.0) * ((1 - math.pow(math.e, -0.04 * self.time)) / 4.15) * ((self.alpha / 100.0 * self.amount * 1000000) / batch_size) * self.utilization_factor()
elif ibu_method == "rager":
utilization = 18.11 + 13.86 * math.tanh((self.time - 31.32) / 18.27)
adjustment = max(0, (early_og - 1.050) / 0.2)
bitterness = self.amount * 100 * utilization * self.utilization_factor() * self.alpha / (batch_size * (1 + adjustment))
else:
raise Exception("Unknown IBU method %s!" % ibu_method)
return bitterness | 0.006468 |
def polarization_vector(phi, theta, alpha, beta, p,
numeric=False, abstract=False):
"""This function returns a unitary vector describing the polarization
of plane waves.:
INPUT:
- ``phi`` - The spherical coordinates azimuthal angle of the wave vector\
k.
- ``theta`` - The spherical coordinates polar angle of the wave vector k.
- ``alpha`` - The rotation of a half-wave plate.
- ``beta`` - The rotation of a quarter-wave plate.
- ``p`` - either 1 or -1 to indicate whether to return epsilon^(+) or\
epsilon^(-) respectively.
If alpha and beta are zero, the result will be linearly polarized light
along some fast axis. alpha and beta are measured from that fast axis.
Propagation towards y, linear polarization (for pi transitions):
>>> from sympy import pi
>>> polarization_vector(phi=pi/2, theta=pi/2, alpha=pi/2, beta= 0,p=1)
Matrix([
[0],
[0],
[1]])
Propagation towards +z, circular polarization (for sigma + transitions):
>>> polarization_vector(phi=0, theta= 0, alpha=pi/2, beta= pi/8,p=1)
Matrix([
[ -sqrt(2)/2],
[-sqrt(2)*I/2],
[ 0]])
Propagation towards -z, circular polarization for sigma + transitions:
>>> polarization_vector(phi=0, theta=pi, alpha= 0, beta=-pi/8,p=1)
Matrix([
[ -sqrt(2)/2],
[-sqrt(2)*I/2],
[ 0]])
Components + and - are complex conjugates of each other
>>> from sympy import symbols
>>> phi, theta, alpha, beta = symbols("phi theta alpha beta", real=True)
>>> ep = polarization_vector(phi,theta,alpha,beta, 1)
>>> em = polarization_vector(phi,theta,alpha,beta,-1)
>>> ep-em.conjugate()
Matrix([
[0],
[0],
[0]])
We can also define abstract polarization vectors without explicit \
components
>>> polarization_vector(0, 0, 0, 0, 1, abstract=True)
epsilonp
>>> polarization_vector(0, 0, 0, 0, -1, abstract=True)
epsilonm
"""
if abstract:
Nl = symbols("N_l", integer=True)
if p == 1:
epsilon = Vector3D(IndexedBase("epsilonp", shape=(Nl,)))
else:
epsilon = Vector3D(IndexedBase("epsilonm", shape=(Nl,)))
return epsilon
epsilon = Matrix([cos(2*beta), p*I*sin(2*beta), 0])
R1 = Matrix([[cos(2*alpha), -sin(2*alpha), 0],
[sin(2*alpha), cos(2*alpha), 0],
[0, 0, 1]])
R2 = Matrix([[cos(theta), 0, sin(theta)],
[0, 1, 0],
[-sin(theta), 0, cos(theta)]])
R3 = Matrix([[cos(phi), -sin(phi), 0],
[sin(phi), cos(phi), 0],
[0, 0, 1]])
epsilon = R3*R2*R1*epsilon
if numeric:
epsilon = nparray([complex(epsilon[i]) for i in range(3)])
return epsilon | 0.000355 |
def amplitude(self, caldb, calv, atten=0):
"""Calculates the voltage amplitude for this stimulus, using
internal intensity value and the given reference intensity & voltage
:param caldb: calibration intensity in dbSPL
:type caldb: float
:param calv: calibration voltage that was used to record the intensity provided
:type calv: float
"""
amp = (10 ** (float(self._intensity+atten-caldb)/20)*calv)
return amp | 0.006237 |
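A worked example of the dB-to-voltage scaling above, with hypothetical calibration numbers: a request 20 dB below the calibration level comes out at one tenth of the calibration voltage.
caldb, calv = 100.0, 1.0      # calibration: 100 dB SPL was recorded at 1.0 V
intensity, atten = 80.0, 0.0  # requested level is 20 dB below calibration
amp = (10 ** ((intensity + atten - caldb) / 20.0)) * calv
assert abs(amp - 0.1) < 1e-12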
def service_delete(auth=None, **kwargs):
'''
Delete a service
CLI Example:
.. code-block:: bash
salt '*' keystoneng.service_delete name=glance
salt '*' keystoneng.service_delete name=39cc1327cdf744ab815331554430e8ec
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.delete_service(**kwargs) | 0.005348 |
def argument_types(self):
"""Retrieve a container for the non-variadic arguments for this type.
The returned object is iterable and indexable. Each item in the
container is a Type instance.
"""
class ArgumentsIterator(collections.Sequence):
def __init__(self, parent):
self.parent = parent
self.length = None
def __len__(self):
if self.length is None:
self.length = conf.lib.clang_getNumArgTypes(self.parent)
return self.length
def __getitem__(self, key):
# FIXME Support slice objects.
if not isinstance(key, int):
raise TypeError("Must supply a non-negative int.")
if key < 0:
raise IndexError("Only non-negative indexes are accepted.")
if key >= len(self):
raise IndexError("Index greater than container length: "
"%d > %d" % ( key, len(self) ))
result = conf.lib.clang_getArgType(self.parent, key)
if result.kind == TypeKind.INVALID:
raise IndexError("Argument could not be retrieved.")
return result
assert self.kind == TypeKind.FUNCTIONPROTO
return ArgumentsIterator(self) | 0.002874 |
def isalive(self):
'''This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not. It can take literally
SECONDS for Solaris to return the right status. '''
if self.terminated:
return False
if self.flag_eof:
# This is for Linux, which requires the blocking form
# of waitpid to get the status of a defunct process.
# This is super-lame. The flag_eof would have been set
# in read_nonblocking(), so this should be safe.
waitpid_options = 0
else:
waitpid_options = os.WNOHANG
try:
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError as e:
# No child processes
if e.errno == errno.ECHILD:
raise PtyProcessError('isalive() encountered condition ' +
'where "terminated" is 0, but there was no child ' +
'process. Did someone else call waitpid() ' +
'on our process?')
else:
raise
# I have to do this twice for Solaris.
# I can't even believe that I figured this out...
# If waitpid() returns 0 it means that no child process
# wishes to report, and the value of status is undefined.
if pid == 0:
try:
### os.WNOHANG) # Solaris!
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError as e: # pragma: no cover
# This should never happen...
if e.errno == errno.ECHILD:
raise PtyProcessError('isalive() encountered condition ' +
'that should never happen. There was no child ' +
'process. Did someone else call waitpid() ' +
'on our process?')
else:
raise
# If pid is still 0 after two calls to waitpid() then the process
# really is alive. This seems to work on all platforms, except for
# Irix which seems to require a blocking call on waitpid or select,
# so I let read_nonblocking take care of this situation
# (unfortunately, this requires waiting through the timeout).
if pid == 0:
return True
if os.WIFEXITED(status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED(status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED(status):
raise PtyProcessError('isalive() encountered condition ' +
'where child process is stopped. This is not ' +
'supported. Is some other process attempting ' +
'job control with our child pid?')
return False | 0.003605 |
def get_package_status(owner, repo, identifier):
"""Get the status for a package in a repository."""
client = get_packages_api()
with catch_raise_api_exception():
data, _, headers = client.packages_status_with_http_info(
owner=owner, repo=repo, identifier=identifier
)
ratelimits.maybe_rate_limit(client, headers)
# pylint: disable=no-member
# Pylint detects the returned value as a tuple
return (
data.is_sync_completed,
data.is_sync_failed,
data.sync_progress,
data.status_str,
data.stage_str,
data.status_reason,
) | 0.001592 |
def render(self):
""" Returns the rendered release notes from all parsers as a string """
release_notes = []
for parser in self.parsers:
parser_content = parser.render()
if parser_content is not None:
release_notes.append(parser_content)
return u"\r\n\r\n".join(release_notes) | 0.005747 |
def get_svnpath():
'''
This subroutine gives back the path of the whole svn tree
installation, which is necessary for the script to run.
'''
svnpathtmp = __file__
splitsvnpath = svnpathtmp.split('/')
if len(splitsvnpath) == 1:
svnpath = os.path.abspath('.') + '/../../'
else:
svnpath = ''
for i in range(len(splitsvnpath)-3):
svnpath += splitsvnpath[i] + '/'
return svnpath | 0.002242 |
def removeSessionWithKey(self, key):
"""
Remove a persistent session, if it exists.
@type key: L{bytes}
@param key: The persistent session identifier.
"""
self.store.query(
PersistentSession,
PersistentSession.sessionKey == key).deleteFromStore() | 0.00627 |
def configure_stack():
"""Set up the OpenDNP3 configuration."""
stack_config = asiodnp3.OutstationStackConfig(opendnp3.DatabaseSizes.AllTypes(10))
stack_config.outstation.eventBufferConfig = opendnp3.EventBufferConfig().AllTypes(10)
stack_config.outstation.params.allowUnsolicited = True
stack_config.link.LocalAddr = 10
stack_config.link.RemoteAddr = 1
stack_config.link.KeepAliveTimeout = openpal.TimeDuration().Max()
return stack_config | 0.007952 |
def render_html(html_str):
"""
makes a temporary html rendering
"""
import utool as ut
from os.path import abspath
import webbrowser
try:
html_str = html_str.decode('utf8')
except Exception:
pass
html_dpath = ut.ensure_app_resource_dir('utool', 'temp_html')
fpath = abspath(ut.unixjoin(html_dpath, 'temp.html'))
url = 'file://' + fpath
ut.writeto(fpath, html_str)
webbrowser.open(url) | 0.002203 |
def _shutdown_proc(p, timeout):
"""Wait for a proc to shut down, then terminate or kill it after `timeout`."""
freq = 10 # how often to check per second
for _ in range(1 + timeout * freq):
ret = p.poll()
if ret is not None:
logging.info("Shutdown gracefully.")
return ret
time.sleep(1 / freq)
logging.warning("Killing the process.")
p.kill()
return p.wait() | 0.025381 |
def custom_getter_router(custom_getter_map, name_fn):
"""Creates a custom getter than matches requests to dict of custom getters.
Custom getters are callables which implement the
[custom getter API]
(https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/get_variable).
The returned custom getter dispatches calls based on pattern matching the
name of the requested variable to the keys of custom_getter_map. For example,
{
".*/w": snt.custom_getters.stop_gradient,
}
will match all variables named with the suffix "/w". The `name_fn` is
provided to allow processing of the name, such as stripping off a scope prefix
before matching.
Args:
custom_getter_map: Mapping of regular expressions to custom getter
functions.
name_fn: Callable to map variable name through before matching to regular
expressions. This might, for example, strip off a scope prefix.
Returns:
A custom getter.
Raises:
TypeError: If an entry in `custom_getter_map` is not a callable function.
"""
for custom_getter in custom_getter_map.values():
if not callable(custom_getter):
raise TypeError("Given custom_getter is not callable.")
def _custom_getter(getter, name, *args, **kwargs):
"""A custom getter that routes based on pattern matching the variable name.
Args:
getter: The true getter to call.
name: The fully qualified variable name, i.e. including all scopes.
*args: Arguments, in the same format as tf.get_variable.
**kwargs: Keyword arguments, in the same format as tf.get_variable.
Returns:
The return value of the appropriate custom getter. If there are no
matches, it returns the return value of `getter`.
Raises:
KeyError: If more than one pattern matches the variable name.
"""
bare_name = name_fn(name)
matches = [
(custom_getter, pattern)
for pattern, custom_getter in custom_getter_map.items()
if re.match(pattern, bare_name) is not None]
num_matches = len(matches)
if num_matches == 0:
return getter(name, *args, **kwargs)
elif num_matches == 1:
custom_getter, pattern = matches[0]
return custom_getter(getter, name, *args, **kwargs)
else:
raise KeyError("More than one custom_getter matched {} ({}): {}".format(
name, bare_name, [pattern for _, pattern in matches]))
return _custom_getter | 0.004521 |
def getConf(self, conftype):
'''
conftype must be a Zooborg constant
'''
zooconf={}
if conftype not in [ZooConst.CLIENT, ZooConst.WORKER, ZooConst.BROKER]:
raise Exception('Zooborg.getConf: invalid type')
self.initconn()
if conftype in [ZooConst.CLIENT, ZooConst.WORKER]:
zooconf={'broker': {'connectionstr': None}}
zoopath='/distark/' + conftype + '/conf/broker/connectionstr'
zooconf['broker']['connectionstr'], stat = self.zk.get(zoopath)
if conftype in [ZooConst.BROKER]:
zooconf={'bindstr': None}
zoopath='/distark/' + conftype + '/conf/bindstr'
zooconf['bindstr'], stat = self.zk.get(zoopath)
return zooconf | 0.009067 |
def refreshButton(self):
"""
Refreshes the button for this toolbar.
"""
collapsed = self.isCollapsed()
btn = self._collapseButton
if not btn:
return
btn.setMaximumSize(MAX_SIZE, MAX_SIZE)
# set up a vertical scrollbar
if self.orientation() == Qt.Vertical:
btn.setMaximumHeight(12)
else:
btn.setMaximumWidth(12)
icon = ''
# collapse/expand a vertical toolbar
if self.orientation() == Qt.Vertical:
if collapsed:
self.setFixedWidth(self._collapsedSize)
btn.setMaximumHeight(MAX_SIZE)
btn.setArrowType(Qt.RightArrow)
else:
self.setMaximumWidth(MAX_SIZE)
self._precollapseSize = None
btn.setMaximumHeight(12)
btn.setArrowType(Qt.LeftArrow)
else:
if collapsed:
self.setFixedHeight(self._collapsedSize)
btn.setMaximumWidth(MAX_SIZE)
btn.setArrowType(Qt.DownArrow)
else:
self.setMaximumHeight(1000)
self._precollapseSize = None
btn.setMaximumWidth(12)
btn.setArrowType(Qt.UpArrow)
for index in range(1, self.layout().count()):
item = self.layout().itemAt(index)
if not item.widget():
continue
if collapsed:
item.widget().setMaximumSize(0, 0)
else:
item.widget().setMaximumSize(MAX_SIZE, MAX_SIZE)
if not self.isCollapsable():
btn.hide()
else:
btn.show() | 0.007143 |
def queuedb_find(path, queue_id, name, offset=None, limit=None):
"""
Find a record by name and queue ID.
Return the rows on success (empty list if not found)
Raise on error
"""
return queuedb_findall(path, queue_id, name=name, offset=offset, limit=limit) | 0.007194 |
def AddConnectedPeer(self, peer):
"""
Add a new connect peer to the known peers list.
Args:
peer (NeoNode): instance.
"""
# if present
self.RemoveFromQueue(peer.address)
self.AddKnownAddress(peer.address)
if len(self.Peers) > settings.CONNECTED_PEER_MAX:
peer.Disconnect("Max connected peers reached", isDead=False)
if peer not in self.Peers:
self.Peers.append(peer)
else:
# either peer is already in the list and it has reconnected before it timed out on our side
# or it's trying to connect multiple times
# or we hit the max connected peer count
self.RemoveKnownAddress(peer.address)
peer.Disconnect() | 0.003831 |
def _generate(cls, strategy, params):
"""generate the object.
Args:
params (dict): attributes to use for generating the object
strategy: the strategy to use
"""
if cls._meta.abstract:
raise errors.FactoryError(
"Cannot generate instances of abstract factory %(f)s; "
"Ensure %(f)s.Meta.model is set and %(f)s.Meta.abstract "
"is either not set or False." % dict(f=cls.__name__))
step = builder.StepBuilder(cls._meta, params, strategy)
return step.build() | 0.003401 |
def eval_conditions(conditions=None, data={}):
'''
Evaluates conditions and returns Boolean value.
Args:
conditions (tuple) for the format of the tuple, see below
data (dict) the keys of which can be used in conditions
Returns:
(boolean)
Raises:
ValueError if an invalid operator value is specified
TypeError if:
conditions are not a 3-item tuple
the arguments of the condition don't have the same type
e.g. ('abc', 'eq', 3)
if a boolean operator does not get boolean arguments
e.g. (True, 'and', 15)
The format of the condition tuple is:
(arg1, op, arg2)
where:
arg1, arg2 can be numerical values, strings or condition tuples
op is a valid operator from the operator module
If arg is a string, and the string is a key in <data> it is treated as
a variable with value data[arg].
Notes:
* If no conditions are specified True is returned.
* empty or 0 values do *not* evaluate to booleans
'''
#CONSIDER: implementing addition/subtraction/multiplication/division
if not conditions:
return True
if isinstance(conditions, str) or isinstance(conditions, unicode):
conditions = str2tuple(conditions)
if not isinstance(conditions, tuple) or not len(conditions) == 3:
raise TypeError('conditions must be a tuple with 3 items.')
arg1 = conditions[0]
op = conditions[1]
arg2 = conditions[2]
if arg1 in data:
arg1 = data[arg1]
elif isinstance(arg1, tuple):
arg1 = eval_conditions(arg1, data)
if arg2 in data:
arg2 = data[arg2]
elif isinstance(arg2, tuple):
arg2 = eval_conditions(arg2, data)
if op in ('lt', 'le', 'eq', 'ne', 'ge', 'gt'):
if not (type(arg1) in (float, int) and type(arg2) in (float,int)) and \
type(arg1) != type(arg2):
raise TypeError('both arguments must have the same type {}, {}'.\
format(arg1, arg2))
elif op in ('and', 'or'):
if not isinstance(arg1, bool) or not isinstance(arg2, bool):
raise TypeError('boolean operator {} needs boolean arguments {},'\
' {}'.format(op, arg1, arg2))
op += '_'
else:
raise ValueError('operator {} not supported', op)
return getattr(operator, op)(arg1, arg2) | 0.003279 |
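A short usage sketch with nested condition tuples (hypothetical data; a Python 2 environment is assumed since the function references `unicode`):
data = {'x': 10, 'y': 3}
assert eval_conditions(('x', 'gt', 5), data) is True
assert eval_conditions((('x', 'gt', 5), 'and', ('y', 'lt', 2)), data) is False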
def run_changed_file_cmd(cmd, fp, pretty):
""" running commands on changes.
pretty the parsed file
"""
with open(fp) as f:
raw = f.read()
# go sure regarding quotes:
for ph in (dir_mon_filepath_ph, dir_mon_content_raw,
dir_mon_content_pretty):
if ph in cmd and not ('"%s"' % ph) in cmd \
and not ("'%s'" % ph) in cmd:
cmd = cmd.replace(ph, '"%s"' % ph)
cmd = cmd.replace(dir_mon_filepath_ph, fp)
print col('Running %s' % cmd, H1)
for r, what in ((dir_mon_content_raw, raw),
(dir_mon_content_pretty, pretty)):
cmd = cmd.replace(r, what.encode('base64'))
# yeah, i know, sub bla bla...
if os.system(cmd):
print col('(the command failed)', R) | 0.002522 |
def find(cls, channel, start, end, frametype=None, pad=None,
scaled=None, dtype=None, nproc=1, verbose=False, **readargs):
"""Find and read data from frames for a channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the name of the channel to read, or a `Channel` object.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS start time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
GPS end time of required data,
any input parseable by `~gwpy.time.to_gps` is fine
frametype : `str`, optional
name of frametype in which this channel is stored, will search
for containing frame types if necessary
pad : `float`, optional
value with which to fill gaps in the source data,
by default gaps will result in a `ValueError`.
scaled : `bool`, optional
apply slope and bias calibration to ADC data, for non-ADC data
this option has no effect.
nproc : `int`, optional, default: `1`
number of parallel processes to use, serial process by
default.
dtype : `numpy.dtype`, `str`, `type`, or `dict`
numeric data type for returned data, e.g. `numpy.float`, or
`dict` of (`channel`, `dtype`) pairs
allow_tape : `bool`, optional, default: `True`
allow reading from frame files on (slow) magnetic tape
verbose : `bool`, optional
print verbose output about read progress, if ``verbose``
is specified as a string, this defines the prefix for the
progress meter
**readargs
any other keyword arguments to be passed to `.read()`
"""
return cls.DictClass.find(
[channel], start, end,
frametype=frametype,
verbose=verbose,
pad=pad,
scaled=scaled,
dtype=dtype,
nproc=nproc,
**readargs
)[str(channel)] | 0.001391 |
def run_from_argv(self, argv):
"""
Runs command for given arguments.
:param argv: arguments
"""
parser = self.get_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
self.execute(*args, **options.__dict__) | 0.007168 |
def _set_src_port_any(self, v, load=False):
"""
Setter method for src_port_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/src_port_any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_src_port_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_src_port_any() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="src-port-any", rest_name="src-port-any", parent=self, choice=(u'choice-src-port', u'case-src-port-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src-port-any', u'display-when': u'(../dst-port) or (../dst-port-any)'}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """src_port_any must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="src-port-any", rest_name="src-port-any", parent=self, choice=(u'choice-src-port', u'case-src-port-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src-port-any', u'display-when': u'(../dst-port) or (../dst-port-any)'}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)""",
})
self.__src_port_any = t
if hasattr(self, '_set'):
self._set() | 0.005552 |
def parse_venue(data):
"""
Parse a ``MeetupVenue`` from the given response data.
Returns
-------
    A ``pythonkc_meetups.types.MeetupVenue``.
"""
return MeetupVenue(
id=data.get('id', None),
name=data.get('name', None),
address_1=data.get('address_1', None),
address_2=data.get('address_2', None),
address_3=data.get('address_3', None),
city=data.get('city', None),
state=data.get('state', None),
zip=data.get('zip', None),
country=data.get('country', None),
lat=data.get('lat', None),
lon=data.get('lon', None)
) | 0.001582 |
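A small sketch with a hypothetical response payload; missing keys fall back to None, and the fields are assumed to be attribute-accessible on MeetupVenue.

venue = parse_venue({'id': 42, 'name': 'Example Hall', 'city': 'Kansas City', 'state': 'MO'})
venue.city   # 'Kansas City'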
def get_ids_g_goids(self, goids):
"""Get database IDs (DB_IDs), given a set of GO IDs."""
return set(nt.DB_ID for nt in self.associations if nt.GO_ID in goids) | 0.011429 |
def IsLink(self):
"""Determines if the file entry is a link.
Returns:
bool: True if the file entry is a link.
"""
if self._stat_object is None:
self._stat_object = self._GetStat()
if self._stat_object is not None:
self.entry_type = self._stat_object.type
return self.entry_type == definitions.FILE_ENTRY_TYPE_LINK | 0.008427 |
def update_model(self, name, **kw):
"""
Update a model in the registry (create if needed)
:param name: name for the model
:param datapackage_url: origin URL for the datapackage which is the
source for this model
:param datapackage: datapackage object from which this model was
derived
:param dataset_name: Title of the dataset
:param author: Author of the dataset
:param model: model to save
    :param status: loading status of the package
    :param loaded: whether the package was loaded successfully
"""
# If `model` or `datapackage` in kw args, completely remove respective
# field from the ES item, so it can be replaced below (rather than
# merged).
if 'model' in kw:
body_remove_model = \
'{"script" : "ctx._source.remove(\u0027model\u0027)"}'
self.es.update(index=self.index_name, doc_type=self.DOC_TYPE,
body=body_remove_model, id=name)
if 'datapackage' in kw:
body_remove_datapackage = \
'{"script" : "ctx._source.remove(\u0027package\u0027)"}'
self.es.update(index=self.index_name, doc_type=self.DOC_TYPE,
body=body_remove_datapackage, id=name)
document = {
'id': name,
'last_update': time.time()
}
for key, param in [
('model', 'model'),
('package', 'datapackage'),
('origin_url', 'datapackage_url'),
('dataset', 'dataset_name'),
('author', 'author'),
('loading_status', 'status'),
('loaded', 'loaded')]:
if param in kw:
document[key] = kw[param]
body = dict(
doc=document,
doc_as_upsert=True
)
self.es.update(index=self.index_name, doc_type=self.DOC_TYPE,
body=body, id=name)
# Make sure that the data is saved
self.es.indices.flush(self.index_name)
# Clear the api cache (if api_cache isn't None)
try:
api_cache.clear(name)
except AttributeError:
pass | 0.000884 |
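A hypothetical call sketch; `registry` stands in for an instance of the class defining update_model, and all field values are illustrative.

registry.update_model(
    'my-dataset',
    datapackage_url='https://example.org/datapackage.json',
    dataset_name='My Dataset',
    author='Jane Doe',
    status='done',
    loaded=True,
)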
def load_balancer_get(name, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Get details about a specific load balancer.
:param name: The name of the load balancer to query.
:param resource_group: The resource group name assigned to the
load balancer.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.load_balancer_get testlb testgroup
'''
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
load_balancer = netconn.load_balancers.get(
load_balancer_name=name,
resource_group_name=resource_group
)
result = load_balancer.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | 0.001188 |
def setState(self, state):
"""See comments in base class."""
self._position = state['_position']
self._velocity = state['velocity']
self._bestPosition = state['bestPosition']
self._bestResult = state['bestResult'] | 0.004292 |
def add(self, key, value):
"""Add an entry to a list preference
Add `value` to the list of entries for the `key` preference.
"""
        if key not in self.prefs:
self.prefs[key] = []
self.prefs[key].append(value) | 0.011583 |
def setup_fake_forward_run(pst,new_pst_name,org_cwd='.',bak_suffix="._bak",new_cwd='.'):
"""setup a fake forward run for a pst. The fake
forward run simply copies existing backup versions of
model output files to the outfiles pest(pp) is looking
for. This is really a development option for debugging
Parameters
----------
pst : pyemu.Pst
new_pst_name : str
org_cwd : str
existing working dir
new_cwd : str
new working dir
"""
if new_cwd != org_cwd and not os.path.exists(new_cwd):
os.mkdir(new_cwd)
pairs = {}
for output_file in pst.output_files:
org_pth = os.path.join(org_cwd,output_file)
new_pth = os.path.join(new_cwd,os.path.split(output_file)[-1])
assert os.path.exists(org_pth),org_pth
shutil.copy2(org_pth,new_pth+bak_suffix)
pairs[output_file] = os.path.split(output_file)[-1]+bak_suffix
if new_cwd != org_cwd:
for files in [pst.template_files,pst.instruction_files]:
for f in files:
raw = os.path.split(f)
if len(raw[0]) == 0:
raw = raw[1:]
if len(raw) > 1:
pth = os.path.join(*raw[:-1])
pth = os.path.join(new_cwd,pth)
if not os.path.exists(pth):
os.makedirs(pth)
org_pth = os.path.join(org_cwd, f)
new_pth = os.path.join(new_cwd, f)
assert os.path.exists(org_pth), org_pth
shutil.copy2(org_pth,new_pth)
for f in pst.input_files:
raw = os.path.split(f)
if len(raw[0]) == 0:
raw = raw[1:]
if len(raw) > 1:
pth = os.path.join(*raw[:-1])
pth = os.path.join(new_cwd, pth)
if not os.path.exists(pth):
os.makedirs(pth)
for key,f in pst.pestpp_options.items():
if not isinstance(f,str):
continue
raw = os.path.split(f)
if len(raw[0]) == 0:
raw = raw[1:]
if len(raw) > 1:
pth = os.path.join(*raw[:-1])
pth = os.path.join(new_cwd, pth)
if not os.path.exists(pth):
os.makedirs(pth)
org_pth = os.path.join(org_cwd, f)
new_pth = os.path.join(new_cwd, f)
if os.path.exists(org_pth):
shutil.copy2(org_pth,new_pth)
with open(os.path.join(new_cwd,"fake_forward_run.py"),'w') as f:
f.write("import os\nimport shutil\n")
for org,bak in pairs.items():
f.write("shutil.copy2('{0}','{1}')\n".format(bak,org))
pst.model_command = "python fake_forward_run.py"
pst.write(os.path.join(new_cwd,new_pst_name))
return pst | 0.008287 |
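A hedged sketch, assuming pyemu is available and that backed-up model output files already exist next to the control file in the original working directory; all paths and names are illustrative.

import os
import pyemu
pst = pyemu.Pst(os.path.join('template', 'pest.pst'))
setup_fake_forward_run(pst, 'fake.pst', org_cwd='template', new_cwd='fake_run')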
def access_add(name, event, cid, uid, **kwargs):
"""
    Creates a new record with the specified cid/uid in the event authorization.
    Requests carrying a token that contains such a cid/uid will have access to the
    specified event of a service.
"""
ctx = Context(**kwargs)
ctx.execute_action('access:add', **{
'unicorn': ctx.repo.create_secure_service('unicorn'),
'service': name,
'event': event,
'cids': cid,
'uids': uid,
}) | 0.004175 |
def add_string(self, s):
"""
Add a string to the stream.
:param str s: string to add
"""
s = asbytes(s)
self.add_size(len(s))
self.packet.write(s)
return self | 0.012987 |
def gaussian(data, mean, covariance):
"""!
    @brief Calculates the Gaussian density for a dataset using the specified mean (mathematical expectation) and variance, or a
    covariance matrix in the case of multi-dimensional data.
@param[in] data (list): Data that is used for gaussian calculation.
@param[in] mean (float|numpy.array): Mathematical expectation used for calculation.
@param[in] covariance (float|numpy.array): Variance or covariance matrix for calculation.
@return (list) Value of gaussian function for each point in dataset.
"""
dimension = float(len(data[0]))
if dimension != 1.0:
inv_variance = numpy.linalg.pinv(covariance)
else:
inv_variance = 1.0 / covariance
divider = (pi * 2.0) ** (dimension / 2.0) * numpy.sqrt(numpy.linalg.norm(covariance))
if divider != 0.0:
right_const = 1.0 / divider
else:
right_const = float('inf')
result = []
for point in data:
mean_delta = point - mean
point_gaussian = right_const * numpy.exp( -0.5 * mean_delta.dot(inv_variance).dot(numpy.transpose(mean_delta)) )
result.append(point_gaussian)
return result | 0.013115 |
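A small numerical sketch with hypothetical 2-D points and an identity covariance; note that the normalization above uses numpy.linalg.norm of the covariance rather than its determinant, so the returned values can differ from the textbook multivariate density.

import numpy
points = [numpy.array([0.0, 0.0]), numpy.array([1.0, 1.0])]
mean = numpy.array([0.0, 0.0])
covariance = numpy.eye(2)
densities = gaussian(points, mean, covariance)   # one density value per point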
def read_memory(self, space, offset, width, extended=False):
"""Reads in an 8-bit, 16-bit, 32-bit, or 64-bit value from the specified memory space and offset.
:param space: Specifies the address space. (Constants.*SPACE*)
:param offset: Offset (in bytes) of the address or register from which to read.
:param width: Number of bits to read.
:param extended: Use 64 bits offset independent of the platform.
:return: Data read from memory.
Corresponds to viIn* functions of the visa library.
"""
return self.visalib.read_memory(self.session, space, offset, width, extended) | 0.007764 |
def retract(self, e, a, v):
""" redact the value of an attribute
"""
ta = datetime.datetime.now()
ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
rs = self.tx(ret)
tb = datetime.datetime.now() - ta
print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
return rs | 0.01194 |
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None | 0.00738 |
def take_until_including(condition):
"""
>>> [1, 4, 6, 4, 1] > take_until_including(X > 5) | list
[1, 4, 6]
"""
    def take_until_including_(iterable):
        for i in iterable:
if not condition(i):
yield i
else:
yield i
break
return take_until_including_ | 0.002849 |
def chunks_str(str, n, separator="\n", fill_blanks_last=True):
"""returns lines with max n characters
:Example:
>>> print (chunks_str('123456X', 3))
123
456
X
"""
return separator.join(chunks(str, n)) | 0.004016 |
def _create_job_info(self, job_dir):
"""Create information for given job.
Meta file will be loaded if exists, and the job information will
be saved in db backend.
Args:
job_dir (str): Directory path of the job.
"""
meta = self._build_job_meta(job_dir)
self.logger.debug("Create job: %s" % meta)
job_record = JobRecord.from_json(meta)
job_record.save() | 0.004556 |
def _nelec(self):
""" Particles per unit lorentz factor
"""
pd = self.particle_distribution(self._gam * mec2)
return pd.to(1 / mec2_unit).value | 0.011429 |
def quic_graph_lasso_cv(X, metric):
"""Run QuicGraphicalLassoCV on data with metric of choice.
Compare results with GridSearchCV + quic_graph_lasso. The number of
lambdas tested should be much lower with similar final lam_ selected.
"""
print("QuicGraphicalLassoCV with:")
print(" metric: {}".format(metric))
model = QuicGraphicalLassoCV(
        cv=2,  # cannot handle more folds at this small sample size
n_refinements=6,
n_jobs=1,
init_method="cov",
score_metric=metric,
)
model.fit(X)
print(" len(cv_lams): {}".format(len(model.cv_lams_)))
print(" lam_scale_: {}".format(model.lam_scale_))
print(" lam_: {}".format(model.lam_))
return model.covariance_, model.precision_, model.lam_ | 0.001305 |
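A hedged usage sketch: X is any (n_samples, n_features) array, and 'log_likelihood' is assumed to be one of the score metrics accepted by skggm's QuicGraphicalLassoCV.

import numpy as np
X = np.random.randn(200, 10)
cov, prec, lam = quic_graph_lasso_cv(X, metric='log_likelihood')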
def compute_histogram(values, edges, use_orig_distr=False):
"""Computes histogram (density) for a given vector of values."""
if use_orig_distr:
return values
# ignoring invalid values: Inf and Nan
values = check_array(values).compressed()
hist, bin_edges = np.histogram(values, bins=edges, density=True)
hist = preprocess_histogram(hist, values, edges)
return hist | 0.002475 |
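A brief sketch with synthetic values; check_array and preprocess_histogram are assumed to be helpers defined elsewhere in the same module.

import numpy as np
values = np.random.randn(500)
edges = np.linspace(-3.0, 3.0, 25)
density = compute_histogram(values, edges)   # one density value per bin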
def opt_rankings(n_items, data, alpha=1e-6, method="Newton-CG",
initial_params=None, max_iter=None, tol=1e-5):
"""Compute the ML estimate of model parameters using ``scipy.optimize``.
This function computes the maximum-likelihood estimate of model parameters
given ranking data (see :ref:`data-rankings`), using optimizers provided by
the ``scipy.optimize`` module.
If ``alpha > 0``, the function returns the maximum a-posteriori (MAP)
estimate under an isotropic Gaussian prior with variance ``1 / alpha``. See
:ref:`regularization` for details.
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization strength.
method : str, optional
Optimization method. Either "BFGS" or "Newton-CG".
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Tolerance for termination (method-specific).
Returns
-------
params : numpy.ndarray
The (penalized) ML estimate of model parameters.
Raises
------
ValueError
If the method is not "BFGS" or "Newton-CG".
"""
fcts = Top1Fcts.from_rankings(data, alpha)
return _opt(n_items, fcts, method, initial_params, max_iter, tol) | 0.001385 |
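A hedged sketch with toy ranking data: each ranking lists item indices from most to least preferred, matching the data-rankings format referenced in the docstring.

rankings = [
    [0, 2, 1],   # item 0 is ranked above item 2, which is above item 1
    [1, 0, 2],
    [0, 1, 2],
]
params = opt_rankings(n_items=3, data=rankings, alpha=0.01)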
def plot_observer(population, num_generations, num_evaluations, args):
"""Plot the output of the evolutionary computation as a graph.
This function plots the performance of the EC as a line graph
using matplotlib and numpy. The graph consists of a blue line
representing the best fitness, a green line representing the
average fitness, and a red line representing the median fitness.
It modifies the keyword arguments variable 'args' by including an
entry called 'plot_data'.
If this observer is used, the calling script should also import
the matplotlib library and should end the script with::
matplotlib.pyplot.show()
Otherwise, the program may generate a runtime error.
.. note::
This function makes use of the matplotlib and numpy libraries.
.. Arguments:
population -- the population of Individuals
num_generations -- the number of elapsed generations
num_evaluations -- the number of candidate solution evaluations
args -- a dictionary of keyword arguments
"""
import matplotlib.pyplot as plt
import numpy
stats = inspyred.ec.analysis.fitness_statistics(population)
best_fitness = stats['best']
worst_fitness = stats['worst']
median_fitness = stats['median']
average_fitness = stats['mean']
colors = ['black', 'blue', 'green', 'red']
labels = ['average', 'median', 'best', 'worst']
data = []
if num_generations == 0:
plt.ion()
data = [[num_evaluations], [average_fitness], [median_fitness], [best_fitness], [worst_fitness]]
lines = []
for i in range(4):
line, = plt.plot(data[0], data[i+1], color=colors[i], label=labels[i])
lines.append(line)
# Add the legend when the first data is added.
plt.legend(loc='lower right')
args['plot_data'] = data
args['plot_lines'] = lines
plt.xlabel('Evaluations')
plt.ylabel('Fitness')
else:
data = args['plot_data']
data[0].append(num_evaluations)
data[1].append(average_fitness)
data[2].append(median_fitness)
data[3].append(best_fitness)
data[4].append(worst_fitness)
lines = args['plot_lines']
for i, line in enumerate(lines):
line.set_xdata(numpy.array(data[0]))
line.set_ydata(numpy.array(data[i+1]))
args['plot_data'] = data
args['plot_lines'] = lines
ymin = min([min(d) for d in data[1:]])
ymax = max([max(d) for d in data[1:]])
yrange = ymax - ymin
plt.xlim((0, num_evaluations))
plt.ylim((ymin - 0.1*yrange, ymax + 0.1*yrange))
plt.draw() | 0.005889 |
def parse_definite_clause(s):
"Return the antecedents and the consequent of a definite clause."
assert is_definite_clause(s)
if is_symbol(s.op):
return [], s
else:
antecedent, consequent = s.args
return conjuncts(antecedent), consequent | 0.003623 |
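A short sketch assuming the AIMA-style expr() constructor used alongside helpers like conjuncts and is_definite_clause; the clause (P & Q) ==> R splits into antecedents [P, Q] and consequent R.

antecedents, consequent = parse_definite_clause(expr('(P & Q) ==> R'))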