text (string, 78 to 104k chars) | score (float64, 0 to 0.18) |
---|---|
def OnCellFont(self, event):
"""Cell font event handler"""
with undo.group(_("Font")):
self.grid.actions.set_attr("textfont", event.font)
self.grid.ForceRefresh()
self.grid.update_attribute_toolbar()
event.Skip() | 0.007463 |
def iter_predecessors(self, graph, dest, branch, turn, tick, *, forward=None):
"""Iterate over predecessors to a given destination node at a given time."""
if self.db._no_kc:
yield from self._adds_dels_sucpred(self.predecessors[graph, dest], branch, turn, tick)[0]
return
if forward is None:
forward = self.db._forward
yield from self._get_origcache(graph, dest, branch, turn, tick, forward=forward) | 0.010707 |
def route_stanza(self, stanza):
"""Process stanza not addressed to us.
Return "recipient-unavailable" return if it is not
"error" nor "result" stanza.
This method should be overriden in derived classes if they
are supposed to handle stanzas not addressed directly to local
stream endpoint.
:Parameters:
- `stanza`: presence stanza to be processed
"""
if stanza.stanza_type not in ("error", "result"):
response = stanza.make_error_response(u"recipient-unavailable")
self.send(response)
return True | 0.003252 |
def find_requirements(filename):
"""
Find requirements in file.
"""
import string
content = read(filename)
requirements = []
for line in content.splitlines():
line = line.strip()
if line and line[:1] in string.ascii_letters:
requirements.append(line)
return requirements | 0.00303 |
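A minimal standalone illustration of the same filtering rule (keep only lines that start with an ASCII letter), assuming the missing read() helper simply returns the file contents as a string; the sample text is invented for the example.

import string

sample = "# comment\n-e git+https://example.org/repo\nrequests>=2.0\nflask\n"
requirements = [line.strip() for line in sample.splitlines()
                if line.strip() and line.strip()[:1] in string.ascii_letters]
print(requirements)  # ['requests>=2.0', 'flask']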
def source_file_name(self):
"""
File name where the object is implemented (e.g. pandas/core/frame.py).
"""
try:
fname = inspect.getsourcefile(self.code_obj)
except TypeError:
# In some cases the object is something complex like a cython
            # object that can't be easily introspected. And it's better to
            # return None as the source file of the object than to crash
pass
else:
if fname:
fname = os.path.relpath(fname, BASE_PATH)
return fname | 0.00339 |
def gp_norm(infile):
"""indentify normalization region"""
inDir, outDir = getWorkDirs()
data, titles = [], []
for eidx,energy in enumerate(['19', '27', '39', '62']):
file_url = os.path.realpath(os.path.join(
inDir, 'rawdata', energy, 'pt-integrated', infile+'.dat'
))
data_import = np.loadtxt(open(file_url, 'rb'))
data_import[:,1] += eidx * 0.2
data_import[:,4] = data_import[:,3]
data_import[:,(2,3)] = 0
data.append(data_import)
titles.append(' '.join([getEnergy4Key(energy), 'GeV']))
nData = len(data)
lines = dict(
('x={}'.format(1+i*0.2), 'lc {} lt 2 lw 4'.format(default_colors[-2]))
for i in range(nData)
)
lines.update(dict(
('x={}'.format(1+i*0.2+0.02), 'lc {} lt 3 lw 4'.format(default_colors[-5]))
for i in range(nData)
))
lines.update(dict(
('x={}'.format(1+i*0.2-0.02), 'lc {} lt 3 lw 4'.format(default_colors[-5]))
for i in range(nData)
))
lines.update({'y=0.9': 'lc {} lt 1 lw 4'.format(default_colors[-2])})
charges = '++' if infile == 'rpp' else '--'
make_plot(
name = '%s/norm_range_%s' % (outDir,infile), xr = [0,2], yr = [0.9,1.7],
data = data, properties = [
'lt 1 lw 3 lc %s pt 1' % (default_colors[i]) # (i/2)%4
for i in range(nData)
], titles = titles, size = '8in,8in',
lmargin = 0.05, rmargin = 0.99, tmargin = 0.93, bmargin = 0.14,
xlabel = 'dielectron invariant mass, M_{ee} (GeV/c^{2})',
lines = lines, key = [
'maxrows 1', 'nobox', 'samplen 0.1', 'width -1', 'at graph 1,1.1'
], labels = {
'SE_{%s} / ME@_{%s}^N' % (charges, charges): (0.3, 1.3)
}, gpcalls = [
'ytics (1,"1" 1.2, "1" 1.4, "1" 1.6)', 'boxwidth 0.002',
],
) | 0.024612 |
def load_forecasts(self):
"""
Load the forecast files into memory.
"""
run_date_str = self.run_date.strftime("%Y%m%d")
for model_name in self.model_names:
self.raw_forecasts[model_name] = {}
forecast_file = self.forecast_path + run_date_str + "/" + \
model_name.replace(" ", "-") + "_hailprobs_{0}_{1}.nc".format(self.ensemble_member, run_date_str)
forecast_obj = Dataset(forecast_file)
forecast_hours = forecast_obj.variables["forecast_hour"][:]
valid_hour_indices = np.where((self.start_hour <= forecast_hours) & (forecast_hours <= self.end_hour))[0]
for size_threshold in self.size_thresholds:
self.raw_forecasts[model_name][size_threshold] = \
forecast_obj.variables["prob_hail_{0:02d}_mm".format(size_threshold)][valid_hour_indices]
forecast_obj.close() | 0.005353 |
def _chk_docopt_kws(self, docdict, exp):
"""Check for common user errors when running from the command-line."""
for key, val in docdict.items():
if isinstance(val, str):
assert '=' not in val, self._err("'=' FOUND IN VALUE", key, val, exp)
elif key != 'help' and key not in self.exp_keys and key not in self.exp_elems:
raise RuntimeError(self._err("UNKNOWN KEY", key, val, exp)) | 0.008889 |
def get_ignored_files(self):
"""Returns the list of files being ignored in this repository.
Note that file names, not directories, are returned.
So, we will get the following:
a/b.txt
a/c.txt
instead of just:
a/
Returns:
List[str] - list of ignored files. The paths are absolute.
"""
return [os.path.join(self.path, p) for p in
self.run('ls-files', '--ignored', '--exclude-standard',
'--others').strip().split()
] | 0.003521 |
def add_bridge(self, bridge):
""" Add bridge groups.
:param bridge: Add groups from this bridge.
"""
for group in bridge.groups:
self._groups[group.name] = group | 0.009709 |
def pull(collector, image, **kwargs):
"""Pull an image"""
if not image.image_index:
raise BadOption("The chosen image does not have a image_index configuration", wanted=image.name)
tag = kwargs["artifact"]
if tag is NotSpecified:
        tag = collector.configuration["harpoon"].tag
if tag is not NotSpecified:
image.tag = tag
log.info("Pulling tag: %s", tag)
Syncer().pull(image, ignore_missing=image.harpoon.ignore_missing) | 0.004283 |
def sign(self, data, **kwargs):
"""Create a signature for a message string or file.
Note that this method is not for signing other keys. (In GnuPG's
terms, what we all usually call 'keysigning' is actually termed
'certification'...) Even though they are cryptographically the same
        operation, GnuPG differentiates between them, presumably because these
        operations are also the same as the decryption operation. If the
        ``key_usage`` flags ``C (certification)``, ``S (sign)``, and ``E
        (encrypt)`` were all on the same key, the key would "wear down" through
frequent signing usage -- since signing data is usually done often --
meaning that the secret portion of the keypair, also used for
decryption in this scenario, would have a statistically higher
probability of an adversary obtaining an oracle for it (or for a
portion of the rounds in the cipher algorithm, depending on the family
of cryptanalytic attack used).
In simpler terms: this function isn't for signing your friends' keys,
it's for something like signing an email.
:type data: :obj:`str` or :obj:`file`
:param data: A string or file stream to sign.
:param str default_key: The key to sign with.
:param str passphrase: The passphrase to pipe to stdin.
:param bool clearsign: If True, create a cleartext signature.
:param bool detach: If True, create a detached signature.
:param bool binary: If True, do not ascii armour the output.
:param str digest_algo: The hash digest to use. Again, to see which
hashes your GnuPG is capable of using, do:
:command:`$ gpg --with-colons --list-config digestname`.
The default, if unspecified, is ``'SHA512'``.
"""
if 'default_key' in kwargs:
log.info("Signing message '%r' with keyid: %s"
% (data, kwargs['default_key']))
else:
log.warn("No 'default_key' given! Using first key on secring.")
if hasattr(data, 'read'):
result = self._sign_file(data, **kwargs)
elif not _is_stream(data):
stream = _make_binary_stream(data, self._encoding)
result = self._sign_file(stream, **kwargs)
stream.close()
else:
log.warn("Unable to sign message '%s' with type %s"
% (data, type(data)))
result = None
return result | 0.000794 |
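A self-contained sketch of the dispatch performed above: file-like inputs are passed through, plain strings are wrapped in a binary stream first. The real _sign_file call is replaced with a print so the sketch runs on its own; sign_sketch is a hypothetical name, not part of the library.

import io

def sign_sketch(data, encoding='utf-8'):
    # File-like objects are used as-is; strings are wrapped in a stream.
    stream = data if hasattr(data, 'read') else io.BytesIO(data.encode(encoding))
    print('would sign %d bytes' % len(stream.read()))

sign_sketch("hello, world")
sign_sketch(io.BytesIO(b"already a stream"))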
def detect_interval(
self,
min_head_length=None,
max_head_length=None,
min_tail_length=None,
max_tail_length=None
):
"""
Detect the interval of the audio file
containing the fragments in the text file.
Return the audio interval as a tuple of two
:class:`~aeneas.exacttiming.TimeValue` objects,
representing the begin and end time, in seconds,
with respect to the full wave duration.
If one of the parameters is ``None``, the default value
(``0.0`` for min, ``10.0`` for max) will be used.
:param min_head_length: estimated minimum head length
:type min_head_length: :class:`~aeneas.exacttiming.TimeValue`
:param max_head_length: estimated maximum head length
:type max_head_length: :class:`~aeneas.exacttiming.TimeValue`
:param min_tail_length: estimated minimum tail length
:type min_tail_length: :class:`~aeneas.exacttiming.TimeValue`
:param max_tail_length: estimated maximum tail length
:type max_tail_length: :class:`~aeneas.exacttiming.TimeValue`
:rtype: (:class:`~aeneas.exacttiming.TimeValue`, :class:`~aeneas.exacttiming.TimeValue`)
:raises: TypeError: if one of the parameters is not ``None`` or a number
:raises: ValueError: if one of the parameters is negative
"""
head = self.detect_head(min_head_length, max_head_length)
tail = self.detect_tail(min_tail_length, max_tail_length)
begin = head
end = self.real_wave_mfcc.audio_length - tail
self.log([u"Audio length: %.3f", self.real_wave_mfcc.audio_length])
self.log([u"Head length: %.3f", head])
self.log([u"Tail length: %.3f", tail])
self.log([u"Begin: %.3f", begin])
self.log([u"End: %.3f", end])
if (begin >= TimeValue("0.000")) and (end > begin):
self.log([u"Returning %.3f %.3f", begin, end])
return (begin, end)
self.log(u"Returning (0.000, 0.000)")
return (TimeValue("0.000"), TimeValue("0.000")) | 0.002333 |
def make_eventrule(date_rule, time_rule, cal, half_days=True):
"""
Constructs an event rule from the factory api.
"""
# Insert the calendar in to the individual rules
date_rule.cal = cal
time_rule.cal = cal
if half_days:
inner_rule = date_rule & time_rule
else:
nhd_rule = NotHalfDay()
nhd_rule.cal = cal
inner_rule = date_rule & time_rule & nhd_rule
return OncePerDay(rule=inner_rule) | 0.002193 |
def _request_status(self):
""" Checks the api endpoint to check if the async job progress """
if self.item_id:
return True
response = self.con.get(self.monitor_url)
if not response:
return False
data = response.json()
self.status = data.get('status', 'inProgress')
self.completion_percentage = data.get(self._cc('percentageComplete'),
0)
self.item_id = data.get(self._cc('resourceId'), None)
return self.item_id is not None | 0.003515 |
def valid_status(*valid):
"""Decorator to assert that we're in a valid state."""
def decorator(func):
@functools.wraps(func)
def _valid_status(self, *args, **kwargs):
if self.status not in valid:
raise protocol.ProtocolError(
"`%s` called while in state: %s, valid: (%s)" % (
func.__name__, self.status, ",".join(map(str, valid))))
return func(self, *args, **kwargs)
return _valid_status
return decorator | 0.012766 |
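A runnable sketch of how a status-guard decorator like the one above is applied; ProtocolError is swapped for RuntimeError and the names are hypothetical so the example has no external dependencies.

import functools

def valid_status_sketch(*valid):
    def decorator(func):
        @functools.wraps(func)
        def _valid_status(self, *args, **kwargs):
            if self.status not in valid:
                raise RuntimeError("`%s` called while in state: %s, valid: (%s)" % (
                    func.__name__, self.status, ",".join(map(str, valid))))
            return func(self, *args, **kwargs)
        return _valid_status
    return decorator

class Connection(object):
    def __init__(self):
        self.status = "open"

    @valid_status_sketch("open")
    def send(self, payload):
        return "sent %r" % payload

conn = Connection()
print(conn.send("ping"))   # allowed while status == "open"
conn.status = "closed"     # conn.send("ping") would now raise RuntimeError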
def auto_detect(self, args):
"""Check for already Slackware binary packages exist
"""
suffixes = [
".tgz",
".txz",
".tbz",
".tlz"
]
if (not args[0].startswith("-") and args[0] not in self.commands and
args[0].endswith(tuple(suffixes))):
packages, not_found = [], []
for pkg in args:
if pkg.endswith(tuple(suffixes)):
if os.path.isfile(pkg):
packages.append(pkg)
else:
not_found.append(pkg)
if packages:
Auto(packages).select()
if not_found:
for ntf in not_found:
self.msg.pkg_not_found("", ntf, "Not installed", "")
raise SystemExit() | 0.002342 |
def normalize_hostname(hostname):
'''Normalizes a hostname so that it is ASCII and valid domain name.'''
try:
new_hostname = hostname.encode('idna').decode('ascii').lower()
except UnicodeError as error:
raise UnicodeError('Hostname {} rejected: {}'.format(hostname, error)) from error
if hostname != new_hostname:
# Check for round-trip. May raise UnicodeError
new_hostname.encode('idna')
return new_hostname | 0.004329 |
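A quick illustration of the IDNA round-trip this helper relies on, using only the standard-library 'idna' codec; the hostname is an arbitrary example.

hostname = 'Bücher.example'
ascii_form = hostname.encode('idna').decode('ascii').lower()
print(ascii_form)          # xn--bcher-kva.example
ascii_form.encode('idna')  # round-trip check; a bad name would raise UnicodeError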
def split_segments(text, closing_paren=False):
"""Return objects representing segments."""
buf = StringIO()
# The segments we're building, and the combinators used to combine them.
# Note that after this is complete, this should be true:
# len(segments) == len(combinators) + 1
# Thus we can understand the relationship between segments and combinators
# like so:
# s1 (c1) s2 (c2) s3 (c3) where sN are segments and cN are combination
# functions.
# TODO: Figure out exactly where the querystring died and post cool
# error messages about it.
segments = []
combinators = []
# A flag dictating if the last character we processed was a group.
# This is used to determine if the next character (being a combinator)
    # is allowed to appear there.
last_group = False
# The recursive nature of this function relies on keeping track of the
# state of iteration. This iterator will be passed down to recursed calls.
iterator = iter(text)
# Detection for exclamation points. only matters for this situation:
# foo=bar&!(bar=baz)
last_negation = False
for character in iterator:
if character in COMBINATORS:
if last_negation:
buf.write(constants.OPERATOR_NEGATION)
# The string representation of our segment.
val = buf.getvalue()
reset_stringio(buf)
if not last_group and not len(val):
raise ValueError('Unexpected %s.' % character)
# When a group happens, the previous value is empty.
if len(val):
segments.append(parse_segment(val))
combinators.append(COMBINATORS[character])
elif character == constants.GROUP_BEGIN:
# Recursively go into the next group.
if buf.tell():
raise ValueError('Unexpected %s' % character)
seg = split_segments(iterator, True)
if last_negation:
seg = UnarySegmentCombinator(seg)
segments.append(seg)
# Flag that the last entry was a grouping, so that we don't panic
# when the next character is a logical combinator
last_group = True
continue
elif character == constants.GROUP_END:
# Build the segment for anything remaining, and then combine
# all the segments.
val = buf.getvalue()
# Check for unbalanced parens or an empty thing: foo=bar&();bar=baz
if not buf.tell() or not closing_paren:
raise ValueError('Unexpected %s' % character)
segments.append(parse_segment(val))
return combine(segments, combinators)
elif character == constants.OPERATOR_NEGATION and not buf.tell():
last_negation = True
continue
else:
if last_negation:
buf.write(constants.OPERATOR_NEGATION)
if last_group:
raise ValueError('Unexpected %s' % character)
buf.write(character)
last_negation = False
last_group = False
else:
# Check and see if the iterator exited early (unbalanced parens)
if closing_paren:
raise ValueError('Expected %s.' % constants.GROUP_END)
if not last_group:
# Add the final segment.
segments.append(parse_segment(buf.getvalue()))
# Everything completed normally, combine all the segments into one
# and return them.
return combine(segments, combinators) | 0.000279 |
def create(self, type, friendly_name=values.unset, certificate=values.unset,
private_key=values.unset, sandbox=values.unset, api_key=values.unset,
secret=values.unset):
"""
Create a new CredentialInstance
:param CredentialInstance.PushService type: The Credential type
:param unicode friendly_name: A string to describe the resource
:param unicode certificate: [APN only] The URL-encoded representation of the certificate
:param unicode private_key: [APN only] URL-encoded representation of the private key
:param bool sandbox: [APN only] Whether to send the credential to sandbox APNs
:param unicode api_key: [GCM only] The `Server key` of your project from Firebase console under Settings / Cloud messaging
:param unicode secret: [FCM only] The `Server key` of your project from Firebase console under Settings / Cloud messaging
:returns: Newly created CredentialInstance
:rtype: twilio.rest.notify.v1.credential.CredentialInstance
"""
data = values.of({
'Type': type,
'FriendlyName': friendly_name,
'Certificate': certificate,
'PrivateKey': private_key,
'Sandbox': sandbox,
'ApiKey': api_key,
'Secret': secret,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return CredentialInstance(self._version, payload, ) | 0.00657 |
def _load_from_yaml(self, filename: str, model_identifiers: Dict[str, List[str]]):
"""
Load fixtures from the given filename
"""
class_name = filename[:filename.rfind('.')]
rendered_yaml = self.env.get_template(filename).render(
model_identifiers=model_identifiers)
fixture_data, self.relationships[class_name] = self._post_process_yaml_data(
yaml.load(rendered_yaml),
self.factory.get_relationships(class_name))
for identifier_key, data in fixture_data.items():
self.model_fixtures[class_name][identifier_key] = data | 0.006431 |
def _read_record(self, stream):
"""
Read a complete record from a GDSII stream file.
Parameters
----------
stream : file
GDSII stream file to be imported.
Returns
-------
out : 2-tuple
Record type and data (as a numpy.array)
"""
header = stream.read(4)
if len(header) < 4:
return None
size, rec_type = struct.unpack('>HH', header)
data_type = (rec_type & 0x00ff)
rec_type = rec_type // 256
data = None
if size > 4:
if data_type == 0x01:
data = numpy.array(
struct.unpack('>{0}H'.format((size - 4) // 2),
stream.read(size - 4)),
dtype='uint')
elif data_type == 0x02:
data = numpy.array(
struct.unpack('>{0}h'.format((size - 4) // 2),
stream.read(size - 4)),
dtype='int')
elif data_type == 0x03:
data = numpy.array(
struct.unpack('>{0}l'.format((size - 4) // 4),
stream.read(size - 4)),
dtype='int')
elif data_type == 0x05:
data = numpy.array([
_eight_byte_real_to_float(stream.read(8))
for _ in range((size - 4) // 8)
])
else:
data = stream.read(size - 4)
if str is not bytes:
if data[-1] == 0:
data = data[:-1].decode('ascii')
else:
data = data.decode('ascii')
elif data[-1] == '\0':
data = data[:-1]
return [rec_type, data] | 0.001078 |
def str_strip(arr, to_strip=None, side='both'):
"""
Strip whitespace (including newlines) from each string in the
Series/Index.
Parameters
----------
to_strip : str or unicode
side : {'left', 'right', 'both'}, default 'both'
Returns
-------
Series or Index
"""
if side == 'both':
f = lambda x: x.strip(to_strip)
elif side == 'left':
f = lambda x: x.lstrip(to_strip)
elif side == 'right':
f = lambda x: x.rstrip(to_strip)
else: # pragma: no cover
raise ValueError('Invalid side')
return _na_map(f, arr) | 0.006667 |
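Standalone illustration of the three stripping modes selected above, using a plain list in place of a pandas Series.

values = ['  spam\n', '\teggs  ']
print([x.strip() for x in values])    # both sides -> ['spam', 'eggs']
print([x.lstrip() for x in values])   # left only  -> ['spam\n', 'eggs  ']
print([x.rstrip() for x in values])   # right only -> ['  spam', '\teggs']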
def create_from_stack(cls, shape, components, ylims, weights=None):
""" Combine the log-likelihoods from a number of components.
Parameters
----------
shape : tuple
The shape of the return array
components : [~fermipy.castro.CastroData_Base]
The components to be stacked
weights : array-like
Returns
-------
castro : `~fermipy.castro.CastroData`
"""
if len(components) == 0:
return None
norm_vals, nll_vals, nll_offsets = CastroData_Base.stack_nll(shape,
components,
ylims,
weights)
return cls(norm_vals, nll_vals,
components[0].refSpec,
components[0].norm_type) | 0.003145 |
def run(sc, map_fun, tf_args, num_executors, num_ps, tensorboard=False, input_mode=InputMode.TENSORFLOW,
log_dir=None, driver_ps_nodes=False, master_node=None, reservation_timeout=600, queues=['input', 'output', 'error'],
eval_node=False):
"""Starts the TensorFlowOnSpark cluster and Runs the TensorFlow "main" function on the Spark executors
Args:
:sc: SparkContext
:map_fun: user-supplied TensorFlow "main" function
:tf_args: ``argparse`` args, or command-line ``ARGV``. These will be passed to the ``map_fun``.
:num_executors: number of Spark executors. This should match your Spark job's ``--num_executors``.
:num_ps: number of Spark executors which are reserved for TensorFlow PS nodes. All other executors will be used as TensorFlow worker nodes.
:tensorboard: boolean indicating if the chief worker should spawn a Tensorboard server.
:input_mode: TFCluster.InputMode
:log_dir: directory to save tensorboard event logs. If None, defaults to a fixed path on local filesystem.
    :driver_ps_nodes: run the PS nodes on the driver locally instead of on the spark executors; this helps maximize computing resources (esp. GPU). You will need to set cluster_size = num_executors + num_ps
:master_node: name of the "master" or "chief" node in the cluster_template, used for `tf.estimator` applications.
:reservation_timeout: number of seconds after which cluster reservation times out (600 sec default)
:queues: *INTERNAL_USE*
:eval_node: run evaluator node for distributed Tensorflow
Returns:
A TFCluster object representing the started cluster.
"""
logging.info("Reserving TFSparkNodes {0}".format("w/ TensorBoard" if tensorboard else ""))
if driver_ps_nodes and input_mode != InputMode.TENSORFLOW:
raise Exception('running PS nodes on driver locally is only supported in InputMode.TENSORFLOW')
if eval_node and input_mode != InputMode.TENSORFLOW:
raise Exception('running evaluator nodes is only supported in InputMode.TENSORFLOW')
# compute size of TF cluster and validate against number of Spark executors
num_master = 1 if master_node else 0
num_eval = 1 if eval_node else 0
num_workers = max(num_executors - num_ps - num_eval - num_master, 0)
total_nodes = num_ps + num_master + num_eval + num_workers
assert total_nodes == num_executors, "TensorFlow cluster requires {} nodes, but only {} executors available".format(total_nodes, num_executors)
assert num_master + num_workers > 0, "TensorFlow cluster requires at least one worker or master/chief node"
# create a cluster template for scheduling TF nodes onto executors
executors = list(range(num_executors))
cluster_template = {}
if num_ps > 0:
cluster_template['ps'] = executors[:num_ps]
del executors[:num_ps]
if master_node:
cluster_template[master_node] = executors[:1]
del executors[:1]
if eval_node:
cluster_template['evaluator'] = executors[:1]
del executors[:1]
if num_workers > 0:
cluster_template['worker'] = executors[:num_workers]
logging.info("cluster_template: {}".format(cluster_template))
# get default filesystem from spark
defaultFS = sc._jsc.hadoopConfiguration().get("fs.defaultFS")
# strip trailing "root" slash from "file:///" to be consistent w/ "hdfs://..."
if defaultFS.startswith("file://") and len(defaultFS) > 7 and defaultFS.endswith("/"):
defaultFS = defaultFS[:-1]
# get current working dir of spark launch
working_dir = os.getcwd()
# start a server to listen for reservations and broadcast cluster_spec
server = reservation.Server(num_executors)
server_addr = server.start()
# start TF nodes on all executors
logging.info("Starting TensorFlow on executors")
cluster_meta = {
'id': random.getrandbits(64),
'cluster_template': cluster_template,
'num_executors': num_executors,
'default_fs': defaultFS,
'working_dir': working_dir,
'server_addr': server_addr
}
if driver_ps_nodes:
nodeRDD = sc.parallelize(range(num_ps, num_executors), num_executors - num_ps)
else:
nodeRDD = sc.parallelize(range(num_executors), num_executors)
if driver_ps_nodes:
def _start_ps(node_index):
logging.info("starting ps node locally %d" % node_index)
TFSparkNode.run(map_fun,
tf_args,
cluster_meta,
tensorboard,
log_dir,
queues,
background=(input_mode == InputMode.SPARK))([node_index])
for i in cluster_template['ps']:
ps_thread = threading.Thread(target=lambda: _start_ps(i))
ps_thread.daemon = True
ps_thread.start()
# start TF on a background thread (on Spark driver) to allow for feeding job
def _start(status):
try:
nodeRDD.foreachPartition(TFSparkNode.run(map_fun,
tf_args,
cluster_meta,
tensorboard,
log_dir,
queues,
background=(input_mode == InputMode.SPARK)))
except Exception as e:
logging.error("Exception in TF background thread")
status['error'] = str(e)
t = threading.Thread(target=_start, args=(tf_status,))
# run as daemon thread so that in spark mode main thread can exit
# if feeder spark stage fails and main thread can't do explicit shutdown
t.daemon = True
t.start()
# wait for executors to register and start TFNodes before continuing
logging.info("Waiting for TFSparkNodes to start")
cluster_info = server.await_reservations(sc, tf_status, reservation_timeout)
logging.info("All TFSparkNodes started")
# print cluster_info and extract TensorBoard URL
tb_url = None
for node in cluster_info:
logging.info(node)
if node['tb_port'] != 0:
tb_url = "http://{0}:{1}".format(node['host'], node['tb_port'])
if tb_url is not None:
logging.info("========================================================================================")
logging.info("")
logging.info("TensorBoard running at: {0}".format(tb_url))
logging.info("")
logging.info("========================================================================================")
# since our "primary key" for each executor's TFManager is (host, executor_id), sanity check for duplicates
# Note: this may occur if Spark retries failed Python tasks on the same executor.
tb_nodes = set()
for node in cluster_info:
node_id = (node['host'], node['executor_id'])
if node_id in tb_nodes:
msg = '''
Duplicate cluster node id detected (host={0}, executor_id={1})
Please ensure that:
1. Number of executors >= number of TensorFlow nodes
2. Number of tasks per executors is 1
          3. TFCluster.shutdown() is successfully invoked when done.
'''.strip()
raise Exception(msg.format(node_id[0], node_id[1]))
else:
tb_nodes.add(node_id)
# create TFCluster object
cluster = TFCluster()
cluster.sc = sc
cluster.meta = cluster_meta
cluster.nodeRDD = nodeRDD
cluster.cluster_info = cluster_info
cluster.cluster_meta = cluster_meta
cluster.input_mode = input_mode
cluster.queues = queues
cluster.server = server
return cluster | 0.014469 |
def target(key, full=True):
'''
Return the basename of a SysFS key path
:param key: the location to resolve within SysFS
:param full: full path instead of basename
:return: fullpath or basename of path
CLI example:
.. code-block:: bash
salt '*' sysfs.read class/ttyS0
'''
if not key.startswith('/sys'):
key = os.path.join('/sys', key)
key = os.path.realpath(key)
if not os.path.exists(key):
        log.debug('Unknown SysFS key %s', key)
return False
elif full:
return key
else:
return os.path.basename(key) | 0.001653 |
def module(self):
"""The module in which the Function is defined.
Python equivalent of the CLIPS deffunction-module command.
"""
modname = ffi.string(lib.EnvDeffunctionModule(self._env, self._fnc))
defmodule = lib.EnvFindDefmodule(self._env, modname)
return Module(self._env, defmodule) | 0.005935 |
def update(self, key, item):
"""
Update item into hash table with specified key and item.
If key is already present, destroys old item and inserts new one.
Use free_fn method to ensure deallocator is properly called on item.
"""
return lib.zhash_update(self._as_parameter_, key, item) | 0.006329 |
def parse_input(command_input=None):
"""Parses command line input."""
parser = argparse.ArgumentParser(
prog='python3 ok',
description=__doc__,
usage='%(prog)s [--help] [options]',
formatter_class=argparse.RawDescriptionHelpFormatter)
testing = parser.add_argument_group('running tests')
testing.add_argument('-q', '--question', type=str, action='append',
help="run tests for a specific question")
testing.add_argument('--suite', type=str, default=None,
help="run cases from a specific suite")
testing.add_argument('--case', type=str, action='append',
help="run specific cases")
testing.add_argument('-u', '--unlock', action='store_true',
help="unlock tests interactively")
testing.add_argument('-i', '--interactive', action='store_true',
help="start the Python interpreter after a failed test")
testing.add_argument('-v', '--verbose', action='store_true',
help="show all tests, not just passing tests")
testing.add_argument('-t', '--testing', nargs='?', type=str, const='mytests.rst',
help='run tests from rst file (default: mytests.rst)')
testing.add_argument('--all', action='store_true',
help="run tests for all questions in config file")
testing.add_argument('--submit', action='store_true',
help="submit the assignment")
testing.add_argument('--backup', action='store_true',
help="attempt to reliably backup your work")
testing.add_argument('--revise', action='store_true',
help="submit composition revision")
testing.add_argument('--timeout', type=int, default=10,
help="set the timeout duration (in seconds) for running tests")
testing.add_argument('-cov', '--coverage', action='store_true',
help="get suggestions on what lines to add tests for")
# Experiments
experiment = parser.add_argument_group('experiment options')
experiment.add_argument('--no-experiments', action='store_true',
help="do not run experimental features")
experiment.add_argument('--hint', action='store_true',
help="give a hint (if available)")
experiment.add_argument('--style', action='store_true',
help="run AutoStyle feedback system")
experiment.add_argument('--collab', action='store_true',
help="launch collaborative programming environment")
# Debug information
debug = parser.add_argument_group('debugging options')
debug.add_argument('--version', action='store_true',
help="print the version number and exit")
debug.add_argument('--tests', action='store_true',
help="display a list of all available tests")
debug.add_argument('--debug', action='store_true',
help="show debugging output")
# Grading
grading = parser.add_argument_group('grading options')
grading.add_argument('--lock', action='store_true',
help="lock the tests in a directory")
grading.add_argument('--score', action='store_true',
help="score the assignment")
grading.add_argument('--score-out', type=str,
nargs='?', const=None, default=None,
help="write scores to a file")
grading.add_argument('--config', type=str,
help="use a specific configuration file")
# Server parameters
server = parser.add_argument_group('server options')
server.add_argument('--local', action='store_true',
help="disable any network activity")
server.add_argument('--server', type=str,
default='okpy.org',
help="set the server address")
server.add_argument('--authenticate', action='store_true',
help="authenticate, ignoring previous authentication")
server.add_argument('--no-browser', action='store_true',
help="do not use a web browser for authentication")
server.add_argument('--get-token', action='store_true',
help="get ok access token")
server.add_argument('--insecure', action='store_true',
help="use http instead of https")
server.add_argument('--no-update', action='store_true',
help="do not check for ok updates")
server.add_argument('--update', action='store_true',
help="update ok and exit")
return parser.parse_args(command_input) | 0.006241 |
def set_from_template_string(self, string):
"""
Reads the given template (SMTP formatted) and sets all fields
accordingly.
:type string: string
:param string: The template.
"""
in_header = True
body = ''
for line in string.split('\n'):
if not in_header:
body += line + '\n'
continue
if not _is_header_line(line):
body += line + '\n'
in_header = False
continue
key, value = _get_var_from_header_line(line)
if key == 'from':
self.set_sender(value)
elif key == 'to':
self.add_to(value)
elif key == 'cc':
self.add_cc(value)
elif key == 'bcc':
self.add_bcc(value)
elif key == 'subject':
self.set_subject(value)
else:
raise Exception('Invalid header field "%s"' % key)
self.set_body(body.strip()) | 0.001894 |
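An example of the SMTP-style template this method expects: recognised headers (from, to, cc, bcc, subject) first, and everything from the first non-header line onward becomes the body. The mail object the template would be applied to is an assumption, so the call is only shown in a comment.

template = (
    "From: alice@example.org\n"
    "To: bob@example.org\n"
    "Subject: Weekly report\n"
    "\n"
    "Hi Bob,\n"
    "the report is attached.\n"
)
# mail.set_from_template_string(template)   # hypothetical Mail instance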
def _config_net_topology(self, conf):
"""
Initialize and populate all the network related elements, like
reserving ips and populating network specs of the given confiiguration
spec
Args:
conf (dict): Configuration spec to initalize
Returns:
None
"""
conf = self._init_net_specs(conf)
mgmts = self._select_mgmt_networks(conf)
self._validate_netconfig(conf)
allocated_subnets, conf = self._allocate_subnets(conf)
try:
self._add_mgmt_to_domains(conf, mgmts)
self._register_preallocated_ips(conf)
self._allocate_ips_to_nics(conf)
self._set_mtu_to_nics(conf)
self._add_dns_records(conf, mgmts)
except:
self._subnet_store.release(allocated_subnets)
raise
return conf | 0.003401 |
def beat_track(input_file, output_csv):
'''Beat tracking function
:parameters:
- input_file : str
Path to input audio file (wav, mp3, m4a, flac, etc.)
      - output_csv : str
Path to save beat event timestamps as a CSV file
'''
print('Loading ', input_file)
y, sr = librosa.load(input_file, sr=22050)
# Use a default hop size of 512 samples @ 22KHz ~= 23ms
hop_length = 512
# This is the window length used by default in stft
print('Tracking beats')
tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=hop_length)
print('Estimated tempo: {:0.2f} beats per minute'.format(tempo))
# save output
# 'beats' will contain the frame numbers of beat events.
beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=hop_length)
print('Saving output to ', output_csv)
librosa.output.times_csv(output_csv, beat_times)
print('done!') | 0.001065 |
def kullback_leibler(h1, h2): # 83 us @array, 109 us @list \w 100 bins
r"""
Kullback-Leibler divergence.
    Compute how inefficient it would be to code one histogram into another.
Actually computes :math:`\frac{d_{KL}(h1, h2) + d_{KL}(h2, h1)}{2}` to achieve symmetry.
The Kullback-Leibler divergence between two histograms :math:`H` and :math:`H'` of size
:math:`m` is defined as:
.. math::
d_{KL}(H, H') = \sum_{m=1}^M H_m\log\frac{H_m}{H'_m}
*Attributes:*
    - quasimetric (but made symmetric)
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, \infty)`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- not applicable
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram, where h1[i] > 0 for any i such that h2[i] > 0, normalized.
h2 : sequence
The second histogram, where h2[i] > 0 for any i such that h1[i] > 0, normalized, same bins as ``h1``.
Returns
-------
kullback_leibler : float
Kullback-Leibler divergence.
"""
old_err_state = scipy.seterr(divide='raise')
try:
h1, h2 = __prepare_histogram(h1, h2)
result = (__kullback_leibler(h1, h2) + __kullback_leibler(h2, h1)) / 2.
scipy.seterr(**old_err_state)
return result
except FloatingPointError:
scipy.seterr(**old_err_state)
raise ValueError('h1 can only contain zero values where h2 also contains zero values and vice-versa') | 0.010303 |
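A worked example of the symmetrised divergence defined above, computed with plain numpy instead of the module's private helpers; the histograms are arbitrary normalized examples.

import numpy as np

h1 = np.array([0.1, 0.4, 0.5])
h2 = np.array([0.2, 0.3, 0.5])
kl_12 = np.sum(h1 * np.log(h1 / h2))
kl_21 = np.sum(h2 * np.log(h2 / h1))
print((kl_12 + kl_21) / 2.0)   # small positive value; exactly 0.0 only when h1 == h2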
def _single_resource_json_response(resource, depth=0):
"""Return the JSON representation of *resource*.
:param resource: :class:`sandman.model.Model` to render
:type resource: :class:`sandman.model.Model`
:rtype: :class:`flask.Response`
"""
links = resource.links()
response = jsonify(**resource.as_dict(depth))
response.headers['Link'] = ''
for link in links:
response.headers['Link'] += '<{}>; rel="{}",'.format(
link['uri'], link['rel'])
response.headers['Link'] = response.headers['Link'][:-1]
return response | 0.001727 |
def galactic_latlon(self):
"""Compute galactic coordinates (lat, lon, distance)"""
vector = _GALACTIC.dot(self.position.au)
d, lat, lon = to_polar(vector)
return (Angle(radians=lat, signed=True),
Angle(radians=lon),
Distance(au=d)) | 0.00678 |
def transform(self, jam):
'''Bypass transformations.
Parameters
----------
jam : pyjams.JAMS
A muda-enabled JAMS object
Yields
------
jam_out : pyjams.JAMS iterator
The first result is `jam` (unmodified), by reference
All subsequent results are generated by `transformer`
'''
# Step 1: yield the unmodified jam
yield jam
# Step 2: yield from the transformer
for jam_out in self.transformer.transform(jam):
yield jam_out | 0.003534 |
def tile(self, z, x, y):
"""
Download the specified tile from `tiles_url`
"""
logger.debug(_("Download tile %s") % ((z, x, y),))
# Render each keyword in URL ({s}, {x}, {y}, {z}, {size} ... )
size = self.tilesize
        s = self.tiles_subdomains[(x + y) % len(self.tiles_subdomains)]
try:
url = self.tiles_url.format(**locals())
except KeyError as e:
raise DownloadError(_("Unknown keyword %s in URL") % e)
logger.debug(_("Retrieve tile at %s") % url)
r = DOWNLOAD_RETRIES
sleeptime = 1
while r > 0:
try:
request = requests.get(url, headers=self.headers)
if request.status_code == 200:
return request.content
raise DownloadError(_("Status code : %s, url : %s") % (request.status_code, url))
except requests.exceptions.ConnectionError as e:
logger.debug(_("Download error, retry (%s left). (%s)") % (r, e))
r -= 1
time.sleep(sleeptime)
# progressivly sleep longer to wait for this tile
if (sleeptime <= 10) and (r % 2 == 0):
sleeptime += 1 # increase wait
raise DownloadError(_("Cannot download URL %s") % url) | 0.003751 |
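A generic, self-contained sketch of the retry-with-growing-sleep pattern used in the loop above; the HTTP call is replaced by a caller-supplied fetch callable (here a stub) so the snippet runs offline.

import time

def fetch_with_retries(fetch, retries=5):
    sleeptime = 1
    while retries > 0:
        try:
            return fetch()
        except IOError:
            retries -= 1
            time.sleep(sleeptime)
            # Progressively sleep longer, mirroring the loop above.
            if sleeptime <= 10 and retries % 2 == 0:
                sleeptime += 1
    raise IOError("giving up after repeated failures")

print(fetch_with_retries(lambda: "tile bytes"))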
def makeDigraph(automaton, inputAsString=repr,
outputAsString=repr,
stateAsString=repr):
"""
Produce a L{graphviz.Digraph} object from an automaton.
"""
digraph = graphviz.Digraph(graph_attr={'pack': 'true',
'dpi': '100'},
node_attr={'fontname': 'Menlo'},
edge_attr={'fontname': 'Menlo'})
for state in automaton.states():
if state is automaton.initialState:
stateShape = "bold"
fontName = "Menlo-Bold"
else:
stateShape = ""
fontName = "Menlo"
digraph.node(stateAsString(state),
                     fontname=fontName,
shape="ellipse",
style=stateShape,
color="blue")
for n, eachTransition in enumerate(automaton.allTransitions()):
inState, inputSymbol, outState, outputSymbols = eachTransition
thisTransition = "t{}".format(n)
inputLabel = inputAsString(inputSymbol)
port = "tableport"
table = tableMaker(inputLabel, [outputAsString(outputSymbol)
for outputSymbol in outputSymbols],
port=port)
digraph.node(thisTransition,
label=_gvhtml(table), margin="0.2", shape="none")
digraph.edge(stateAsString(inState),
'{}:{}:w'.format(thisTransition, port),
arrowhead="none")
digraph.edge('{}:{}:e'.format(thisTransition, port),
stateAsString(outState))
return digraph | 0.000596 |
def chdir(new_dir):
"""
stolen from bcbio.
Context manager to temporarily change to a new directory.
http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
"""
cur_dir = os.getcwd()
_mkdir(new_dir)
os.chdir(new_dir)
try:
yield
finally:
os.chdir(cur_dir) | 0.002994 |
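For reference, a self-contained equivalent with the decorator spelled out; the original is presumably wrapped with @contextlib.contextmanager in its module, and its _mkdir helper is replaced with os.makedirs here.

import contextlib
import os
import tempfile

@contextlib.contextmanager
def chdir_sketch(new_dir):
    cur_dir = os.getcwd()
    os.makedirs(new_dir, exist_ok=True)
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(cur_dir)

with chdir_sketch(tempfile.mkdtemp()):
    open('notes.txt', 'w').close()   # created inside the temporary directory
print(os.getcwd())                   # back where we started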
def load_x11_color_map(paths=X11_RGB_PATHS):
''' Load and parse X11's rgb.txt.
Loads:
x11_color_map: { name_lower: ('R', 'G', 'B') }
'''
if type(paths) is str:
paths = (paths,)
x11_color_map = color_tables.x11_color_map
for path in paths:
try:
with open(path) as infile:
for line in infile:
if line.startswith('!') or line.isspace():
continue
tokens = line.rstrip().split(maxsplit=3)
key = tokens[3]
if ' ' in key: # skip names with spaces to match webcolors
continue
x11_color_map[key.lower()] = tuple(tokens[:3])
log.debug('X11 palette found at %r.', path)
break
except FileNotFoundError as err:
log.debug('X11 palette file not found: %r', path)
except IOError as err:
log.debug('X11 palette file not read: %s', err) | 0.000981 |
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'calendar-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials | 0.00185 |
def get_item_list(self, item_list_url):
""" Retrieve an item list from the server as an ItemList object
:type item_list_url: String or ItemList
:param item_list_url: URL of the item list to retrieve, or an
ItemList object
:rtype: ItemList
:returns: The ItemList
:raises: APIError if the request was not successful
"""
resp = self.api_request(str(item_list_url))
return ItemList(resp['items'], self, str(item_list_url), resp['name']) | 0.003839 |
def export_kml_file(self):
"""Generate KML element tree from ``Placemarks``.
Returns:
etree.ElementTree: KML element tree depicting ``Placemarks``
"""
kml = create_elem('kml')
kml.Document = create_elem('Document')
for place in sorted(self.values(), key=lambda x: x.name):
kml.Document.append(place.tokml())
return etree.ElementTree(kml) | 0.004773 |
def emit(self, name, *args, **kwargs):
"""Dispatches an event to any subscribed listeners
Note:
If a listener returns :obj:`False`, the event will stop dispatching to
other listeners. Any other return value is ignored.
Args:
name (str): The name of the :class:`Event` to dispatch
*args (Optional): Positional arguments to be sent to listeners
**kwargs (Optional): Keyword arguments to be sent to listeners
"""
e = self.__property_events.get(name)
if e is None:
e = self.__events[name]
return e(*args, **kwargs) | 0.004688 |
def _get_minidom_tag_value(station, tag_name):
"""get a value from a tag (if it exists)"""
tag = station.getElementsByTagName(tag_name)[0].firstChild
if tag:
return tag.nodeValue
return None | 0.004651 |
def _build(self, inputs):
"""Connects the MergeDims module into the graph.
Args:
inputs: Tensor or a nested list of Tensors to merge. Its rank must be
greater than or equal to `start` + `size`.
Returns:
The merged Tensor or a nested list of merged Tensors.
Raises:
ValueError: If any of the `inputs` tensors has insufficient rank.
"""
if nest.is_sequence(inputs):
merged_tensors = [self._merge(tensor) for tensor in nest.flatten(inputs)]
return nest.pack_sequence_as(inputs, merged_tensors)
# inputs is a single tf.Tensor
return self._merge(inputs) | 0.004808 |
def measureSize(self, diffTo, diffFrom, estimatedSize, chunkSize, isInteractive):
""" Spend some time to get an accurate size. """
diff = self.toObj.diff(diffTo, diffFrom, estimatedSize)
isInteractive = self.toObj.bool(isInteractive)
self.butterStore.showProgress = None if isInteractive else False
self.butterStore.measureSize(diff, int(chunkSize))
return self.toDict.diff(diff) | 0.007026 |
def get_length(self, byte_stream):
''' In Hadoop protobuf RPC, some parts of the stream are delimited with protobuf varint,
while others are delimited with 4 byte integers. This reads 4 bytes from the byte stream
        and returns the length of the delimited part that follows, by unpacking the 4 bytes
        and returning the first element from a tuple. The tuple that is returned from struct.unpack()
only contains one element.
'''
length = struct.unpack("!i", byte_stream.read(4))[0]
log.debug("4 bytes delimited part length: %d" % length)
return length | 0.009756 |
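A concrete illustration of the 4-byte big-endian length prefix described in the docstring, built with struct and io so it runs standalone.

import io
import struct

payload = b'hello rpc'
framed = struct.pack('!i', len(payload)) + payload   # 4-byte length prefix
stream = io.BytesIO(framed)
length = struct.unpack('!i', stream.read(4))[0]
print(length, stream.read(length))                   # 9 b'hello rpc'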
def batch_rename_file(path, f, t):
"""根据replaces中定义的规则,批量重命名"""
files = os.listdir(path)
for file in files:
if f in file:
new_fn = file.replace(f, t)
old = os.path.join(path, file)
new = os.path.join(path, new_fn)
os.rename(old, new) | 0.003322 |
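Example run, assuming the function above is in scope: every file name containing 'draft' in a scratch directory is renamed to contain 'final' instead; the file name is invented for the example.

import os
import tempfile

workdir = tempfile.mkdtemp()
open(os.path.join(workdir, 'report_draft_v1.txt'), 'w').close()
batch_rename_file(workdir, 'draft', 'final')
print(os.listdir(workdir))   # ['report_final_v1.txt']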
def getSaveFileName(self, *args, **kwargs):
"""
analogue to QtWidgets.QFileDialog.getSaveFileNameAndFilter
but returns the filename + chosen file ending even if not typed in gui
"""
if 'directory' not in kwargs:
if self.opts['save']:
if self.opts['save']:
kwargs['directory'] = self.opts['save']
fname = QtWidgets.QFileDialog.getSaveFileName(**kwargs)
if fname:
if type(fname) == tuple:
                # only happens since Qt5:
                # getSaveFileName returns (path, ftype)
if not fname[0]:
return
p = PathStr(fname[0])
if not p.filetype():
ftyp = self._extractFtype(fname[1])
p = p.setFiletype(ftyp)
else:
p = PathStr(fname)
self.opts['save'] = p.dirname()
if self.opts['open'] is None:
self.opts['open'] = self.opts['save']
return p | 0.004771 |
def pixel_coord(self):
"""
Return the coordinates of the source in the cutout reference frame.
@return:
"""
return self.get_pixel_coordinates(self.reading.pix_coord, self.reading.get_ccd_num()) | 0.012876 |
def change_history_fields(self, fields, value=None):
r"""
"""
if not isinstance(fields, list):
raise Exception('fields should be a list')
self._change_history['fields'] = fields
if value:
self._change_history['value'] = value
return self | 0.00641 |
def from_dict(cls, data):
"""Transforms a Python dictionary to an Output object.
Note:
To pass a serialization cycle multiple times, a
Cryptoconditions Fulfillment needs to be present in the
passed-in dictionary, as Condition URIs are not serializable
anymore.
Args:
data (dict): The dict to be transformed.
Returns:
:class:`~bigchaindb.common.transaction.Output`
"""
try:
fulfillment = _fulfillment_from_details(data['condition']['details'])
except KeyError:
# NOTE: Hashlock condition case
fulfillment = data['condition']['uri']
try:
amount = int(data['amount'])
except ValueError:
raise AmountError('Invalid amount: %s' % data['amount'])
return cls(fulfillment, data['public_keys'], amount) | 0.003178 |
def CheckCommandSpaces(filename, linenumber, clean_lines, errors):
"""
No extra spaces between command and parenthesis
"""
line = clean_lines.lines[linenumber]
match = ContainsCommand(line)
if match and len(match.group(2)):
errors(filename, linenumber, 'whitespace/extra',
"Extra spaces between '%s' and its ()"%(match.group(1)))
if match:
spaces_after_open = len(_RE_COMMAND_START_SPACES.match(line).group(1))
initial_spaces = GetInitialSpaces(line)
initial_linenumber = linenumber
end = None
while True:
line = clean_lines.lines[linenumber]
end = _RE_COMMAND_END_SPACES.search(line)
if end:
break
linenumber += 1
if linenumber >= len(clean_lines.lines):
break
if linenumber == len(clean_lines.lines) and not end:
errors(filename, initial_linenumber, 'syntax',
'Unable to find the end of this command')
if end:
spaces_before_end = len(end.group(1))
initial_spaces = GetInitialSpaces(line)
if initial_linenumber != linenumber and spaces_before_end >= initial_spaces:
spaces_before_end -= initial_spaces
if spaces_after_open != spaces_before_end:
errors(filename, initial_linenumber, 'whitespace/mismatch',
'Mismatching spaces inside () after command') | 0.004032 |
def send_response(self, code, message=None):
"""Add the response header to the headers buffer and log the
response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
self.send_response_only(code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string()) | 0.004484 |
def _link_fastqs(self, path=None, force=False, append=False, splitnames="_",
fields=None, ipyclient=None):
"""
Create Sample objects from demultiplexed fastq files in sorted_fastq_path,
or append additional fastq files to existing Samples. This provides
more flexible file input through the API than available in step1 of the
command line interface. If passed ipyclient it will run in parallel.
Note
----
This function is called during step 1 if files are specified in
'sorted_fastq_path'.
Parameters
----------
path : str
Path to the fastq files to be linked to Sample objects. The default
location is to select all files in the 'sorted_fastq_path'.
Alternatively a different path can be entered here.
append : bool
The default action is to overwrite fastq files linked to Samples if
they already have linked files. Use append=True to instead append
additional fastq files to a Sample (file names should be formatted
the same as usual, e.g., [name]_R1_[optional].fastq.gz).
splitnames : str
            A string character used to split file names. In combination with the
            fields argument it can be used to subselect filename fields.
        fields : list
            A list of indices for the fields to be included in names after
            filenames are split on the splitnames character. Useful for appending
            sequence names which must match existing names. If the largest index
            is greater than the number of split strings in the name the index
            is ignored. e.g., [2,3,4] ## excludes 0, 1 and >4
force : bool
Overwrites existing Sample data and statistics.
Returns
-------
str
Prints the number of new Sample objects created and the number of
fastq files linked to Sample objects in the Assembly object.
"""
## cannot both force and append at once
if force and append:
raise IPyradError("Cannot use force and append at the same time.")
if self.samples and not (force or append):
raise IPyradError("Files already linked to `{}`.".format(self.name)\
+" Use force=True to replace all files, or append=True to add"
+" additional files to existing Samples.")
## make sure there is a workdir and workdir/fastqdir
self.dirs.fastqs = os.path.join(self.paramsdict["project_dir"],
self.name+"_fastqs")
if not os.path.exists(self.paramsdict["project_dir"]):
os.mkdir(self.paramsdict["project_dir"])
## get path to data files
if not path:
path = self.paramsdict["sorted_fastq_path"]
## but grab fastq/fq/gz, and then sort
fastqs = glob.glob(path)
## Assert files are not .bz2 format
if any([i for i in fastqs if i.endswith(".bz2")]):
raise IPyradError(NO_SUPPORT_FOR_BZ2.format(path))
fastqs = [i for i in fastqs if i.endswith(".gz") \
or i.endswith(".fastq") \
or i.endswith(".fq")]
fastqs.sort()
LOGGER.debug("Linking these fastq files:\n{}".format(fastqs))
## raise error if no files are found
if not fastqs:
raise IPyradError(NO_FILES_FOUND_PAIRS\
.format(self.paramsdict["sorted_fastq_path"]))
## link pairs into tuples
if 'pair' in self.paramsdict["datatype"]:
## check that names fit the paired naming convention
## trying to support flexible types (_R2_, _2.fastq)
r1_try1 = [i for i in fastqs if "_R1_" in i]
r1_try2 = [i for i in fastqs if i.endswith("_1.fastq.gz")]
r1_try3 = [i for i in fastqs if i.endswith("_R1.fastq.gz")]
r2_try1 = [i for i in fastqs if "_R2_" in i]
r2_try2 = [i for i in fastqs if i.endswith("_2.fastq.gz")]
r2_try3 = [i for i in fastqs if i.endswith("_R2.fastq.gz")]
r1s = [r1_try1, r1_try2, r1_try3]
r2s = [r2_try1, r2_try2, r2_try3]
## check that something was found
if not r1_try1 + r1_try2 + r1_try3:
raise IPyradWarningExit(
"Paired filenames are improperly formatted. See Documentation")
if not r2_try1 + r2_try2 + r2_try3:
raise IPyradWarningExit(
"Paired filenames are improperly formatted. See Documentation")
## find the one with the right number of R1s
for idx, tri in enumerate(r1s):
if len(tri) == len(fastqs)/2:
break
r1_files = r1s[idx]
r2_files = r2s[idx]
if len(r1_files) != len(r2_files):
raise IPyradWarningExit(R1_R2_name_error\
.format(len(r1_files), len(r2_files)))
fastqs = [(i, j) for i, j in zip(r1_files, r2_files)]
## data are not paired, create empty tuple pair
else:
## print warning if _R2_ is in names when not paired
idx = 0
if any(["_R2_" in i for i in fastqs]):
print(NAMES_LOOK_PAIRED_WARNING)
fastqs = [(i, "") for i in fastqs]
## counters for the printed output
linked = 0
appended = 0
## clear samples if force
if force:
self.samples = {}
## track parallel jobs
linkjobs = {}
if ipyclient:
lbview = ipyclient.load_balanced_view()
## iterate over input files
for fastqtuple in list(fastqs):
assert isinstance(fastqtuple, tuple), "fastqs not a tuple."
## local counters
createdinc = 0
linkedinc = 0
appendinc = 0
## remove file extension from name
if idx == 0:
sname = _name_from_file(fastqtuple[0], splitnames, fields)
elif idx == 1:
sname = os.path.basename(fastqtuple[0].rsplit("_1.fastq.gz", 1)[0])
elif idx == 2:
sname = os.path.basename(fastqtuple[0].rsplit("_R1.fastq.gz", 1)[0])
LOGGER.debug("New Sample name {}".format(sname))
if sname not in self.samples:
## create new Sample
LOGGER.debug("Creating new sample - ".format(sname))
self.samples[sname] = Sample(sname)
self.samples[sname].stats.state = 1
self.samples[sname].barcode = None
self.samples[sname].files.fastqs.append(fastqtuple)
createdinc += 1
linkedinc += 1
else:
## if not forcing, shouldn't be here with existing Samples
if append:
#if fastqtuple not in self.samples[sname].files.fastqs:
self.samples[sname].files.fastqs.append(fastqtuple)
appendinc += 1
elif force:
## overwrite/create new Sample
LOGGER.debug("Overwriting sample - ".format(sname))
self.samples[sname] = Sample(sname)
self.samples[sname].stats.state = 1
self.samples[sname].barcode = None
self.samples[sname].files.fastqs.append(fastqtuple)
createdinc += 1
linkedinc += 1
else:
print("""
The files {} are already in Sample. Use append=True to append additional
files to a Sample or force=True to replace all existing Samples.
""".format(sname))
## support serial execution w/o ipyclient
if not ipyclient:
if any([linkedinc, createdinc, appendinc]):
gzipped = bool(fastqtuple[0].endswith(".gz"))
nreads = 0
for alltuples in self.samples[sname].files.fastqs:
nreads += _zbufcountlines(alltuples[0], gzipped)
self.samples[sname].stats.reads_raw = nreads/4
self.samples[sname].stats_dfs.s1["reads_raw"] = nreads/4
self.samples[sname].state = 1
LOGGER.debug("Got reads for sample - {} {}".format(sname,\
self.samples[sname].stats.reads_raw))
#created += createdinc
linked += linkedinc
appended += appendinc
## do counting in parallel
else:
if any([linkedinc, createdinc, appendinc]):
gzipped = bool(fastqtuple[0].endswith(".gz"))
for sidx, tup in enumerate(self.samples[sname].files.fastqs):
key = sname+"_{}".format(sidx)
linkjobs[key] = lbview.apply(_bufcountlines,
*(tup[0], gzipped))
LOGGER.debug("sent count job for {}".format(sname))
#created += createdinc
linked += linkedinc
appended += appendinc
## wait for link jobs to finish if parallel
if ipyclient:
start = time.time()
printstr = ' loading reads | {} | s1 |'
while 1:
fin = [i.ready() for i in linkjobs.values()]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(fin), sum(fin),
printstr.format(elapsed), spacer=self._spacer)
time.sleep(0.1)
if len(fin) == sum(fin):
print("")
break
## collect link job results
sampdict = {i:0 for i in self.samples}
for result in linkjobs:
sname = result.rsplit("_", 1)[0]
nreads = linkjobs[result].result()
sampdict[sname] += nreads
for sname in sampdict:
self.samples[sname].stats.reads_raw = sampdict[sname]/4
self.samples[sname].stats_dfs.s1["reads_raw"] = sampdict[sname]/4
self.samples[sname].state = 1
## print if data were linked
#print(" {} new Samples created in '{}'.".format(created, self.name))
if linked:
## double for paired data
if 'pair' in self.paramsdict["datatype"]:
linked = linked*2
if self._headers:
print("{}{} fastq files loaded to {} Samples.".\
format(self._spacer, linked, len(self.samples)))
## save the location where these files are located
self.dirs.fastqs = os.path.realpath(os.path.dirname(path))
if appended:
if self._headers:
print("{}{} fastq files appended to {} existing Samples.".\
format(self._spacer, appended, len(self.samples)))
## save step-1 stats. We don't want to write this to the fastq dir, b/c
## it is not necessarily inside our project dir. Instead, we'll write
## this file into our project dir in the case of linked_fastqs.
self.stats_dfs.s1 = self._build_stat("s1")
self.stats_files.s1 = os.path.join(self.paramsdict["project_dir"],
self.name+
'_s1_demultiplex_stats.txt')
with open(self.stats_files.s1, 'w') as outfile:
self.stats_dfs.s1.fillna(value=0).astype(np.int).to_string(outfile) | 0.006066 |
def _sorted_resource_labels(labels):
"""Sort label names, putting well-known resource labels first."""
head = [label for label in TOP_RESOURCE_LABELS if label in labels]
tail = sorted(label for label in labels if label not in TOP_RESOURCE_LABELS)
return head + tail | 0.007117 |
def hook(name=None, priority=-1):
"""
Decorator
"""
def _hook(hook_func):
return register_hook(name, hook_func=hook_func, priority=priority)
return _hook | 0.005464 |
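A self-contained sketch of the registration pattern this decorator relies on; register_hook and the _HOOKS registry here are stand-ins for the real implementation, and hook_sketch is a hypothetical name.

_HOOKS = {}

def register_hook(name, hook_func, priority=-1):
    # Store hooks per name together with their priority.
    _HOOKS.setdefault(name, []).append((priority, hook_func))
    return hook_func

def hook_sketch(name=None, priority=-1):
    def _hook(hook_func):
        return register_hook(name, hook_func=hook_func, priority=priority)
    return _hook

@hook_sketch('startup', priority=5)
def announce():
    print('hook fired')

# Run registered hooks, highest priority first.
for _, fn in sorted(_HOOKS['startup'], key=lambda pair: pair[0], reverse=True):
    fn()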
def get_queryset(self):
"""
Check if relation_names is correctly set and
do a prefetch related on the queryset with it.
"""
if self.relation_names is None:
raise ImproperlyConfigured(
"'%s' must define 'relation_names'" %
self.__class__.__name__)
if not isinstance(self.relation_names, (tuple, list)):
raise ImproperlyConfigured(
"%s's relation_names property must be a tuple or list." %
self.__class__.__name__)
return super(PrefetchRelatedMixin, self
).get_queryset().prefetch_related(*self.relation_names) | 0.002972 |
def update(self, path, data=None):
"""Send an update request to the given path of the CRUD API, with the given data dict, which will be converted
into json"""
return self.handleresult(self.r.put(urljoin(self.url + CRUD_PATH,
path),
data=json.dumps(data))) | 0.008 |
def unset_values(self):
"""
Resets the user values of all symbols, as if Kconfig.load_config() or
Symbol.set_value() had never been called.
"""
self._warn_for_no_prompt = False
try:
# set_value() already rejects undefined symbols, and they don't
# need to be invalidated (because their value never changes), so we
# can just iterate over defined symbols
for sym in self.unique_defined_syms:
sym.unset_value()
for choice in self.unique_choices:
choice.unset_value()
finally:
self._warn_for_no_prompt = True | 0.003003 |
def is_valid_folder(parser, arg):
"""Check if arg is a valid file that already exists on the file system."""
arg = os.path.abspath(arg)
if not os.path.isdir(arg):
parser.error("The folder %s does not exist!" % arg)
else:
return arg | 0.003802 |
def _guess_next_poll_interval(self):
"""
Determine when to query the progress status next.
        This function is used if the external progress function did not return a time interval for when it should be
queried next.
"""
time_elapsed = self._progress_data[-1][0] - self._progress_data[0][0]
real_progress = self._get_real_progress()
return min(0.2 * time_elapsed, 0.5 + (1 - real_progress)**0.5) | 0.006593 |
def scalarcoords(self):
"""A dictionary of values that don't label any axes (point-like)."""
return {k: v.values for k, v in self.coords.items() if v.dims==()} | 0.017143 |
def nltides_gw_phase_difference(f, f0, amplitude, n, m1, m2):
"""Calculate the gravitational-wave phase shift bwtween
f and f_coalescence = infinity due to non-linear tides.
To compute the phase shift between e.g. f_low and f_isco,
call this function twice and compute the difference.
Parameters
----------
f: float or numpy.array
Frequency from which to compute phase
f0: float or numpy.array
Frequency that NL effects switch on
amplitude: float or numpy.array
Amplitude of effect
n: float or numpy.array
Growth dependence of effect
m1: float or numpy.array
Mass of component 1
m2: float or numpy.array
Mass of component 2
Returns
-------
delta_phi: float or numpy.array
Phase in radians
"""
f, f0, amplitude, n, m1, m2, input_is_array = ensurearray(
f, f0, amplitude, n, m1, m2)
delta_phi = numpy.zeros(m1.shape)
f_ref, _, phi_of_f_factor = nltides_coefs(amplitude, n, m1, m2)
mask = f <= f0
delta_phi[mask] = - phi_of_f_factor[mask] * (f0[mask]/f_ref)**(n[mask]-3.)
mask = f > f0
delta_phi[mask] = - phi_of_f_factor[mask] * (f[mask]/f_ref)**(n[mask]-3.)
return formatreturn(delta_phi, input_is_array) | 0.000787 |
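A hedged sketch of the "call twice and difference" usage described in the docstring, with illustrative parameter values; it assumes the function (and its module-level helpers) are importable as above.

f_low, f_isco = 30.0, 1570.0          # Hz, illustrative band edges
f0, amplitude, n = 100.0, 1e-7, 2.0   # illustrative non-linear-tide parameters
m1, m2 = 1.4, 1.35                    # component masses in solar masses

dphi = (nltides_gw_phase_difference(f_isco, f0, amplitude, n, m1, m2)
        - nltides_gw_phase_difference(f_low, f0, amplitude, n, m1, m2))
print(dphi)  # accumulated phase shift (radians) between f_low and f_isco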
def index(request, template_name="index.html"):
"""\
The index view, which basically just displays a button and increments
a counter.
"""
if request.GET.get('ic-request'):
counter, created = Counter.objects.get_or_create(pk=1)
counter.value += 1
counter.save()
else:
counter, created = Counter.objects.get_or_create(pk=1)
print(counter.value)
context = dict(
value=counter.value,
)
return render(request, template_name, context=context) | 0.001923 |
def currentSchemaPath(self):
"""
Returns the column path for the current item. This will be a '.'
joined path based on the root schema to the given column.
:return <str>
"""
item = self.currentItem()
path = []
while item:
path.append(nativestring(item.text(0)))
item = item.parent()
return '.'.join(reversed(path)) | 0.008969 |
def configure_urls(apps, index_view=None, prefixes=None):
'''
Configure urls from a list of apps.
'''
prefixes = prefixes or {}
urlpatterns = patterns('')
if index_view:
from django.views.generic.base import RedirectView
urlpatterns += patterns('',
url(r'^$', RedirectView.as_view(pattern_name=index_view, permanent=False)),
)
for app_name in apps:
app_module = importlib.import_module(app_name)
if module_has_submodule(app_module, 'urls'):
module = importlib.import_module("%s.urls" % app_name)
if not hasattr(module, 'urlpatterns'):
# Resolver will break if the urls.py file is completely blank.
continue
app_prefix = prefixes.get(app_name, app_name.replace("_","-"))
urlpatterns += patterns(
'',
url(
r'^%s/' % app_prefix if app_prefix else '',
include("%s.urls" % app_name),
),
)
return urlpatterns | 0.004673 |
def open_resource(self, resource, *mode):
"""
Return an open file object for a particular named resource in this
reference package.
"""
return self.open(self.resource_name(resource), *mode) | 0.008734 |
def pl_resolve(ci, cj):
"""Return all clauses that can be obtained by resolving clauses ci and cj.
>>> for res in pl_resolve(to_cnf(A|B|C), to_cnf(~B|~C|F)):
... ppset(disjuncts(res))
set([A, C, F, ~C])
set([A, B, F, ~B])
"""
clauses = []
for di in disjuncts(ci):
for dj in disjuncts(cj):
if di == ~dj or ~di == dj:
dnew = unique(removeall(di, disjuncts(ci)) +
removeall(dj, disjuncts(cj)))
clauses.append(associate('|', dnew))
return clauses | 0.001776 |
def get_setter(cls, prop_name, # @NoSelf
user_setter=None, setter_takes_name=False,
user_getter=None, getter_takes_name=False):
"""Similar to get_getter, but for setting property
        values. If user_getter is specified, then it may be used to
        get the old value of the property before setting it (this
        is the case in some derived classes' implementation). If
        getter_takes_name is True and user_getter is not None, then
the property name is passed to the given getter to retrieve
the property value."""
if user_setter:
if setter_takes_name:
# wraps the property name
def _setter(self, val):
return user_setter(self, prop_name, val)
else: _setter = user_setter
return _setter
def _setter(self, val): # @DuplicatedSignature
setattr(self, PROP_NAME % {'prop_name' : prop_name}, val)
return
return _setter | 0.005825 |
def draw_hsv(mag, ang, dtype=uint8, fn=None):
"""
mag must be uint8, uint16, uint32 and 2-D
ang is in radians (float)
"""
assert mag.shape == ang.shape
assert mag.ndim == 2
maxval = iinfo(dtype).max
hsv = dstack(((degrees(ang)/2).astype(dtype), # /2 to keep less than 255
ones_like(mag)*maxval, # maxval must be after in 1-D case
cv2.normalize(mag, alpha=0, beta=maxval, norm_type=cv2.NORM_MINMAX)))
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
if fn is not None:
print('writing ' + fn)
cv2.imwrite(fn, rgb)
return rgb | 0.003241 |
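A hedged usage sketch: a synthetic flow field is split into magnitude and angle with cv2.cartToPolar (angle in radians, as the docstring requires) and passed to draw_hsv; it assumes the function and its star-imported numpy names are available from the module above.

import cv2
import numpy as np

# Synthetic, purely illustrative flow field: uniform 3 px/frame to the right.
h, w = 120, 160
fx = np.full((h, w), 3.0, dtype=np.float32)
fy = np.zeros((h, w), dtype=np.float32)
mag, ang = cv2.cartToPolar(fx, fy)            # ang is in radians
rgb = draw_hsv(mag.astype(np.uint8), ang, dtype=np.uint8, fn=None)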
def rename_with_prefix(self, prefix="", new_path=None, in_place=True, remove_desc=True):
"""Rename every sequence based on a prefix."""
# Temporary path #
if new_path is None: prefixed = self.__class__(new_temp_path())
else: prefixed = self.__class__(new_path)
# Generator #
def prefixed_iterator():
for i,read in enumerate(self):
read.id = prefix + read.id
if remove_desc: read.description = ""
yield read
# Do it #
prefixed.write(prefixed_iterator())
prefixed.close()
# Replace it #
if in_place:
os.remove(self.path)
shutil.move(prefixed, self.path)
return prefixed | 0.010499 |
def make_osa_report(repo_dir, old_commit, new_commit,
args):
"""Create initial RST report header for OpenStack-Ansible."""
update_repo(repo_dir, args.osa_repo_url, args.update)
# Are these commits valid?
validate_commits(repo_dir, [old_commit, new_commit])
# Do we have a valid commit range?
validate_commit_range(repo_dir, old_commit, new_commit)
# Get the commits in the range
commits = get_commits(repo_dir, old_commit, new_commit)
# Start off our report with a header and our OpenStack-Ansible commits.
template_vars = {
'args': args,
'repo': 'openstack-ansible',
'commits': commits,
'commit_base_url': get_commit_url(args.osa_repo_url),
'old_sha': old_commit,
'new_sha': new_commit
}
return render_template('offline-header.j2', template_vars) | 0.001153 |
def update_sequence_rule(self, sequence_rule_form):
"""Updates an existing sequence rule.
arg: sequence_rule_form
(osid.assessment.authoring.SequenceRuleForm): the form
containing the elements to be updated
raise: IllegalState - ``sequence_rule_form`` already used in an
update transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``sequence_rule_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``sequence_rule_form`` did not originate
from ``get_sequence_rule_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.update_resource_template
collection = JSONClientValidated('assessment_authoring',
collection='SequenceRule',
runtime=self._runtime)
if not isinstance(sequence_rule_form, ABCSequenceRuleForm):
raise errors.InvalidArgument('argument type is not an SequenceRuleForm')
if not sequence_rule_form.is_for_update():
raise errors.InvalidArgument('the SequenceRuleForm is for update only, not create')
try:
if self._forms[sequence_rule_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('sequence_rule_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('sequence_rule_form did not originate from this session')
if not sequence_rule_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(sequence_rule_form._my_map)
self._forms[sequence_rule_form.get_id().get_identifier()] = UPDATED
# Note: this is out of spec. The OSIDs don't require an object to be returned:
return objects.SequenceRule(
osid_object_map=sequence_rule_form._my_map,
runtime=self._runtime,
proxy=self._proxy) | 0.003967 |
def is_prime(n):
"""
Check if n is a prime number
"""
    if n < 2:
        return False
    if n % 2 == 0 and n > 2:
        return False
    return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2)) | 0.005525
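A quick sanity check of the primality test above (requires the math import from its module).

print([n for n in range(2, 30) if is_prime(n)])
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]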
def _get_env_vars_value(filename):
"""
If the user provided a file containing values of environment variables, this method will read the file and
return its value
:param string filename: Path to file containing environment variable values
:return dict: Value of environment variables, if provided. None otherwise
:raises InvokeContextException: If the file was not found or not a valid JSON
"""
if not filename:
return None
# Try to read the file and parse it as JSON
try:
with open(filename, 'r') as fp:
return json.load(fp)
except Exception as ex:
raise InvokeContextException("Could not read environment variables overrides from file {}: {}".format(
filename,
str(ex))) | 0.007795 |
def __post(self, path, **kargs):
'''
        Make an HTTP POST request to the Dominos UK API with the given
parameters for the current session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
'''
return self.__call_api(self.session.post, path, **kargs) | 0.004728 |
def strip_brackets(text, brackets=None):
"""Strip brackets and what is inside brackets from text.
.. note::
If the text contains only one opening bracket, the rest of the text
        will be ignored. This is a feature, not a bug, as we want to avoid
        having this function raise errors too easily.
"""
res = []
for c, type_ in _tokens(text, brackets=brackets):
if type_ == TextType.text:
res.append(c)
return ''.join(res).strip() | 0.002049 |
def _get_labels_left(self, validate=None):
"""Get all labels of the left dataframe."""
labels = []
for compare_func in self.features:
labels = labels + listify(compare_func.labels_left)
# check requested labels (for better error messages)
if not is_label_dataframe(labels, validate):
error_msg = "label is not found in the dataframe"
raise KeyError(error_msg)
return unique(labels) | 0.004255 |
def get_standard_vars(cls, context, variant, build_type, install,
build_path, install_path=None):
"""Returns a standard set of environment variables that can be set
for the build system to use
"""
from rez.config import config
package = variant.parent
variant_requires = map(str, variant.variant_requires)
if variant.index is None:
variant_subpath = ''
else:
variant_subpath = variant._non_shortlinked_subpath
vars_ = {
'REZ_BUILD_ENV': 1,
'REZ_BUILD_PATH': build_path,
'REZ_BUILD_THREAD_COUNT': package.config.build_thread_count,
'REZ_BUILD_VARIANT_INDEX': variant.index or 0,
'REZ_BUILD_VARIANT_REQUIRES': ' '.join(variant_requires),
'REZ_BUILD_VARIANT_SUBPATH': variant_subpath,
'REZ_BUILD_PROJECT_VERSION': str(package.version),
'REZ_BUILD_PROJECT_NAME': package.name,
'REZ_BUILD_PROJECT_DESCRIPTION': (package.description or '').strip(),
'REZ_BUILD_PROJECT_FILE': package.filepath,
'REZ_BUILD_SOURCE_PATH': os.path.dirname(package.filepath),
'REZ_BUILD_REQUIRES': ' '.join(
str(x) for x in context.requested_packages(True)
),
'REZ_BUILD_REQUIRES_UNVERSIONED': ' '.join(
x.name for x in context.requested_packages(True)
),
'REZ_BUILD_TYPE': build_type.name,
'REZ_BUILD_INSTALL': 1 if install else 0,
}
if install_path:
vars_['REZ_BUILD_INSTALL_PATH'] = install_path
if config.rez_1_environment_variables and \
not config.disable_rez_1_compatibility and \
build_type == BuildType.central:
vars_['REZ_IN_REZ_RELEASE'] = 1
return vars_ | 0.002119 |
def pick_q_v1(self):
"""Update inflow."""
flu = self.sequences.fluxes.fastaccess
inl = self.sequences.inlets.fastaccess
flu.qin = 0.
for idx in range(inl.len_q):
flu.qin += inl.q[idx][0] | 0.004673 |
def ToJSon(self, columns_order=None, order_by=()):
"""Returns a string that can be used in a JS DataTable constructor.
This method writes a JSON string that can be passed directly into a Google
Visualization API DataTable constructor. Use this output if you are
hosting the visualization HTML on your site, and want to code the data
table in Python. Pass this string into the
google.visualization.DataTable constructor, e.g,:
... on my page that hosts my visualization ...
google.setOnLoadCallback(drawTable);
function drawTable() {
var data = new google.visualization.DataTable(_my_JSon_string, 0.6);
myTable.draw(data);
}
Args:
columns_order: Optional. Specifies the order of columns in the
output table. Specify a list of all column IDs in the order
in which you want the table created.
Note that you must list all column IDs in this parameter,
if you use it.
order_by: Optional. Specifies the name of the column(s) to sort by.
Passed as is to _PreparedData().
Returns:
A JSon constructor string to generate a JS DataTable with the data
stored in the DataTable object.
Example result (the result is without the newlines):
{cols: [{id:"a",label:"a",type:"number"},
{id:"b",label:"b",type:"string"},
{id:"c",label:"c",type:"number"}],
rows: [{c:[{v:1},{v:"z"},{v:2}]}, c:{[{v:3,f:"3$"},{v:"w"},null]}],
p: {'foo': 'bar'}}
Raises:
DataTableException: The data does not match the type.
"""
encoded_response_str = DataTableJSONEncoder().encode(self._ToJSonObj(columns_order, order_by))
if not isinstance(encoded_response_str, str):
return encoded_response_str.encode("utf-8")
return encoded_response_str | 0.002111 |
def to_(self, attrvals):
""" Create a list of Attribute instances.
:param attrvals: A dictionary of attributes and values
:return: A list of Attribute instances
"""
attributes = []
for key, value in attrvals.items():
key = key.lower()
attributes.append(factory(saml.Attribute,
name=key,
name_format=self.name_format,
attribute_value=do_ava(value)))
return attributes | 0.003559 |
def getUnionLocations(encoder, x, y, r, step=1):
"""
Return a union of location encodings that correspond to the union of all locations
within the specified circle.
"""
output = np.zeros(encoder.getWidth(), dtype=defaultDtype)
locations = set()
for dx in range(-r, r+1, step):
for dy in range(-r, r+1, step):
if dx*dx + dy*dy <= r*r:
e = encodeLocation(encoder, x+dx, y+dy, output)
locations = locations.union(set(e))
return locations | 0.016771 |
def plot(self, atrix, atriy, fname=None, numtype='ndump',
legend=None, labelx=None, labely=None, indexx=None,
indexy=None, title=None, shape='.', logx=False,
logy=False, path='/', base=10, sparse=1, show=True, pdf=False,limits=None,
markevery=None, linewidth=1):
"""
Simple function that plots atriy as a function of atrix
This method will automatically find and plot the requested data.
Parameters
----------
atrix : string
The name of the attribute you want on the x axis.
atriy : string
The name of the attribute you want on the Y axis.
fname : optional
Be the filename, Ndump or time, or cycle, If fname is a
list, this method will then save a png for each cycle in the
list. Warning, this must be a list of cycles and not a
list of filenames. The default is None.
numtype : string, optional
designates how this function acts and how it interprets
        fname. If numtype is 'file', this function will get the
        desired attribute from that file. If numtype is 'ndump' the
        function will look at the cycle with that ndump. If numtype
        is 't' or 'time' the function will find the _cycle with the
closest time stamp. The default is 'ndump'.
        legend : list or integer, optional
A list of legends for each of your cycles, or one legend for
all of the cycles. The default is None.
labelx : string, optional
The label on the X axis. The default is None.
labely : string, optional
The label on the Y axis. The default is None.
indexx : optional
            Deprecated: If the get method returns a list of lists,
            indexx would be the list at the index indexx in the list.
            The default is None.
        indexy : optional
            Deprecated: If the get method returns a list of lists,
            indexy would be the list at the index indexy in the list.
The default is None.
title : string, optional
The Title of the Graph. The default is None.
shape : string, optional
What shape and colour the user would like their plot in.
Please see
http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.plot
for all possible choices. The default is '.'.
logx : boolean, optional
            A boolean of whether the user wants the x axis plotted
            logarithmically. The default is False.
        logy : boolean, optional
            A boolean of whether the user wants the Y axis plotted
            logarithmically. The default is False.
path : string, optional
            Used for plotMulti; gives the path where to save the figures.
base : integer, optional
The base of the logarithm. The Default is 10.
sparse : integer, optional
Argument that skips every so many data points. For example
if this argument was 5, This method would plot the 0th, 5th,
10th ... elements. The default is 1.
show : boolean, optional
A boolean of if the plot should be displayed useful with the
multiPlot method. The default is True.
pdf : boolean, optional
PDF for PlotMulti? Default: False
limits : list, optional
The length four list of the x and y limits. The order of the
            list is xmin, xmax, ymin, ymax. The default is None.
        markevery : integer or tuple, optional
            Set the markevery property to subsample the plot when
            using markers. markevery can be None, in which case every
            point will be plotted. It can be an integer N, in which case
            every N-th marker will be plotted starting with marker 0.
            It can be a tuple, markevery=(start, N), which will start
            at point start and plot every N-th marker. The default is
            None.
linewidth : integer, optional
Set linewidth. The default is 1.
Notes
-----
WARNING: Unstable if get returns a list with only one element (x=[0]).
parameters: indexx and indexy have been deprecated.
"""
t1=time.time()
#Setting the axis labels
if labelx== None :
labelx=atrix
if labely== None :
labely=atriy
if title!=None:
title=title
else:
title=labely+' vs '+labelx
if str(fname.__class__)=="<type 'list'>":
self.plotMulti(atrix,atriy,fname,title,path,legend,labelx,labely,logx, logy, 10,1,pdf,limits)
return
tmpX=[]
tmpY=[]
singleX=False
singleY=False
#Getting data
plotType=self._classTest()
if plotType=='YProfile':
if fname==None:
fname=self.cycles[-1]
listY=self.get(atriy,fname, numtype,resolution='a')
listX=self.get(atrix,fname, numtype,resolution='a')
elif plotType=='se':
if fname==None:
listY=self.get( atriy,sparse=sparse)
listX=self.get(atrix,sparse=sparse)
else:
listY=self.get(fname, atriy,sparse=sparse)
listX=self.get(fname, atrix,sparse=sparse)
t2= time.time()
print(t2 -t1)
elif plotType=='PPN' :
if fname==None and atrix not in self.cattrs and atriy not in self.cattrs:
fname=len(self.files)-1
if numtype=='ndump':
numtype='cycNum'
listY=self.get(atriy,fname,numtype)
listX=self.get(atrix,fname,numtype)
elif plotType=='xtime' or plotType=='mesa_profile' or plotType=='AsciiTable' or plotType=='mesa.star_log' or plotType=='starobs':
listY=self.get(atriy)
listX=self.get(atrix)
else:
listY=self.get(atriy)
listX=self.get(atrix)
tmpX=[]
tmpY=[]
if isinstance(listX[0], basestring) or isinstance(listY[0], basestring):
for i in range(len(listX)):
if '*****' == listX[i] or '*****' == listY[i]:
print('There seems to be a string of * in the lists')
print('Cutting out elements in both the lists that have an index equal to or greater than the index of the location of the string of *')
break
tmpX.append(float(listX[i]))
tmpY.append(float(listY[i]))
listX=tmpX
listY=tmpY
#Determining if listX is a list or a list of lists
try:
j=listX[0][0]
except:
singleX = True
if len(listX) == 1: # If it is a list of lists with one element.
tmpX=listX[0]
elif singleX == True:# If it is a plain list of values.
tmpX=listX
elif indexx==None and len(listX)>1: # If it is a list of lists of values.
# take the largest
tmpX=listX[0]
for i in range(len(listX)):
if len(tmpX)<len(listX[i]):
tmpX=listX[i]
elif indexx<len(listX): # If an index is specified, use that index
tmpX=listX[indexx]
else:
print('Sorry that indexx does not exist, returning None')
return None
#Determining if listY is a list or a list of lists
try:
j=listY[0][0]
except:
singleY = True
if len(listY) == 1: # If it is a list of lists with one element.
#print 'hello'
tmpY=listY[0]
elif singleY == True: # If it is a plain list of values.
#print 'world'
tmpY=listY
elif indexy==None and len(listY)>1:# If it is a list of lists of values.
# take the largest
#print 'fourth'
tmpY=listY[0]
for i in range(len(listY)):
if len(tmpY)<len(listY[i]):
tmpY=listY[i]
elif indexy<len(listY): # If an index is specified, use that index
#print 'sixth'
tmpY=listY[indexy]
else:
print('Sorry that indexy does not exist, returning None')
return None
'''
elif indexy==None and len(listY)==1:
#print 'fifth'
tmpY=listY
'''
#Here, if we end up with different sized lists to plot, it
#searches for a list that is of an equal length
if len(tmpY)!=len(tmpX):
found=False
print("It seems like the lists are not of equal length")
print("Now attempting to find a compatible list for ListX")
for i in range(len(listY)):
if not singleY and len(tmpX)==len(listY[i]):
tmpY=listY[i]
found=True
if not found:
print("Now attempting to find a compatible list for ListY")
for i in range(len(listX)):
if not singleX and len(tmpY)==len(listX[i]):
tmpX=listX[i]
found=True
if found:
print("Suitable list found")
else:
print("There is no suitalble list, returning None")
return None
        if len(tmpY)!=len(tmpX) and (singleX and singleY):
print('It seems that the selected lists are of different\nsize, now returning none')
return None
# Sparse stuff
if plotType!='se':
tmpX,tmpY=self._sparse(tmpX,tmpY, sparse)
# Logarithm stuff
if logy or logx:
tmpX,tmpY=self._logarithm(tmpX,tmpY,logx,logy,base)
# Here it ensures that if we are plotting ncycle no values of '*' will be plotted
tmX=[]
tmY=[]
for i in range(len(tmpX)):
tmX.append(str(tmpX[i]))
tmY.append(str(tmpY[i]))
tmpX=[]
tmpY=[]
for i in range(len(tmX)):
if '*' in tmX[i] or '*' in tmY[i]:
print('There seems to be a string of * in the lists')
print('Cutting out elements in both the lists that have an index equal to or greater than the index of the location of the string of *')
break
tmpX.append(float(tmX[i]))
tmpY.append(float(tmY[i]))
listX=tmpX
listY=tmpY
#Setting the axis labels
if logx:
labelx='log '+labelx
if logy:
labely='log '+labely
if legend!=None:
legend=legend
else:
legend=labely+' vs '+labelx
pl.plot(listX,listY,shape,label=legend,markevery=markevery,linewidth=linewidth)
pl.legend()
pl.title(title)
pl.xlabel(labelx)
pl.ylabel(labely)
if show:
pl.show()
if limits != None and len(limits)==4:
pl.xlim(limits[0],limits[1])
pl.ylim(limits[2],limits[3]) | 0.01681 |
def new_stories(self, raw=False, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
raw (bool): Flag to indicate whether to transform all
objects into raw json.
        Returns:
            `list` of new stories, as story objects or as raw JSON
                dicts when raw is True.
"""
new_stories = self._get_stories('newstories', limit)
if raw:
new_stories = [story.raw for story in new_stories]
return new_stories | 0.003623 |
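A hedged call sketch; the enclosing class is assumed to be a Hacker News API client, and the instantiation below is hypothetical.

hn = HackerNews()                                # hypothetical client instantiation
latest = hn.new_stories(limit=5)                 # five most recent story objects
latest_json = hn.new_stories(raw=True, limit=5)  # same stories as raw JSON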
def encode(self, s):
"""Transform a string with a filename into a list of float32.
Args:
s: path to the file with a waveform.
Returns:
samples: list of int16s
"""
# Make sure that the data is a single channel, 16bit, 16kHz wave.
# TODO(chorowski): the directory may not be writable, this should fallback
# to a temp path, and provide instructions for installing sox.
if s.endswith(".mp3"):
# TODO(dliebling) On Linux, check if libsox-fmt-mp3 is installed.
out_filepath = s[:-4] + ".wav"
call([
"sox", "--guard", s, "-r", "16k", "-b", "16", "-c", "1", out_filepath
])
s = out_filepath
elif not s.endswith(".wav"):
out_filepath = s + ".wav"
if not os.path.exists(out_filepath):
call(["sox", "-r", "16k", "-b", "16", "-c", "1", s, out_filepath])
s = out_filepath
rate, data = wavfile.read(s)
assert rate == self._sample_rate
assert len(data.shape) == 1
if data.dtype not in [np.float32, np.float64]:
data = data.astype(np.float32) / np.iinfo(data.dtype).max
return data.tolist() | 0.008057 |
def create_parser(description):
"""
Create and return command-line argument parser.
"""
parser = argparse.ArgumentParser(description=description,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
display_types = get_display_types()
display_choices = [display for k, v in display_types.items() for display in v]
interface_types = get_interface_types()
framebuffer_choices = get_choices('luma.core.framebuffer')
rotation_choices = [0, 1, 2, 3]
block_orientation_choices = [0, 90, -90, 180]
color_choices = ['1', 'RGB', 'RGBA']
general_group = parser.add_argument_group('General')
general_group.add_argument('--config', '-f', type=str, help='Load configuration settings from a file')
general_group.add_argument('--display', '-d', type=str, default=display_choices[0], help='Display type, supports real devices or emulators. Allowed values are: {0}'.format(', '.join(display_choices)), choices=display_choices, metavar='DISPLAY')
general_group.add_argument('--width', type=int, default=128, help='Width of the device in pixels')
general_group.add_argument('--height', type=int, default=64, help='Height of the device in pixels')
general_group.add_argument('--rotate', '-r', type=int, default=0, help='Rotation factor. Allowed values are: {0}'.format(', '.join([str(x) for x in rotation_choices])), choices=rotation_choices, metavar='ROTATION')
general_group.add_argument('--interface', '-i', type=str, default=interface_types[0], help='Serial interface type. Allowed values are: {0}'.format(', '.join(interface_types)), choices=interface_types, metavar='INTERFACE')
i2c_group = parser.add_argument_group('I2C')
i2c_group.add_argument('--i2c-port', type=int, default=1, help='I2C bus number')
i2c_group.add_argument('--i2c-address', type=str, default='0x3C', help='I2C display address')
spi_group = parser.add_argument_group('SPI')
spi_group.add_argument('--spi-port', type=int, default=0, help='SPI port number')
spi_group.add_argument('--spi-device', type=int, default=0, help='SPI device')
spi_group.add_argument('--spi-bus-speed', type=int, default=8000000, help='SPI max bus speed (Hz)')
spi_group.add_argument('--spi-cs-high', type=bool, default=False, help='SPI chip select is high')
spi_group.add_argument('--spi-transfer-size', type=int, default=4096, help='SPI bus max transfer unit (bytes)')
gpio_group = parser.add_argument_group('GPIO')
gpio_group.add_argument('--gpio', type=str, default=None, help='Alternative RPi.GPIO compatible implementation (SPI devices only)')
gpio_group.add_argument('--gpio-mode', type=str, default=None, help='Alternative pin mapping mode (SPI devices only)')
gpio_group.add_argument('--gpio-data-command', type=int, default=24, help='GPIO pin for D/C RESET (SPI devices only)')
gpio_group.add_argument('--gpio-reset', type=int, default=25, help='GPIO pin for RESET (SPI devices only)')
gpio_group.add_argument('--gpio-backlight', type=int, default=18, help='GPIO pin for backlight (PCD8544, ST7735 devices only)')
misc_group = parser.add_argument_group('Misc')
misc_group.add_argument('--block-orientation', type=int, default=0, help='Fix 90° phase error (MAX7219 LED matrix only). Allowed values are: {0}'.format(', '.join([str(x) for x in block_orientation_choices])), choices=block_orientation_choices, metavar='ORIENTATION')
misc_group.add_argument('--mode', type=str, default='RGB', help='Colour mode (SSD1322, SSD1325 and emulator only). Allowed values are: {0}'.format(', '.join(color_choices)), choices=color_choices, metavar='MODE')
misc_group.add_argument('--framebuffer', type=str, default=framebuffer_choices[0], help='Framebuffer implementation (SSD1331, SSD1322, ST7735 displays only). Allowed values are: {0}'.format(', '.join(framebuffer_choices)), choices=framebuffer_choices, metavar='FRAMEBUFFER')
misc_group.add_argument('--bgr', dest='bgr', action='store_true', help='Set if LCD pixels laid out in BGR (ST7735 displays only).')
misc_group.set_defaults(bgr=False)
misc_group.add_argument('--h-offset', type=int, default=0, help='Horizontal offset (in pixels) of screen to display memory (ST7735 displays only)')
misc_group.add_argument('--v-offset', type=int, default=0, help='Vertical offset (in pixels) of screen to display memory (ST7735 displays only)')
misc_group.add_argument('--backlight-active', type=str, default='low', help='Set to \"low\" if LCD backlight is active low, else \"high\" otherwise (PCD8544, ST7735 displays only). Allowed values are: low, high', choices=["low", "high"], metavar='VALUE')
if len(display_types['emulator']) > 0:
transformer_choices = get_transformer_choices()
emulator_group = parser.add_argument_group('Emulator')
emulator_group.add_argument('--transform', type=str, default='scale2x', help='Scaling transform to apply (emulator only). Allowed values are: {0}'.format(', '.join(transformer_choices)), choices=transformer_choices, metavar='TRANSFORM')
emulator_group.add_argument('--scale', type=int, default=2, help='Scaling factor to apply (emulator only)')
emulator_group.add_argument('--duration', type=float, default=0.01, help='Animation frame duration (gifanim emulator only)')
emulator_group.add_argument('--loop', type=int, default=0, help='Repeat loop, zero=forever (gifanim emulator only)')
emulator_group.add_argument('--max-frames', type=int, help='Maximum frames to record (gifanim emulator only)')
try: # pragma: no cover
import argcomplete
argcomplete.autocomplete(parser)
except ImportError:
pass
return parser | 0.005776 |
def generic_parse(self, node, pad=0):
"""A Generic parser for arbitrary tags in a node.
Parameters:
- node: A node in the DOM.
- pad: `int` (default: 0)
If 0 the node data is not padded with newlines. If 1 it
appends a newline after parsing the childNodes. If 2 it
pads before and after the nodes are processed. Defaults to
0.
"""
npiece = 0
if pad:
npiece = len(self.pieces)
if pad == 2:
self.add_text('\n')
for n in node.childNodes:
self.parse(n)
if pad:
if len(self.pieces) > npiece:
self.add_text('\n') | 0.002809 |
def get_plaintext_citations(bibtex):
"""
Parse a BibTeX file to get a clean list of plaintext citations.
:param bibtex: Either the path to the BibTeX file or the content of a \
BibTeX file.
:returns: A list of cleaned plaintext citations.
"""
parser = BibTexParser()
parser.customization = convert_to_unicode
# Load the BibTeX
if os.path.isfile(bibtex):
with open(bibtex) as fh:
bib_database = bibtexparser.load(fh, parser=parser)
else:
bib_database = bibtexparser.loads(bibtex, parser=parser)
# Convert bibentries to plaintext
bibentries = [bibentry_as_plaintext(bibentry)
for bibentry in bib_database.entries]
# Return them
return bibentries | 0.001318 |
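A hedged usage sketch running against an in-memory BibTeX string; the exact formatting of each returned citation depends on bibentry_as_plaintext, which is not shown here.

bibtex_content = """
@article{einstein1905,
  author  = {Einstein, Albert},
  title   = {On the Electrodynamics of Moving Bodies},
  journal = {Annalen der Physik},
  year    = {1905},
}
"""
for citation in get_plaintext_citations(bibtex_content):
    print(citation)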
def _compute_stationary(self):
"""
Store the stationary distributions in self._stationary_distributions.
"""
if self.is_irreducible:
if not self.is_sparse: # Dense
stationary_dists = gth_solve(self.P).reshape(1, self.n)
else: # Sparse
stationary_dists = \
gth_solve(self.P.toarray(),
overwrite=True).reshape(1, self.n)
else:
rec_classes = self.recurrent_classes_indices
stationary_dists = np.zeros((len(rec_classes), self.n))
for i, rec_class in enumerate(rec_classes):
P_rec_class = self.P[np.ix_(rec_class, rec_class)]
if self.is_sparse:
P_rec_class = P_rec_class.toarray()
stationary_dists[i, rec_class] = \
gth_solve(P_rec_class, overwrite=True)
self._stationary_dists = stationary_dists | 0.002055 |
def delete_experiment(self):
'''Deletes the experiment.
See also
--------
:func:`tmserver.api.experiment.delete_experiment`
:class:`tmlib.models.experiment.ExperimentReference`
:class:`tmlib.models.experiment.Experiment`
'''
logger.info('delete experiment "%s"', self.experiment_name)
url = self._build_api_url(
'/experiments/{experiment_id}'.format(
experiment_id=self._experiment_id
)
)
res = self._session.delete(url)
res.raise_for_status()
del self.__experiment_id | 0.003263 |
def from_grib_date_time(message, date_key='dataDate', time_key='dataTime', epoch=DEFAULT_EPOCH):
# type: (T.Mapping, str, str, datetime.datetime) -> int
"""
Return the number of seconds since the ``epoch`` from the values of the ``message`` keys,
using datetime.total_seconds().
:param message: the target GRIB message
:param date_key: the date key, defaults to "dataDate"
:param time_key: the time key, defaults to "dataTime"
:param epoch: the reference datetime
"""
date = message[date_key]
time = message[time_key]
hour = time // 100
minute = time % 100
year = date // 10000
month = date // 100 % 100
day = date % 100
data_datetime = datetime.datetime(year, month, day, hour, minute)
# Python 2 compatible timestamp implementation without timezone hurdle
# see: https://docs.python.org/3/library/datetime.html#datetime.datetime.timestamp
return int((data_datetime - epoch).total_seconds()) | 0.004111 |
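A worked example of the decoding above, with an explicit epoch so the result is easy to check by hand.

import datetime

message = {'dataDate': 20190102, 'dataTime': 1230}   # 2019-01-02 12:30
epoch = datetime.datetime(2019, 1, 2)
# year=2019, month=1, day=2, hour=12, minute=30 -> 12*3600 + 30*60 = 45000 s
print(from_grib_date_time(message, epoch=epoch))      # 45000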
def _find_schema(data_path, schema_name):
""" Checks if `schema_name` is a valid file, if not
searches in `data_path` for it. """
path = glob.glob(schema_name)
for p in path:
if os.path.isfile(p):
return p
return _find_data_path_schema(data_path, schema_name) | 0.003322 |
def default_reverse_key_func(full_key):
"""
Reverse of Django's default_key_func, i.e. undoing:
def default_key_func(key, key_prefix, version):
return '%s:%s:%s' % (key_prefix, version, key)
"""
match = reverse_key_re.match(full_key)
return match.group(3), match.group(1), int(match.group(2)) | 0.003003 |
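A self-contained sketch of the reversal; reverse_key_re is not defined in the snippet above, so the pattern below is an assumption matching Django's "prefix:version:key" layout.

import re

# Assumed pattern: non-greedy prefix, numeric version, the rest is the key.
reverse_key_re = re.compile(r'^(.*?):(\d+):(.*)$')

def default_reverse_key_func(full_key):
    match = reverse_key_re.match(full_key)
    return match.group(3), match.group(1), int(match.group(2))

print(default_reverse_key_func('myprefix:1:views.decorators.cache.cache_page'))
# ('views.decorators.cache.cache_page', 'myprefix', 1)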
def computeFunctional(x, cooP):
'''
    Compute value of functional J(X) = 0.5 * ||P(X) - P(A)||^2_F,
where P is projector into index subspace of known elements,
X is our approximation,
A is original tensor.
Parameters:
:tt.vector: x
current approximation [X]
:dict: cooP
dictionary with two records
- 'indices': numpy.array of P x d shape,
contains index subspace of P known elements;
each string is an index of one element.
- 'values': numpy array of size P,
contains P known values.
Returns:
:float: result
value of functional
'''
indices = cooP['indices']
values = cooP['values']
[P, d] = indices.shape
assert P == len(values)
result = 0
for p in xrange(P):
index = tuple(indices[p, :])
result += (x[index] - values[p])**2
result *= 0.5
return result | 0.00501 |
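The same functional expressed against a plain NumPy array instead of a tt.vector, as a hedged reference implementation of J(X) = 0.5 * ||P(X) - P(A)||^2_F over the known entries.

import numpy as np

def compute_functional_dense(x, cooP):
    """Same J(X) as above, but x is a dense numpy.ndarray."""
    indices = cooP['indices']
    values = cooP['values']
    diffs = x[tuple(indices.T)] - values   # gather the P known entries of x
    return 0.5 * np.sum(diffs ** 2)

# Tiny check on a 2x2 "tensor" with two known entries.
x = np.array([[1.0, 2.0], [3.0, 4.0]])
cooP = {'indices': np.array([[0, 1], [1, 0]]), 'values': np.array([2.0, 5.0])}
print(compute_functional_dense(x, cooP))   # 0.5 * ((2-2)**2 + (3-5)**2) = 2.0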
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_app(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_app = ET.SubElement(fwdl_entries, "blade-app")
blade_app.text = kwargs.pop('blade_app')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004121 |
def get(self):
"""
Retrieve the GUI elements for program use.
:return: a dictionary containing all \
of the data from the key/value entries
"""
data = dict()
for label, entry in zip(self.keys, self.values):
data[label.cget('text')] = entry.get()
return data | 0.00597 |
def _contains_span(span0, span1):
"""Return true if span0 contains span1, False otherwise."""
if (span0 == span1 or span0[0] > span1[0] or span0[1] < span1[1]):
return False
return True | 0.004878 |
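Quick checks of the containment rule above: containment fails only when the spans are identical or span1 extends outside span0.

print(_contains_span((0, 10), (2, 5)))    # True: strictly inside
print(_contains_span((0, 10), (0, 10)))   # False: identical spans
print(_contains_span((0, 10), (5, 12)))   # False: span1 ends past span0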