text | score
---|---|
def set_file_notice(self, doc, text):
"""Raises OrderError if no package or file defined.
Raises CardinalityError if more than one.
"""
if self.has_package(doc) and self.has_file(doc):
if not self.file_notice_set:
self.file_notice_set = True
self.file(doc).notice = tagvaluebuilders.str_from_text(text)
return True
else:
raise CardinalityError('File::Notice')
else:
raise OrderError('File::Notice') | 0.003717 |
def sitetree_children(parser, token):
"""Parses sitetree_children tag parameters.
Six arguments:
{% sitetree_children of someitem for menu template "sitetree/mychildren.html" %}
Used to render child items of the specific site tree item 'someitem'
using the template "sitetree/mychildren.html" for menu navigation.
The template argument should contain the path to the current template itself.
Allowed navigation types: 1) menu; 2) sitetree.
"""
tokens = token.split_contents()
use_template = detect_clause(parser, 'template', tokens)
tokens_num = len(tokens)
clauses_in_places = (
tokens_num == 5 and tokens[1] == 'of' and tokens[3] == 'for' and tokens[4] in ('menu', 'sitetree')
)
if clauses_in_places and use_template is not None:
tree_item = tokens[2]
navigation_type = tokens[4]
return sitetree_childrenNode(tree_item, navigation_type, use_template)
else:
raise template.TemplateSyntaxError(
'%r tag requires six arguments. '
'E.g. {%% sitetree_children of someitem for menu template "sitetree/mychildren.html" %%}.' % tokens[0]) | 0.004223 |
def _update_vdr_vxrheadtail(self, f, vdr_offset, VXRoffset):
'''
This sets a VXR to be the first and last VXR in the VDR
'''
# VDR's VXRhead
self._update_offset_value(f, vdr_offset+28, 8, VXRoffset)
# VDR's VXRtail
self._update_offset_value(f, vdr_offset+36, 8, VXRoffset) | 0.006098 |
def handle_user_exception(self, e):
"""This method is called whenever an exception occurs that should be
handled. A special case are
:class:`~werkzeug.exceptions.HTTPException`\s which are forwarded by
this function to the :meth:`handle_http_exception` method. This
function will either return a response value or reraise the
exception with the same traceback.
.. versionadded:: 0.7
"""
exc_type, exc_value, tb = sys.exc_info()
assert exc_value is e
# ensure not to trash sys.exc_info() at that point in case someone
# wants the traceback preserved in handle_http_exception. Of course
# we cannot prevent users from trashing it themselves in a custom
# trap_http_exception method so that's their fault then.
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
blueprint_handlers = ()
handlers = self.error_handler_spec.get(request.blueprint)
if handlers is not None:
blueprint_handlers = handlers.get(None, ())
app_handlers = self.error_handler_spec[None].get(None, ())
for typecheck, handler in chain(blueprint_handlers, app_handlers):
if isinstance(e, typecheck):
return handler(e)
reraise(exc_type, exc_value, tb) | 0.002149 |
def _mirbase_stats(data, out_dir):
"""Create stats from miraligner"""
utils.safe_makedir(out_dir)
out_file = os.path.join(out_dir, "%s_bcbio_mirbase.txt" % dd.get_sample_name(data))
out_file_novel = os.path.join(out_dir, "%s_bcbio_mirdeep2.txt" % dd.get_sample_name(data))
mirbase_fn = data.get("seqbuster", None)
if mirbase_fn:
_get_stats_from_miraligner(mirbase_fn, out_file, "seqbuster")
mirdeep_fn = data.get("seqbuster_novel", None)
if mirdeep_fn:
_get_stats_from_miraligner(mirdeep_fn, out_file_novel, "mirdeep2")
return {"base": out_file, "secondary": [out_file_novel]} | 0.004769 |
def register(self, plugin, columnType=None, columnName=None):
"""
Registers a plugin to handle particular column types and column names
based on user selection.
:param plugin | <XOrbQueryPlugin>
columnType | <orb.ColumnType> || None
columnName | <str> || None
"""
self._plugins[(columnType, columnName)] = plugin | 0.007026 |
def change_location(src, tgt, move=False, verbose=True):
'''
Copies/moves/deletes locations
:param src:
Source location where to copy from
:param tgt:
Target location where to copy to
* To backup `src`, set `tgt` explicitly to ``True``. \
`tgt` will be set to `src` + '_backup_' + \
:func:`util.system.get_timestamp` then
:param move:
Deletes original location after copy (a.k.a. move)
* To delete `src` , set `tgt` explicitly to ``False`` \
and `move` to ``True`` (be careful!!1!)
:param verbose:
Show warnings
'''
from photon.util.system import shell_notify
if _path.exists(src):
if tgt:
if _path.isfile(src):
_copy2(src, search_location(
tgt, create_in=_path.dirname(tgt), verbose=verbose)
)
else:
for l in _listdir(src):
change_location(
_path.abspath(_path.join(src, l)),
_path.abspath(_path.join(tgt, l))
)
if move:
if _path.isdir(src) and not _path.islink(src):
_rmtree(src)
else:
_remove(src)
if verbose:
shell_notify(
'%s location' % (
'deleted'
if not tgt and move else
'moved'
if move else
'copied'
),
more=dict(src=src, tgt=tgt)
) | 0.001259 |
def get_missing_options(self):
"""
Get a list of options that are required, but with default values
of None.
"""
return [option.name for option in self._options.values() if option.required and option.value is None] | 0.035714 |
def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
'''
Create simple convolutional model
'''
layers = [
Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(100, activation='relu'),
Dense(num_classes, activation='softmax')
]
model = Sequential(layers)
if hyper_params['optimizer'] == 'Adam':
optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
else:
optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])
return model | 0.00615 |
def config(self, averaging=1, datarate=15, mode=MODE_NORMAL):
"""
Set the base config for sensor
:param averaging: Sets the number of samples that are internally averaged
:param datarate: Datarate in hertz
:param mode: one of the MODE_* constants
"""
averaging_conf = {
1: 0,
2: 1,
4: 2,
8: 3
}
if averaging not in averaging_conf.keys():
raise Exception('Averaging should be one of: 1,2,4,8')
datarates = {
0.75: 0,
1.5: 1,
3: 2,
7.5: 4,
15: 5,
30: 6,
75: 7
}
if datarate not in datarates.keys():
raise Exception(
'Datarate of {} Hz is not supported, choose one of: {}'.format(datarate, ', '.join(str(d) for d in datarates)))
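# Pack configuration register A: sample averaging in bits 5-6, output data rate in bits 2-4, measurement mode in bits 0-1.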
config_a = 0
config_a |= averaging_conf[averaging] << 5
config_a |= datarates[datarate] << 2
config_a |= mode
self.i2c_write_register(0x00, config_a) | 0.003711 |
def commit_input_persist_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
commit = ET.Element("commit")
config = commit
input = ET.SubElement(commit, "input")
persist_id = ET.SubElement(input, "persist-id")
persist_id.text = kwargs.pop('persist_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.00463 |
def _add_header_domains_xml(self, document):
"""
Generates the XML elements for allowed header domains.
"""
for domain, attrs in self.header_domains.items():
header_element = document.createElement(
'allow-http-request-headers-from'
)
header_element.setAttribute('domain', domain)
header_element.setAttribute('headers', ','.join(attrs['headers']))
if not attrs['secure']:
header_element.setAttribute('secure', 'false')
document.documentElement.appendChild(header_element) | 0.003289 |
def get_from_cache(self, org_id, id):
'''
Get an object from the cache
Use all cache folders available (primary first, then secondary in order) and look for the ID in the dir
If found, unpickle and return the object; otherwise return None.
FIXME: Check for expiry of the object! Return None if expired (will auto-refetch and overwrite)
'''
current_time = datetime.now()
# Check memory cache first
if id in self.memory_cache[org_id]:
obj = self.memory_cache[org_id][id]
if obj.created_at > current_time - self.expire_records_after:
return obj
for cache in [self.cache_path] + self.secondary_cache_paths:
read_path = os.path.join( cache, org_id, id )
try:
with open(read_path, 'rb') as f:
obj = pickle.load(f)
except Exception:
# Continue to try the next cache
pass
else:
# It worked so we have obj
# Check for expiry date; if it's not expired return it else continue
if obj.created_at > current_time - self.expire_records_after:
# If we're here it mustn't be in the memory cache
self.memory_cache[org_id][id] = obj
if len(self.memory_cache[org_id]) > self.max_memory_cache:
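# Evict the oldest cached entry (FIFO order) to keep the per-org memory cache within its size limit.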
self.memory_cache[org_id].popitem(last=False)
return obj
# Else continue looking
# We found nothing (or all expired)
return None | 0.008929 |
def visit_UnaryOp(self, node: AST, dfltChaining: bool = True) -> str:
"""Return representation of `node`s operator and operand."""
op = node.op
with self.op_man(op):
return self.visit(op) + self.visit(node.operand) | 0.008 |
def _hm_form_message(
self,
thermostat_id,
protocol,
source,
function,
start,
payload
):
"""Forms a message payload, excluding CRC"""
if protocol == constants.HMV3_ID:
start_low = (start & constants.BYTEMASK)
start_high = (start >> 8) & constants.BYTEMASK
if function == constants.FUNC_READ:
payload_length = 0
length_low = (constants.RW_LENGTH_ALL & constants.BYTEMASK)
length_high = (constants.RW_LENGTH_ALL
>> 8) & constants.BYTEMASK
else:
payload_length = len(payload)
length_low = (payload_length & constants.BYTEMASK)
length_high = (payload_length >> 8) & constants.BYTEMASK
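# Frame layout: destination id, frame length (the fixed 10 presumably covers the header bytes plus the trailing CRC), source, function code, start address (lo/hi), payload length (lo/hi).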
msg = [
thermostat_id,
10 + payload_length,
source,
function,
start_low,
start_high,
length_low,
length_high
]
if function == constants.FUNC_WRITE:
msg = msg + payload
return msg
else:
assert 0, "Un-supported protocol found %s" % protocol | 0.002185 |
def score_file(filename):
"""Score each line in a file and return the scores."""
# Prepare model.
hparams = create_hparams()
encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir)
has_inputs = "inputs" in encoders
# Prepare features for feeding into the model.
if has_inputs:
inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.
batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D.
targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension.
batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D.
if has_inputs:
features = {"inputs": batch_inputs, "targets": batch_targets}
else:
features = {"targets": batch_targets}
# Prepare the model and the graph when model runs on features.
model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL)
_, losses = model(features)
saver = tf.train.Saver()
with tf.Session() as sess:
# Load weights from checkpoint.
if FLAGS.checkpoint_path is None:
ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir)
ckpt = ckpts.model_checkpoint_path
else:
ckpt = FLAGS.checkpoint_path
saver.restore(sess, ckpt)
# Run on each line.
with tf.gfile.Open(filename) as f:
lines = f.readlines()
results = []
for line in lines:
tab_split = line.split("\t")
if len(tab_split) > 2:
raise ValueError("Each line must have at most one tab separator.")
if len(tab_split) == 1:
targets = tab_split[0].strip()
else:
targets = tab_split[1].strip()
inputs = tab_split[0].strip()
# Run encoders and append EOS symbol.
targets_numpy = encoders["targets"].encode(
targets) + [text_encoder.EOS_ID]
if has_inputs:
inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID]
# Prepare the feed.
if has_inputs:
feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy}
else:
feed = {targets_ph: targets_numpy}
# Get the score.
np_loss = sess.run(losses["training"], feed)
results.append(np_loss)
return results | 0.016598 |
def process_raw_trace(raw_trace):
"""Processes raw trace data and returns the UI data."""
trace = trace_events_pb2.Trace()
trace.ParseFromString(raw_trace)
return ''.join(trace_events_json.TraceEventsJsonStream(trace)) | 0.022124 |
def addslashes(s, escaped_chars=None):
"""Add slashes for given characters. Default is for ``\`` and ``'``.
:param s: string
:param escaped_chars: list of characters to prefix with a slash ``\``
:return: string with slashed characters
:rtype: str
:Example:
>>> addslashes("'")
"\\'"
"""
if escaped_chars is None:
escaped_chars = ["\\", "'", ]
# l = ["\\", '"', "'", "\0", ]
for i in escaped_chars:
if i in s:
s = s.replace(i, '\\' + i)
return s | 0.005607 |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(OpenvzCollector, self).get_default_config()
config.update({
'path': 'openvz',
'bin': '/usr/sbin/vzlist',
'keyname': 'hostname'
})
return config | 0.006116 |
def dataset_dict(
self, dataset_name, imagesize, voxelres,
offset, timerange, scalinglevels, scaling):
"""Generate the dataset dictionary"""
dataset_dict = {}
dataset_dict['dataset_name'] = dataset_name
dataset_dict['imagesize'] = imagesize
dataset_dict['voxelres'] = voxelres
if offset is not None:
dataset_dict['offset'] = offset
if timerange is not None:
dataset_dict['timerange'] = timerange
if scalinglevels is not None:
dataset_dict['scalinglevels'] = scalinglevels
if scaling is not None:
dataset_dict['scaling'] = scaling
return dataset_dict | 0.004304 |
def estimate_entropy(X, epsilon=None):
r"""Estimate a dataset's Shannon entropy.
This function can take datasets of mixed discrete and continuous
features, and uses a set of heuristics to determine which functions
to apply to each.
Because this function is a subroutine in a mutual information estimator,
we employ the Kozachenko Estimator[2] for continuous features when this
function is _not_ used for mutual information and an adaptation of the
Kraskov Estimator[1] when it is.
Let X be made of continuous features c and discrete features d.
To deal with both continuous and discrete features, We use the
following reworking of entropy:
$ H(X) = H(c,d) = \sum_{x \in d} p(x) \times H(c(x)) + H(d) $
Where c(x) is a dataset that represents the rows of the continuous dataset
in the same row as a discrete column with value x in the original dataset.
Args:
X (array-like): An array-like (np arr, pandas df, etc.) with shape
(n_samples, n_features) or (n_samples)
epsilon (array-like): An array with shape (n_samples, 1) that is
the epsilon used in Kraskov Estimator. Represents the chebyshev
distance from an element to its k-th nearest neighbor in the full
dataset.
Returns:
float: A floating-point number representing the entropy in X.
If the dataset is fully discrete, an exact calculation is done.
If this is not the case and epsilon is not provided, this
will be the Kozachenko Estimator of the dataset's entropy.
If epsilon is provided, this is a partial estimation of the
Kraskov entropy estimator. The bias is cancelled out when
computing mutual information.
References:
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [2] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16.
"""
X = asarray2d(X)
n_samples, n_features = X.shape
if n_features < 1:
return 0
disc_mask = _get_discrete_columns(X)
cont_mask = ~disc_mask
# If our dataset is fully discrete/continuous, do something easier
if np.all(disc_mask):
return calculate_disc_entropy(X)
elif np.all(cont_mask):
return estimate_cont_entropy(X, epsilon)
# Separate the dataset into discrete and continuous datasets d,c
disc_features = asarray2d(X[:, disc_mask])
cont_features = asarray2d(X[:, cont_mask])
entropy = 0
uniques, counts = np.unique(disc_features, axis=0, return_counts=True)
empirical_p = counts / n_samples
# $\sum_{x \in d} p(x) \times H(c(x))$
for i in range(counts.size):
unique_mask = np.all(disc_features == uniques[i], axis=1)
selected_cont_samples = cont_features[unique_mask, :]
if epsilon is None:
selected_epsilon = None
else:
selected_epsilon = epsilon[unique_mask, :]
conditional_cont_entropy = estimate_cont_entropy(
selected_cont_samples, selected_epsilon)
entropy += empirical_p[i] * conditional_cont_entropy
# H(d)
entropy += calculate_disc_entropy(disc_features)
if epsilon is None:
entropy = max(0, entropy)
return entropy | 0.000294 |
def process_request(self, request):
"""Process a request."""
batcher = PrioritizedBatcher.global_instance()
if batcher.is_started:
# This can happen in old-style middleware if subsequent middleware
# raises an exception and thus `process_response` is not called.
# Described under 3rd point of differences:
# https://docs.djangoproject.com/en/1.11/topics/http/middleware/#upgrading-pre-django-1-10-style-middleware
batcher.rollback()
logger.warning(
"Uncommited batcher transaction from previous request was rollbacked."
)
batcher.start() | 0.004491 |
def is_package_installed(self, package_name):
"""Check if the RPM package is installed."""
if not package_name:
raise ValueError('package_name required.')
installed = True
try:
Cmd.sh_e('{0} --query {1} --quiet'.format(self.rpm_path,
package_name))
except InstallError:
installed = False
return installed | 0.004515 |
def launch(self, args, unknown):
"""Launch something according to the provided arguments
:param args: arguments from the launch parser
:type args: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: SystemExit
"""
pm = mayaplugins.MayaPluginManager.get()
addon = pm.get_plugin(args.addon)
isgui = isinstance(addon, coreplugins.JB_StandaloneGuiPlugin)
print "Launching %s..." % args.addon
addon.run()
if isgui:
app = guimain.get_qapp()
sys.exit(app.exec_()) | 0.00304 |
def _parse_supl(content):
"""Parse supplemental measurements data.
Parameters
----------
content : str
Data to parse
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
col_names = ['year', 'month', 'day', 'hour', 'minute',
'hourly_low_pressure', 'hourly_low_pressure_time',
'hourly_high_wind', 'hourly_high_wind_direction',
'hourly_high_wind_time']
col_units = {'hourly_low_pressure': 'hPa',
'hourly_low_pressure_time': None,
'hourly_high_wind': 'meters/second',
'hourly_high_wind_direction': 'degrees',
'hourly_high_wind_time': None,
'time': None}
df = pd.read_table(StringIO(content), comment='#', na_values='MM',
names=col_names, sep=r'\s+')
df['time'] = pd.to_datetime(df[['year', 'month', 'day', 'hour', 'minute']], utc=True)
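# Event times are encoded as HHMM integers; split them into hour and minute parts, treating 99 as missing.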
df['hours'] = np.floor(df['hourly_low_pressure_time'] / 100)
df['minutes'] = df['hourly_low_pressure_time'] - df['hours'] * 100
df['hours'] = df['hours'].replace(99, np.nan)
df['minutes'] = df['minutes'].replace(99, np.nan)
df['hourly_low_pressure_time'] = pd.to_datetime(df[['year', 'month', 'day', 'hours',
'minutes']], utc=True)
df['hours'] = np.floor(df['hourly_high_wind_time'] / 100)
df['minutes'] = df['hourly_high_wind_time'] - df['hours'] * 100
df['hours'] = df['hours'].replace(99, np.nan)
df['minutes'] = df['minutes'].replace(99, np.nan)
df['hourly_high_wind_time'] = pd.to_datetime(df[['year', 'month', 'day',
'hours', 'minutes']], utc=True)
df = df.drop(columns=['year', 'month', 'day', 'hour', 'minute', 'hours', 'minutes'])
df.units = col_units
return df | 0.003899 |
def print_rows(self, num_rows=10, num_columns=40, max_column_width=30,
max_row_width=80, output_file=None):
"""
Print the first M rows and N columns of the SFrame in human readable
format.
Parameters
----------
num_rows : int, optional
Number of rows to print.
num_columns : int, optional
Number of columns to print.
max_column_width : int, optional
Maximum width of a column. Columns use fewer characters if possible.
max_row_width : int, optional
Maximum width of a printed row. Columns beyond this width wrap to a
new line. `max_row_width` is automatically reset to be the
larger of itself and `max_column_width`.
output_file: file, optional
The stream or file that receives the output. By default the output
goes to sys.stdout, but it can also be redirected to a file or a
string (using an object of type StringIO).
See Also
--------
head, tail
"""
if output_file is None:
output_file = sys.stdout
max_row_width = max(max_row_width, max_column_width + 1)
printed_sf = self._imagecols_to_stringcols(num_rows)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False,
max_rows_to_display=num_rows,
max_columns=num_columns,
max_column_width=max_column_width,
max_row_width=max_row_width)
footer = "[%d rows x %d columns]\n" % self.shape
print('\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer, file=output_file) | 0.004823 |
def dropbox_basename(url):
""" Strip off the dl=0 suffix from dropbox links
>>> dropbox_basename('https://www.dropbox.com/s/yviic64qv84x73j/aclImdb_v1.tar.gz?dl=1')
'aclImdb_v1.tar.gz'
"""
filename = os.path.basename(url)
match = re.findall(r'\?dl=[0-9]$', filename)
if match:
return filename[:-len(match[0])]
return filename | 0.008108 |
def accept(self, **kws):
"""Accept a connection. The socket must be bound to an address and
listening for connections. The return value is a pair (conn, address)
where conn is a new socket object usable to send and receive data on the
connection, and address is the address bound to the socket on the other
end of the connection.
Example:
{{{
conn, address = yield mysock.accept()
}}}
"""
return yield_(Accept(self, timeout=self._timeout, **kws)) | 0.005484 |
def kv_format_dict(d, keys=None, separator=DEFAULT_SEPARATOR):
"""Formats the given dictionary ``d``.
For more details see :func:`kv_format`.
:param collections.Mapping d:
Dictionary containing values to format.
:param collections.Iterable keys:
List of keys to extract from the dict.
:param str separator:
Value between two pairs.
:return:
Key-Value formatted content generated from ``d``.
:rtype:
:data:`six.text_type <six:six.text_type>`
"""
return _format_pairs(dump_dict(d, keys), separator=separator) | 0.001712 |
def cprint(self, cstr):
"""
Clear line, then reprint on same line
:param cstr: string to print on current line
"""
cstr = str(cstr) # Force it to be a string
cstr_len = len(cstr)
prev_cstr_len = len(self._prev_cstr)
num_spaces = 0
if cstr_len < prev_cstr_len:
num_spaces = abs(prev_cstr_len - cstr_len)
try:
print(cstr + " " * num_spaces, end='\r')
self._prev_cstr = cstr
except UnicodeEncodeError:
print('Processing...', end='\r')
self._prev_cstr = 'Processing...' | 0.003257 |
def quick_sort(array, left, right):
"""快速排序"""
if left >= right:
return
low = left
high = right
key = array[low]
while left < right:
# Move values greater than key to the right side
while left < right and array[right] > key:
right -= 1
array[left] = array[right]
# Move values less than or equal to key to the left side
while left < right and array[left] <= key:
left += 1
array[right] = array[left]
# Put key into its final position
array[right] = key
# Recurse on the left and right partitions
quick_sort(array, low, left - 1)
quick_sort(array, left + 1, high) | 0.001761 |
def calc_mse(q1, q2):
"""Compare the results of two simulations"""
# Difference in positions between two simulations
dq = q2 - q1
# Root-mean-square position difference, converted to AU
return np.sqrt(np.mean(dq*dq))/au2m | 0.004695 |
def _check_data_flow_types(self, check_data_flow):
"""Checks the validity of the data flow connection
Checks whether the ports of a data flow have matching data types.
:param rafcon.core.data_flow.DataFlow check_data_flow: The data flow to be checked
:return bool validity, str message: validity is True, when the data flow is valid, False else. message gives
more information especially if the data flow is not valid
"""
# Check whether the data types or origin and target fit
from_data_port = self.get_data_port(check_data_flow.from_state, check_data_flow.from_key)
to_data_port = self.get_data_port(check_data_flow.to_state, check_data_flow.to_key)
# Connections from the data port type "object" are always allowed
if from_data_port.data_type is object:
return True, "valid"
if not type_inherits_of_type(from_data_port.data_type, to_data_port.data_type):
return False, "Data flow (id: {0}) with origin state \"{1}\" (from data port name: {2}) " \
"and target state \"{3}\" (to data port name: {4}) " \
"do not have matching data types (from '{5}' to '{6}')".format(
check_data_flow.data_flow_id,
from_data_port.parent.name,
from_data_port.name,
to_data_port.parent.name,
to_data_port.name,
from_data_port.data_type,
to_data_port.data_type)
return True, "valid" | 0.006028 |
def bookmark_show(bookmark_id_or_name):
"""
Executor for `globus bookmark show`
"""
client = get_client()
res = resolve_id_or_name(client, bookmark_id_or_name)
formatted_print(
res,
text_format=FORMAT_TEXT_RECORD,
fields=(
("ID", "id"),
("Name", "name"),
("Endpoint ID", "endpoint_id"),
("Path", "path"),
),
simple_text=(
# standard output is endpoint:path format
"{}:{}".format(res["endpoint_id"], res["path"])
# verbose output includes all fields
if not is_verbose()
else None
),
) | 0.001499 |
def preprocess_rules(self):
"""Calculate shortest reference-paths of each rule (and Or field),
and prune all unreachable rules.
"""
to_prune = self._find_shortest_paths()
self._prune_rules(to_prune)
self._rules_processed = True | 0.007246 |
def rank(expr, sort=None, ascending=True):
"""
Calculate rank of a sequence expression.
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column
"""
return _rank_op(expr, Rank, types.int64, sort=sort, ascending=ascending) | 0.002825 |
def cancelPendingResultsFor( self, ps ):
"""Cancel all pending results for the given parameters. Note that
this only affects the notebook's record, not any job running in a lab.
:param ps: the parameters"""
k = self._parametersAsIndex(ps)
if k in self._results.keys():
# remove from results
rs = self._results[k]
js = [ j for j in rs if not isinstance(j, dict) ]
self._results[k] = [ rc for rc in rs if isinstance(rc, dict) ]
# ...and from pending jobs list
for j in js:
del self._pending[j] | 0.012862 |
def send_report(report, config):
"""
Sends the report to IOpipe's collector.
:param report: The report to be sent.
:param config: The IOpipe agent configuration.
"""
headers = {"Authorization": "Bearer {}".format(config["token"])}
url = "https://{host}{path}".format(**config)
try:
response = session.post(
url, json=report, headers=headers, timeout=config["network_timeout"]
)
response.raise_for_status()
except Exception as e:
logger.debug("Error sending report to IOpipe: %s" % e)
else:
logger.debug("Report sent to IOpipe successfully") | 0.003155 |
def name_from_base(base, max_length=63, short=False):
"""Append a timestamp to the provided string.
This function assures that the total length of the resulting string is not
longer than the specified max length, trimming the input parameter if necessary.
Args:
base (str): String used as prefix to generate the unique name.
max_length (int): Maximum length for the resulting string.
short (bool): Whether or not to use a truncated timestamp.
Returns:
str: Input parameter with appended timestamp.
"""
timestamp = sagemaker_short_timestamp() if short else sagemaker_timestamp()
trimmed_base = base[:max_length - len(timestamp) - 1]
return '{}-{}'.format(trimmed_base, timestamp) | 0.00267 |
def less_or_equal(a, b, *args):
"""Implements the '<=' operator with JS-style type coertion."""
return (
less(a, b) or soft_equals(a, b)
) and (not args or less_or_equal(b, *args)) | 0.005 |
def merge_chromosome_dfs(df_tuple):
# type: (Tuple[pd.DataFrame, pd.DataFrame]) -> pd.DataFrame
"""Merges data from the two strands into strand-agnostic counts."""
plus_df, minus_df = df_tuple
index_cols = "Chromosome Bin".split()
count_column = plus_df.columns[0]
if plus_df.empty:
return return_other(minus_df, count_column, index_cols)
if minus_df.empty:
return return_other(plus_df, count_column, index_cols)
# sum duplicate bins
# TODO: why are there duplicate bins here in the first place?
plus_df = plus_df.groupby(index_cols).sum()
minus_df = minus_df.groupby(index_cols).sum()
# first sum the two bins from each strand
df = pd.concat([plus_df, minus_df], axis=1).fillna(0).sum(axis=1)
df = df.reset_index().sort_values(by="Bin")
df.columns = ["Chromosome", "Bin", count_column]
df = df.sort_values(["Chromosome", "Bin"])
df[["Bin", count_column]] = df[["Bin", count_column]].astype(int32)
df = df[[count_column, "Chromosome", "Bin"]]
return df.reset_index(drop=True) | 0.000932 |
def AddValue(self, registry_value):
"""Adds a value.
Args:
registry_value (WinRegistryValue): Windows Registry value.
Raises:
KeyError: if the value already exists.
"""
name = registry_value.name.upper()
if name in self._values:
raise KeyError(
'Value: {0:s} already exists.'.format(registry_value.name))
self._values[name] = registry_value | 0.005013 |
def srp(x, promisc=None, iface=None, iface_hint=None, filter=None,
nofilter=0, type=ETH_P_ALL, *args, **kargs):
"""Send and receive packets at layer 2"""
if iface is None and iface_hint is not None:
iface = conf.route.route(iface_hint)[0]
s = conf.L2socket(promisc=promisc, iface=iface,
filter=filter, nofilter=nofilter, type=type)
result = sndrcv(s, x, *args, **kargs)
s.close()
return result | 0.002198 |
def design_stat_heating(self, value="Heating"):
"""Corresponds to IDD Field `design_stat_heating`
Args:
value (str): value for IDD Field `design_stat_heating`
Accepted values are:
- Heating
Default value: Heating
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError(
'value {} need to be of type str '
'for field `design_stat_heating`'.format(value))
if ',' in value:
raise ValueError('value should not contain a comma '
'for field `design_stat_heating`')
vals = set()
vals.add("Heating")
if value not in vals:
raise ValueError('value {} is not an accepted value for '
'field `design_stat_heating`'.format(value))
self._design_stat_heating = value | 0.001612 |
def read_file(rel_path, paths=None, raw=False, as_list=False, as_iter=False,
*args, **kwargs):
'''
find a file that lives somewhere within a set of paths and
return its contents. Default paths include 'static_dir'
'''
if not rel_path:
raise ValueError("rel_path can not be null!")
paths = str2list(paths)
# try looking the file up in a directory called static relative
# to SRC_DIR, eg assuming metrique git repo is in ~/metrique
# we'd look in ~/metrique/static
paths.extend([STATIC_DIR, os.path.join(SRC_DIR, 'static')])
paths = [os.path.expanduser(p) for p in set(paths)]
for path in paths:
path = os.path.join(path, rel_path)
logger.debug("trying to read: %s " % path)
if os.path.exists(path):
break
else:
raise IOError("path %s does not exist!" % rel_path)
args = args if args else ['rU']
fd = open(path, *args, **kwargs)
if raw:
return fd
if as_iter:
return read_in_chunks(fd)
else:
fd_lines = fd.readlines()
if as_list:
return fd_lines
else:
return ''.join(fd_lines) | 0.000855 |
def _emplace_transcript(transcripts, parent):
"""Retrieve the primary transcript and discard all others."""
transcripts.sort(key=lambda t: (len(t), t.get_attribute('ID')))
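# Ascending sort by (length, ID) means pop() returns the longest transcript, which becomes the primary.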
pt = transcripts.pop()
parent.children = [pt] | 0.004292 |
def getMaxDelay(inferences):
"""
Returns the maximum delay for the InferenceElements in the inference
dictionary
Parameters:
-----------------------------------------------------------------------
inferences: A dictionary where the keys are InferenceElements
"""
maxDelay = 0
for inferenceElement, inference in inferences.iteritems():
if isinstance(inference, dict):
for key in inference.iterkeys():
maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement,
key),
maxDelay)
else:
maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement),
maxDelay)
return maxDelay | 0.007762 |
def format_price(price, currency='$'):
"""
Format the price to have the appropriate currency and digits..
:param price: The price amount.
:param currency: The currency for the price.
:return: A formatted price string, i.e. '$10', '$10.52'.
"""
if int(price) == price:
return '{}{}'.format(currency, int(price))
return '{}{:0.2f}'.format(currency, price) | 0.002538 |
def ins(mnemonic):
"""Lookup instruction information.
Lookup an instruction by its mnemonic.
"""
try:
opcode = bytecode.opcode_table[mnemonic]
except KeyError:
click.secho(u'No definition found.', fg='red')
return
click.echo(u'{mnemonic} (0x{op})'.format(
mnemonic=click.style(opcode['mnemonic'], fg='green', underline=True),
op=click.style(format(opcode['op'], '02x'), fg='green')
))
if opcode.get('desc'):
click.secho('Description:', fg='yellow')
click.echo(opcode['desc'])
if opcode['can_be_wide']:
click.echo(u'This instruction can be prefixed by the WIDE opcode.')
if opcode.get('runtime'):
click.secho('Possible runtime exceptions:', fg='yellow')
for runtime_exception in opcode['runtime']:
click.echo('- {runtime_exception}'.format(
runtime_exception=click.style(runtime_exception, fg='red')
))
if opcode['operands']:
click.secho(u'Operand Format:', fg='yellow')
for operand_fmt, operand_type in opcode['operands']:
click.echo(u'- {ty} as a {fmt}'.format(
ty=click.style(operand_type.name, fg='yellow'),
fmt=click.style(operand_fmt.name, fg='yellow')
))
elif opcode['op'] in (0xAB, 0xAA, 0xC4):
# lookup[table|switch] and WIDE.
click.secho(u'\nOperand Format:', fg='yellow')
click.echo(
u'This is a special-case opcode with variable operand parsing.'
) | 0.000646 |
def tryAcquire(self, lockID, callback=None, sync=False, timeout=None):
"""Attempt to acquire lock.
:param lockID: unique lock identifier.
:type lockID: str
:param sync: True - to wait until lock is acquired or failed to acquire.
:type sync: bool
:param callback: if sync is False - callback will be called with operation result.
:type callback: func(opResult, error)
:param timeout: max operation time (default - unlimited)
:type timeout: float
:return True if acquired, False - somebody else already acquired lock
"""
return self.__lockImpl.acquire(lockID, self.__selfID, time.time(), callback=callback, sync=sync, timeout=timeout) | 0.006859 |
def from_composition_and_pd(comp, pd, working_ion_symbol="Li"):
"""
Convenience constructor to make a ConversionElectrode from a
composition and a phase diagram.
Args:
comp:
Starting composition for ConversionElectrode, e.g.,
Composition("FeF3")
pd:
A PhaseDiagram of the relevant system (e.g., Li-Fe-F)
working_ion_symbol:
Element symbol of working ion. Defaults to Li.
"""
working_ion = Element(working_ion_symbol)
entry = None
working_ion_entry = None
for e in pd.stable_entries:
if e.composition.reduced_formula == comp.reduced_formula:
entry = e
elif e.is_element and \
e.composition.reduced_formula == working_ion_symbol:
working_ion_entry = e
if not entry:
raise ValueError("Not stable compound found at composition {}."
.format(comp))
profile = pd.get_element_profile(working_ion, comp)
# Need to reverse because voltage goes form most charged to most
# discharged.
profile.reverse()
if len(profile) < 2:
return None
working_ion_entry = working_ion_entry
working_ion = working_ion_entry.composition.elements[0].symbol
normalization_els = {}
for el, amt in comp.items():
if el != Element(working_ion):
normalization_els[el] = amt
vpairs = [ConversionVoltagePair.from_steps(profile[i], profile[i + 1],
normalization_els)
for i in range(len(profile) - 1)]
return ConversionElectrode(vpairs, working_ion_entry, comp) | 0.001101 |
def create_content_type(json):
"""Create :class:`.resource.ContentType` from JSON.
:param json: JSON dict.
:return: ContentType instance.
"""
result = ContentType(json['sys'])
for field in json['fields']:
field_id = field['id']
del field['id']
result.fields[field_id] = field
result.name = json['name']
result.display_field = json.get('displayField')
return result | 0.004202 |
def get_token(url: str, scopes: str, credentials_dir: str) -> dict:
"""
Get access token info.
"""
tokens.configure(url=url, dir=credentials_dir)
tokens.manage('lizzy', [scopes])
tokens.start()
return tokens.get('lizzy') | 0.004 |
def add_tunnel_interface(self, interface_id, address, network_value,
zone_ref=None, comment=None):
"""
Creates a tunnel interface for a virtual engine.
:param str,int interface_id: the tunnel id for the interface, used as nicid also
:param str address: ip address of interface
:param str network_value: network cidr for interface; format: 1.1.1.0/24
:param str zone_ref: zone reference for interface can be name, href or Zone
:raises EngineCommandFailed: failure during creation
:return: None
"""
interfaces = [{'nodes': [{'address': address, 'network_value': network_value}]}]
interface = {'interface_id': interface_id, 'interfaces': interfaces,
'zone_ref': zone_ref, 'comment': comment}
tunnel_interface = TunnelInterface(**interface)
self._engine.add_interface(tunnel_interface) | 0.00863 |
def fault_barrier(fn):
"""Method decorator to catch and log errors, then send fail message."""
@functools.wraps(fn)
def process(self, tup):
try:
return fn(self, tup)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
return
print(str(e), file=sys.stderr)
self.fail(tup)
return process | 0.002571 |
def set_target_variable (self, targets, variable, value, append=0):
""" Sets a target variable.
The 'variable' will be available to bjam when it decides
where to generate targets, and will also be available to
the updating rule for that 'target'.
"""
if isinstance (targets, str):
targets = [targets]
if isinstance(value, str):
value = [value]
assert is_iterable(targets)
assert isinstance(variable, basestring)
assert is_iterable(value)
if targets:
if append:
bjam_interface.call("set-target-variable", targets, variable, value, "true")
else:
bjam_interface.call("set-target-variable", targets, variable, value) | 0.007712 |
def leave(self, node):
"""walk on the tree from <node>, getting callbacks from handler"""
method = self.get_callbacks(node)[1]
if method is not None:
method(node) | 0.010101 |
def _on_connection_failed(self, conn_id, handle, clean, reason):
"""Callback called from another thread when a connection attempt has failed.
"""
with self.count_lock:
self.connecting_count -= 1
self._logger.info("_on_connection_failed conn_id=%d, reason=%s", conn_id, str(reason))
conndata = self._get_connection(handle)
if conndata is None:
self._logger.info("Unable to obtain connection data on unknown connection %d", conn_id)
return
callback = conndata['callback']
conn_id = conndata['connection_id']
failure_reason = conndata['failure_reason']
# If this was an early disconnect from the device, automatically retry
if 'error_code' in conndata and conndata['error_code'] == 0x23e and conndata['retries'] > 0:
self._remove_connection(handle)
self.connect_async(conn_id, conndata['connection_string'], callback, conndata['retries'] - 1)
else:
callback(conn_id, self.id, False, failure_reason)
self._remove_connection(handle) | 0.006289 |
def freeNSynapses(self, numToFree, inactiveSynapseIndices, verbosity= 0):
"""Free up some synapses in this segment. We always free up inactive
synapses (lowest permanence freed up first) before we start to free up
active ones.
@param numToFree number of synapses to free up
@param inactiveSynapseIndices list of the inactive synapse indices.
"""
# Make sure numToFree isn't larger than the total number of syns we have
assert (numToFree <= len(self.syns))
if (verbosity >= 4):
print "\nIn PY freeNSynapses with numToFree =", numToFree,
print "inactiveSynapseIndices =",
for i in inactiveSynapseIndices:
print self.syns[i][0:2],
print
# Remove the lowest perm inactive synapses first
if len(inactiveSynapseIndices) > 0:
perms = numpy.array([self.syns[i][2] for i in inactiveSynapseIndices])
candidates = numpy.array(inactiveSynapseIndices)[
perms.argsort()[0:numToFree]]
candidates = list(candidates)
else:
candidates = []
# Do we need more? if so, remove the lowest perm active synapses too
if len(candidates) < numToFree:
activeSynIndices = [i for i in xrange(len(self.syns))
if i not in inactiveSynapseIndices]
perms = numpy.array([self.syns[i][2] for i in activeSynIndices])
moreToFree = numToFree - len(candidates)
moreCandidates = numpy.array(activeSynIndices)[
perms.argsort()[0:moreToFree]]
candidates += list(moreCandidates)
if verbosity >= 4:
print "Deleting %d synapses from segment to make room for new ones:" % (
len(candidates)), candidates
print "BEFORE:",
self.printSegment()
# Free up all the candidates now
synsToDelete = [self.syns[i] for i in candidates]
for syn in synsToDelete:
self.syns.remove(syn)
if verbosity >= 4:
print "AFTER:",
self.printSegment() | 0.010775 |
def release(self, resource):
"""release(resource)
Returns a resource to the pool. Most of the time you will want
to use :meth:`transaction`, but if you use :meth:`acquire`,
you must release the acquired resource back to the pool when
finished. Failure to do so could result in deadlock.
:param resource: Resource
"""
with self.releaser:
resource.claimed = False
self.releaser.notify_all() | 0.004184 |
def _eval_call(self, node):
"""
Evaluate a function call
:param node: Node to eval
:return: Result of node
"""
try:
func = self.functions[node.func.id]
except KeyError:
raise NameError(node.func.id)
value = func(
*(self._eval(a) for a in node.args),
**dict(self._eval(k) for k in node.keywords)
)
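# Normalize boolean results to 1/0; everything else passes through unchanged.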
if value is True:
return 1
elif value is False:
return 0
else:
return value | 0.003604 |
def eth_addr(f):
"""eth_addr
:param f: eth frame
"""
data = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (f[0],
f[1],
f[2],
f[3],
f[4],
f[5])
return data | 0.002545 |
def _get_parameter(self, name, tp, timeout=1.0, max_retries=2):
""" Gets the specified drive parameter.
Gets a parameter from the drive. Only supports ``bool``,
``int``, and ``float`` parameters.
Parameters
----------
name : str
Name of the parameter to check. It is always the command to
set it but without the value.
tp : type {bool, int, float}
The type of the parameter.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
value : bool, int, or float
The value of the specified parameter.
Raises
------
TypeError
If 'tp' is not an allowed type (``bool``, ``int``,
``float``).
CommandError
If the command to retrieve the parameter returned an error.
ValueError
If the value returned to the drive cannot be converted to
the proper type.
See Also
--------
_set_parameter : Set a parameter.
"""
# Raise a TypeError if tp isn't one of the valid types.
if tp not in (bool, int, float):
raise TypeError('Only supports bool, int, and float; not '
+ str(tp))
# Sending a command of name queries the state for that
# parameter. The response will have name preceeded by an '*' and
# then followed by a number which will have to be converted.
response = self.driver.send_command(name, timeout=timeout,
immediate=True,
max_retries=max_retries)
# If the response has an error, there are no response lines, or
# the first response line isn't '*'+name; then there was an
# error and an exception needs to be thrown.
if self.driver.command_error(response) \
or len(response[4]) == 0 \
or not response[4][0].startswith('*' + name):
raise CommandError("Couldn't retrieve parameter "
+ name)
# Extract the string representation of the value, which is after
# the '*'+name.
value_str = response[4][0][(len(name)+1):]
# Convert the value string to the appropriate type and return
# it. Throw an error if it is not supported.
if tp == bool:
return (value_str == '1')
elif tp == int:
return int(value_str)
elif tp == float:
return float(value_str) | 0.001041 |
def environment_exists(self, name=None, prefix=None, abspath=True,
log=True):
"""Check if an environment exists by 'name' or by 'prefix'.
If query is by 'name' only the default conda environments directory is
searched.
"""
if log:
logger.debug(str((name, prefix)))
if name and prefix:
raise TypeError("Exactly one of 'name' or 'prefix' is required.")
if name:
prefix = self.get_prefix_envname(name, log=log)
if prefix is None:
prefix = self.ROOT_PREFIX
return os.path.isdir(os.path.join(prefix, 'conda-meta')) | 0.004539 |
def _expand_nbest_translation(translation: Translation) -> List[Translation]:
"""
Expand nbest translations in a single Translation object to one Translation
object per nbest translation.
:param translation: A Translation object.
:return: A list of Translation objects.
"""
nbest_list = [] # type = List[Translation]
for target_ids, attention_matrix, score in zip(translation.nbest_translations.target_ids_list,
translation.nbest_translations.attention_matrices,
translation.nbest_translations.scores):
nbest_list.append(Translation(target_ids, attention_matrix, score, translation.beam_histories,
estimated_reference_length=translation.estimated_reference_length))
return nbest_list | 0.006865 |
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n) | 0.000361 |
def render_template(tpl, context):
'''
A shortcut function to render a partial template with context and return
the output.
'''
templates = [tpl] if type(tpl) != list else tpl
tpl_instance = None
for tpl in templates:
try:
tpl_instance = template.loader.get_template(tpl)
break
except template.TemplateDoesNotExist:
pass
if not tpl_instance:
raise Exception('Template does not exist: ' + templates[-1])
return tpl_instance.render(template.Context(context)) | 0.001739 |
def tuples2ids(tuples, ids):
"""Update `ids` according to `tuples`, e.g. (3, 0, X), (4, 0, X)..."""
for value in tuples:
if value[0] == 6 and value[2]:
ids = value[2]
elif value[0] == 5:
ids[:] = []
elif value[0] == 4 and value[1] and value[1] not in ids:
ids.append(value[1])
elif value[0] == 3 and value[1] and value[1] in ids:
ids.remove(value[1])
return ids | 0.002208 |
def multidict(ordered_pairs):
"""Convert duplicate keys values to lists."""
# read all values into lists
d = defaultdict(list)
for k, v in ordered_pairs:
d[k].append(v)
# unpack lists that have only 1 item
dict_copy = deepcopy(d)
for k, v in iteritems(dict_copy):
if len(v) == 1:
d[k] = v[0]
return dict(d) | 0.002732 |
def get_last_live_chat(self):
""" Check if there is a live chat that ended in the last 3 days, and
return it. We will display a link to it on the articles page.
"""
now = datetime.now()
lcqs = self.get_query_set()
lcqs = lcqs.filter(
chat_ends_at__lte=now,
).order_by('-chat_ends_at')
for itm in lcqs:
if itm.chat_ends_at + timedelta(days=3) > now:
return itm
return None | 0.004065 |
def _interception(self, joinpoint):
"""Intercept call of joinpoint callee in doing pre/post conditions.
"""
if self.pre_cond is not None:
self.pre_cond(joinpoint)
result = joinpoint.proceed()
if self.post_cond is not None:
joinpoint.exec_ctx[Condition.RESULT] = result
self.post_cond(joinpoint)
return result | 0.005051 |
def add_nodes_from(self, nodes, **kwargs):
"""
Add multiple nodes to the cluster graph.
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, etc.).
Examples
--------
>>> from pgmpy.models import ClusterGraph
>>> G = ClusterGraph()
>>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
"""
for node in nodes:
self.add_node(node, **kwargs) | 0.004073 |
def _extract_gcs_api_response_error(message):
""" A helper function to extract user-friendly error messages from service exceptions.
Args:
message: An error message from an exception. If this is from our HTTP client code, it
will actually be a tuple.
Returns:
A modified version of the message that is less cryptic.
"""
try:
if len(message) == 3:
# Try to treat the last part as JSON
data = json.loads(message[2])
return data['error']['errors'][0]['message']
except Exception:
pass
return message | 0.018149 |
def to_cloudformation(self, **kwargs):
"""Returns the API Gateway RestApi, Deployment, and Stage to which this SAM Api corresponds.
:param dict kwargs: already-converted resources that may need to be modified when converting this \
macro to pure CloudFormation
:returns: a list of vanilla CloudFormation Resources, to which this Function expands
:rtype: list
"""
resources = []
api_generator = ApiGenerator(self.logical_id,
self.CacheClusterEnabled,
self.CacheClusterSize,
self.Variables,
self.depends_on,
self.DefinitionBody,
self.DefinitionUri,
self.Name,
self.StageName,
endpoint_configuration=self.EndpointConfiguration,
method_settings=self.MethodSettings,
binary_media=self.BinaryMediaTypes,
minimum_compression_size=self.MinimumCompressionSize,
cors=self.Cors,
auth=self.Auth,
gateway_responses=self.GatewayResponses,
access_log_setting=self.AccessLogSetting,
canary_setting=self.CanarySetting,
tracing_enabled=self.TracingEnabled,
resource_attributes=self.resource_attributes,
passthrough_resource_attributes=self.get_passthrough_resource_attributes())
rest_api, deployment, stage, permissions = api_generator.to_cloudformation()
resources.extend([rest_api, deployment, stage])
resources.extend(permissions)
return resources | 0.004824 |
def apply_parameters(self, parameters):
"""Recursively apply dictionary entries in 'parameters' to {item}s in recipe
structure, leaving undefined {item}s as they are. A special case is a
{$REPLACE:item}, which replaces the string with a copy of the referenced
parameter item.
Examples:
parameters = { 'x':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '5': '{y}' }
parameters = { 'y':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '{x}': '5' }
parameters = { 'x':'3', 'y':'5' }
apply_parameters( { '{x}': '{y}' }, parameters )
=> { '3': '5' }
parameters = { 'l': [ 1, 2 ] }
apply_parameters( { 'x': '{$REPLACE:l}' }, parameters )
=> { 'x': [ 1, 2 ] }
"""
class SafeString(object):
def __init__(self, s):
self.string = s
def __repr__(self):
return "{" + self.string + "}"
def __str__(self):
return "{" + self.string + "}"
def __getitem__(self, item):
return SafeString(self.string + "[" + item + "]")
class SafeDict(dict):
"""A dictionary that returns undefined keys as {keyname}.
This can be used to selectively replace variables in datastructures."""
def __missing__(self, key):
return SafeString(key)
# By default the python formatter class is used to resolve {item} references
formatter = string.Formatter()
# Special format strings "{$REPLACE:(...)}" use this data structure
# formatter to return the referenced data structure rather than a formatted
# string.
ds_formatter = string.Formatter()
def ds_format_field(value, spec):
ds_format_field.last = value
return ""
ds_formatter.format_field = ds_format_field
params = SafeDict(parameters)
def _recursive_apply(item):
"""Helper function to recursively apply replacements."""
if isinstance(item, basestring):
if item.startswith("{$REPLACE") and item.endswith("}"):
try:
ds_formatter.vformat("{" + item[10:-1] + "}", (), parameters)
except KeyError:
return None
return copy.deepcopy(ds_formatter.format_field.last)
else:
return formatter.vformat(item, (), params)
if isinstance(item, dict):
return {
_recursive_apply(key): _recursive_apply(value)
for key, value in item.items()
}
if isinstance(item, tuple):
return tuple(_recursive_apply(list(item)))
if isinstance(item, list):
return [_recursive_apply(x) for x in item]
return item
self.recipe = _recursive_apply(self.recipe) | 0.002616 |
def adjust(self, to):
'''
Adjusts the time from kwargs to timedelta
**Will change this object**
return new copy of self
'''
if self.date == 'infinity':
return
new = copy(self)
if type(to) in (str, unicode):
to = to.lower()
res = TIMESTRING_RE.search(to)
if res:
rgroup = res.groupdict()
if (rgroup.get('delta') or rgroup.get('delta_2')):
i = int(text2num(rgroup.get('num', 'one'))) * (-1 if to.startswith('-') else 1)
delta = (rgroup.get('delta') or rgroup.get('delta_2')).lower()
if delta.startswith('y'):
try:
new.date = new.date.replace(year=(new.date.year + i))
except ValueError:
# day is out of range for month
new.date = new.date + timedelta(days=(365 * i))
elif delta.startswith('month'):
if (new.date.month + i) > 12:
new.date = new.date.replace(month=(i - (i / 12)),
year=(new.date.year + 1 + (i / 12)))
elif (new.date.month + i) < 1:
new.date = new.date.replace(month=12, year=(new.date.year - 1))
else:
new.date = new.date.replace(month=(new.date.month + i))
elif delta.startswith('q'):
# NP
pass
elif delta.startswith('w'):
new.date = new.date + timedelta(days=(7 * i))
elif delta.startswith('s'):
new.date = new.date + timedelta(seconds=i)
else:
new.date = new.date + timedelta(**{('days' if delta.startswith('d') else 'hours' if delta.startswith('h') else 'minutes' if delta.startswith('m') else 'seconds'): i})
return new
else:
new.date = new.date + timedelta(seconds=int(to))
return new
raise TimestringInvalid('Invalid addition request') | 0.003939 |
def _build_argspec(self):
"""Builds the ansible argument spec using the fields from the schema
definition. It's the caller's responsibility to add any arguments which
are not defined in the schema (e.g. login parameters)
"""
fields = self.manager._schema.fields
argspec = {}
for (field_name, field) in six.iteritems(fields):
# Readonly fields are omitted, obviously
if field.get('readonly', False):
continue
argspec_field = {'required': field.get('required', False)}
# Set the name of the argument as the `altname` if it's specified.
# Otherwise, use the same name as the API does.
if field['altname']:
name = field['altname']
else:
name = field_name
argspec[name] = argspec_field
return argspec | 0.002203 |
def clear(self, models=None, commit=True):
"""
Clears all indexes for the current project.
:param models: if specified, only deletes the entries for the given models.
:param commit: This is ignored by Haystack (maybe a bug?)
"""
for language in self.languages:
self.log.debug('clearing index for {0}'.format(language))
self.index_name = self._index_name_for_language(language)
super(ElasticsearchMultilingualSearchBackend, self).clear(models, commit)
self._reset_existing_mapping() | 0.006981 |
def refine_water_bridges(self, wbridges, hbonds_ldon, hbonds_pdon):
"""A donor atom already forming a hydrogen bond is not allowed to form a water bridge. Each water molecule
can only be donor for two water bridges, selecting the constellation with the omega angle closest to 110 deg."""
donor_atoms_hbonds = [hb.d.idx for hb in hbonds_ldon + hbonds_pdon]
wb_dict = {}
wb_dict2 = {}
omega = 110.0
# Just one hydrogen bond per donor atom
for wbridge in [wb for wb in wbridges if wb.d.idx not in donor_atoms_hbonds]:
if (wbridge.water.idx, wbridge.a.idx) not in wb_dict:
wb_dict[(wbridge.water.idx, wbridge.a.idx)] = wbridge
else:
if abs(omega - wb_dict[(wbridge.water.idx, wbridge.a.idx)].w_angle) < abs(omega - wbridge.w_angle):
wb_dict[(wbridge.water.idx, wbridge.a.idx)] = wbridge
for wb_tuple in wb_dict:
water, acceptor = wb_tuple
if water not in wb_dict2:
wb_dict2[water] = [(abs(omega - wb_dict[wb_tuple].w_angle), wb_dict[wb_tuple]), ]
elif len(wb_dict2[water]) == 1:
wb_dict2[water].append((abs(omega - wb_dict[wb_tuple].w_angle), wb_dict[wb_tuple]))
wb_dict2[water] = sorted(wb_dict2[water])
else:
if wb_dict2[water][1][0] < abs(omega - wb_dict[wb_tuple].w_angle):
wb_dict2[water] = [wb_dict2[water][0], (wb_dict[wb_tuple].w_angle, wb_dict[wb_tuple])]
filtered_wb = []
for fwbridges in wb_dict2.values():
            filtered_wb.extend(fwb[1] for fwb in fwbridges)
return filtered_wb | 0.005882 |
def cleanup(self):
"""
Clean up finished children.
:return: None
"""
self.lock.acquire()
logger.debug('Acquired lock in cleanup for ' + str(self))
self.children = [child for child in self.children if child.is_alive()]
self.lock.release() | 0.006623 |
def getAltitudeFromLatLon(self, lat, lon):
"""Get the altitude of a lat lon pair, using the four neighbouring
pixels for interpolation.
"""
# print "-----\nFromLatLon", lon, lat
lat -= self.lat
lon -= self.lon
# print "lon, lat", lon, lat
if lat < 0.0 or lat >= 1.0 or lon < 0.0 or lon >= 1.0:
raise WrongTileError(self.lat, self.lon, self.lat+lat, self.lon+lon)
x = lon * (self.size - 1)
y = lat * (self.size - 1)
# print "x,y", x, y
x_int = int(x)
x_frac = x - int(x)
y_int = int(y)
y_frac = y - int(y)
# print "frac", x_int, x_frac, y_int, y_frac
value00 = self.getPixelValue(x_int, y_int)
value10 = self.getPixelValue(x_int+1, y_int)
value01 = self.getPixelValue(x_int, y_int+1)
value11 = self.getPixelValue(x_int+1, y_int+1)
value1 = self._avg(value00, value10, x_frac)
value2 = self._avg(value01, value11, x_frac)
value = self._avg(value1, value2, y_frac)
# print "%4d %4d | %4d\n%4d %4d | %4d\n-------------\n%4d" % (
# value00, value10, value1, value01, value11, value2, value)
return value | 0.003249 |
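The interpolation above is plain bilinear weighting of the four surrounding samples. A standalone sketch on a toy 2x2 grid (not tied to the tile class or its _avg helper):

def bilinear(grid, x, y):
    # grid[row][col]; x and y are fractional positions inside the cell.
    x0, y0 = int(x), int(y)
    fx, fy = x - x0, y - y0
    top = grid[y0][x0] * (1 - fx) + grid[y0][x0 + 1] * fx
    bottom = grid[y0 + 1][x0] * (1 - fx) + grid[y0 + 1][x0 + 1] * fx
    return top * (1 - fy) + bottom * fy

grid = [[10, 20],
        [30, 40]]
print(bilinear(grid, 0.5, 0.5))   # 25.0 -- the average of all four corners
print(bilinear(grid, 0.0, 0.0))   # 10.0 -- exactly on the first sample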
def register():
"""
Return dictionary of tranform factories
"""
registry = {
key: bake_html(key)
for key in ('css', 'css-all', 'tag', 'text')
}
registry['xpath'] = bake_parametrized(xpath_selector, select_all=False)
registry['xpath-all'] = bake_parametrized(xpath_selector, select_all=True)
return registry | 0.002825 |
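The bake_html and bake_parametrized helpers are presumably partial-application factories; a generic sketch of that pattern (functools.partial stands in for whatever they actually do, and the stub transform is invented):

from functools import partial

def xpath_selector(document, expression, select_all):
    # Stub transform; the real one would evaluate an XPath query.
    return (document, expression, select_all)

registry = {
    'xpath': partial(xpath_selector, select_all=False),
    'xpath-all': partial(xpath_selector, select_all=True),
}
print(registry['xpath']('<doc/>', '//a'))   # ('<doc/>', '//a', False)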
def tempfile_writer(target):
    '''Write cache data to a temporary location. When writing is
    complete, rename the file to the actual location. Delete
    the temporary file on any error.'''
tmp = target.parent / ('_%s' % target.name)
try:
with tmp.open('wb') as fd:
yield fd
    except BaseException:
tmp.unlink()
raise
LOG.debug('rename %s -> %s', tmp, target)
tmp.rename(target) | 0.004717 |
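The generator above is presumably wrapped with contextlib.contextmanager at module level. A self-contained sketch of the same write-then-rename pattern, under a hypothetical atomic_writer name:

from contextlib import contextmanager
from pathlib import Path

@contextmanager
def atomic_writer(target):
    tmp = target.parent / ('_%s' % target.name)
    try:
        with tmp.open('wb') as fd:
            yield fd
    except BaseException:
        tmp.unlink()          # drop the partial file, then re-raise
        raise
    tmp.rename(target)        # publish atomically once the write succeeded

with atomic_writer(Path('cache.bin')) as fd:
    fd.write(b'payload')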
def start(self, name: str, increment_count: bool = True) -> None:
"""
Start a named timer.
Args:
name: name of the timer
increment_count: increment the start count for this timer
"""
if not self._timing:
return
now = get_now_utc_pendulum()
# If we were already timing something else, pause that.
if self._stack:
last = self._stack[-1]
self._totaldurations[last] += now - self._starttimes[last]
# Start timing our new thing
if name not in self._starttimes:
self._totaldurations[name] = datetime.timedelta()
self._count[name] = 0
self._starttimes[name] = now
if increment_count:
self._count[name] += 1
self._stack.append(name) | 0.002415 |
def add_named_foreign_key_constraint(
self, name, foreign_table, local_columns, foreign_columns, options
):
"""
Adds a foreign key constraint with a given name.
:param name: The constraint name
:type name: str
:param foreign_table: Table instance or table name
:type foreign_table: Table or str
:type local_columns: list
:type foreign_columns: list
:type options: dict
:rtype: Table
"""
if isinstance(foreign_table, Table):
for column in foreign_columns:
if not foreign_table.has_column(column):
raise ColumnDoesNotExist(column, foreign_table.get_name())
for column in local_columns:
if not self.has_column(column):
raise ColumnDoesNotExist(column, self._name)
constraint = ForeignKeyConstraint(
local_columns, foreign_table, foreign_columns, name, options
)
self._add_foreign_key_constraint(constraint)
return self | 0.002828 |
def list(showgroups):
"""
Show list of Anchore data feeds.
"""
ecode = 0
try:
result = {}
subscribed = {}
available = {}
unavailable = {}
current_user_data = contexts['anchore_auth']['user_info']
feedmeta = anchore_feeds.load_anchore_feedmeta()
for feed in feedmeta.keys():
if feedmeta[feed]['subscribed']:
subscribed[feed] = {}
subscribed[feed]['description'] = feedmeta[feed]['description']
if showgroups:
subscribed[feed]['groups'] = feedmeta[feed]['groups'].keys()
else:
if current_user_data:
tier = int(current_user_data['tier'])
else:
tier = 0
if int(feedmeta[feed]['access_tier']) > tier:
collection = unavailable
else:
collection = available
collection[feed] = {}
collection[feed]['description'] = feedmeta[feed]['description']
if showgroups and collection == available:
collection[feed]['groups'] = feedmeta[feed]['groups'].keys()
if available:
result['Available'] = available
if subscribed:
result['Subscribed'] = subscribed
if unavailable:
result['Unavailable/Insufficient Access Tier'] = unavailable
anchore_print(result, do_formatting=True)
except Exception as err:
anchore_print_err('operation failed')
ecode = 1
sys.exit(ecode) | 0.001848 |
def isalive(path):
"""
Returns True if the file with the given name contains a process
id that is still alive.
Returns False otherwise.
:type path: str
:param path: The name of the pidfile.
:rtype: bool
:return: Whether the process is alive.
"""
# try to read the pid from the pidfile
pid = read(path)
if pid is None:
return False
# Check if a process with the given pid exists.
try:
os.kill(pid, 0) # Signal 0 does not kill, but check.
    except OSError as err:
        if err.errno == errno.ESRCH:  # No such process.
            return False
return False
return True | 0.001439 |
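os.kill(pid, 0) is the usual POSIX liveness probe: signal 0 delivers nothing and only reports whether the process exists. A standalone version of the same check:

import errno
import os

def pid_exists(pid):
    # Signal 0 performs existence/permission checks without delivering a signal.
    try:
        os.kill(pid, 0)
    except OSError as err:
        return err.errno != errno.ESRCH   # EPERM still means the process exists
    return True

print(pid_exists(os.getpid()))   # True -- our own process is certainly alive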
def cfnumber_to_number(cfnumber):
"""Convert CFNumber to python int or float."""
numeric_type = cf.CFNumberGetType(cfnumber)
cfnum_to_ctype = {kCFNumberSInt8Type: c_int8, kCFNumberSInt16Type: c_int16,
kCFNumberSInt32Type: c_int32,
kCFNumberSInt64Type: c_int64,
kCFNumberFloat32Type: c_float,
kCFNumberFloat64Type: c_double,
kCFNumberCharType: c_byte, kCFNumberShortType: c_short,
kCFNumberIntType: c_int, kCFNumberLongType: c_long,
kCFNumberLongLongType: c_longlong,
kCFNumberFloatType: c_float,
kCFNumberDoubleType: c_double,
kCFNumberCFIndexType: CFIndex,
kCFNumberCGFloatType: CGFloat}
if numeric_type in cfnum_to_ctype:
t = cfnum_to_ctype[numeric_type]
result = t()
if cf.CFNumberGetValue(cfnumber, numeric_type, byref(result)):
return result.value
else:
raise Exception(
'cfnumber_to_number: unhandled CFNumber type %d' % numeric_type) | 0.000863 |
def remove(self):
"""
Call this to remove the node/replicas from the ring.
"""
pipeline = self.conn.pipeline()
for replica in self.replicas:
pipeline.zrem(self.key, '{start}:{name}'.format(
start=replica[0],
name=replica[1]
))
pipeline.execute()
self._notify() | 0.005376 |
def parameters(self):
"""
Get the dictionary of parameters (either ra,dec or l,b)
:return: dictionary of parameters
"""
if self._coord_type == 'galactic':
return collections.OrderedDict((('l', self.l), ('b', self.b)))
else:
return collections.OrderedDict((('ra', self.ra), ('dec', self.dec))) | 0.00813 |
def list_minus(l: List, minus: List) -> List:
"""Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
"""
return [o for o in l if o not in minus] | 0.010753 |
def do_command(self, words):
"""Parse and act upon the command in the list of strings `words`."""
self.output = ''
self._do_command(words)
return self.output | 0.010582 |
def remove(self, item):
"""Remove an item from the list
:param item: The item to remove from the list.
:raises ValueError: If the item is not present in the list.
"""
if item not in self:
raise ValueError('objectlist.remove(item) failed, item not in list')
item_path = self._view_path_for(item)
giter = self._iter_for(item)
del self[giter]
self.emit('item-removed', item, item_path) | 0.006424 |
def save(path, im):
"""
Saves an image to file.
    If the image is of float type, it is assumed to have values in [0, 1].
Parameters
----------
path : str
Path to which the image will be saved.
im : ndarray (image)
Image.
"""
from PIL import Image
if im.dtype == np.uint8:
pil_im = Image.fromarray(im)
else:
pil_im = Image.fromarray((im*255).astype(np.uint8))
pil_im.save(path) | 0.002203 |
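A quick standalone equivalent of the float branch above (requires Pillow and NumPy; the output filename is arbitrary):

import numpy as np
from PIL import Image

im = np.linspace(0.0, 1.0, 64 * 64).reshape(64, 64)   # float image in [0, 1]
Image.fromarray((im * 255).astype(np.uint8)).save('gradient.png')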
def get_bgcolor(self, index):
"""Background color depending on value"""
value = self.get_value(index)
if index.column() < 3:
color = ReadOnlyCollectionsModel.get_bgcolor(self, index)
else:
if self.remote:
color_name = value['color']
else:
color_name = get_color_name(value)
color = QColor(color_name)
color.setAlphaF(.2)
return color | 0.004193 |
def __do_case_6_work(d_w, d_u, case_1, case_2, case_3, dfs_data):
"""Encapsulates the work that will be done for case 6 of __embed_frond,
since it gets used in more than one place."""
# --We should only ever see u-cases 1 and 3
if case_2:
# --We should never get here
return False
comp_d_w = abs(d_w)
# --Add the frond to the right side
__insert_frond_RF(d_w, d_u, dfs_data)
# --Add uw to Rm
m = dfs_data['FG']['m']
Rm = R(m, dfs_data)
if comp_d_w < Rm['x']:
Rm['x'] = d_w
if d_u > Rm['y']:
Rm['y'] = d_u
# --Case 3 requires a bit of extra work
if case_3:
Rm['x'] = d_w
u_m1 = u(m-1, dfs_data)
while comp_d_w < u_m1:
merge_Fm(dfs_data)
m = dfs_data['FG']['m']
u_m1 = u(m-1, dfs_data)
#else:
#print "Case 6 work, u-case 1"
return True | 0.003326 |
def OnMacroListLoad(self, event):
"""Macro list load event handler"""
# Get filepath from user
wildcards = get_filetypes2wildcards(["py", "all"]).values()
wildcard = "|".join(wildcards)
message = _("Choose macro file.")
style = wx.OPEN
filepath, filterindex = \
self.interfaces.get_filepath_findex_from_user(wildcard, message,
style)
if filepath is None:
return
# Enter safe mode because macro file could be harmful
post_command_event(self.main_window, self.main_window.SafeModeEntryMsg)
# Load macros from file
self.main_window.actions.open_macros(filepath)
event.Skip() | 0.002604 |
def _create_table(self, table_name, column_types, primary=None, nullable=()):
"""Creates a sqlite3 table from the given metadata.
Parameters
----------
column_types : list of (str, str) pairs
First element of each tuple is the column name, second element is the sqlite3 type
primary : str, optional
Which column is the primary key
nullable : iterable, optional
        Names of columns which are allowed to contain null values
"""
require_string(table_name, "table name")
require_iterable_of(column_types, tuple, name="rows")
if primary is not None:
require_string(primary, "primary")
require_iterable_of(nullable, str, name="nullable")
column_decls = []
for column_name, column_type in column_types:
decl = "%s %s" % (column_name, column_type)
if column_name == primary:
decl += " UNIQUE PRIMARY KEY"
if column_name not in nullable:
decl += " NOT NULL"
column_decls.append(decl)
column_decl_str = ", ".join(column_decls)
create_table_sql = \
"CREATE TABLE %s (%s)" % (table_name, column_decl_str)
self.execute_sql(create_table_sql) | 0.002346 |
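For reference, this is the DDL string the loop assembles for a hypothetical table (the table and column names below are invented):

column_types = [('id', 'INT'), ('name', 'TEXT'), ('comment', 'TEXT')]
primary, nullable = 'id', ('comment',)

decls = []
for column_name, column_type in column_types:
    decl = '%s %s' % (column_name, column_type)
    if column_name == primary:
        decl += ' UNIQUE PRIMARY KEY'
    if column_name not in nullable:
        decl += ' NOT NULL'
    decls.append(decl)

print('CREATE TABLE %s (%s)' % ('people', ', '.join(decls)))
# CREATE TABLE people (id INT UNIQUE PRIMARY KEY NOT NULL, name TEXT NOT NULL, comment TEXT)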
def clear_session(self, response):
"""Clear the session.
This method is invoked when the session is found to be invalid.
Subclasses can override this method to implement a custom session
reset.
"""
session.clear()
# if flask-login is installed, we try to clear the
# "remember me" cookie, just in case it is set
if 'flask_login' in sys.modules:
remember_cookie = current_app.config.get('REMEMBER_COOKIE',
'remember_token')
response.set_cookie(remember_cookie, '', expires=0, max_age=0) | 0.003145 |
def to_gpio(self, spec):
"""
Parses a pin *spec*, returning the equivalent Broadcom GPIO port number
or raising a :exc:`ValueError` exception if the spec does not represent
a GPIO port.
The *spec* may be given in any of the following forms:
* An integer, which will be accepted as a GPIO number
* 'GPIOn' where n is the GPIO number
* 'WPIn' where n is the `wiringPi`_ pin number
* 'BCMn' where n is the GPIO number (alias of GPIOn)
* 'BOARDn' where n is the physical pin number on the main header
* 'h:n' where h is the header name and n is the physical pin number
(for example J8:5 is physical pin 5 on header J8, which is the main
header on modern Raspberry Pis)
.. _wiringPi: http://wiringpi.com/pins/
"""
if isinstance(spec, int):
if not 0 <= spec < 54:
raise PinInvalidPin('invalid GPIO port %d specified '
'(range 0..53) ' % spec)
return spec
else:
if isinstance(spec, bytes):
spec = spec.decode('ascii')
spec = spec.upper()
if spec.isdigit():
return self.to_gpio(int(spec))
if spec.startswith('GPIO') and spec[4:].isdigit():
return self.to_gpio(int(spec[4:]))
elif spec.startswith('BCM') and spec[3:].isdigit():
return self.to_gpio(int(spec[3:]))
elif spec.startswith('WPI') and spec[3:].isdigit():
main_head = 'P1' if 'P1' in self.headers else 'J8'
try:
return self.to_gpio({
0: '%s:11' % main_head,
1: '%s:12' % main_head,
2: '%s:13' % main_head,
3: '%s:15' % main_head,
4: '%s:16' % main_head,
5: '%s:18' % main_head,
6: '%s:22' % main_head,
7: '%s:7' % main_head,
8: '%s:3' % main_head,
9: '%s:5' % main_head,
10: '%s:24' % main_head,
11: '%s:26' % main_head,
12: '%s:19' % main_head,
13: '%s:21' % main_head,
14: '%s:23' % main_head,
15: '%s:8' % main_head,
16: '%s:10' % main_head,
17: 'P5:3',
18: 'P5:4',
19: 'P5:5',
20: 'P5:6',
21: '%s:29' % main_head,
22: '%s:31' % main_head,
23: '%s:33' % main_head,
24: '%s:35' % main_head,
25: '%s:37' % main_head,
26: '%s:32' % main_head,
27: '%s:36' % main_head,
28: '%s:38' % main_head,
29: '%s:40' % main_head,
30: '%s:27' % main_head,
31: '%s:28' % main_head,
}[int(spec[3:])])
except KeyError:
raise PinInvalidPin('%s is not a valid wiringPi pin' % spec)
elif ':' in spec:
header, pin = spec.split(':', 1)
if pin.isdigit():
try:
header = self.headers[header]
except KeyError:
raise PinInvalidPin(
'there is no %s header on this Pi' % header)
try:
function = header.pins[int(pin)].function
except KeyError:
raise PinInvalidPin(
'no such pin %s on header %s' % (pin, header.name))
if function.startswith('GPIO') and function[4:].isdigit():
return self.to_gpio(int(function[4:]))
else:
raise PinInvalidPin('%s is not a GPIO pin' % spec)
elif spec.startswith('BOARD') and spec[5:].isdigit():
main_head = ({'P1', 'J8', 'SODIMM'} & set(self.headers)).pop()
return self.to_gpio('%s:%s' % (main_head, spec[5:]))
raise PinInvalidPin('%s is not a valid pin spec' % spec) | 0.00156 |
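For orientation: on a 40-pin board, the specs 17, 'GPIO17', 'BCM17', 'BOARD11', 'J8:11' and 'WPI0' should all resolve to GPIO 17, since physical pin 11 on the J8 header carries GPIO17 and wiringPi numbers that pin 0 (an expectation sketch, not captured output). The digit/prefix part of the normalisation can be illustrated standalone:

def normalise(spec):
    # Minimal sketch: the real method additionally consults the detected
    # header layout to resolve BOARD/WPI/header:pin forms.
    spec = str(spec).upper()
    for prefix in ('GPIO', 'BCM', ''):
        if spec.startswith(prefix) and spec[len(prefix):].isdigit():
            return int(spec[len(prefix):])
    raise ValueError('%s needs the board layout to resolve' % spec)

print(normalise('GPIO17'), normalise('BCM17'), normalise(17))   # 17 17 17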