def deep_unicode(s, encodings=None):
    """Recursively decode S (including nested lists, tuples and dicts)
    using the first codec in ``encodings`` that succeeds."""
    if encodings is None:
        encodings = ['utf-8', 'latin-1']
    if isinstance(s, (list, tuple)):
        return [deep_unicode(i, encodings) for i in s]
    if isinstance(s, dict):
        return dict([
            (deep_unicode(key, encodings), deep_unicode(s[key], encodings))
            for key in s
        ])
    elif isinstance(s, str):
        for encoding in encodings:
            try:
                return s.decode(encoding)
            except (UnicodeDecodeError, AttributeError):
                pass
    return s
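A short usage sketch. Hedged: the function above targets Python 2, where a plain str carries a .decode() method; under Python 3 the AttributeError branch means strings are returned unchanged.

# Python 2 illustration -- nested containers are decoded recursively:
payload = {'name': 'caf\xc3\xa9', 'tags': ['r\xc3\xa9sum\xc3\xa9']}
print(deep_unicode(payload))
# {u'name': u'caf\xe9', u'tags': [u'r\xe9sum\xe9']}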
def remove(self, element):
"""
Return a new PSet with element removed. Raises KeyError if element is not present.
>>> s1 = s(1, 2)
>>> s1.remove(2)
pset([1])
"""
if element in self._map:
return self.evolver().remove(element).persistent()
    raise KeyError("Element '%s' not present in PSet" % element)
def main(argv=sys.argv):
# type: (List[str]) -> int
"""Parse and check the command line arguments."""
parser = optparse.OptionParser(
usage="""\
usage: %prog [options] -o <output_path> <module_path> [exclude_pattern, ...]
Look recursively in <module_path> for Python modules and packages and create
one reST file with automodule directives per package in the <output_path>.
The <exclude_pattern>s can be file and/or directory patterns that will be
excluded from generation.
Note: By default this script will not overwrite already created files.""")
parser.add_option('-o', '--output-dir', action='store', dest='destdir',
help='Directory to place all output', default='api')
parser.add_option('-s', '--source-dir', action='store', dest='srcdir',
help='Documentation source directory', default=BASEDIR)
parser.add_option('-n', '--docname', action='store', dest='docname',
help='Index document name', default='api')
parser.add_option('-l', '--follow-links', action='store_true',
dest='followlinks', default=False,
help='Follow symbolic links. Powerful when combined '
'with collective.recipe.omelette.')
parser.add_option('-P', '--private', action='store_true',
dest='includeprivate',
help='Include "_private" modules')
parser.add_option('--implicit-namespaces', action='store_true',
dest='implicit_namespaces',
help='Interpret module paths according to PEP-0420 '
'implicit namespaces specification')
parser.add_option('--version', action='store_true', dest='show_version',
help='Show version information and exit')
parser.add_option('--clean', action='store_true', dest='cleanup',
help='Clean up generated files and exit')
group = parser.add_option_group('Extension options')
for ext in EXTENSIONS:
group.add_option('--ext-' + ext, action='store_true',
dest='ext_' + ext, default=False,
help='enable %s extension' % ext)
(opts, args) = parser.parse_args(argv[1:])
# Make this more explicitly the current directory.
if not opts.srcdir:
opts.srcdir = '.'
if opts.show_version:
print('Sphinx (sphinx-apidoc) %s' % __display_version__)
return 0
if opts.cleanup:
print("Removing generated API docs from '{}'...".format(opts.srcdir))
return cleanup_api_docs(opts)
if not args:
parser.error('A package path is required.')
opts.rootpath, opts.excludes = args[0], args[1:]
    return generate_api_docs(opts)
def parse(s):
"""
Parse string representation back into the SparseVector.
>>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )')
SparseVector(4, {0: 4.0, 1: 5.0})
"""
start = s.find('(')
if start == -1:
raise ValueError("Tuple should start with '('")
end = s.find(')')
if end == -1:
raise ValueError("Tuple should end with ')'")
s = s[start + 1: end].strip()
size = s[: s.find(',')]
try:
size = int(size)
except ValueError:
raise ValueError("Cannot parse size %s." % size)
ind_start = s.find('[')
if ind_start == -1:
raise ValueError("Indices array should start with '['.")
ind_end = s.find(']')
if ind_end == -1:
raise ValueError("Indices array should end with ']'")
new_s = s[ind_start + 1: ind_end]
ind_list = new_s.split(',')
try:
indices = [int(ind) for ind in ind_list if ind]
except ValueError:
raise ValueError("Unable to parse indices from %s." % new_s)
s = s[ind_end + 1:].strip()
val_start = s.find('[')
if val_start == -1:
raise ValueError("Values array should start with '['.")
val_end = s.find(']')
if val_end == -1:
raise ValueError("Values array should end with ']'.")
val_list = s[val_start + 1: val_end].split(',')
try:
values = [float(val) for val in val_list if val]
except ValueError:
raise ValueError("Unable to parse values from %s." % s)
    return SparseVector(size, indices, values)
def dlf_notation(atom_key):
"""Return element for atom key using DL_F notation."""
split = list(atom_key)
element = ''
number = False
count = 0
while number is False:
element = "".join((element, split[count]))
count += 1
if is_number(split[count]) is True:
number = True
    # Some output formats (e.g. Material Studio) can also place integers at
    # the beginning of the string. Because this decipher function is very
    # general in use, such leading integers have to be removed here. Standard
    # DL_F notation never starts with an integer, so this does not affect it.
    # '?' atoms are stripped for the same reason.
    element = "".join(i for i in element if not is_number(i))
    element = "".join(i for i in element if i != '?')
    return element
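A quick check of the deciphering above. ``is_number`` is a helper defined elsewhere in the module, so a minimal stand-in is supplied here for the sketch:

def is_number(ch):
    # Minimal stand-in for the module's helper.
    return ch.isdigit()

print(dlf_notation('C1'))    # -> 'C'
print(dlf_notation('Ca2'))   # -> 'Ca'
print(dlf_notation('1N3'))   # -> 'N'  (leading integer stripped)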
def add_newline(self, number=1):
    """Start again at a new line. If ``number`` is specified,
    that many newlines are added."""
    if isinstance(number, int):
        try:
            self.page._add_newline(self.font, number, self.double_spacing)
        except ValueError:
            self.add_page()
    else:
        raise TypeError("Number of newlines must be an integer.")
def item(self, current_item):
"""
Return the current item.
@param current_item: Current item
    @type current_item: django.models
@return: Value and label of the current item
@rtype : dict
"""
return {
'value': text(getattr(current_item, self.get_field_name())),
'label': self.label(current_item)
    }
def do_mfa(self, args):
"""
Enter a 6-digit MFA token. Nephele will execute the appropriate
`aws` command line to authenticate that token.
mfa -h for more details
"""
    parser = CommandArgumentParser("mfa")
    parser.add_argument(dest='token', help='MFA token value')
    parser.add_argument("-p", "--profile", dest='awsProfile',
                        default=AwsConnectionFactory.instance.getProfile(),
                        help='AWS profile to authenticate against')
    args = vars(parser.parse_args(args))
    token = args['token']
    awsProfile = args['awsProfile']
    arn = AwsConnectionFactory.instance.load_arn(awsProfile)
    credentials_command = ["aws", "--profile", awsProfile, "--output", "json",
                           "sts", "get-session-token",
                           "--serial-number", arn, "--token-code", token]
    output = run_cmd(credentials_command)  # throws on non-zero exit
    credentials = json.loads("\n".join(output.stdout))['Credentials']
    AwsConnectionFactory.instance.setMfaCredentials(credentials, awsProfile)
def get_provider_choices():
"""Returns a list of currently available metrics providers
suitable for use as model fields choices.
"""
choices = []
for provider in METRICS_PROVIDERS:
choices.append((provider.alias, provider.title))
    return choices
def _find_fuse_next(working_list, homology, tm):
'''Find the next sequence to fuse, and fuse it (or raise exception).
:param homology: length of terminal homology in bp
:type homology: int
:raises: AmbiguousGibsonError if there is more than one way for the
fragment ends to combine.
GibsonOverlapError if no homology match can be found.
'''
# 1. Take the first sequence and find all matches
# Get graphs:
# a) pattern watson : targets watson
# b) pattern watson : targets crick
# c) pattern crick: targets watson
# d) pattern crick: targets crick
pattern = working_list[0]
targets = working_list[1:]
# Output graph nodes of terminal binders:
# (destination, size, strand1, strand2)
def graph_strands(strand1, strand2):
graph = []
for i, target in enumerate(targets):
matchlen = homology_report(pattern, target, strand1, strand2,
cutoff=homology, min_tm=tm)
if matchlen:
graph.append((i, matchlen, strand1, strand2))
return graph
graph_ww = graph_strands('w', 'w')
graph_wc = graph_strands('w', 'c')
graph_cw = graph_strands('c', 'w')
graph_cc = graph_strands('c', 'c')
graphs_w = graph_ww + graph_wc
graphs_c = graph_cw + graph_cc
graphs = graphs_w + graphs_c
# 2. See if there's more than one result on a strand.
# If so, throw an exception.
if len(graphs_w) > 1 or len(graphs_c) > 1:
raise AmbiguousGibsonError('multiple compatible ends.')
if len(graphs_w) == len(graphs_c) == 0:
raise GibsonOverlapError('Failed to find compatible Gibson ends.')
# 3. There must be one result. Where is it?
# If there's one result on each strand, go with the one that matches the
# pattern watson strand (will occur first - index 0)
match = graphs[0]
# 4. Combine pieces together
# 4a. Orient pattern sequence
if match[2] == 'c':
left_side = pattern.reverse_complement()
else:
left_side = pattern
# 4b. Orient target sequence
if match[3] == 'w':
right_side = working_list.pop(match[0] + 1).reverse_complement()
else:
right_side = working_list.pop(match[0] + 1)
working_list[0] = left_side + right_side[match[1]:]
    return working_list
def _unpublish(self):
"""
    Process an unpublish action on the related object; returns True if a change was made.
Only objects with a current active version will be updated.
"""
obj = self.content_object
actioned = False
# Only update if needed
if obj.current_version is not None:
obj.current_version = None
obj.save(update_fields=['current_version'])
actioned = True
    return actioned
def iflat_tasks(self, status=None, op="==", nids=None):
"""
Generator to iterate over all the tasks of the :class:`Flow`.
If status is not None, only the tasks whose status satisfies
the condition (task.status op status) are selected
status can be either one of the flags defined in the :class:`Task` class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
"""
    return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=False)
def detect_devices(soapy_args=''):
"""Returns detected SoapySDR devices"""
devices = simplesoapy.detect_devices(soapy_args, as_string=True)
text = []
text.append('Detected SoapySDR devices:')
if devices:
for i, d in enumerate(devices):
text.append(' {}'.format(d))
else:
text.append(' No devices found!')
    return (devices, '\n'.join(text))
def to_script(self, wf_name='wf'):
"""Generated and print the scriptcwl script for the currunt workflow.
Args:
wf_name (str): string used for the WorkflowGenerator object in the
generated script (default: ``wf``).
"""
self._closed()
script = []
# Workflow documentation
# if self.documentation:
# if is_multiline(self.documentation):
# print('doc = """')
# print(self.documentation)
# print('"""')
# print('{}.set_documentation(doc)'.format(wf_name))
# else:
# print('{}.set_documentation(\'{}\')'.format(wf_name,
# self.documentation))
# Workflow inputs
params = []
returns = []
for name, typ in self.wf_inputs.items():
params.append('{}=\'{}\''.format(name, typ))
returns.append(name)
script.append('{} = {}.add_inputs({})'.format(
', '.join(returns), wf_name, ', '.join(params)))
# Workflow steps
returns = []
for name, step in self.wf_steps.items():
pyname = step.python_name
returns = ['{}_{}'.format(pyname, o) for o in step['out']]
params = ['{}={}'.format(name, python_name(param))
for name, param in step['in'].items()]
script.append('{} = {}.{}({})'.format(
', '.join(returns), wf_name, pyname, ', '.join(params)))
# Workflow outputs
params = []
for name, details in self.wf_outputs.items():
params.append('{}={}'.format(
name, python_name(details['outputSource'])))
script.append('{}.add_outputs({})'.format(wf_name, ', '.join(params)))
    return '\n'.join(script)
def expr_items(expr):
"""
Returns a set() of all items (symbols and choices) that appear in the
expression 'expr'.
"""
res = set()
def rec(subexpr):
if subexpr.__class__ is tuple:
# AND, OR, NOT, or relation
rec(subexpr[1])
# NOTs only have a single operand
if subexpr[0] is not NOT:
rec(subexpr[2])
else:
# Symbol or choice
res.add(subexpr)
rec(expr)
    return res
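A self-contained smoke test. In the real module ``NOT`` (and the other relation tokens) are module-level constants, so simple sentinels stand in for them here:

NOT = 'NOT'
AND = 'AND'
expr = (AND, 'FOO', (NOT, 'BAR'))
print(expr_items(expr))  # -> {'FOO', 'BAR'}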
def delete(self, id):
"""
Deletes document with ID on all Solr cores
"""
for core in self.endpoints:
        self._send_solr_command(self.endpoints[core], "{\"delete\" : { \"id\" : \"%s\"}}" % (id,))
def get_tasks(self, list_id, completed=False):
''' Gets tasks for the list with the given ID, filtered by the given completion flag '''
    return tasks_endpoint.get_tasks(self, list_id, completed=completed)
def target_to_ipv6_long(target):
""" Attempt to return a IPv6 long-range list from a target string. """
splitted = target.split('-')
if len(splitted) != 2:
return None
try:
start_packed = inet_pton(socket.AF_INET6, splitted[0])
end_packed = inet_pton(socket.AF_INET6, splitted[1])
except socket.error:
return None
if end_packed < start_packed:
return None
    return ipv6_range_to_list(start_packed, end_packed)
def merge_table_records(self, table, record_data, match_column_names):
""" Responsys.mergeTableRecords call
Accepts:
InteractObject table
RecordData record_data
list match_column_names
Returns a MergeResult
"""
table = table.get_soap_object(self.client)
record_data = record_data.get_soap_object(self.client)
return MergeResult(self.call(
        'mergeTableRecords', table, record_data, match_column_names))
def site_cleanup(sender, action, instance, **kwargs):
"""
Make sure there is only a single preferences object per site.
So remove sites from pre-existing preferences objects.
"""
if action == 'post_add':
if isinstance(instance, Preferences) \
and hasattr(instance.__class__, 'objects'):
site_conflicts = instance.__class__.objects.filter(
sites__in=instance.sites.all()
).only('id').distinct()
for conflict in site_conflicts:
if conflict.id != instance.id:
for site in instance.sites.all():
                        conflict.sites.remove(site)
def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv:
""" Create vectorized environments """
envs = DummyVecEnv([self._creation_function(i, seed, preset) for i in range(parallel_envs)])
if self.frame_history is not None:
envs = VecFrameStack(envs, self.frame_history)
    return envs
def update(self, pid, session, **kwargs):
    '''taobao.fenxiao.product.update: update a product
    - Updates product data on the distribution platform; the call fails if no update data is passed.
    - When adding or removing SKUs, the original sku_ids field is ignored; use sku_properties and sku_properties_del instead.'''
request = TOPRequest('taobao.fenxiao.product.update')
request['pid'] = pid
for k, v in kwargs.iteritems():
if k not in ('name', 'standard_price', 'cost_price', 'retail_price_low', 'retail_price_high', 'outer_id', 'quantity', 'alarm_number','desc','prov','city','postage_type','postage_id','postage_ordinary','postage_fast','postage_ems','status','sku_ids','sku_cost_prices','sku_quantitys','sku_outer_ids','have_invoice','have_guarantee','discount_id','sku_standard_prices','sku_properties','sku_properties_del','is_authz','pic_path','image','properties','property_alias','input_properties','dealer_cost_price','sku_dealer_cost_prices','category_id') and v==None: continue
request[k] = v
self.create(self.execute(request, session), fields=['pid','modified'], models={'modified':TOPDate})
    return self
def get_service_display_name(name):
"""
Get the service display name for the given service name.
@see: L{get_service}
@type name: str
@param name: Service unique name. You can get this value from the
C{ServiceName} member of the service descriptors returned by
L{get_services} or L{get_active_services}.
@rtype: str
@return: Service display name.
"""
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE
) as hSCManager:
        return win32.GetServiceDisplayName(hSCManager, name)
def inv(self):
"""In place invert"""
self.v = 1/self.v
tmp = self.v**2
if self.deriv > 1:
self.dd[:] = tmp*(2*self.v*np.outer(self.d, self.d) - self.dd)
if self.deriv > 0:
        self.d[:] = -tmp*self.d[:]
def convective_adjustment_direct(p, T, c, lapserate=6.5):
"""Convective Adjustment to a specified lapse rate.
Input argument lapserate gives the lapse rate expressed in degrees K per km
(positive means temperature increasing downward).
Default lapse rate is 6.5 K / km.
Returns the adjusted Column temperature.
inputs:
p is pressure in hPa
T is temperature in K
    c is heat capacity in J / m**2 / K
Implements the conservative adjustment algorithm from Akmaev (1991) MWR
"""
# largely follows notation and algorithm in Akmaev (1991) MWR
alpha = const.Rd / const.g * lapserate / 1.E3 # same dimensions as lapserate
L = p.size
### now handles variable lapse rate
pextended = np.insert(p,0,const.ps) # prepend const.ps = 1000 hPa as ref pressure to compute potential temperature
Pi = np.cumprod((p / pextended[:-1])**alpha) # Akmaev's equation 14 recurrence formula
beta = 1./Pi
theta = T * beta
q = Pi * c
n_k = np.zeros(L, dtype=np.int8)
theta_k = np.zeros_like(p)
s_k = np.zeros_like(p)
t_k = np.zeros_like(p)
thetaadj = Akmaev_adjustment_multidim(theta, q, beta, n_k,
theta_k, s_k, t_k)
T = thetaadj * Pi
    return T
def read_vpcs_stdout(self):
"""
Reads the standard output of the VPCS process.
Only use when the process has been stopped or has crashed.
"""
output = ""
if self._vpcs_stdout_file:
try:
with open(self._vpcs_stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warn("Could not read {}: {}".format(self._vpcs_stdout_file, e))
    return output
def line_intersects_itself(lons, lats, closed_shape=False):
"""
Return ``True`` if line of points intersects itself.
    A line whose last point repeats the first one is considered
    to intersect itself.
The line is defined by lists (or numpy arrays) of points'
longitudes and latitudes (depth is not taken into account).
:param closed_shape:
If ``True`` the line will be checked twice: first time with
its original shape and second time with the points sequence
being shifted by one point (the last point becomes first,
the first turns second and so on). This is useful for
checking that the sequence of points defines a valid
:class:`~openquake.hazardlib.geo.polygon.Polygon`.
"""
assert len(lons) == len(lats)
if len(lons) <= 3:
# line can not intersect itself unless there are
# at least four points
return False
west, east, north, south = get_spherical_bounding_box(lons, lats)
proj = OrthographicProjection(west, east, north, south)
xx, yy = proj(lons, lats)
if not shapely.geometry.LineString(list(zip(xx, yy))).is_simple:
return True
if closed_shape:
xx, yy = proj(numpy.roll(lons, 1), numpy.roll(lats, 1))
if not shapely.geometry.LineString(list(zip(xx, yy))).is_simple:
return True
    return False
def get_h_distance(self, latlonalt1, latlonalt2):
'''get the horizontal distance between threat and vehicle'''
(lat1, lon1, alt1) = latlonalt1
(lat2, lon2, alt2) = latlonalt2
lat1 = radians(lat1)
lon1 = radians(lon1)
lat2 = radians(lat2)
lon2 = radians(lon2)
dLat = lat2 - lat1
dLon = lon2 - lon1
# math as per mavextra.distance_two()
a = sin(0.5 * dLat)**2 + sin(0.5 * dLon)**2 * cos(lat1) * cos(lat2)
c = 2.0 * atan2(sqrt(a), sqrt(1.0 - a))
    return 6371 * 1000 * c
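A quick haversine sanity check, assuming the trig helpers come from ``math`` (as the module presumably imports them). Since ``self`` is unused, the unbound function can be exercised directly:

from math import radians, sin, cos, atan2, sqrt

d = get_h_distance(None, (0.0, 0.0, 0), (0.0, 1.0, 0))
print(round(d))  # ~111195 m: one degree of longitude at the equator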
def makeWidget(self):
"""
Return a single widget that should be placed in the second tree column.
The widget must be given three attributes:
========== ============================================================
sigChanged a signal that is emitted when the widget's value is changed
value a function that returns the value
setValue a function that sets the value
========== ============================================================
This is a good function to override in subclasses.
"""
opts = self.param.opts
t = opts['type']
if t == 'int':
defs = {
'value': 0, 'min': None, 'max': None, 'int': True,
'step': 1.0, 'minStep': 1.0, 'dec': False,
'siPrefix': False, 'suffix': ''
}
defs.update(opts)
if 'limits' in opts:
defs['bounds'] = opts['limits']
w = SpinBox()
w.setOpts(**defs)
w.sigChanged = w.sigValueChanged
w.sigChanging = w.sigValueChanging
elif t == 'float':
defs = {
'value': 0, 'min': None, 'max': None,
'step': 1.0, 'dec': False,
'siPrefix': False, 'suffix': ''
}
defs.update(opts)
if 'limits' in opts:
defs['bounds'] = opts['limits']
w = SpinBox()
w.setOpts(**defs)
w.sigChanged = w.sigValueChanged
w.sigChanging = w.sigValueChanging
elif t == 'bool':
w = QtWidgets.QCheckBox()
w.sigChanged = w.toggled
w.value = w.isChecked
w.setValue = w.setChecked
w.setEnabled(not opts.get('readonly', False))
self.hideWidget = False
elif t == 'str':
w = QtWidgets.QLineEdit()
w.sigChanged = w.editingFinished
w.value = lambda: asUnicode(w.text())
w.setValue = lambda v: w.setText(asUnicode(v))
w.sigChanging = w.textChanged
elif t == 'color':
w = ColorButton()
w.sigChanged = w.sigColorChanged
w.sigChanging = w.sigColorChanging
w.value = w.color
w.setValue = w.setColor
self.hideWidget = False
w.setFlat(True)
w.setEnabled(not opts.get('readonly', False))
elif t == 'colormap':
# from pyqtgraph_karl.widgets.GradientWidget import GradientWidget
# ## need this here to avoid import loop
w = GradientWidget(orientation='bottom')
w.sigChanged = w.sigGradientChangeFinished
w.sigChanging = w.sigGradientChanged
w.value = w.colorMap
w.setValue = w.setColorMap
self.hideWidget = False
else:
raise Exception("Unknown type '%s'" % asUnicode(t))
    return w
def _normalize_slice(self, index, pipe=None):
"""Given a :obj:`slice` *index*, return a 4-tuple
    ``(start, stop, step, forward)``. The first three items can be used
with the ``range`` function to retrieve the values associated with the
slice; the last item indicates the direction.
"""
if index.step == 0:
raise ValueError
pipe = self.redis if pipe is None else pipe
len_self = self.__len__(pipe)
step = index.step or 1
forward = step > 0
step = abs(step)
if index.start is None:
start = 0 if forward else len_self - 1
elif index.start < 0:
start = max(len_self + index.start, 0)
else:
start = min(index.start, len_self)
if index.stop is None:
stop = len_self if forward else -1
elif index.stop < 0:
stop = max(len_self + index.stop, 0)
else:
stop = min(index.stop, len_self)
if not forward:
start, stop = min(stop + 1, len_self), min(start + 1, len_self)
    return start, stop, step, forward, len_self
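A hand-checked sketch. Because the method appears here as a plain function, it can be driven with a stub whose ``__len__`` pretends the backing list has ten items; the ``redis`` attribute is never actually used by the stub:

class _FakeLen10:
    redis = None  # resolved but never exercised
    def __len__(self, pipe=None):
        return 10

f = _FakeLen10()
print(_normalize_slice(f, slice(2, 8, 2)))         # (2, 8, 2, True, 10)
print(_normalize_slice(f, slice(None, None, -1)))  # (0, 10, 1, False, 10)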
def serialize(self):
"""Serializes the ExpectAssert object for collection.
    Warning: this will only grab the information available at call time.
    It is strongly recommended that you only call this once all specs and
    tests have completed.
"""
converted_dict = {
'success': self.success,
'assertion': str(self),
'required': self.required
}
    return converted_dict
def _find_function_from_code(frame, code):
"""
Given a frame and a compiled function code, find the corresponding function object within the frame.
This function addresses the following problem: when handling a stacktrace, we receive information about
which piece of code was being executed in the form of a CodeType object. That objects contains function name,
file name, line number, and the compiled bytecode. What it *doesn't* contain is the function object itself.
So this utility function aims at locating this function object, and it does so by searching through objects
in the preceding local frame (i.e. the frame where the function was called from). We expect that the function
should usually exist there -- either by itself, or as a method on one of the objects.
:param types.FrameType frame: local frame where the function ought to be found somewhere.
:param types.CodeType code: the compiled code of the function to look for.
:returns: the function object, or None if not found.
"""
def find_code(iterable, depth=0):
if depth > 3: return # Avoid potential infinite loops, or generally objects that are too deep.
for item in iterable:
if item is None: continue
found = None
if hasattr(item, "__code__") and item.__code__ == code:
found = item
elif isinstance(item, type) or isinstance(item, ModuleType): # class / module
try:
found = find_code((getattr(item, n, None) for n in dir(item)), depth + 1)
except Exception:
# Sometimes merely getting module's attributes may cause an exception. For example :mod:`six.moves`
# is such an offender...
continue
elif isinstance(item, (list, tuple, set)):
found = find_code(item, depth + 1)
elif isinstance(item, dict):
found = find_code(item.values(), depth + 1)
if found: return found
    return find_code(frame.f_locals.values()) or find_code(frame.f_globals.values())
def _encode_ndef_uri_type(self, data):
"""
Implement NDEF URI Identifier Code.
This is a small hack to replace some well known prefixes (such as http://)
with a one byte code. If the prefix is not known, 0x00 is used.
"""
t = 0x0
for (code, prefix) in uri_identifiers:
if data[:len(prefix)].decode('latin-1').lower() == prefix:
t = code
data = data[len(prefix):]
break
data = yubico_util.chr_byte(t) + data
    return data
def do_xmlattr(_eval_ctx, d, autospace=True):
"""Create an SGML/XML attribute string based on the items in a dict.
All values that are neither `none` nor `undefined` are automatically
escaped:
.. sourcecode:: html+jinja
<ul{{ {'class': 'my_list', 'missing': none,
'id': 'list-%d'|format(variable)}|xmlattr }}>
...
</ul>
Results in something like this:
.. sourcecode:: html
<ul class="my_list" id="list-42">
...
</ul>
As you can see it automatically prepends a space in front of the item
if the filter returned something unless the second parameter is false.
"""
rv = u' '.join(
u'%s="%s"' % (escape(key), escape(value))
for key, value in d.iteritems()
if value is not None and not isinstance(value, Undefined)
)
if autospace and rv:
rv = u' ' + rv
if _eval_ctx.autoescape:
rv = Markup(rv)
    return rv
def __set_amount(self, value):
'''
Sets the amount of the payment operation.
@param value:float
'''
    try:
        self.__amount = quantize(Decimal(str(value)))
    except (ValueError, ArithmeticError):
        raise ValueError('Invalid amount value')
def AddBatchJob(client):
"""Add a new BatchJob to upload operations to.
Args:
client: an instantiated AdWordsClient used to retrieve the BatchJob.
Returns:
The new BatchJob created by the request.
"""
# Initialize appropriate service.
batch_job_service = client.GetService('BatchJobService', version='v201809')
# Create a BatchJob.
batch_job_operations = [{
'operand': {},
'operator': 'ADD'
}]
    return batch_job_service.mutate(batch_job_operations)['value'][0]
def pubkey(self, identity, ecdh=False):
"""Return public key."""
_verify_support(identity)
data = self.vk.to_string()
x, y = data[:32], data[32:]
prefix = bytearray([2 + (bytearray(y)[0] & 1)])
    return bytes(prefix) + x
def tshift(self, periods=1, freq=None, axis=0):
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, default None
Increment to use from the tseries module or time rule (e.g. 'EOM')
axis : int or basestring
Corresponds to the axis that contains the Index
Returns
-------
shifted : NDFrame
Notes
-----
If freq is not specified then tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exist, a
ValueError is thrown
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, 'freq', None)
if freq is None:
freq = getattr(index, 'inferred_freq', None)
if freq is None:
msg = 'Freq was not given and was not set in the index'
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = ('Given freq %s does not match PeriodIndex freq %s' %
(freq.rule_code, orig_freq.rule_code))
raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
    return self._constructor(new_data).__finalize__(self)
def getNodeByName(node, name):
"""
Get the first child node matching a given local name
"""
if node is None:
raise Exception(
"Cannot search for a child '%s' in a None object" % (name,)
)
if not name:
raise Exception("Unspecified name to find node for.")
    try:
        childNode = node.xpath("*[local-name() = '%s']" % name)[0]
    except IndexError:
        return None
    return childNode
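Usage sketch, assuming ``node`` is an lxml element (which the ``.xpath`` call implies):

from lxml import etree

root = etree.fromstring('<a xmlns="urn:x"><b/><c/></a>')
print(getNodeByName(root, 'b'))        # <Element {urn:x}b at 0x...>
print(getNodeByName(root, 'missing'))  # None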
async def async_connect(self):
"""Connect to the ASUS-WRT Telnet server."""
self._reader, self._writer = await asyncio.open_connection(
self._host, self._port)
with (await self._io_lock):
try:
await asyncio.wait_for(self._reader.readuntil(b'login: '), 9)
except asyncio.streams.IncompleteReadError:
_LOGGER.error(
"Unable to read from router on %s:%s" % (
self._host, self._port))
return
except TimeoutError:
_LOGGER.error("Host timeout.")
self._writer.write((self._username + '\n').encode('ascii'))
await self._reader.readuntil(b'Password: ')
self._writer.write((self._password + '\n').encode('ascii'))
self._prompt_string = (await self._reader.readuntil(
b'#')).split(b'\n')[-1]
        self._connected = True
def rndstr(size=16):
"""
Returns a string of random ascii characters or digits
:param size: The length of the string
:return: string
"""
_basech = string.ascii_letters + string.digits
return "".join([rnd.choice(_basech) for _ in range(size)]) | 0.00369 |
def runtime_values(self, **kwargs):
"""
=====API DOCS=====
Context manager that temporarily override runtime level configurations.
:param kwargs: Keyword arguments specifying runtime configuration settings.
:type kwargs: arbitrary keyword arguments
:returns: N/A
:Example:
>>> import tower_cli
>>> from tower_cli.conf import settings
>>> with settings.runtime_values(username='user', password='pass'):
>>> print(tower_cli.get_resource('credential').list())
=====API DOCS=====
"""
# Coerce all values to strings (to be coerced back by configparser
# later) and defenestrate any None values.
for k, v in copy.copy(kwargs).items():
# If the value is None, just get rid of it.
if v is None:
kwargs.pop(k)
continue
# Remove these keys from the cache, if they are present.
self._cache.pop(k, None)
# Coerce values to strings.
kwargs[k] = six.text_type(v)
# Replace the `self._runtime` INI parser with a new one, using
# the context manager's kwargs as the "defaults" (there can never
# be anything other than defaults, but that isn't a problem for our
# purposes because we're using our own precedence system).
#
# Ensure that everything is put back to rights at the end of the
# context manager call.
old_runtime_parser = self._runtime
try:
self._runtime = Parser(defaults=kwargs)
self._runtime.add_section('general')
yield self
finally:
# Revert the runtime configparser object.
self._runtime = old_runtime_parser
# Remove the keys from the cache again, since the settings
# have been reverted.
        for key in kwargs:
            self._cache.pop(key, None)
def visit_Operation(self, expression, *operands):
""" constant folding, if all operands of an expression are a Constant do the math """
if all(isinstance(o, Constant) for o in operands):
expression = constant_folder(expression)
if self._changed(expression, operands):
expression = self._rebuild(expression, operands)
    return expression
def decipher_atom_key(atom_key, forcefield):
"""
Return element for deciphered atom key.
    This function checks if the forcefield specified by the user is supported
and passes the atom key to the appropriate function for deciphering.
Parameters
----------
atom_key : str
The atom key which is to be deciphered.
forcefield : str
The forcefield to which the atom key belongs to.
Returns
-------
str
        A string that is the periodic table element equivalent of the
        forcefield atom key.
"""
load_funcs = {
'DLF': dlf_notation,
'DL_F': dlf_notation,
'OPLS': opls_notation,
'OPLSAA': opls_notation,
'OPLS2005': opls_notation,
'OPLS3': opls_notation,
}
if forcefield.upper() in load_funcs.keys():
return load_funcs[forcefield.upper()](atom_key)
else:
        raise _ForceFieldError(
            ("Unfortunately, '{0}' forcefield is not supported by pyWINDOW."
             " For a list of supported forcefields see the User's Manual or "
             "the MolecularSystem._decipher_atom_keys() function docstring."
             ).format(forcefield))
def _speak_as_digits_inherit(self, element):
"""
    Speak numbers one digit at a time for the element and its descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
self._reverse_speak_as(element, 'digits')
self._isolate_text_node(element)
    self._visit(element, self._speak_as_digits)
def reind_proc(self, inputstring, **kwargs):
"""Add back indentation."""
out = []
level = 0
for line in inputstring.splitlines():
line, comment = split_comment(line.strip())
indent, line = split_leading_indent(line)
level += ind_change(indent)
if line:
line = " " * self.tabideal * level + line
line, indent = split_trailing_indent(line)
level += ind_change(indent)
line = (line + comment).rstrip()
out.append(line)
if level != 0:
complain(CoconutInternalException("non-zero final indentation level", level))
return "\n".join(out) | 0.004237 |
def filter(self, destination_object=None, source_object=None, **kwargs):
"""
See ``QuerySet.filter`` for full documentation
This adds support for ``destination_object`` and ``source_object``
as kwargs. This converts those objects into the values necessary
to handle the ``GenericForeignKey`` fields.
"""
if destination_object:
kwargs.update({
"destination_id": destination_object.pk,
"destination_type": get_for_model(destination_object),
})
if source_object:
kwargs.update({
"source_id": source_object.pk,
"source_type": get_for_model(source_object),
})
    return super(RelatedContentQuerySet, self).filter(**kwargs)
def add(self, synchronous=True, **kwargs):
"""Add provided Content View Component.
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
"""
kwargs = kwargs.copy() # shadow the passed-in kwargs
if 'data' not in kwargs:
# data is required
kwargs['data'] = dict()
if 'component_ids' not in kwargs['data']:
kwargs['data']['components'] = [_payload(self.get_fields(), self.get_values())]
kwargs.update(self._server_config.get_client_kwargs())
response = client.put(self.path('add'), **kwargs)
    return _handle_response(response, self._server_config, synchronous)
def reply_to_request(self, req, rep):
"""
Send a reply for a synchronous request sent by send_request.
The first argument should be an instance of EventRequestBase.
The second argument should be an instance of EventReplyBase.
"""
assert isinstance(req, EventRequestBase)
assert isinstance(rep, EventReplyBase)
rep.dst = req.src
if req.sync:
req.reply_q.put(rep)
else:
        self.send_event(rep.dst, rep)
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'class_name') and self.class_name is not None:
_dict['class'] = self.class_name
if hasattr(self, 'score') and self.score is not None:
_dict['score'] = self.score
if hasattr(self, 'type_hierarchy') and self.type_hierarchy is not None:
_dict['type_hierarchy'] = self.type_hierarchy
    return _dict
def add_molecular_graph(self, molecular_graph, atom_types=None, charges=None, split=True, molecule=None):
"""Add the molecular graph to the data structure
Argument:
| ``molecular_graph`` -- a MolecularGraph instance
Optional arguments:
| ``atom_types`` -- a list with atom type strings
| ``charges`` -- The net atom charges
| ``split`` -- When True, the molecule is split into disconnected
molecules [default=True]
"""
# add atom numbers and molecule indices
new = len(molecular_graph.numbers)
if new == 0: return
prev = len(self.numbers)
offset = prev
self.numbers.resize(prev + new)
self.numbers[-new:] = molecular_graph.numbers
if atom_types is None:
atom_types = [periodic[number].symbol for number in molecular_graph.numbers]
self.atom_types.extend(atom_types)
if charges is None:
charges = [0.0]*len(molecular_graph.numbers)
self.charges.extend(charges)
self.molecules.resize(prev + new)
# add names (autogenerated)
if split:
groups = molecular_graph.independent_vertices
names = [self._get_name(molecular_graph, group) for group in groups]
group_indices = np.zeros(new, int)
for group_index, group in enumerate(groups):
for index in group:
group_indices[index] = group_index
self.names.extend([names[group_index] for group_index in group_indices])
if prev == 0:
self.molecules[:] = group_indices
else:
self.molecules[-new:] = self.molecules[-new]+group_indices+1
else:
if prev == 0:
self.molecules[-new:] = 0
else:
self.molecules[-new:] = self.molecules[-new]+1
name = self._get_name(molecular_graph)
self.names.extend([name]*new)
self._add_graph_bonds(molecular_graph, offset, atom_types, molecule)
self._add_graph_bends(molecular_graph, offset, atom_types, molecule)
self._add_graph_dihedrals(molecular_graph, offset, atom_types, molecule)
    self._add_graph_impropers(molecular_graph, offset, atom_types, molecule)
def _logging_env_conf_overrides(log_init_warnings=None):
"""Returns a dictionary that is empty or has a "logging" key that refers to
the (up to 3) key-value pairs that pertain to logging and are read from the env.
This is mainly a convenience function for ConfigWrapper so that it can accurately
    report the source of the logging settings without duplicating the
    environment-variable parsing.
    """
# This is called from a locked section of _read_logging_config, so don't call that function or you'll get deadlock
global _LOGGING_ENV_CONF_OVERRIDES
if _LOGGING_ENV_CONF_OVERRIDES is not None:
return _LOGGING_ENV_CONF_OVERRIDES
with _LOGGING_ENV_CONF_OVERRIDES_LOCK:
if _LOGGING_ENV_CONF_OVERRIDES is not None:
return _LOGGING_ENV_CONF_OVERRIDES
level_from_env = os.environ.get("PEYOTL_LOGGING_LEVEL")
format_from_env = os.environ.get("PEYOTL_LOGGING_FORMAT")
log_file_path_from_env = os.environ.get("PEYOTL_LOG_FILE_PATH")
_LOGGING_ENV_CONF_OVERRIDES = {}
if level_from_env:
env_w_list = []
_get_logging_level(level_from_env, env_w_list)
if len(env_w_list) > 0:
if log_init_warnings is not None:
log_init_warnings.extend(env_w_list)
log_init_warnings.append('PEYOTL_LOGGING_LEVEL is invalid. Relying on setting from conf file.')
else:
_LOGGING_ENV_CONF_OVERRIDES.setdefault("logging", {})['level'] = level_from_env
if format_from_env:
env_w_list = []
_get_logging_formatter(format_from_env, env_w_list)
if len(env_w_list) > 0:
if log_init_warnings is not None:
log_init_warnings.extend(env_w_list)
log_init_warnings.append('PEYOTL_LOGGING_FORMAT was invalid. Relying on setting from conf file.')
else:
_LOGGING_ENV_CONF_OVERRIDES.setdefault("logging", {})['formatter'] = format_from_env
if log_file_path_from_env is not None:
_LOGGING_ENV_CONF_OVERRIDES.setdefault("logging", {})['filepath'] = log_file_path_from_env
        return _LOGGING_ENV_CONF_OVERRIDES
def get_db_args_env(self):
"""
Get a dictionary of database connection parameters, and create an
environment for running postgres commands.
Falls back to omego defaults.
"""
db = {
'name': self.args.dbname,
'host': self.args.dbhost,
'user': self.args.dbuser,
'pass': self.args.dbpass
}
if not self.args.no_db_config:
try:
c = self.external.get_config(force=True)
except Exception as e:
log.warn('config.xml not found: %s', e)
c = {}
for k in db:
try:
db[k] = c['omero.db.%s' % k]
except KeyError:
log.info(
'Failed to lookup parameter omero.db.%s, using %s',
k, db[k])
if not db['name']:
raise Exception('Database name required')
env = os.environ.copy()
env['PGPASSWORD'] = db['pass']
    return db, env
def from_array(array):
"""
Deserialize a new PassportData from a given dictionary.
:return: new PassportData instance.
:rtype: PassportData
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['data'] = EncryptedPassportElement.from_array_list(array.get('data'), list_level=1)
data['credentials'] = EncryptedCredentials.from_array(array.get('credentials'))
data['_raw'] = array
    return PassportData(**data)
def ascii_text(text):
"""Transliterate the given text and make sure it ends up as ASCII."""
text = latinize_text(text, ascii=True)
if isinstance(text, six.text_type):
text = text.encode('ascii', 'ignore').decode('ascii')
    return text
def magic_string(string, filename=None):
""" Returns tuple of (num_of_matches, array_of_matches)
arranged highest confidence match first
If filename is provided it will be used in the computation.
:param string: string representation to check
:param filename: original filename
:return: list of possible matches, highest confidence first
"""
if not string:
raise ValueError("Input was empty")
head, foot = _string_details(string)
ext = ext_from_filename(filename) if filename else None
info = _identify_all(head, foot, ext)
info.sort(key=lambda x: x.confidence, reverse=True)
    return info
def serialize(
self,
value, # type: Any
state # type: _ProcessorState
):
# type: (...) -> ET.Element
"""Serialize the value and returns it."""
xml_value = _hooks_apply_before_serialize(self._hooks, state, value)
    return self._processor.serialize(xml_value, state)
def create(self):
"""Deploy a cluster on Amazon's EKS Service configured
for Jupyterhub Deployments.
"""
steps = [
(self.create_role, (), {}),
(self.create_vpc, (), {}),
(self.create_cluster, (), {}),
(self.create_node_group, (), {}),
(self.create_spot_nodes, (), {}),
(self.create_utilities, (), {}),
]
# Execute creation.
for step in tqdm.tqdm(steps, ncols=70):
method, args, kwargs = step
        method(*args, **kwargs)
def triangle_address(fx, pt):
'''
triangle_address(FX, P) yields an address coordinate (t,r) for the point P in the triangle
defined by the (3 x d)-sized coordinate matrix FX, in which each row of the matrix is the
    d-dimensional vector representing the respective triangle vertex for triangle [A,B,C]. The
resulting coordinates (t,r) (0 <= t <= 1, 0 <= r <= 1) address the point P such that, if t gives
the fraction of the angle from vector AB to vector AC that is made by the angle between vectors
AB and AP, and r gives the fraction ||AP||/||AR|| where R is the point of intersection between
lines AP and BC. If P is a (d x n)-sized matrix of points, then a (2 x n) matrix of addresses
is returned.
'''
fx = np.asarray(fx)
pt = np.asarray(pt)
# The triangle vectors...
ab = fx[1] - fx[0]
ac = fx[2] - fx[0]
bc = fx[2] - fx[1]
ap = np.asarray([pt_i - a_i for (pt_i, a_i) in zip(pt, fx[0])])
# get the unnormalized distance...
r = np.sqrt((ap ** 2).sum(0))
# now we can find the angle...
unit = 1 - r.astype(bool)
t0 = vector_angle(ab, ac)
t = vector_angle(ap + [ab_i * unit for ab_i in ab], ab)
sint = np.sin(t)
sindt = np.sin(t0 - t)
    # finding r0 is trickier--we use this fancy formula based on the law of sines
q0 = np.sqrt((bc ** 2).sum(0)) # B->C distance
beta = vector_angle(-ab, bc) # Angle at B
sinGamma = np.sin(math.pi - beta - t0)
sinBeta = np.sin(beta)
r0 = q0 * sinBeta * sinGamma / (sinBeta * sindt + sinGamma * sint)
    return np.asarray([t/t0, r/r0])
def setup(product_name):
"""Setup logging."""
if CONF.log_config:
_load_log_config(CONF.log_config)
else:
_setup_logging_from_conf()
    sys.excepthook = _create_logging_excepthook(product_name)
def csi_wrap(self, value, capname, *args):
"""Return a value wrapped in the selected CSI and does a reset."""
if isinstance(value, str):
value = value.encode('utf-8')
return b''.join([
self.csi(capname, *args),
value,
self.csi('sgr0'),
    ])
def to_vcf(in_tsv, data):
"""Convert seq2c output file into BED output.
"""
call_convert = {"Amp": "DUP", "Del": "DEL"}
out_file = "%s.vcf" % utils.splitext_plus(in_tsv)[0]
if not utils.file_uptodate(out_file, in_tsv):
with file_transaction(data, out_file) as tx_out_file:
with open(in_tsv) as in_handle:
with open(tx_out_file, "w") as out_handle:
out_handle.write(VCF_HEADER + "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n"
% (dd.get_sample_name(data)))
header = in_handle.readline().split("\t")
for cur in (dict(zip(header, l.split("\t"))) for l in in_handle):
if cur["Amp_Del"] in call_convert:
svtype = call_convert[cur["Amp_Del"]]
info = "SVTYPE=%s;END=%s;SVLEN=%s;FOLD_CHANGE_LOG=%s;PROBES=%s;GENE=%s" % (
svtype, cur["End"], int(cur["End"]) - int(cur["Start"]),
cur["Log2ratio"], cur["Ab_Seg"], cur["Gene"])
out_handle.write("\t".join([cur["Chr"], cur["Start"], ".", "N", "<%s>" % (svtype),
".", ".", info, "GT", "1/1"]) + "\n")
    return vcfutils.sort_by_ref(out_file, data)
def expand_parameters(self, para):
'''
Enumerate all possible combinations of all parameters
para: {key1: [v11, v12, ...], key2: [v21, v22, ...], ...}
return: {{key1: v11, key2: v21, ...}, {key1: v11, key2: v22, ...}, ...}
'''
if len(para) == 1:
for key, values in para.items():
return list(map(lambda v: {key: v}, values))
key = list(para)[0]
values = para.pop(key)
rest_para = self.expand_parameters(para)
ret_para = list()
for val in values:
for config in rest_para:
config[key] = val
ret_para.append(copy.deepcopy(config))
    return ret_para
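The same Cartesian expansion can be cross-checked against ``itertools.product``; a standalone sketch, no tuner class required:

import itertools

def expand(para):
    # Build one dict per combination of the per-key value lists.
    keys = list(para)
    return [dict(zip(keys, combo))
            for combo in itertools.product(*(para[k] for k in keys))]

print(expand({'lr': [0.1, 0.01], 'layers': [2, 3]}))
# [{'lr': 0.1, 'layers': 2}, {'lr': 0.1, 'layers': 3},
#  {'lr': 0.01, 'layers': 2}, {'lr': 0.01, 'layers': 3}]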
def get_script_header(script_text, executable=sys_executable, wininst=False):
"""Create a #! line, getting options (if any) from script_text"""
from distutils.command.build_scripts import first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
if not isinstance(first_line_re.pattern, str):
first_line_re = re.compile(first_line_re.pattern.decode())
first = (script_text+'\n').splitlines()[0]
match = first_line_re.match(first)
options = ''
if match:
options = match.group(1) or ''
if options: options = ' '+options
if wininst:
executable = "python.exe"
else:
executable = nt_quote_arg(executable)
hdr = "#!%(executable)s%(options)s\n" % locals()
if not isascii(hdr):
# Non-ascii path to sys.executable, use -x to prevent warnings
if options:
if options.strip().startswith('-'):
options = ' -x'+options.strip()[1:]
# else: punt, we can't do it, let the warning happen anyway
else:
options = ' -x'
executable = fix_jython_executable(executable, options)
hdr = "#!%(executable)s%(options)s\n" % locals()
    return hdr
def connect_all(self):
"""[Re-]connects all signals and slots.
If already in "connected" state, ignores the call.
"""
if self.__connected:
return # assert not self.__connected, "connect_all() already in \"connected\" state"
with self.__lock:
for signal in self.__signals:
self.__connect_signal(signal)
if self.__slot is not None:
self.__sigDelayed.connect(self.__slot, Qt.QueuedConnection)
        self.__connected = True
def GetLocation(location=None,alias=None,session=None):
"""Returns a list of anti-affinity policies within a specific location.
>>> clc.v2.AntiAffinity.GetLocation("VA1")
[<clc.APIv2.anti_affinity.AntiAffinity object at 0x105eeded0>]
"""
if not location: location = clc.v2.Account.GetLocation(session=session)
    return(AntiAffinity.GetAll(alias=alias,location=location,session=session))
def edge_tuple(self, vertex0_id, vertex1_id):
"""To avoid duplicate edges where the vertex ids are reversed,
we maintain that the vertex ids are ordered so that the corresponding
pathway names are alphabetical.
Parameters
-----------
vertex0_id : int
one vertex in the edge
vertex1_id : int
the other vertex in the edge
Returns
-----------
tup(int, int)|None, the edge id or None if the vertices do not
exist in the network or they map to the same pathway (there should not
be any self-loops in the network)
"""
pw0 = self.__getitem__(vertex0_id)
pw1 = self.__getitem__(vertex1_id)
if not pw0 or not pw1:
return None
if pw0 < pw1:
return (vertex0_id, vertex1_id)
elif pw0 > pw1:
return (vertex1_id, vertex0_id)
else:
        return None
def read_register(self, addr, numBytes):
"""Reads @numBytes bytes from the grizzly starting at @addr. Due
to packet format, cannot read more than 127 packets at a time.
Returns a byte array of the requested data in little endian.
@addr should be from the Addr class e.g. Addr.Speed"""
assert numBytes <= 0x7f, "Cannot read more than 127 bytes at a time"
cmd = chr(addr) + chr(numBytes)
cmd += (16 - len(cmd)) * chr(0)
    return self._dev.exchange_bytes(cmd)
def exit(self, status=0, message=None):
'''
Argparse expects exit() to be a terminal function and not return.
As such, this function must raise an exception instead.
'''
self.exited = True
self.status = status
if message is not None:
self.outp.printf(message)
    raise s_exc.ParserExit(mesg=message, status=status)
def lovasz_softmax(probas, labels, only_present=False, per_image=False, ignore=None):
"""
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1)
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
only_present: average only on classes present in ground truth
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
if per_image:
loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), only_present=only_present)
for prob, lab in zip(probas, labels))
else:
loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), only_present=only_present)
    return loss
def meta(self, remote_path, **kwargs):
"""获取单个文件或目录的元信息.
:param remote_path: 网盘中文件/目录的路径,必须以 /apps/ 开头。
.. warning::
* 路径长度限制为1000;
* 径中不能包含以下字符:``\\\\ ? | " > < : *``;
* 文件名或路径名开头结尾不能是 ``.``
或空白字符,空白字符包括:
``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。
:return: Response 对象
"""
params = {
'path': remote_path
}
    return self._request('file', 'meta', extra_params=params, **kwargs)
def get_pem_entries(glob_path):
'''
Returns a dict containing PEM entries in files matching a glob
glob_path:
A path to certificates to be read and returned.
CLI Example:
.. code-block:: bash
salt '*' x509.get_pem_entries "/etc/pki/*.crt"
'''
ret = {}
for path in glob.glob(glob_path):
if os.path.isfile(path):
try:
ret[path] = get_pem_entry(text=path)
except ValueError as err:
log.debug('Unable to get PEM entries from %s: %s', path, err)
    return ret
def list_menu(self, options, title="Choose a value", message="Choose a value", default=None, **kwargs):
"""
Show a single-selection list menu
Usage: C{dialog.list_menu(options, title="Choose a value", message="Choose a value", default=None, **kwargs)}
@param options: list of options (strings) for the dialog
@param title: window title for the dialog
@param message: message displayed above the list
@param default: default value to be selected
@return: a tuple containing the exit code and user choice
@rtype: C{DialogData(int, str)}
"""
choices = []
optionNum = 0
for option in options:
choices.append(str(optionNum))
choices.append(option)
if option == default:
choices.append("on")
else:
choices.append("off")
optionNum += 1
return_code, result = self._run_kdialog(title, ["--radiolist", message] + choices, kwargs)
choice = options[int(result)]
    return DialogData(return_code, choice)
def gff(args):
"""
%prog gff *.gff
Draw exons for genes based on gff files. Each gff file should contain only
one gene, and only the "mRNA" and "CDS" feature will be drawn on the canvas.
"""
align_choices = ("left", "center", "right")
p = OptionParser(gff.__doc__)
p.add_option("--align", default="left", choices=align_choices,
help="Horizontal alignment [default: %default]")
p.add_option("--noUTR", default=False, action="store_true",
help="Do not plot UTRs [default: %default]")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fig = plt.figure(1, (8, 5))
root = fig.add_axes([0, 0, 1, 1])
gffiles = args
ngenes = len(gffiles)
canvas = .6
setups, ratio = get_setups(gffiles, canvas=canvas, noUTR=opts.noUTR)
align = opts.align
xs = .2 if align == "left" else .8
yinterval = canvas / ngenes
ys = .8
tip = .01
for genename, mrnabed, cdsbeds in setups:
ExonGlyph(root, xs, ys, mrnabed, cdsbeds, ratio=ratio, align=align)
if align == "left":
root.text(xs - tip, ys, genename, ha="right", va="center")
elif align == "right":
root.text(xs + tip, ys, genename, ha="left", va="center")
ys -= yinterval
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
figname = "exons.pdf"
    savefig(figname, dpi=300)
def retrieve_crls(self, cert):
"""
:param cert:
An asn1crypto.x509.Certificate object
:return:
A list of asn1crypto.crl.CertificateList objects
"""
if not self._allow_fetching:
return self._crls
if cert.issuer_serial not in self._fetched_crls:
try:
crls = crl_client.fetch(
cert,
**self._crl_fetch_params
)
self._fetched_crls[cert.issuer_serial] = crls
for crl_ in crls:
try:
certs = crl_client.fetch_certs(
crl_,
user_agent=self._crl_fetch_params.get('user_agent'),
timeout=self._crl_fetch_params.get('timeout')
)
for cert_ in certs:
if self.certificate_registry.add_other_cert(cert_):
self._revocation_certs[cert_.issuer_serial] = cert_
except (URLError, socket.error):
pass
except (URLError, socket.error) as e:
self._fetched_crls[cert.issuer_serial] = []
if self._revocation_mode == "soft-fail":
self._soft_fail_exceptions.append(e)
raise SoftFailError()
else:
raise
    return self._fetched_crls[cert.issuer_serial]
def read_code_bytes(self, size = 128, offset = 0):
"""
Tries to read some bytes of the code currently being executed.
@type size: int
@param size: Number of bytes to read.
@type offset: int
@param offset: Offset from the program counter to begin reading.
@rtype: str
@return: Bytes read from the process memory.
@raise WindowsError: Could not read the requested data.
"""
    return self.get_process().read(self.get_pc() + offset, size)
def pretty_duration(seconds):
""" Returns a user-friendly representation of the provided duration in seconds.
    For example: 62.8 => "1m2.8s", or 129837.8 => "1d12h3m57.8s"
"""
if seconds is None:
return ''
ret = ''
if seconds >= 86400:
ret += '{:.0f}d'.format(int(seconds / 86400))
seconds = seconds % 86400
if seconds >= 3600:
ret += '{:.0f}h'.format(int(seconds / 3600))
seconds = seconds % 3600
if seconds >= 60:
ret += '{:.0f}m'.format(int(seconds / 60))
seconds = seconds % 60
if seconds > 0:
ret += '{:.1f}s'.format(seconds)
    return ret
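A quick check of the formatting branches:

for s in (0.5, 62.8, 3725.0, 129837.8):
    print(pretty_duration(s))
# 0.5s
# 1m2.8s
# 1h2m5.0s
# 1d12h3m57.8s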
def theme_set(new):
"""
Change the current(default) theme
Parameters
----------
new : theme
New default theme
Returns
-------
out : theme
Previous theme
"""
if (not isinstance(new, theme) and
not issubclass(new, theme)):
raise PlotnineError("Expecting object to be a theme")
out = get_option('current_theme')
set_option('current_theme', new)
    return out
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for _, cookies in cj._cookies.items():
for _, cookies in cookies.items():
for cookie in cookies.values():
# print cookie
cookie_dict[cookie.name] = cookie.value
    return cookie_dict
def step_worker(step, pipe, max_entities):
"""
All messages follow the form: <message>, <data>
Valid messages
--------------
run, <input_data>
finalise, None
next, None
stop, None
"""
state = None
while True:
message, input = pipe.recv()
if message == 'run':
state = step.run(input, max_entities)
elif message == 'finalise':
state = step.finalise(max_entities)
elif message == 'next':
try:
data = state.next()
sys.stderr.write(' {}\n'.format(step.name))
sys.stderr.write(' * {}\n'.format(', '.join(key.name for key in data)))
sys.stderr.write(' * {}\n'.format(', '.join(str(value) for value in data.values())))
pipe.send(('data', {'step': step, 'data': data}))
except StopIteration:
pipe.send(('stop', {'step': step}))
state = None
elif message == 'stop':
            break
def subject(self):
""" Normalized subject.
Only used for debugging and human-friendly logging.
"""
# Fetch subject from first message.
subject = self.message.get('Subject', '')
subject, _ = re.subn(r'\s+', ' ', subject)
    return subject
def earnings(symbol, token='', version=''):
'''Earnings data for a given company including the actual EPS, consensus, and fiscal period. Earnings are available quarterly (last 4 quarters) and annually (last 4 years).
https://iexcloud.io/docs/api/#earnings
Updates at 9am, 11am, 12pm UTC every day
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
_raiseIfNotStr(symbol)
    return _getJson('stock/' + symbol + '/earnings', token, version)
def add_command_arguments(parser):
"""
Additional command line arguments for the behave management command
"""
parser.add_argument(
'--noinput',
'--no-input',
action='store_const',
const=False,
dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--failfast', action='store_const', const=True, dest='failfast',
help=('Tells Django to stop running the '
'test suite after first failed test.'),
)
parser.add_argument(
'-r', '--reverse', action='store_const', const=True, dest='reverse',
help='Reverses test cases order.',
)
parser.add_argument(
'--use-existing-database',
action='store_true',
default=False,
help="Don't create a test database. USE AT YOUR OWN RISK!",
)
parser.add_argument(
'-k', '--keepdb',
action='store_const',
const=True,
help="Preserves the test DB between runs.",
)
parser.add_argument(
'-S', '--simple',
action='store_true',
default=False,
help="Use simple test runner that supports Django's"
" testing client only (no web browser automation)"
) | 0.000779 |
def from_file(cls, db_file=ALL_SETS_PATH):
"""Reads card data from a JSON-file.
:param db_file: A file-like object or a path.
:return: A new :class:`~mtgjson.CardDb` instance.
"""
if callable(getattr(db_file, 'read', None)):
return cls(json.load(db_file))
with io.open(db_file, encoding='utf8') as inp:
return cls(json.load(inp)) | 0.004963 |
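Typical usage, assuming a local copy of the mtgjson AllSets file (the path and the lookup attribute are assumptions; adjust to the installed package):

db = CardDb.from_file('AllSets.json')
card = db.cards_by_name['Black Lotus']  # hypothetical lookup attribute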
def get_subdir(index):
"""
Return the sub-directory given the index dictionary. The return
value is obtained in the following order:
    1. when the 'subdir' key exists, its value is returned
2. if the 'arch' is None, or does not exist, 'noarch' is returned
3. otherwise, the return value is constructed from the 'platform' key
and the 'arch' key (where 'x86' is replaced by '32',
and 'x86_64' by '64')
"""
try:
return index['subdir']
except KeyError:
arch = index.get('arch')
if arch is None:
return 'noarch'
intel_map = {'x86': '32', 'x86_64': '64'}
return '%s-%s' % (index.get('platform'), intel_map.get(arch, arch)) | 0.001389 |
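The three branches, illustrated with hypothetical index dictionaries:

print(get_subdir({'subdir': 'linux-64'}))                 # 'linux-64'
print(get_subdir({}))                                     # 'noarch'
print(get_subdir({'platform': 'win', 'arch': 'x86'}))     # 'win-32'
print(get_subdir({'platform': 'osx', 'arch': 'x86_64'}))  # 'osx-64'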
def delete(self, client=None):
"""Deletes a task from Task Queue.
:type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the task's taskqueue.
:rtype: :class:`Task`
:returns: The task that was just deleted.
:raises: :class:`gcloud.exceptions.NotFound`
(propagated from
:meth:`gcloud.taskqueue.taskqueue.Taskqueue.delete_task`).
"""
return self.taskqueue.delete_task(self.id, client=client) | 0.003195 |
def inertia(X, y, samples=False):
    """ return the within-class squared distance from the centroid"""
    if samples:
        # return within-class distance for each sample
        inertia = np.zeros(y.shape)
        for label in np.unique(y):
            inertia[y == label] = (X[y == label] - np.mean(X[y == label])) ** 2
    else:   # return aggregate score
        inertia = 0
        for label in np.unique(y):
            inertia += np.sum((X[y == label] - np.mean(X[y == label])) ** 2) / len(y[y == label])
        inertia = inertia / len(np.unique(y))
    return inertia | 0.020101 |
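A quick check with a 1-D feature and two well-separated classes:

import numpy as np

X = np.array([0.0, 1.0, 10.0, 11.0])
y = np.array([0, 0, 1, 1])
print(inertia(X, y))                # 0.25
print(inertia(X, y, samples=True))  # [0.25 0.25 0.25 0.25]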
def given(self):
"""Given name could include both first and middle name"""
if self._primary.value[0] and self._primary.value[2]:
return self._primary.value[0] + ' ' + self._primary.value[2]
return self._primary.value[0] or self._primary.value[2] | 0.007117 |
def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame.
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
if names is None:
names = ['time', 'panel']
time, panels = _ensure_like_indices(time, panels)
return MultiIndex.from_arrays([time, panels], sortorder=None, names=names) | 0.000734 |
def __walk_rec(self, top, rec):
"""
        Yields each subdirectory of top; doesn't follow symlinks.
If rec is false, only yield top.
@param top: root directory.
@type top: string
@param rec: recursive flag.
@type rec: bool
@return: path of one subdirectory.
@rtype: string
"""
if not rec or os.path.islink(top) or not os.path.isdir(top):
yield top
else:
for root, dirs, files in os.walk(top):
yield root | 0.003731 |
def new_format(self, navbar: BeautifulSoup, content: BeautifulSoup) -> List[str]:
"""
Extracts email message information if it uses the new Mailman format
Args:
content: BeautifulSoup
Returns: List[str]
"""
sender = content.find(id='from').text.split('via')[0][6:].strip()
date_str = content.find(id='date').text.split(': ')[1].strip()
date = parsedate_to_datetime(date_str).isoformat()[:19]
body = content.find(id='body').text.strip()
nxt, rep_to = None, None
links = navbar.findAll('a')
        for link in links:
            if 'Next in thread' in str(link):
                nxt = '/'.join(self.email_url.split('/')[:-1]) + '/' + link['href']
                nxt = nxt[1:] if nxt[0] == '/' else nxt
            elif 'reply to' in str(link):
                rep_to = '/'.join(self.email_url.split('/')[:-1]) + '/' + link['href']
                rep_to = rep_to[1:] if rep_to[0] == '/' else rep_to
return [str(i) for i in [sender, date, body, nxt, rep_to]] | 0.007477 |
def forward(self, # pylint: disable=arguments-differ
text_field_input: Dict[str, torch.Tensor],
num_wrapping_dims: int = 0) -> torch.Tensor:
"""
Parameters
----------
text_field_input : ``Dict[str, torch.Tensor]``
A dictionary that was the output of a call to ``TextField.as_tensor``. Each tensor in
here is assumed to have a shape roughly similar to ``(batch_size, sequence_length)``
(perhaps with an extra trailing dimension for the characters in each token).
num_wrapping_dims : ``int``, optional (default=0)
If you have a ``ListField[TextField]`` that created the ``text_field_input``, you'll
end up with tensors of shape ``(batch_size, wrapping_dim1, wrapping_dim2, ...,
sequence_length)``. This parameter tells us how many wrapping dimensions there are, so
that we can correctly ``TimeDistribute`` the embedding of each named representation.
"""
raise NotImplementedError | 0.010456 |
def _process_transform(self, data, transform, step_size):
'''
Process transforms on the data.
'''
        if isinstance(transform, (list, tuple, set)):
            return {t: self._transform(data, t, step_size) for t in transform}
        elif isinstance(transform, dict):
            return {tn: self._transform(data, tf, step_size) for tn, tf in transform.items()}
        return self._transform(data, transform, step_size) | 0.046341 |
def sync(self):
"""
Syncs the parent app changes with the current app instance.
:return: Synced App object.
"""
app = self._api.post(url=self._URL['sync'].format(id=self.id)).json()
return App(api=self._api, **app) | 0.007634 |
def find_local_uuid(tw, keys, issue, legacy_matching=False):
""" For a given issue issue, find its local UUID.
Assembles a list of task IDs existing in taskwarrior
matching the supplied issue (`issue`) on the combination of any
set of supplied unique identifiers (`keys`) or, optionally,
the task's description field (should `legacy_matching` be `True`).
:params:
* `tw`: An instance of `taskw.TaskWarriorShellout`
* `keys`: A list of lists of keys to use for uniquely identifying
an issue. To clarify the "list of lists" behavior, assume that
there are two services, one having a single primary key field
-- 'serviceAid' -- and another having a pair of fields composing
its primary key -- 'serviceBproject' and 'serviceBnumber' --, the
incoming data for this field would be::
[
['serviceAid'],
['serviceBproject', 'serviceBnumber'],
]
* `issue`: An instance of a subclass of `bugwarrior.services.Issue`.
* `legacy_matching`: By default, this is disabled, and it allows
the matching algorithm to -- in addition to searching by stored
issue keys -- search using the task's description for a match.
      It is prone to error and should be avoided if possible.
:returns:
* A single string UUID.
:raises:
* `bugwarrior.db.MultipleMatches`: if multiple matches were found.
* `bugwarrior.db.NotFound`: if an issue was not found.
"""
if not issue['description']:
raise ValueError('Issue %s has no description.' % issue)
possibilities = set([])
if legacy_matching:
legacy_description = issue.get_default_description().rsplit('..', 1)[0]
# Furthermore, we have to kill off any single quotes which break in
# task-2.4.x, as much as it saddens me.
legacy_description = legacy_description.split("'")[0]
results = tw.filter_tasks({
'description.startswith': legacy_description,
'or': [
('status', 'pending'),
('status', 'waiting'),
],
})
possibilities = possibilities | set([
task['uuid'] for task in results
])
for service, key_list in six.iteritems(keys):
if any([key in issue for key in key_list]):
results = tw.filter_tasks({
'and': [("%s.is" % key, issue[key]) for key in key_list],
'or': [
('status', 'pending'),
('status', 'waiting'),
],
})
possibilities = possibilities | set([
task['uuid'] for task in results
])
if len(possibilities) == 1:
return possibilities.pop()
if len(possibilities) > 1:
raise MultipleMatches(
"Issue %s matched multiple IDs: %s" % (
issue['description'],
possibilities
)
)
raise NotFound(
"No issue was found matching %s" % issue
) | 0.000329 |
def _create_w_objective(m, X, R):
"""
Creates an objective function and its derivative for W, given M and X (data)
Args:
m (array): genes x clusters
X (array): genes x cells
R (array): 1 x genes
"""
genes, clusters = m.shape
cells = X.shape[1]
R1 = R.reshape((genes, 1)).dot(np.ones((1, cells)))
def objective(w):
# convert w into a matrix first... because it's a vector for
# optimization purposes
w = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w)+eps
return np.sum((X + R1)*np.log(d + R1) - X*np.log(d))/genes
def deriv(w):
# derivative of objective wrt all elements of w
# for w_{ij}, the derivative is... m_j1+...+m_jn sum over genes minus
# x_ij
w2 = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w2)+eps
temp = X/d
temp2 = (X+R1)/(d+R1)
m1 = m.T.dot(temp2)
m2 = m.T.dot(temp)
deriv = m1 - m2
return deriv.flatten()/genes
return objective, deriv | 0.004776 |
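A sketch of how the returned pair plugs into a generic optimizer. It assumes eps is a small module-level constant (e.g. 1e-10), which the code above relies on but does not define here, and uses random data purely for illustration:

import numpy as np
from scipy.optimize import minimize

eps = 1e-10  # assumed module-level constant, mirroring the code above
genes, clusters, cells = 50, 3, 20
m = np.random.rand(genes, clusters)
X = np.random.poisson(5.0, size=(genes, cells)).astype(float)
R = np.random.rand(genes)

objective, deriv = _create_w_objective(m, X, R)
w0 = np.random.rand(clusters * cells)
result = minimize(objective, w0, jac=deriv, method='L-BFGS-B',
                  bounds=[(eps, None)] * w0.size)
W = result.x.reshape((clusters, cells))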
def from_netcdf(filename):
"""Initialize object from a netcdf file.
Expects that the file will have groups, each of which can be loaded by xarray.
Parameters
----------
filename : str
location of netcdf file
Returns
-------
InferenceData object
"""
groups = {}
with nc.Dataset(filename, mode="r") as data:
data_groups = list(data.groups)
for group in data_groups:
        with xr.open_dataset(filename, group=group) as group_data:
            groups[group] = group_data
return InferenceData(**groups) | 0.004777 |
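A typical call; the file name is hypothetical:

idata = from_netcdf('model_output.nc')
print(idata)  # lists the groups found in the file, e.g. posterior, observed_data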
def to_call(self, func, *args, **kwargs):
"""
Sets the function & its arguments to be called when the task is
processed.
Ex::
task.to_call(my_function, 1, 'c', another=True)
:param func: The callable with business logic to execute
:type func: callable
:param args: Positional arguments to pass to the callable task
:type args: list
:param kwargs: Keyword arguments to pass to the callable task
:type kwargs: dict
"""
self.func = func
self.func_args = args
self.func_kwargs = kwargs | 0.003279 |
def copy_node(node):
"""Makes a copy of a node with the same attributes and text, but no children."""
element = node.makeelement(node.tag)
element.text = node.text
element.tail = node.tail
for key, value in node.items():
element.set(key, value)
return element | 0.006849 |
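A small demonstration with lxml, whose elements provide makeelement():

from lxml import etree

root = etree.fromstring('<a id="1">hello<b/></a>')
clone = copy_node(root)
print(etree.tostring(clone))  # b'<a id="1">hello</a>' -- attributes and text kept, children dropped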
def function(script, x_func='x', y_func='y', z_func='z'):
"""Geometric function using muparser lib to generate new Coordinates
You can change x, y, z for every vertex according to the function specified.
See help(mlx.muparser_ref) for muparser reference documentation.
It's possible to use the following per-vertex variables in the expression:
Variables (per vertex):
x, y, z (coordinates)
nx, ny, nz (normal)
r, g, b, a (color)
q (quality)
rad (radius)
vi (vertex index)
vtu, vtv (texture coordinates)
ti (texture index)
vsel (is the vertex selected? 1 yes, 0 no)
and all custom vertex attributes already defined by user.
Args:
x_func (str): function to generate new coordinates for x
y_func (str): function to generate new coordinates for y
z_func (str): function to generate new coordinates for z
Layer stack:
No impacts
MeshLab versions:
1.3.4BETA
"""
filter_xml = ''.join([
' <filter name="Geometric Function">\n',
' <Param name="x" ',
        'value="{}" '.format(str(x_func).replace('&', '&amp;').replace('<', '&lt;')),
'description="func x = " ',
'type="RichString" ',
'/>\n',
' <Param name="y" ',
        'value="{}" '.format(str(y_func).replace('&', '&amp;').replace('<', '&lt;')),
'description="func y = " ',
'type="RichString" ',
'/>\n',
' <Param name="z" ',
        'value="{}" '.format(str(z_func).replace('&', '&amp;').replace('<', '&lt;')),
'description="func z = " ',
'type="RichString" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None | 0.002812 |
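A hedged example, assuming a meshlabxml-style FilterScript object is already in scope as script:

# Shift the mesh along x and turn z into a sine wave of x
# (muparser expressions, per the docstring above).
function(script, x_func='x+10', y_func='y', z_func='sin(x)')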