text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def _parse_dependencies(string):
"""
This function actually parses the dependencies are sorts them into
the buildable and given dependencies
"""
contents = _get_contents_between(string, '(', ')')
unsorted_dependencies = contents.split(',')
_check_parameters(unsorted_dependencies, ('?',))
buildable_dependencies = []
given_dependencies = []
for dependency in unsorted_dependencies:
if dependency[0] == '?':
given_dependencies.append(dependency[1:])
else:
buildable_dependencies.append(dependency)
string = string[string.index(')') + 1:]
return buildable_dependencies, given_dependencies, string | 0.001462 |
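# Hedged standalone sketch of the split done by _parse_dependencies above: the
# real function relies on helpers (_get_contents_between, _check_parameters)
# that are not shown here, so this simplified stand-in only illustrates how a
# leading '?' marks a "given" dependency while everything else is "buildable".
def _split_dependencies_sketch(contents):
    buildable, given = [], []
    for dep in contents.split(','):
        if dep.startswith('?'):
            given.append(dep[1:])
        else:
            buildable.append(dep)
    return buildable, given

print(_split_dependencies_sketch("liba,?header.h"))  # (['liba'], ['header.h'])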
def _pressModifiers(self, modifiers, pressed=True, globally=False):
"""Press given modifiers (provided in list form).
Parameters: modifiers list, global or app specific
Optional: keypressed state (default is True (down))
Returns: Unsigned int representing flags to set
"""
if not isinstance(modifiers, list):
raise TypeError('Please provide modifiers in list form')
if not hasattr(self, 'keyboard'):
self.keyboard = AXKeyboard.loadKeyboard()
modFlags = 0
# Press given modifiers
for nextMod in modifiers:
if nextMod not in self.keyboard:
errStr = 'Key %s not found in keyboard layout'
self._clearEventQueue()
                raise ValueError(errStr % nextMod)
modEvent = Quartz.CGEventCreateKeyboardEvent(
Quartz.CGEventSourceCreate(0),
self.keyboard[nextMod],
pressed
)
if not pressed:
# Clear the modflags:
Quartz.CGEventSetFlags(modEvent, 0)
if globally:
self._queueEvent(Quartz.CGEventPost, (0, modEvent))
else:
                # To direct output to the correct application we need the PSN (macOS <= 10.10) or PID (macOS > 10.10):
macVer, _, _ = platform.mac_ver()
macVer = int(macVer.split('.')[1])
if macVer > 10:
appPid = self._getPid()
self._queueEvent(Quartz.CGEventPostToPid, (appPid, modEvent))
else:
appPsn = self._getPsnForPid(self._getPid())
self._queueEvent(Quartz.CGEventPostToPSN, (appPsn, modEvent))
# Add the modifier flags
modFlags += AXKeyboard.modKeyFlagConstants[nextMod]
return modFlags | 0.002625 |
def build_parameters_error(cls, errors=None):
"""Utility method to build a HTTP 400 Parameter Error response"""
errors = [errors] if not isinstance(errors, list) else errors
return cls(Status.PARAMETERS_ERROR, errors) | 0.008299 |
def slamdunkTcPerUTRPosPlot (self):
""" Generate the tc per UTR pos plots """
pconfig_nontc = {
'id': 'slamdunk_slamdunk_nontcperutrpos_plot',
'title': 'Slamdunk: Non-T>C mutations over 3\' UTR ends',
'ylab': 'Percent mismatches %',
'xlab': 'Position in the static last 250bp window of 3\' UTR',
'xDecimals': False,
'ymin': 0,
'tt_label': '<b>Pos {point.x}</b>: {point.y:.2f} %',
'data_labels': [{'name': 'UTRs on plus strand', 'ylab': 'Percent mismatches %'},
{'name': 'UTRs on minus strand', 'ylab': 'Percent mismatches %'}]
}
pconfig_tc = {
'id': 'slamdunk_slamdunk_tcperutrpos_plot',
'title': 'Slamdunk: T>C conversions over 3\' UTR ends',
'ylab': 'Percent converted %',
'xlab': 'Position in the static last 250bp window of 3\' UTR',
'xDecimals': False,
'ymin': 0,
'tt_label': '<b>Pos {point.x}</b>: {point.y:.2f} %',
'data_labels': [{'name': 'UTRs on plus strand', 'ylab': 'Percent converted %'},
{'name': 'UTRs on minus strand', 'ylab': 'Percent converted %'}]
}
self.add_section (
name = 'Non T>C mismatches over UTR positions',
anchor = 'slamdunk_nontcperutrpos',
description = """This plot shows the distribution of non T>C mismatches across UTR positions for the last 250 bp from the 3\' UTR end
(see the <a href="http://t-neumann.github.io/slamdunk/docs.html#tcperutrpos" target="_blank">slamdunk docs</a>).""",
plot = linegraph.plot([self.nontc_per_utrpos_plus, self.nontc_per_utrpos_minus], pconfig_nontc)
)
self.add_section (
name = 'T>C conversions over UTR positions',
anchor = 'tcperutrpos',
description = """This plot shows the distribution of T>C conversions across UTR positions for the last 250 bp from the 3\' UTR end
(see the <a href="http://t-neumann.github.io/slamdunk/docs.html#tcperutrpos" target="_blank">slamdunk docs</a>).""",
plot = linegraph.plot([self.tc_per_utrpos_plus, self.tc_per_utrpos_minus], pconfig_tc)
) | 0.013333 |
def impad(img, shape, pad_val=0):
"""Pad an image to a certain shape.
Args:
img (ndarray): Image to be padded.
shape (tuple): Expected padding shape.
pad_val (number or sequence): Values to be filled in padding areas.
Returns:
ndarray: The padded image.
"""
if not isinstance(pad_val, (int, float)):
assert len(pad_val) == img.shape[-1]
if len(shape) < len(img.shape):
shape = shape + (img.shape[-1], )
assert len(shape) == len(img.shape)
for i in range(len(shape) - 1):
assert shape[i] >= img.shape[i]
pad = np.empty(shape, dtype=img.dtype)
pad[...] = pad_val
pad[:img.shape[0], :img.shape[1], ...] = img
return pad | 0.001385 |
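# Hedged usage sketch for impad (assumes numpy is imported as np, as in the
# function body): pad a 2x2 grayscale image to 4x4, filling with zeros.
import numpy as np

img = np.array([[1, 2],
                [3, 4]], dtype=np.uint8)
padded = impad(img, (4, 4), pad_val=0)
# padded[:2, :2] equals img; every other cell holds the pad value 0.
print(padded)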
def _add_instruction(self, instruction, value):
"""
:param instruction: instruction name to be added
:param value: instruction value
"""
if (instruction == 'LABEL' or instruction == 'ENV') and len(value) == 2:
new_line = instruction + ' ' + '='.join(map(quote, value)) + '\n'
else:
new_line = '{0} {1}\n'.format(instruction, value)
if new_line:
lines = self.lines
if not lines[len(lines) - 1].endswith('\n'):
new_line = '\n' + new_line
lines += new_line
self.lines = lines | 0.00487 |
def longest_common_substring_similarity(s1, s2, norm='dice', min_len=2):
"""
longest_common_substring_similarity(s1, s2, norm='dice', min_len=2)
An implementation of the longest common substring similarity algorithm
described in Christen, Peter (2012).
Parameters
----------
s1 : label, pandas.Series
Series or DataFrame to compare all fields.
s2 : label, pandas.Series
Series or DataFrame to compare all fields.
norm : str
The name of the normalization applied to the raw length computed by
the lcs algorithm. One of "overlap", "jaccard", or "dice". Default:
"dice""
Returns
-------
pandas.Series
A pandas series with normalized similarity values.
"""
if len(s1) != len(s2):
raise ValueError('Arrays or Series have to be same length.')
if len(s1) == len(s2) == 0:
return []
conc = pandas.Series(list(zip(s1, s2)))
def lcs_iteration(x):
"""
lcs_iteration(x)
A helper function implementation of a single iteration longest common
        substring algorithm, adapted from
        https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring,
        but oriented towards the iterative approach described by
        Christen, Peter (2012).
Parameters
----------
x : A series containing the two strings to be compared.
Returns
-------
A tuple of strings and a substring length i.e. ((str, str), int).
"""
str1 = x[0]
str2 = x[1]
if str1 is np.nan or str2 is np.nan or min(len(str1),
len(str2)) < min_len:
longest = 0
new_str1 = None
new_str2 = None
else:
# Creating a matrix of 0s for preprocessing
m = [[0] * (1 + len(str2)) for _ in range(1 + len(str1))]
# Track length of longest substring seen
longest = 0
# Track the ending position of this substring in str1 (x) and
# str2(y)
x_longest = 0
y_longest = 0
# Create matrix of substring lengths
for x in range(1, 1 + len(str1)):
for y in range(1, 1 + len(str2)):
# Check if the chars match
if str1[x - 1] == str2[y - 1]:
# add 1 to the diagonal
m[x][y] = m[x - 1][y - 1] + 1
# Update values if longer than previous longest
# substring
if m[x][y] > longest:
longest = m[x][y]
x_longest = x
y_longest = y
else:
# If there is no match, start from zero
m[x][y] = 0
# Copy str1 and str2, but subtract the longest common substring
# for the next iteration.
new_str1 = str1[0:x_longest - longest] + str1[x_longest:]
new_str2 = str2[0:y_longest - longest] + str2[y_longest:]
return (new_str1, new_str2), longest
def lcs_apply(x):
"""
lcs_apply(x)
A helper function that is applied to each pair of records
in s1 and s2. Assigns a similarity score to each pair,
between 0 and 1. Used by the pandas.apply method.
Parameters
----------
x : pandas.Series
A pandas Series containing two strings to be compared.
Returns
-------
Float
A normalized similarity score.
"""
if pandas.isnull(x[0]) or pandas.isnull(x[1]):
return np.nan
# Compute lcs value with first ordering.
lcs_acc_1 = 0
new_x_1 = (x[0], x[1])
while True:
# Get new string pair (iter_x) and length (iter_lcs)
# for this iteration.
iter_x, iter_lcs = lcs_iteration(new_x_1)
if iter_lcs < min_len:
# End if the longest substring is below the threshold
break
else:
# Otherwise, accumulate length and start a new iteration
# with the new string pair.
new_x_1 = iter_x
lcs_acc_1 = lcs_acc_1 + iter_lcs
# Compute lcs value with second ordering.
lcs_acc_2 = 0
new_x_2 = (x[1], x[0])
while True:
# Get new string pair (iter_x) and length (iter_lcs)
# for this iteration.
iter_x, iter_lcs = lcs_iteration(new_x_2)
if iter_lcs < min_len:
# End if the longest substring is below the threshold
break
else:
# Otherwise, accumulate length and start a new iteration
# with the new string pair.
new_x_2 = iter_x
lcs_acc_2 = lcs_acc_2 + iter_lcs
def normalize_lcs(lcs_value):
"""
normalize_lcs(lcs_value)
A helper function used to normalize the score produced by
compute_score() to a score between 0 and 1. Applies one of the
            normalization schemes described in Christen, Peter (2012). The
normalization method is determined by the norm argument provided
to the parent, longest_common_substring_similarity function.
Parameters
----------
lcs_value : Float
The raw lcs length.
Returns
-------
Float
The normalized lcs length.
"""
if len(x[0]) == 0 or len(x[1]) == 0:
return 0
if norm == 'overlap':
return lcs_value / min(len(x[0]), len(x[1]))
elif norm == 'jaccard':
return lcs_value / (len(x[0]) + len(x[1]) - abs(lcs_value))
elif norm == 'dice':
return lcs_value * 2 / (len(x[0]) + len(x[1]))
else:
warnings.warn(
'Unrecognized longest common substring normalization. '
'Defaulting to "dice" method.')
return lcs_value * 2 / (len(x[0]) + len(x[1]))
# Average the two orderings, since lcs may be sensitive to comparison
# order.
return (normalize_lcs(lcs_acc_1) + normalize_lcs(lcs_acc_2)) / 2
return conc.apply(lcs_apply) | 0.000153 |
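# Hedged usage sketch: assumes pandas and numpy are imported as in the function
# above and that the two Series are aligned element-wise.
import pandas

s1 = pandas.Series(["robert", "richard"])
s2 = pandas.Series(["roberto", "richie"])
scores = longest_common_substring_similarity(s1, s2, norm='dice', min_len=2)
# "robert"/"roberto" share the substring "robert" (length 6), so the dice
# score is 2 * 6 / (6 + 7) ~= 0.92.
print(scores)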
def print_commandless_help(self):
"""
print_commandless_help
"""
doc_help = self.m_doc.strip().split("\n")
if len(doc_help) > 0:
print("\033[33m--\033[0m")
print("\033[34m" + doc_help[0] + "\033[0m")
asp = "author :"
doc_help_rest = "\n".join(doc_help[1:])
if asp in doc_help_rest:
doc_help_rest = doc_help_rest.split("author :")
if len(doc_help_rest) > 1:
print("\n\033[33m" + doc_help_rest[0].strip() + "\n")
print("\033[37m" + asp + doc_help_rest[1] + "\033[0m")
else:
print(doc_help_rest)
else:
print(doc_help_rest)
print("\033[33m--\033[0m")
else:
print("\033[31mERROR, doc should have more then one line\033[0m")
print(self.m_doc) | 0.002169 |
def get_mimetype(path):
"""
Guesses the mime type of a file. If mime type cannot be detected, plain
text is assumed.
:param path: path of the file
:return: the corresponding mime type.
"""
filename = os.path.split(path)[1]
mimetype = mimetypes.guess_type(filename)[0]
if mimetype is None:
mimetype = 'text/x-plain'
_logger().debug('mimetype detected: %s', mimetype)
return mimetype | 0.004149 |
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
# assuming the contents has been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents) | 0.002451 |
def save_image(figure, filename):
"""Save an image to the docs images directory.
Args:
filename (str): The name of the file (not containing
directory info).
"""
path = os.path.join(IMAGES_DIR, filename)
figure.savefig(path, bbox_inches="tight")
plt.close(figure) | 0.003257 |
def print_file_results(file_result):
"""Print the results of validating a file.
Args:
file_result: A FileValidationResults instance.
"""
print_results_header(file_result.filepath, file_result.is_valid)
for object_result in file_result.object_results:
if object_result.warnings:
print_warning_results(object_result, 1)
if object_result.errors:
print_schema_results(object_result, 1)
if file_result.fatal:
print_fatal_results(file_result.fatal, 1) | 0.00189 |
def get_installed_extensions(self, contribution_ids=None, include_disabled_apps=None, asset_types=None):
"""GetInstalledExtensions.
[Preview API]
:param [str] contribution_ids:
:param bool include_disabled_apps:
:param [str] asset_types:
:rtype: [InstalledExtension]
"""
query_parameters = {}
if contribution_ids is not None:
contribution_ids = ";".join(contribution_ids)
query_parameters['contributionIds'] = self._serialize.query('contribution_ids', contribution_ids, 'str')
if include_disabled_apps is not None:
query_parameters['includeDisabledApps'] = self._serialize.query('include_disabled_apps', include_disabled_apps, 'bool')
if asset_types is not None:
asset_types = ":".join(asset_types)
query_parameters['assetTypes'] = self._serialize.query('asset_types', asset_types, 'str')
response = self._send(http_method='GET',
location_id='2648442b-fd63-4b9a-902f-0c913510f139',
version='5.0-preview.1',
query_parameters=query_parameters)
return self._deserialize('[InstalledExtension]', self._unwrap_collection(response)) | 0.006255 |
def raster_weights(self, **kwargs):
"""
Compute neighbor weights for GeoRaster.
See help(gr.raster_weights) for options
Usage:
geo.raster_weights(rook=True)
"""
if self.weights is None:
self.weights = raster_weights(self.raster, **kwargs)
pass | 0.00625 |
def one_or_more(
schema: dict, unique_items: bool = True, min: int = 1, max: int = None
) -> dict:
"""
Helper function to construct a schema that validates items matching
`schema` or an array containing items matching `schema`.
:param schema: The schema to use
:param unique_items: Flag if array items should be unique
:param min: Correlates to ``minLength`` attribute of JSON Schema array
:param max: Correlates to ``maxLength`` attribute of JSON Schema array
"""
multi_schema = {
"type": "array",
"items": schema,
"minItems": min,
"uniqueItems": unique_items,
}
if max:
multi_schema["maxItems"] = max
return {"oneOf": [multi_schema, schema]} | 0.001359 |
def save_jsonf(data: Union[list, dict], fpath: str, encoding: str, indent=None) -> str:
"""
:param data: list | dict data
:param fpath: write path
:param encoding: encoding
:param indent:
:rtype: written path
"""
with codecs.open(fpath, mode='w', encoding=encoding) as f:
f.write(dump_json(data, indent))
return fpath | 0.005479 |
def connect(src, *destinations, exclude: set=None, fit=False):
"""
Connect src (signals/interfaces/values) to all destinations
:param exclude: interfaces on any level on src or destinations
which should be excluded from connection process
:param fit: auto fit source width to destination width
"""
    assignments = []
    if isinstance(src, HObjList):
        for dst in destinations:
            assert len(src) == len(dst), (src, dst)
        _destinations = [iter(d) for d in destinations]
        for _src in src:
            dsts = [next(d) for d in _destinations]
            assignments.append(connect(_src, *dsts, exclude=exclude, fit=fit))
    else:
        for dst in destinations:
            assignments.append(_connect(src, dst, exclude, fit))
    return assignments | 0.003699 |
def parse_cookies(self, req: Request, name: str, field: Field) -> typing.Any:
"""Pull a value from the cookiejar."""
return core.get_value(req.cookies, name, field) | 0.011111 |
def close(self):
"""shut down the pool's workers
this method sets the :attr:`closing` attribute, lines up the
:attr:`closed` attribute to be set once any queued data has been
processed, and raises a PoolClosed() exception in any coroutines still
blocked on :meth:`get`.
"""
super(Pool, self).close()
for waiter, waketime in self.outq._waiters:
scheduler.schedule_exception(PoolClosed(), waiter) | 0.004246 |
def __search_email_by_subject(self, subject, match_recipient):
"Get a list of message numbers"
if match_recipient is None:
_, data = self._mail.uid('search',
None,
'(HEADER SUBJECT "{subject}")'
.format(subject=subject))
uid_list = data[0].split()
return uid_list
else:
_, data = self._mail.uid('search',
None,
'(HEADER SUBJECT "{subject}" TO "{recipient}")'
.format(subject=subject, recipient=match_recipient))
filtered_list = []
uid_list = data[0].split()
for uid in uid_list:
                # The hard-coded indexes [1][0][1] reach into the email
                # message headers that are buried in the wrapper objects
                # returned when fetching a message.
to_addr = re.search(
"[^-]To: (.*)", self._mail.uid('fetch', uid, "(RFC822)")[1][0][1]).group(1).strip()
if (to_addr == match_recipient or to_addr == "<{0}>".format(match_recipient)):
# Add matching entry to the list.
filtered_list.append(uid)
return filtered_list | 0.00495 |
def copy(self):
'''
copy - Create a copy of this IRField.
Each subclass should implement this, as you'll need to pass in the args to constructor.
@return <IRField (or subclass)> - Another IRField that has all the same values as this one.
'''
return self.__class__(name=self.name, valueType=self.valueType, defaultValue=self.defaultValue, hashIndex=self.hashIndex) | 0.031414 |
def _decoderFromString(cfg):
"""
Return a decoder function.
If cfg is a string such as 'latin-1' or u'latin-1',
then we return a new lambda, s.decode().
If cfg is already a lambda or function, then we return that.
"""
if isinstance(cfg, (bytes, str)):
return lambda s: s.decode(cfg, 'replace')
return cfg | 0.005263 |
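# Hedged usage sketch: a codec name becomes a decoder callable that replaces
# undecodable bytes instead of raising.
decode = _decoderFromString('latin-1')
assert decode(b'caf\xe9') == 'café'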
def split(self, point=None):
"""
Split this read into two halves. Original sequence is left unaltered.
        The names of the resultant reads will have '.1' and '.2' appended to
        the name of the original read.
:param point: the point (index, starting from 0) at which to split this
read -- everything before this index will be placed into the
first sequence, and everything at or after this index will be
placed in the second resultant sequence. If None
(the default), then we split in the middle; if the original
size is not a multiple of 2, the extra nucleotide is placed
into the second resultant sequence. Must be >= 0
and <= length of sequence.
:return: two NGSRead objects which correspond to the split of this
sequence.
"""
if point is None:
            point = len(self) // 2
if point < 0:
raise NGSReadError("Cannot split read at index less than 0 " +
"(index provided: " + str(point) + ")")
if point > len(self):
raise NGSReadError("Cannot split read at index greater than read " +
"length (index provided: " + str(point) + ")")
r1 = NGSRead(self.sequenceData[:point], self.name + ".1",
self.seq_qual[:point])
r2 = NGSRead(self.sequenceData[point:], self.name + ".2",
self.seq_qual[point:])
return r1, r2 | 0.002683 |
def add_includes(self, includes):
# type: (_BaseSourcePaths, list) -> None
"""Add a list of includes
:param _BaseSourcePaths self: this
:param list includes: list of includes
"""
if not isinstance(includes, list):
if isinstance(includes, tuple):
includes = list(includes)
else:
includes = [includes]
# remove any starting rglob spec
incl = []
for inc in includes:
tmp = pathlib.Path(inc).parts
if tmp[0] == '**':
if len(tmp) == 1:
continue
else:
incl.append(str(pathlib.Path(*tmp[1:])))
else:
incl.append(inc)
# check for any remaining rglob specs
if any(['**' in x for x in incl]):
raise ValueError('invalid include specification containing "**"')
if self._include is None:
self._include = incl
else:
self._include.extend(incl) | 0.002863 |
def installedApp(self):
"""identify the propery application to launch, given the configuration"""
try: return self._installedApp
        except AttributeError: # raises if not yet defined
self._installedApp = runConfigs.get() # application/install/platform management
return self._installedApp | 0.028037 |
def build_error_handler_for_flask_restplus(*tasks):
"""
Provides a generic error function that packages a flask_buzz exception
so that it can be handled by the flask-restplus error handler::
@api.errorhandler(SFBDError)
def do_it(error):
return SFBDError.build_error_handler_for_flask_restplus()()
or::
api.errorhandler(SFBDError)(
SFBDError.build_error_handler_for_flask_restplus()
)
Flask-restplus handles exceptions differently than Flask, and it is
awkward. For further reading on why special functionality is needed for
flask-restplus, observe and compare:
* http://flask.pocoo.org/docs/0.12/patterns/apierrors/
* http://flask-restplus.readthedocs.io/en/stable/errors.html#
Additionally, extra tasks may be applied to the error prior to
packaging as in ``build_error_handler``
"""
def _handler(error, tasks=[]):
[t(error) for t in tasks]
response = error.jsonify()
return flask.json.loads(response.get_data()), response.status_code
return functools.partial(_handler, tasks=tasks) | 0.00163 |
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
variables = list(notdone.keys())
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
while len(variables) > 0:
for name in tuple(variables):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m is not None:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if (name.startswith('PY_') and
name[3:] in renamed_variables):
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
variables.remove(name)
if (name.startswith('PY_') and
name[3:] in renamed_variables):
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference (e.g. "prefix=$/opt/python");
# just drop it since we can't deal
done[name] = value
variables.remove(name)
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars | 0.001511 |
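# Hedged usage sketch (hypothetical file contents): variable references such
# as $(prefix) are expanded against earlier assignments.
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.mk', delete=False) as handle:
    handle.write("prefix = /usr/local\nbindir = $(prefix)/bin\n")
    path = handle.name
print(_parse_makefile(path))  # {'prefix': '/usr/local', 'bindir': '/usr/local/bin'}
os.remove(path)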
def counter(self, key, delta=1, initial=None, ttl=0):
"""Increment or decrement the numeric value of an item.
This method instructs the server to treat the item stored under
the given key as a numeric counter.
Counter operations require that the stored value
exists as a string representation of a number (e.g. ``123``). If
storing items using the :meth:`upsert` family of methods, and
using the default :const:`couchbase.FMT_JSON` then the value
will conform to this constraint.
:param string key: A key whose counter value is to be modified
:param int delta: an amount by which the key should be modified.
If the number is negative then this number will be
*subtracted* from the current value.
:param initial: The initial value for the key, if it does not
exist. If the key does not exist, this value is used, and
`delta` is ignored. If this parameter is `None` then no
initial value is used
:type initial: int or `None`
:param int ttl: The lifetime for the key, after which it will
expire
:raise: :exc:`.NotFoundError` if the key does not exist on the
bucket (and `initial` was `None`)
:raise: :exc:`.DeltaBadvalError` if the key exists, but the
existing value is not numeric
:return: A :class:`.Result` object. The current value of the
counter may be obtained by inspecting the return value's
`value` attribute.
Simple increment::
rv = cb.counter("key")
rv.value
# 42
Increment by 10::
rv = cb.counter("key", delta=10)
Decrement by 5::
rv = cb.counter("key", delta=-5)
Increment by 20, set initial value to 5 if it does not exist::
rv = cb.counter("key", delta=20, initial=5)
Increment three keys::
kv = cb.counter_multi(["foo", "bar", "baz"])
for key, result in kv.items():
print "Key %s has value %d now" % (key, result.value)
.. seealso:: :meth:`counter_multi`
"""
return _Base.counter(self, key, delta=delta, initial=initial, ttl=ttl) | 0.000877 |
def app_start_up_time(self, package: str) -> str:
'''Get the time it took to launch your application.'''
output, _ = self._execute(
'-s', self.device_sn, 'shell', 'am', 'start', '-W', package)
        return re.findall(r'TotalTime: \d+', output)[0]
def simple_name_generator(obj):
"""
Simple name_generator designed for HoloViews objects.
Objects are labeled with {group}-{label} for each nested
object, based on a depth-first search. Adjacent objects with
identical representations yield only a single copy of the
representation, to avoid long names for the common case of
a container whose element(s) share the same group and label.
"""
if isinstance(obj, LabelledData):
labels = obj.traverse(lambda x:
(x.group + ('-' +x.label if x.label else '')))
labels=[l[0] for l in itertools.groupby(labels)]
obj_str = ','.join(labels)
else:
obj_str = repr(obj)
return obj_str | 0.006859 |
def discover(source):
"Given a JavaScript file, find the sourceMappingURL line"
source = source.splitlines()
# Source maps are only going to exist at either the top or bottom of the document.
# Technically, there isn't anything indicating *where* it should exist, so we
# are generous and assume it's somewhere either in the first or last 5 lines.
# If it's somewhere else in the document, you're probably doing it wrong.
if len(source) > 10:
possibilities = source[:5] + source[-5:]
else:
possibilities = source
for line in set(possibilities):
pragma = line[:21]
if pragma == '//# sourceMappingURL=' or pragma == '//@ sourceMappingURL=':
# We want everything AFTER the pragma, which is 21 chars long
return line[21:].rstrip()
# XXX: Return None or raise an exception?
return None | 0.005675 |
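# Hedged usage sketch: the pragma is picked up whether it sits at the top or
# the bottom of a short file.
js = "console.log('hi');\n//# sourceMappingURL=app.min.js.map\n"
assert discover(js) == "app.min.js.map"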
def get_instance(self, payload):
"""
Build an instance of VerificationCheckInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.acc_security.service.verification_check.VerificationCheckInstance
:rtype: twilio.rest.preview.acc_security.service.verification_check.VerificationCheckInstance
"""
return VerificationCheckInstance(self._version, payload, service_sid=self._solution['service_sid'], ) | 0.010246 |
def createStatus(self,
repo_user, repo_name, sha, state, target_url=None,
description=None, context=None):
"""
:param sha: Full sha to create the status for.
:param state: one of the following 'pending', 'success', 'error'
or 'failure'.
:param target_url: Target url to associate with this status.
:param description: Short description of the status.
        :return: A Deferred with the result from GitHub.
"""
payload = {'state': state}
if description is not None:
payload['description'] = description
if target_url is not None:
payload['target_url'] = target_url
if context is not None:
payload['context'] = context
return self.api.makeRequest(
['repos', repo_user, repo_name, 'statuses', sha],
method='POST',
post=payload) | 0.004278 |
def _read_tags(self):
"""
Fill in the _tags dict from the tags file.
Args:
None
Returns:
True
Todo:
            Figure out what could go wrong and at least acknowledge the
            fact that Murphy was an optimist.
"""
tags = self._config.get('tags', {})
logging.info('Tags:')
for tag_name in tags.keys():
tag = {}
tag['Key'] = tag_name
tag['Value'] = tags[tag_name]
self._tags.append(tag)
logging.info('{} = {}'.format(tag_name, tags[tag_name]))
logging.debug(json.dumps(
self._tags,
indent=2,
sort_keys=True
))
return True | 0.002677 |
def create_community(self, token, name, **kwargs):
"""
Create a new community or update an existing one using the uuid.
:param token: A valid token for the user in question.
:type token: string
:param name: The community name.
:type name: string
:param description: (optional) The community description.
:type description: string
:param uuid: (optional) uuid of the community. If none is passed, will
generate one.
:type uuid: string
:param privacy: (optional) Default 'Public', possible values
[Public|Private].
:type privacy: string
:param can_join: (optional) Default 'Everyone', possible values
[Everyone|Invitation].
:type can_join: string
:returns: The community dao that was created.
:rtype: dict
"""
parameters = dict()
parameters['token'] = token
parameters['name'] = name
optional_keys = ['description', 'uuid', 'privacy', 'can_join']
for key in optional_keys:
if key in kwargs:
if key == 'can_join':
parameters['canjoin'] = kwargs[key]
continue
parameters[key] = kwargs[key]
response = self.request('midas.community.create', parameters)
return response | 0.001453 |
def copy(self):
""" :rtype: Return a copy of the hypercube """
return HyperCube(
dimensions=self.dimensions(copy=False),
arrays=self.arrays(),
properties=self.properties()) | 0.008929 |
def searchsorted(self, value, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
"""
if isinstance(value, str):
value = self._scalar_from_string(value)
if not (isinstance(value, (self._scalar_type, type(self)))
or isna(value)):
raise ValueError("Unexpected type for 'value': {valtype}"
.format(valtype=type(value)))
self._check_compatible_with(value)
if isinstance(value, type(self)):
value = value.asi8
else:
value = self._unbox_scalar(value)
return self.asi8.searchsorted(value, side=side, sorter=sorter) | 0.001234 |
def gen_decode(iterable):
"A generator for de-unsynchronizing a byte iterable."
sync = False
for b in iterable:
if sync and b & 0xE0:
warn("Invalid unsynched data", Warning)
if not (sync and b == 0x00):
yield b
sync = (b == 0xFF) | 0.006231 |
def _parse_pages_binding(details):
"""
Parse number of pages and binding of the book.
Args:
details (obj): HTMLElement containing slice of the page with details.
Returns:
(pages, binding): Tuple with two string or two None.
"""
pages = _get_td_or_none(
details,
"ctl00_ContentPlaceHolder1_tblRowRozsahVazba"
)
if not pages:
return None, None
    binding = None  # binding info and number of pages are stored in the same string
if "/" in pages:
binding = pages.split("/")[1].strip()
pages = pages.split("/")[0].strip()
if not pages:
pages = None
return pages, binding | 0.001481 |
def unserialize(self, msg_list, content=True, copy=True):
"""Unserialize a msg_list to a nested message dict.
This is roughly the inverse of serialize. The serialize/unserialize
methods work with full message lists, whereas pack/unpack work with
the individual message parts in the message list.
Parameters:
-----------
msg_list : list of bytes or Message objects
The list of message parts of the form [HMAC,p_header,p_parent,
p_content,buffer1,buffer2,...].
content : bool (True)
Whether to unpack the content dict (True), or leave it packed
(False).
copy : bool (True)
Whether to return the bytes (True), or the non-copying Message
object in each place (False).
Returns
-------
msg : dict
The nested message dict with top-level keys [header, parent_header,
content, buffers].
"""
minlen = 4
message = {}
if not copy:
for i in range(minlen):
msg_list[i] = msg_list[i].bytes
if self.auth is not None:
signature = msg_list[0]
if not signature:
raise ValueError("Unsigned Message")
if signature in self.digest_history:
raise ValueError("Duplicate Signature: %r"%signature)
self.digest_history.add(signature)
check = self.sign(msg_list[1:4])
if not signature == check:
raise ValueError("Invalid Signature: %r"%signature)
if not len(msg_list) >= minlen:
raise TypeError("malformed message, must have at least %i elements"%minlen)
header = self.unpack(msg_list[1])
message['header'] = header
message['msg_id'] = header['msg_id']
message['msg_type'] = header['msg_type']
message['parent_header'] = self.unpack(msg_list[2])
if content:
message['content'] = self.unpack(msg_list[3])
else:
message['content'] = msg_list[3]
message['buffers'] = msg_list[4:]
return message | 0.002775 |
def present(name,
save=False,
**kwargs):
'''
Ensure beacon is configured with the included beacon data.
Args:
name (str):
The name of the beacon ensure is configured.
save (bool):
``True`` updates the beacons.conf. Default is ``False``.
Returns:
dict: A dictionary of information about the results of the state
Example:
.. code-block:: yaml
ps_beacon:
beacon.present:
- name: ps
- save: True
- enable: False
- services:
salt-master: running
apache2: stopped
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': []}
current_beacons = __salt__['beacons.list'](return_yaml=False, **kwargs)
beacon_data = [{k: v} for k, v in six.iteritems(kwargs)]
if name in current_beacons:
if beacon_data == current_beacons[name]:
ret['comment'].append('Job {0} in correct state'.format(name))
else:
if 'test' in __opts__ and __opts__['test']:
kwargs['test'] = True
result = __salt__['beacons.modify'](name, beacon_data, **kwargs)
ret['comment'].append(result['comment'])
ret['changes'] = result['changes']
else:
result = __salt__['beacons.modify'](name, beacon_data, **kwargs)
if not result['result']:
ret['result'] = result['result']
ret['comment'] = result['comment']
return ret
else:
if 'changes' in result:
ret['comment'].append('Modifying {0} in beacons'.format(name))
ret['changes'] = result['changes']
else:
ret['comment'].append(result['comment'])
else:
if 'test' in __opts__ and __opts__['test']:
kwargs['test'] = True
result = __salt__['beacons.add'](name, beacon_data, **kwargs)
ret['comment'].append(result['comment'])
else:
result = __salt__['beacons.add'](name, beacon_data, **kwargs)
if not result['result']:
ret['result'] = result['result']
ret['comment'] = result['comment']
return ret
else:
ret['comment'].append('Adding {0} to beacons'.format(name))
if save:
__salt__['beacons.save'](**kwargs)
ret['comment'].append('Beacon {0} saved'.format(name))
ret['comment'] = '\n'.join(ret['comment'])
return ret | 0.001483 |
def set_vm_status(self, boot_on_next_reset):
"""Set the Virtual Media drive status.
:param boot_on_next_reset: boolean value
:raises: SushyError, on an error from iLO.
"""
data = {
"Oem": {
"Hpe": {
"BootOnNextServerReset": boot_on_next_reset
}
}
}
self._conn.patch(self.path, data=data) | 0.00463 |
def get_file_type_map():
"""Map file types (extensions) to strategy types."""
file_type_map = {}
for strategy_type in get_strategy_types():
for ext in strategy_type.file_types:
if ext in file_type_map:
raise KeyError(
'File type {ext} already registered to {file_type_map[ext]}'
.format(**locals()))
file_type_map[ext] = strategy_type
return file_type_map | 0.004357 |
def apply(db, op):
"""
Apply operation in db
"""
dbname = op['ns'].split('.')[0] or "admin"
opts = bson.CodecOptions(uuid_representation=bson.binary.STANDARD)
db[dbname].command("applyOps", [op], codec_options=opts) | 0.004184 |
def get_not_unique_values(array):
'''Returns the values that appear at least twice in array.
Parameters
----------
array : array like
Returns
-------
numpy.array
'''
s = np.sort(array, axis=None)
    # keep each value that equals its successor, i.e. occurs at least twice
    s = s[:-1][s[1:] == s[:-1]]
return np.unique(s) | 0.003521 |
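# Hedged usage sketch (assumes numpy imported as np): only values that occur
# at least twice survive.
values = np.array([1, 2, 2, 3, 3, 3])
print(get_not_unique_values(values))  # array([2, 3])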
def _convert_response_to_error(self, response):
"""Subclasses may override this method in order to influence
how errors are parsed from the response.
Parameters:
response(Response): The response object.
Returns:
object or None: Any object for which a max retry count can
be retrieved or None if the error cannot be handled.
"""
content_type = response.headers.get("content-type", "")
if "application/x-protobuf" in content_type:
self.logger.debug("Decoding protobuf response.")
data = status_pb2.Status.FromString(response.content)
status = self._PB_ERROR_CODES.get(data.code)
error = {"status": status}
return error
elif "application/json" in content_type:
self.logger.debug("Decoding json response.")
data = response.json()
error = data.get("error")
if not error or not isinstance(error, dict):
self.logger.warning("Unexpected error response: %r", data)
return None
return error
self.logger.warning("Unexpected response: %r", response.text)
return None | 0.001639 |
def onset_precision_recall_f1(ref_intervals, est_intervals,
onset_tolerance=0.05, strict=False, beta=1.0):
"""Compute the Precision, Recall and F-measure of note onsets: an estimated
onset is considered correct if it is within +-50ms of a reference onset.
Note that this metric completely ignores note offset and note pitch. This
means an estimated onset will be considered correct if it matches a
reference onset, even if the onsets come from notes with completely
different pitches (i.e. notes that would not match with
:func:`match_notes`).
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_valued_intervals(
... 'reference.txt')
>>> est_intervals, _ = mir_eval.io.load_valued_intervals(
... 'estimated.txt')
>>> (onset_precision,
... onset_recall,
... onset_f_measure) = mir_eval.transcription.onset_precision_recall_f1(
... ref_intervals, est_intervals)
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
onset_tolerance : float > 0
The tolerance for an estimated note's onset deviating from the
reference note's onset, in seconds. Default is 0.05 (50 ms).
strict : bool
If ``strict=False`` (the default), threshold checks for onset matching
are performed using ``<=`` (less than or equal). If ``strict=True``,
the threshold checks are performed using ``<`` (less than).
beta : float > 0
Weighting factor for f-measure (default value = 1.0).
Returns
-------
precision : float
The computed precision score
recall : float
The computed recall score
f_measure : float
The computed F-measure score
"""
validate_intervals(ref_intervals, est_intervals)
# When reference notes are empty, metrics are undefined, return 0's
if len(ref_intervals) == 0 or len(est_intervals) == 0:
return 0., 0., 0.
matching = match_note_onsets(ref_intervals, est_intervals,
onset_tolerance=onset_tolerance,
strict=strict)
onset_precision = float(len(matching))/len(est_intervals)
onset_recall = float(len(matching))/len(ref_intervals)
onset_f_measure = util.f_measure(onset_precision, onset_recall, beta=beta)
return onset_precision, onset_recall, onset_f_measure | 0.000386 |
def wrap(self, value):
"""Wrap numpy.ndarray as Value
"""
attrib = getattr(value, 'attrib', {})
S, NS = divmod(time.time(), 1.0)
value = numpy.asarray(value) # loses any special/augmented attributes
dims = list(value.shape)
dims.reverse() # inner-most sent as left
if 'ColorMode' not in attrib:
# attempt to infer color mode from shape
if value.ndim==2:
attrib['ColorMode'] = 0 # gray
elif value.ndim==3:
for idx,dim in enumerate(dims):
if dim==3: # assume it's a color
attrib['ColorMode'] = 2 + idx # 2 - RGB1, 3 - RGB2, 4 - RGB3
break # assume that the first is color, and any subsequent dim=3 is a thin ROI
dataSize = value.nbytes
return Value(self.type, {
'value': (self._code2u[value.dtype.char], value.flatten()),
'compressedSize': dataSize,
'uncompressedSize': dataSize,
'uniqueId': 0,
'timeStamp': {
'secondsPastEpoch': S,
'nanoseconds': NS * 1e9,
},
'attribute': [{'name': K, 'value': V} for K, V in attrib.items()],
'dimension': [{'size': N,
'offset': 0,
'fullSize': N,
'binning': 1,
'reverse': False} for N in dims],
}) | 0.00869 |
def call_api(self, table, column, value, **kwargs):
"""Exposed method to connect and query the EPA's API."""
try:
output_format = kwargs.pop('output_format')
except KeyError:
output_format = self.output_format
url_list = [self.base_url, table, column,
quote(value), 'rows']
rows_count = self._number_of_rows(**kwargs)
url_list.append(rows_count)
url_string = '/'.join(url_list)
xml_data = urlopen(url_string).read()
data = self._format_data(output_format, xml_data)
return data | 0.003328 |
def get_slac_default_args(job_time=1500):
""" Create a batch job interface object.
Parameters
----------
job_time : int
Expected max length of the job, in seconds.
This is used to select the batch queue and set the
job_check_sleep parameter that sets how often
we check for job completion.
"""
slac_default_args = dict(lsf_args={'W': job_time,
'R': '\"select[rhel60&&!fell]\"'},
max_jobs=500,
time_per_cycle=15,
jobs_per_cycle=20,
max_job_age=90,
no_batch=False)
return slac_default_args.copy() | 0.00135 |
def indicator_body(indicators):
"""Generate the appropriate dictionary content for POST of an File indicator
Args:
indicators (list): A list of one or more hash value(s).
"""
hash_patterns = {
'md5': re.compile(r'^([a-fA-F\d]{32})$'),
'sha1': re.compile(r'^([a-fA-F\d]{40})$'),
'sha256': re.compile(r'^([a-fA-F\d]{64})$'),
}
body = {}
for indicator in indicators:
if indicator is None:
continue
if hash_patterns['md5'].match(indicator):
body['md5'] = indicator
elif hash_patterns['sha1'].match(indicator):
body['sha1'] = indicator
elif hash_patterns['sha256'].match(indicator):
body['sha256'] = indicator
return body | 0.003559 |
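# Hedged usage sketch: the well-known empty-input digests are routed to their
# keys by hash pattern.
hashes = [
    'd41d8cd98f00b204e9800998ecf8427e',                                   # md5
    'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',   # sha256
]
print(indicator_body(hashes))
# {'md5': 'd41d8cd9...', 'sha256': 'e3b0c442...'}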
def rotate_clip(data_np, theta_deg, rotctr_x=None, rotctr_y=None,
out=None, use_opencl=True, logger=None):
"""
Rotate numpy array `data_np` by `theta_deg` around rotation center
(rotctr_x, rotctr_y). If the rotation center is omitted it defaults
to the center of the array.
No adjustment is done to the data array beforehand, so the result will
be clipped according to the size of the array (the output array will be
the same size as the input array).
"""
# If there is no rotation, then we are done
if math.fmod(theta_deg, 360.0) == 0.0:
return data_np
ht, wd = data_np.shape[:2]
dtype = data_np.dtype
if rotctr_x is None:
rotctr_x = wd // 2
if rotctr_y is None:
rotctr_y = ht // 2
if have_opencv:
if logger is not None:
logger.debug("rotating with OpenCv")
# opencv is fastest
M = cv2.getRotationMatrix2D((rotctr_y, rotctr_x), theta_deg, 1)
if data_np.dtype == np.dtype('>f8'):
# special hack for OpenCv warpAffine bug on numpy arrays of
# dtype '>f8'-- it corrupts them
data_np = data_np.astype(np.float64)
newdata = cv2.warpAffine(data_np, M, (wd, ht))
new_ht, new_wd = newdata.shape[:2]
assert (wd == new_wd) and (ht == new_ht), \
Exception("rotated cutout is %dx%d original=%dx%d" % (
new_wd, new_ht, wd, ht))
newdata = newdata.astype(dtype, copy=False)
if out is not None:
out[:, :, ...] = newdata
newdata = out
elif have_opencl and use_opencl:
if logger is not None:
logger.debug("rotating with OpenCL")
# opencl is very close, sometimes better, sometimes worse
if (data_np.dtype == np.uint8) and (len(data_np.shape) == 3):
# special case for 3D RGB images
newdata = trcalc_cl.rotate_clip_uint32(data_np, theta_deg,
rotctr_x, rotctr_y,
out=out)
else:
newdata = trcalc_cl.rotate_clip(data_np, theta_deg,
rotctr_x, rotctr_y,
out=out)
else:
if logger is not None:
logger.debug("rotating with numpy")
yi, xi = np.mgrid[0:ht, 0:wd]
xi -= rotctr_x
yi -= rotctr_y
cos_t = np.cos(np.radians(theta_deg))
sin_t = np.sin(np.radians(theta_deg))
if have_numexpr:
ap = ne.evaluate("(xi * cos_t) - (yi * sin_t) + rotctr_x")
bp = ne.evaluate("(xi * sin_t) + (yi * cos_t) + rotctr_y")
else:
ap = (xi * cos_t) - (yi * sin_t) + rotctr_x
bp = (xi * sin_t) + (yi * cos_t) + rotctr_y
#ap = np.rint(ap).astype('int').clip(0, wd-1)
#bp = np.rint(bp).astype('int').clip(0, ht-1)
        # Optimizations to reuse existing intermediate arrays
        np.rint(ap, out=ap)
        ap = ap.astype(int, copy=False)
        ap.clip(0, wd - 1, out=ap)
        np.rint(bp, out=bp)
        bp = bp.astype(int, copy=False)
        bp.clip(0, ht - 1, out=bp)
if out is not None:
out[:, :, ...] = data_np[bp, ap]
newdata = out
else:
newdata = data_np[bp, ap]
new_ht, new_wd = newdata.shape[:2]
assert (wd == new_wd) and (ht == new_ht), \
Exception("rotated cutout is %dx%d original=%dx%d" % (
new_wd, new_ht, wd, ht))
return newdata | 0.000826 |
def _solve_implicit_banded(current, banded_matrix):
"""Uses a banded solver for matrix inversion of a tridiagonal matrix.
Converts the complete listed tridiagonal matrix *(nxn)* into a three row
matrix *(3xn)* and calls :py:func:`scipy.linalg.solve_banded()`.
:param array current: the current state of the variable for which
matrix inversion should be computed
:param array banded_matrix: complete diffusion matrix (*dimension: nxn*)
:returns: output of :py:func:`scipy.linalg.solve_banded()`
:rtype: array
"""
# can improve performance by storing the banded form once and not
# recalculating it...
# but whatever
J = banded_matrix.shape[0]
diag = np.zeros((3, J))
diag[1, :] = np.diag(banded_matrix, k=0)
diag[0, 1:] = np.diag(banded_matrix, k=1)
diag[2, :-1] = np.diag(banded_matrix, k=-1)
return solve_banded((1, 1), diag, current) | 0.002985 |
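# Hedged usage sketch (assumes numpy as np and scipy.linalg.solve_banded as
# imported by the module): invert a small tridiagonal system A @ x = current.
A = np.array([[ 2., -1.,  0.],
              [-1.,  2., -1.],
              [ 0., -1.,  2.]])
current = np.array([1., 0., 1.])
x = _solve_implicit_banded(current, A)
# np.allclose(A @ x, current) -> True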
def _get_migration_files(self, path):
"""
Get all of the migration files in a given path.
:type path: str
:rtype: list
"""
files = glob.glob(os.path.join(path, "[0-9]*_*.py"))
if not files:
return []
files = list(map(lambda f: os.path.basename(f).replace(".py", ""), files))
files = sorted(files)
return files | 0.007371 |
def clear_symbols(self, index):
"""Clears all symbols begining with the index to the end of table"""
try:
del self.table[index:]
except Exception:
self.error()
self.table_len = len(self.table) | 0.007874 |
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf | 0.002188 |
def validate_root_vertex_directives(root_ast):
"""Validate the directives that appear at the root vertex field."""
directives_present_at_root = set()
for directive_obj in root_ast.directives:
directive_name = directive_obj.name.value
if is_filter_with_outer_scope_vertex_field_operator(directive_obj):
            raise GraphQLCompilationError(u'Found a filter directive with an operator that is not '
                                          u'allowed on the root vertex: {}'.format(directive_obj))
directives_present_at_root.add(directive_name)
disallowed_directives = directives_present_at_root & VERTEX_DIRECTIVES_PROHIBITED_ON_ROOT
if disallowed_directives:
raise GraphQLCompilationError(u'Found prohibited directives on root vertex: '
u'{}'.format(disallowed_directives)) | 0.005747 |
def lb2pix(nside, l, b, nest=True):
"""
Converts Galactic (l, b) to HEALPix pixel index.
Args:
nside (:obj:`int`): The HEALPix :obj:`nside` parameter.
l (:obj:`float`, or array of :obj:`float`): Galactic longitude, in degrees.
b (:obj:`float`, or array of :obj:`float`): Galactic latitude, in degrees.
nest (Optional[:obj:`bool`]): If :obj:`True` (the default), nested pixel ordering
will be used. If :obj:`False`, ring ordering will be used.
Returns:
The HEALPix pixel index or indices. Has the same shape as the input :obj:`l`
and :obj:`b`.
"""
theta = np.radians(90. - b)
phi = np.radians(l)
if not hasattr(l, '__len__'):
if (b < -90.) or (b > 90.):
return -1
pix_idx = hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest)
return pix_idx
idx = (b >= -90.) & (b <= 90.)
pix_idx = np.empty(l.shape, dtype='i8')
pix_idx[idx] = hp.pixelfunc.ang2pix(nside, theta[idx], phi[idx], nest=nest)
pix_idx[~idx] = -1
return pix_idx | 0.005576 |
def verify(self):
"""Checks all parameters for invalidating conditions
:returns: str -- message if error, 0 otherwise
"""
for row in range(self.nrows()):
result = self.verify_row(row)
if result != 0:
return result
return 0 | 0.006601 |
def next_tokens_in_sequence(observed, current):
""" Given the observed list of tokens, and the current list,
        finds out what should be the next emitted word
"""
idx = 0
for word in current:
if observed[idx:].count(word) != 0:
found_pos = observed.index(word, idx)
idx = max(idx + 1, found_pos)
# otherwise, don't increment idx
if idx < len(observed):
return observed[idx:]
else:
return [] | 0.002119 |
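# Hedged usage sketch: once the current tokens have been matched against the
# observed sequence, the remainder is what should be emitted next.
observed = ['the', 'quick', 'brown', 'fox']
current = ['the', 'quick']
print(next_tokens_in_sequence(observed, current))  # ['brown', 'fox']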
def branches(self):
"""Get basic block branches.
"""
branches = []
if self._taken_branch:
branches += [(self._taken_branch, 'taken')]
if self._not_taken_branch:
branches += [(self._not_taken_branch, 'not-taken')]
if self._direct_branch:
branches += [(self._direct_branch, 'direct')]
return branches | 0.005076 |
def percent_pareto_recharges(recharges, percentage=0.8):
"""
Percentage of recharges that account for 80% of total recharged amount.
"""
amounts = sorted([r.amount for r in recharges], reverse=True)
total_sum = sum(amounts)
partial_sum = 0
for count, a in enumerate(amounts):
partial_sum += a
if partial_sum >= percentage * total_sum:
break
return (count + 1) / len(recharges) | 0.002283 |
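# Hedged usage sketch with a stand-in record type (the only assumed attribute
# is .amount): the top 3 of 6 recharges cover at least 80% of the total.
from collections import namedtuple

Recharge = namedtuple('Recharge', 'amount')
recharges = [Recharge(a) for a in (50, 20, 10, 10, 5, 5)]
print(percent_pareto_recharges(recharges))  # 0.5 with Python 3 true division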
def xyz_with_ports(self):
"""Return all particle coordinates in this compound including ports.
Returns
-------
pos : np.ndarray, shape=(n, 3), dtype=float
Array with the positions of all particles and ports.
"""
if not self.children:
pos = self._pos
else:
arr = np.fromiter(
itertools.chain.from_iterable(
particle.pos for particle in self.particles(
include_ports=True)), dtype=float)
pos = arr.reshape((-1, 3))
return pos | 0.00335 |
def extract_thermodynamic_quantities(self,temperature_array):
"""
Calculates the thermodynamic quantities of your system at each point in time.
Calculated Quantities: self.Q (heat),self.W (work), self.Delta_E_kin, self.Delta_E_pot
self.Delta_E (change of Hamiltonian),
Parameters
----------
temperature_array : array
array which represents the temperature at every point in your time trace
and should therefore have the same length as the Hamiltonian
Requirements
------------
execute calc_hamiltonian on the DataObject first
Returns:
-------
Q : array
The heat exchanged by the particle at every point in time over a given trap-frequency and temperature change.
W : array
The work "done" by the particle at every point in time over a given trap-frequency and temperature change.
"""
beta = 1/(_scipy.constants.Boltzmann*temperature_array)
self.Q = self.Hamiltonian*(_np.insert(_np.diff(beta),0,beta[1]-beta[0])*self.SampleFreq)
self.W = self.Hamiltonian-self.Q
self.Delta_E_kin = _np.diff(self.E_kin)*self.SampleFreq
self.Delta_E_pot = _np.diff(self.E_pot)*self.SampleFreq
self.Delta_E = _np.diff(self.Hamiltonian)*self.SampleFreq
return self.Q, self.W | 0.009986 |
def record(self):
# type: () -> bytes
'''
Generate a string representing the Rock Ridge Extension Selector record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('ES record not yet initialized!')
return b'ES' + struct.pack('=BBB', RRESRecord.length(), SU_ENTRY_VERSION, self.extension_sequence) | 0.012371 |
def connection_lost(self, exception):
"""Called when the connection is lost or closed.
The argument is either an exception object or None. The latter means
a regular EOF is received, or the connection was aborted or closed by
this side of the connection.
"""
if exception:
self.logger.exception('Connection lost!')
else:
self.logger.info('Connection lost') | 0.004577 |
def record_timing(self, duration, *path):
"""Record a timing.
This method records a timing to the application's namespace
followed by a calculated path. Each element of `path` is
converted to a string and normalized before joining the
elements by periods. The normalization process is little
more than replacing periods with dashes.
:param float duration: timing to record in seconds
:param path: elements of the metric path to record
"""
self.application.statsd.send(path, duration * 1000.0, 'ms') | 0.003431 |
def localize(self):
"""
Check if this module was saved as a resource. If it was, return a new module descriptor
that points to a local copy of that resource. Should only be called on a worker node. On
the leader, this method returns this resource, i.e. self.
:rtype: toil.resource.Resource
"""
if not self._runningOnWorker():
log.warn('The localize() method should only be invoked on a worker.')
resource = Resource.lookup(self._resourcePath)
if resource is None:
return self
else:
def stash(tmpDirPath):
# Save the original dirPath such that we can restore it in globalize()
with open(os.path.join(tmpDirPath, '.stash'), 'w') as f:
f.write('1' if self.fromVirtualEnv else '0')
f.write(self.dirPath)
resource.download(callback=stash)
return self.__class__(dirPath=resource.localDirPath,
name=self.name,
fromVirtualEnv=self.fromVirtualEnv) | 0.005357 |
def add_statement(self, statement_obj):
"""
Adds a statement object to the layer
@type statement_obj: L{Cstatement}
@param statement_obj: the statement object
"""
if statement_obj.get_id() in self.idx:
raise ValueError("Statement with id {} already exists!"
.format(statement_obj.get_id()))
self.node.append(statement_obj.get_node())
self.idx[statement_obj.get_id()] = statement_obj | 0.004107 |
def create(self, request, *args, **kwargs):
"""HACK: couldn't get POST to the list endpoint without
messing up POST for the other list_routes so I'm doing this.
Maybe something to do with the router?
"""
return self.list(request, *args, **kwargs) | 0.006993 |
def status(self, additional=[]):
""" Returns status information for device.
This returns only a subset of possible properties.
"""
self.manager.refresh_client()
fields = ['batteryLevel', 'deviceDisplayName', 'deviceStatus', 'name']
fields += additional
properties = {}
for field in fields:
properties[field] = self.content.get(field)
return properties | 0.004587 |
def create_guest(self, capacity_id, test, guest_object):
"""Turns an empty Reserve Capacity into a real Virtual Guest
:param int capacity_id: ID of the RESERVED_CAPACITY_GROUP to create this guest into
:param bool test: True will use verifyOrder, False will use placeOrder
:param dictionary guest_object: Below is the minimum info you need to send in
guest_object = {
'domain': 'test.com',
'hostname': 'A1538172419',
'os_code': 'UBUNTU_LATEST_64',
'primary_disk': '25',
}
"""
vs_manager = VSManager(self.client)
mask = "mask[instances[id, billingItem[id, item[id,keyName]]], backendRouter[id, datacenter[name]]]"
capacity = self.get_object(capacity_id, mask=mask)
try:
capacity_flavor = capacity['instances'][0]['billingItem']['item']['keyName']
flavor = _flavor_string(capacity_flavor, guest_object['primary_disk'])
except KeyError:
raise SoftLayer.SoftLayerError("Unable to find capacity Flavor.")
guest_object['flavor'] = flavor
guest_object['datacenter'] = capacity['backendRouter']['datacenter']['name']
# Reserved capacity only supports SAN as of 20181008
guest_object['local_disk'] = False
template = vs_manager.verify_create_instance(**guest_object)
template['reservedCapacityId'] = capacity_id
if guest_object.get('ipv6'):
ipv6_price = self.ordering_manager.get_price_id_list('PUBLIC_CLOUD_SERVER', ['1_IPV6_ADDRESS'])
template['prices'].append({'id': ipv6_price[0]})
if test:
result = self.client.call('Product_Order', 'verifyOrder', template)
else:
result = self.client.call('Product_Order', 'placeOrder', template)
return result | 0.004828 |
def _enumerator(opener, entry_cls, format_code=None, filter_code=None):
"""Return an archive enumerator from a user-defined source, using a user-
defined entry type.
"""
archive_res = _archive_read_new()
try:
r = _set_read_context(archive_res, format_code, filter_code)
opener(archive_res)
def it():
while 1:
with _archive_read_next_header(archive_res) as entry_res:
if entry_res is None:
break
e = entry_cls(archive_res, entry_res)
yield e
if e.is_consumed is False:
_archive_read_data_skip(archive_res)
yield it()
finally:
_archive_read_free(archive_res) | 0.001282 |
def updateAccuracy(self, *accuracy):
"""
Updates current accuracy flag
"""
for acc in accuracy:
if not isinstance(acc, int):
acc = self._ACCURACY_REVERSE_MAPPING[acc]
self.accuracy |= acc | 0.007722 |
def _tensor_product(self, other, reverse=False):
"""Return the tensor product channel.
Args:
other (QuantumChannel): a quantum channel.
            reverse (bool): If False return self ⊗ other; if True
                            return other ⊗ self. [Default: False]
Returns:
Choi: the tensor product channel as a Choi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass.
"""
# Convert other to Choi
if not isinstance(other, Choi):
other = Choi(other)
if reverse:
input_dims = self.input_dims() + other.input_dims()
output_dims = self.output_dims() + other.output_dims()
data = _bipartite_tensor(
other.data,
self._data,
shape1=other._bipartite_shape,
shape2=self._bipartite_shape)
else:
input_dims = other.input_dims() + self.input_dims()
output_dims = other.output_dims() + self.output_dims()
data = _bipartite_tensor(
self._data,
other.data,
shape1=self._bipartite_shape,
shape2=other._bipartite_shape)
return Choi(data, input_dims, output_dims) | 0.001522 |
def init_defaults(self):
"""
Sets the default values for this instance
"""
self.sql = ''
self.tables = []
self.joins = []
self._where = Where()
self.groups = []
self.sorters = []
self._limit = None
self.table_prefix = ''
self.is_inner = False
self.with_tables = []
self._distinct = False
self.distinct_ons = []
self.field_names = []
self.field_names_pk = None
self.values = [] | 0.003846 |
def _AcceptRPC(self):
"""Reads RPC request from stdin and processes it, writing result to stdout.
Returns:
True as long as execution is to be continued, False otherwise.
Raises:
RpcException: if no function was specified in the RPC or no such API
function exists.
"""
request = self._ReadObject()
    if request.get('func') == '__kill__':
self.ClearBreakpoints()
self._WriteObject('__kill_ack__')
return False
if 'func' not in request or request['func'].startswith('_'):
raise RpcException('Not a valid public API function.')
rpc_result = getattr(self, request['func'])(*request['args'])
self._WriteObject(rpc_result)
return True | 0.007062 |
def check_html_warnings(html, parser=None):
"""Check html warnings
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:raise VkPageWarningsError: in case of found warnings
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
# Check warnings
warnings = parser.find_all('div', {'class': 'service_msg_warning'})
if warnings:
raise VkPageWarningsError('; '.join([w.get_text() for w in warnings]))
return True | 0.001969 |
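
A minimal sketch of the check above in action, assuming check_html_warnings and VkPageWarningsError are importable from the same module (the HTML string is invented for illustration):

    html = '<div class="service_msg_warning">Too many requests per second</div>'
    try:
        check_html_warnings(html)
    except VkPageWarningsError as err:
        print(err)  # prints "Too many requests per second"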
def get_first_category(app_uid):
'''
    Get the first category, as the unique category of the post.
'''
recs = MPost2Catalog.query_by_entity_uid(app_uid).objects()
if recs.count() > 0:
return recs.get()
return None | 0.007752 |
def from_text(text):
"""Convert text into a DNS rdata type value.
@param text: the text
@type text: string
@raises dns.rdatatype.UnknownRdatatype: the type is unknown
@raises ValueError: the rdata type value is not >= 0 and <= 65535
@rtype: int"""
value = _by_text.get(text.upper())
if value is None:
match = _unknown_type_pattern.match(text)
        if match is None:
raise UnknownRdatatype
value = int(match.group(1))
if value < 0 or value > 65535:
raise ValueError("type must be between >= 0 and <= 65535")
return value | 0.003295 |
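
Assuming this is dnspython's dns.rdatatype module (suggested by the epydoc references above), a typical call looks like:

    import dns.rdatatype

    assert dns.rdatatype.from_text('A') == 1            # well-known mnemonic
    assert dns.rdatatype.from_text('TYPE1234') == 1234   # generic TYPEnnn form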
def addDeviceNames(self, remote):
""" If XML-API (http://www.homematic-inside.de/software/addons/item/xmlapi) is installed on CCU this function will add names to CCU devices """
LOG.debug("RPCFunctions.addDeviceNames")
        # First try to get names from metadata when no credentials are set
if self.remotes[remote]['resolvenames'] == 'metadata':
for address in self.devices[remote]:
try:
name = self.devices[remote][
address]._proxy.getMetadata(address, 'NAME')
self.devices[remote][address].NAME = name
for address, device in self.devices[remote][address].CHANNELS.items():
device.NAME = name
self.devices_all[remote][device.ADDRESS].NAME = name
except Exception as err:
LOG.debug(
"RPCFunctions.addDeviceNames: Unable to get name for %s from metadata." % str(address))
# Then try to get names via JSON-RPC
elif (self.remotes[remote]['resolvenames'] == 'json' and
self.remotes[remote]['username'] and
self.remotes[remote]['password']):
LOG.debug("RPCFunctions.addDeviceNames: Getting names via JSON-RPC")
try:
session = False
params = {"username": self.remotes[remote][
'username'], "password": self.remotes[remote]['password']}
response = self.jsonRpcPost(
self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "Session.login", params)
if response['error'] is None and response['result']:
session = response['result']
if not session:
LOG.warning(
"RPCFunctions.addDeviceNames: Unable to open session.")
return
params = {"_session_id_": session}
response = self.jsonRpcPost(
self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "Interface.listInterfaces", params)
interface = False
if response['error'] is None and response['result']:
for i in response['result']:
if i['port'] in [self.remotes[remote]['port'], self.remotes[remote]['port'] + 30000]:
interface = i['name']
break
LOG.debug(
"RPCFunctions.addDeviceNames: Got interface: %s" % interface)
if not interface:
params = {"_session_id_": session}
response = self.jsonRpcPost(
self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "Session.logout", params)
return
params = {"_session_id_": session}
response = self.jsonRpcPost(
self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "Device.listAllDetail", params)
if response['error'] is None and response['result']:
LOG.debug(
"RPCFunctions.addDeviceNames: Resolving devicenames")
for i in response['result']:
try:
if i.get('address') in self.devices[remote]:
self.devices[remote][
i['address']].NAME = i['name']
except Exception as err:
LOG.warning(
"RPCFunctions.addDeviceNames: Exception: %s" % str(err))
params = {"_session_id_": session}
response = self.jsonRpcPost(
self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "Session.logout", params)
except Exception as err:
params = {"_session_id_": session}
response = self.jsonRpcPost(
self.remotes[remote]['ip'], self.remotes[remote].get('jsonport', DEFAULT_JSONPORT), "Session.logout", params)
LOG.warning(
"RPCFunctions.addDeviceNames: Exception: %s" % str(err))
# Then try to get names from XML-API
elif self.remotes[remote]['resolvenames'] == 'xml':
LOG.warning("Resolving names with the XML-API addon will be disabled in a future release. Please switch to json.")
try:
response = urllib.request.urlopen(
"http://%s%s" % (self.remotes[remote]['ip'], XML_API_URL), timeout=5)
device_list = response.read().decode("ISO-8859-1")
except Exception as err:
LOG.warning(
"RPCFunctions.addDeviceNames: Could not access XML-API: %s" % (str(err), ))
return
device_list_tree = ET.ElementTree(ET.fromstring(device_list))
for device in device_list_tree.getroot():
address = device.attrib['address']
name = device.attrib['name']
if address in self.devices[remote]:
self.devices[remote][address].NAME = name
for address, device in self.devices[remote][address].CHANNELS.items():
device.NAME = name
self.devices_all[remote][device.ADDRESS].NAME = name | 0.003389 |
def find_ref_centers(mds):
"""Finds the center of the three reference clusters.
:param mds: the ``mds`` information about each samples.
:type mds: numpy.recarray
:returns: a tuple with a :py:class:`numpy.array` containing the centers of
the three reference population cluster as first element, and a
:py:class:`dict` containing the label of each of the three
reference population clusters.
First, we extract the ``mds`` values of each of the three reference
    populations. Then, we compute the center of each of those clusters by
computing the means.
.. math::
\\textrm{Cluster}_\\textrm{pop} = \\left(
\\frac{\\sum_{i=1}^n x_i}{n}, \\frac{\\sum_{i=1}^n y_i}{n}
\\right)
"""
# Computing the centers of each of the reference clusters
ceu_mds = mds[mds["pop"] == "CEU"]
yri_mds = mds[mds["pop"] == "YRI"]
asn_mds = mds[mds["pop"] == "JPT-CHB"]
# Computing the centers
centers = [[np.mean(ceu_mds["c1"]), np.mean(ceu_mds["c2"])],
[np.mean(yri_mds["c1"]), np.mean(yri_mds["c2"])],
[np.mean(asn_mds["c1"]), np.mean(asn_mds["c2"])]]
return np.array(centers), {"CEU": 0, "YRI": 1, "JPT-CHB": 2} | 0.000796 |
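
A small worked example of the mean formula above, using an invented structured array with the pop/c1/c2 fields the function expects (all coordinates are made up):

    import numpy as np

    mds = np.array([("CEU", 0.1, 0.2), ("CEU", 0.3, 0.4),
                    ("YRI", -1.0, 0.0), ("JPT-CHB", 1.0, 1.0)],
                   dtype=[("pop", "U7"), ("c1", "f8"), ("c2", "f8")])
    centers, labels = find_ref_centers(mds)
    # CEU center = ((0.1 + 0.3) / 2, (0.2 + 0.4) / 2) = (0.2, 0.3)
    assert np.allclose(centers[labels["CEU"]], [0.2, 0.3])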
def get_available_fields(self, obj):
"""
Get a list of all available fields for an object.
:param obj: The name of the Salesforce object that we are getting a description of.
:type obj: str
:return: the names of the fields.
:rtype: list of str
"""
self.get_conn()
obj_description = self.describe_object(obj)
return [field['name'] for field in obj_description['fields']] | 0.006637 |
def load(cls, fp, **kwargs):
"""wrapper for :py:func:`json.load`"""
json_obj = json.load(fp, **kwargs)
return parse(cls, json_obj) | 0.007042 |
def chat(self):
"""
Access the Chat Twilio Domain
:returns: Chat Twilio Domain
:rtype: twilio.rest.chat.Chat
"""
if self._chat is None:
from twilio.rest.chat import Chat
self._chat = Chat(self)
return self._chat | 0.006849 |
def encodeLength(value):
'''
Encodes value into a multibyte sequence defined by MQTT protocol.
Used to encode packet length fields.
'''
encoded = bytearray()
while True:
digit = value % 128
value //= 128
if value > 0:
digit |= 128
encoded.append(digit)
if value <= 0:
break
return encoded | 0.002632 |
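
A quick sanity check of the scheme above (MQTT's "remaining length" encoding: each byte carries 7 bits of the value, and the high bit marks a continuation byte):

    assert encodeLength(0) == bytearray([0x00])
    assert encodeLength(127) == bytearray([0x7F])
    assert encodeLength(128) == bytearray([0x80, 0x01])
    assert encodeLength(321) == bytearray([0xC1, 0x02])  # 321 = 65 + 2*128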
def T(self, T):
"""
Set the temperature of the package to the specified value, and
        recalculate its enthalpy.
:param T: Temperature. [°C]
"""
self._T = T
self._H = self._calculate_H(T) | 0.008264 |
def drawPolyline(self, points):
"""Draw several connected line segments.
"""
for i, p in enumerate(points):
if i == 0:
if not (self.lastPoint == Point(p)):
self.draw_cont += "%g %g m\n" % JM_TUPLE(Point(p) * self.ipctm)
self.lastPoint = Point(p)
else:
self.draw_cont += "%g %g l\n" % JM_TUPLE(Point(p) * self.ipctm)
self.updateRect(p)
self.lastPoint = Point(points[-1])
return self.lastPoint | 0.005556 |
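
The draw_cont/ipctm attributes suggest this is PyMuPDF's Shape.drawPolyline; a minimal sketch using the older camelCase API that matches the method name above (newer releases rename these calls to snake_case):

    import fitz  # PyMuPDF

    doc = fitz.open()
    page = doc.newPage()
    shape = page.newShape()
    shape.drawPolyline([(50, 50), (100, 80), (150, 50)])
    shape.finish(color=(0, 0, 1), width=1.5)
    shape.commit()
    doc.save("polyline.pdf")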
def set_client_certificate(self, certificate):
'''Sets client certificate for the request. '''
_certificate = BSTR(certificate)
_WinHttpRequest._SetClientCertificate(self, _certificate) | 0.009569 |
def exception_message():
"""Create a message with details on the exception."""
exc_type, exc_value, exc_tb = exc_info = sys.exc_info()
return {'exception': {'type': exc_type,
'value': exc_value,
'traceback': exc_tb},
'traceback': traceback.format_exception(*exc_info)} | 0.002907 |
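
Because the helper reads sys.exc_info(), it only returns useful data when called from inside an except block, e.g.:

    try:
        1 / 0
    except ZeroDivisionError:
        info = exception_message()
        # info['exception']['type'] is ZeroDivisionError;
        # info['traceback'] is a list of formatted traceback lines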
def to_polygon(self):
"""
Generate a polygon from the line string points.
Returns
-------
imgaug.augmentables.polys.Polygon
Polygon with the same corner points as the line string.
Note that the polygon might be invalid, e.g. contain less than 3
points or have self-intersections.
"""
from .polys import Polygon
return Polygon(self.coords, label=self.label) | 0.004367 |
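
A minimal sketch, assuming this is imgaug's LineString (suggested by the imgaug.augmentables references) and that the import path below is correct:

    from imgaug.augmentables.lines import LineString

    ls = LineString([(0, 0), (10, 0), (10, 10)], label="roof")
    poly = ls.to_polygon()
    # The polygon reuses the line string's coordinates and label; these
    # three non-collinear points form a valid triangle.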
def build(self):
        '''Builds Deployed Classifier
'''
if self._clf is None:
raise NeedToTrainExceptionBeforeDeployingException()
return DeployedClassifier(self._category,
self._term_doc_matrix._category_idx_store,
self._term_doc_matrix._term_idx_store,
self._term_doc_matrix_factory) | 0.034759 |
def get_all_security_groups(self, groupnames=None, group_ids=None,
filters=None):
"""
Get all security groups associated with your account in a region.
:type groupnames: list
:param groupnames: A list of the names of security groups to retrieve.
If not provided, all security groups will be
returned.
:type group_ids: list
:param group_ids: A list of IDs of security groups to retrieve for
security groups within a VPC.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.securitygroup.SecurityGroup`
"""
params = {}
if groupnames is not None:
self.build_list_params(params, groupnames, 'GroupName')
if group_ids is not None:
self.build_list_params(params, group_ids, 'GroupId')
if filters is not None:
self.build_filter_params(params, filters)
return self.get_list('DescribeSecurityGroups', params,
[('item', SecurityGroup)], verb='POST') | 0.001807 |
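
Typical caller-side usage with the legacy boto 2 EC2 connection (the region name and filter key below are illustrative):

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    for group in conn.get_all_security_groups(filters={'group-name': ['web-*']}):
        print(group.id, group.name, group.description)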
def _create_index_files(root_dir, force_no_processing=False):
"""
Crawl the root directory downwards, generating an index HTML file in each
directory on the way down.
@param {String} root_dir - The top level directory to crawl down from. In
normal usage, this will be '.'.
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process thumbnails, PIL images or anything. Simply index
<img> tags with original file src attributes.
@return {[String]} Full file paths of all created files.
"""
# Initialise list of created file paths to build up as we make them
created_files = []
# Walk the root dir downwards, creating index files as we go
for here, dirs, files in os.walk(root_dir):
print('Processing %s' % here)
# Sort the subdirectories by name
dirs = sorted(dirs)
# Get image files - all files in the directory matching IMAGE_FILE_REGEX
image_files = [f for f in files if re.match(IMAGE_FILE_REGEX, f)]
# Sort the image files by name
image_files = sorted(image_files)
# Create this directory's index file and add its name to the created
# files list
created_files.append(
_create_index_file(
root_dir, here, image_files, dirs, force_no_processing
)
)
# Return the list of created files
return created_files | 0.001389 |
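
Invoked from the gallery's top-level directory, a run might look like this (force_no_processing skips thumbnail generation, as described above):

    created = _create_index_files('.', force_no_processing=True)
    print('Created %d index files' % len(created))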
def dict_to_etree(source, root_tag=None):
""" Recursively load dict/list representation of an XML tree into an etree representation.
Args:
source -- A dictionary representing an XML document where identical children tags are
                 contained in a list.
Keyword args:
root_tag -- A parent tag in which to wrap the xml tree. If None, and the source dict
contains multiple root items, a list of etree's Elements will be returned.
Returns:
An ET.Element which is the root of an XML tree or a list of these.
>>> dict_to_etree({'foo': 'lorem'}) #doctest: +ELLIPSIS
<Element foo at 0x...>
>>> dict_to_etree({'foo': 'lorem', 'bar': 'ipsum'}) #doctest: +ELLIPSIS
[<Element foo at 0x...>, <Element bar at 0x...>]
>>> ET.tostring(dict_to_etree({'document': {'item1': 'foo', 'item2': 'bar'}}))
'<document><item2>bar</item2><item1>foo</item1></document>'
>>> ET.tostring(dict_to_etree({'foo': 'baz'}, root_tag='document'))
'<document><foo>baz</foo></document>'
>>> ET.tostring(dict_to_etree({'title': 'foo', 'list': [{'li':1}, {'li':2}]}, root_tag='document'))
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>'
"""
def dict_to_etree_recursive(source, parent):
if hasattr(source, 'keys'):
for key, value in source.iteritems():
sub = ET.SubElement(parent, key)
dict_to_etree_recursive(value, sub)
elif isinstance(source, list):
for element in source:
dict_to_etree_recursive(element, parent)
else: # TODO: Add feature to include xml literals as special objects or a etree subtree
parent.text = source
if root_tag is None:
if len(source) == 1:
root_tag = source.keys()[0]
source = source[root_tag]
else:
roots = []
for tag, content in source.iteritems():
root = ET.Element(tag)
dict_to_etree_recursive(content, root)
roots.append(root)
return roots
root = ET.Element(root_tag)
dict_to_etree_recursive(source, root)
return root | 0.003571 |
def get_balance():
"""
Get the latest balance(s) for a single User.
Currently no search parameters are supported. All balances returned.
---
responses:
'200':
description: the User's balance(s)
schema:
items:
$ref: '#/definitions/Balance'
type: array
default:
description: unexpected error
schema:
$ref: '#/definitions/errorModel'
security:
- kid: []
- typ: []
- alg: []
operationId: getBalance
"""
balsq = ses.query(wm.Balance).filter(wm.Balance.user_id == current_user.id)
if not balsq:
return None
bals = [json.loads(jsonify2(b, 'Balance')) for b in balsq]
print "returning bals %s" % bals
response = current_app.bitjws.create_response(bals)
ses.close()
return response | 0.001185 |
def disasm_app(_parser, cmd, args): # pragma: no cover
"""
Disassemble code from commandline or stdin.
"""
parser = argparse.ArgumentParser(
prog=_parser.prog,
description=_parser.description,
)
parser.add_argument('code', help='the code to disassemble, read from stdin if omitted', nargs='?')
pwnypack.main.add_target_arguments(parser)
parser.add_argument(
'--syntax', '-s',
choices=AsmSyntax.__members__.keys(),
default=None,
)
parser.add_argument(
'--address', '-o',
type=lambda v: int(v, 0),
default=0,
help='the address of the disassembled code',
)
parser.add_argument(
'--format', '-f',
choices=['hex', 'bin'],
help='the input format (defaults to hex for commandline, bin for stdin)',
)
args = parser.parse_args(args)
target = pwnypack.main.target_from_arguments(args)
if args.syntax is not None:
syntax = AsmSyntax.__members__[args.syntax]
else:
syntax = None
if args.format is None:
if args.code is None:
args.format = 'bin'
else:
args.format = 'hex'
if args.format == 'hex':
code = pwnypack.codec.dehex(pwnypack.main.string_value_or_stdin(args.code))
else:
code = pwnypack.main.binary_value_or_stdin(args.code)
print('\n'.join(disasm(code, args.address, syntax=syntax, target=target))) | 0.002755 |
def clean(self):
""" Cleans the data and throws ValidationError on failure """
errors = {}
cleaned = {}
for name, validator in self.validate_schema.items():
val = getattr(self, name, None)
try:
cleaned[name] = validator.to_python(val)
            except formencode.api.Invalid as err:
errors[name] = err
if errors:
raise ValidationError('Invalid data', errors)
return cleaned | 0.004073 |
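
A hypothetical validate_schema for a class mixing in the method above (the class and field names are invented; Int and Email come from formencode.validators):

    import formencode.validators as v

    class SignupForm(ValidatedMixin):  # ValidatedMixin stands in for the class above
        validate_schema = {
            'age': v.Int(not_empty=True, min=0),
            'email': v.Email(not_empty=True),
        }

    form = SignupForm()
    form.age, form.email = '42', 'user@example.com'
    print(form.clean())  # {'age': 42, 'email': 'user@example.com'}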
def enhance(self):
""" Function enhance
        Enhance the object with new or enhanced items.
"""
self.update({'os_default_templates':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemOsDefaultTemplate)})
self.update({'operatingsystems':
SubDict(self.api, self.objName,
self.payloadObj, self.key,
SubItemOperatingSystem)}) | 0.003745 |
def VAR_DECL(self, cursor):
"""Handles Variable declaration."""
# get the name
name = self.get_unique_name(cursor)
log.debug('VAR_DECL: name: %s', name)
# Check for a previous declaration in the register
if self.is_registered(name):
return self.get_registered(name)
# get the typedesc object
_type = self._VAR_DECL_type(cursor)
# transform the ctypes values into ctypeslib
init_value = self._VAR_DECL_value(cursor, _type)
# finished
log.debug('VAR_DECL: _type:%s', _type.name)
log.debug('VAR_DECL: _init:%s', init_value)
log.debug('VAR_DECL: location:%s', getattr(cursor, 'location'))
obj = self.register(name, typedesc.Variable(name, _type, init_value))
self.set_location(obj, cursor)
self.set_comment(obj, cursor)
return True | 0.002265 |