text (stringlengths 78-104k) | score (float64 0-0.18) |
---|---|
def _verify_encoding(self, enc):
"""Verify encoding is okay."""
enc = PYTHON_ENCODING_NAMES.get(enc, enc)
try:
codecs.getencoder(enc)
encoding = enc
except LookupError:
encoding = None
return encoding | 0.00722 |
def _transform_should_cast(self, func_nm):
"""
Parameters:
-----------
func_nm: str
The name of the aggregation function being performed
Returns:
--------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillna(0) > 0).any() and (
func_nm not in base.cython_cast_blacklist) | 0.004662 |
def render_layout_form(form, layout=None, **kwargs):
"""Render an entire form with Semantic UI wrappers for each field with
a layout provided in the template or in the form class
Args:
form (form): Django Form
layout (tuple): layout design
kwargs (dict): other attributes will be passed to fields
Returns:
string: HTML of Django Form fields with Semantic UI wrappers
"""
def make_component(type_, *args):
"""Loop through tuples to make field wrappers for fields.
"""
if type_ == "Text":
return "".join(args)
elif type_ == "Field":
result = ""
for c in args:
if isinstance(c, tuple):
result += make_component(*c)
elif isinstance(c, str):
result += render_field(form.__getitem__(c), **kwargs)
return result
else:
if len(args) < 2:
return ""
result = "".join([make_component(*c) for c in args])
if type_:
return "<div class=\"%s\">%s</div>" % (type_.lower(), result)
else:
return result
return mark_safe("".join([make_component(*component) for component in layout])) | 0.034991 |
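A hypothetical layout for the renderer above, assuming a form with username, email and phone fields; this only sketches the nested (type, *children) structure that make_component walks and is not part of the original snippet.
# Hypothetical layout -- the field names and CSS classes are assumptions.
layout = (
    ("Text", "<h4 class=\"ui dividing header\">Account</h4>"),  # raw HTML, joined as-is
    ("Field", "username"),                                      # single field wrapper
    ("Two Fields",                                              # becomes <div class="two fields">...</div>
        ("Field", "email"),
        ("Field", "phone")),
)
# html = render_layout_form(form, layout=layout)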
def update(self, forecasts, observations):
"""
Update the ROC curve with a set of forecasts and observations
Args:
forecasts: 1D array of forecast values
observations: 1D array of observation values.
"""
for t, threshold in enumerate(self.thresholds):
tp = np.count_nonzero((forecasts >= threshold) & (observations >= self.obs_threshold))
fp = np.count_nonzero((forecasts >= threshold) &
(observations < self.obs_threshold))
fn = np.count_nonzero((forecasts < threshold) &
(observations >= self.obs_threshold))
tn = np.count_nonzero((forecasts < threshold) &
(observations < self.obs_threshold))
self.contingency_tables.iloc[t] += [tp, fp, fn, tn] | 0.003432 |
def y0(x, context=None):
"""
Return the value of the Bessel function of the second kind of order 0 at x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_y0,
(BigFloat._implicit_convert(x),),
context,
) | 0.003759 |
def parse_ctcp(query):
""" Strip and de-quote CTCP messages. """
query = query.strip(CTCP_DELIMITER)
query = query.replace(CTCP_ESCAPE_CHAR + '0', '\0')
query = query.replace(CTCP_ESCAPE_CHAR + 'n', '\n')
query = query.replace(CTCP_ESCAPE_CHAR + 'r', '\r')
query = query.replace(CTCP_ESCAPE_CHAR + CTCP_ESCAPE_CHAR, CTCP_ESCAPE_CHAR)
if ' ' in query:
return query.split(' ', 1)
return query, None | 0.004587 |
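A short usage sketch for parse_ctcp; the framing constants are defined elsewhere in the original module, so the values below are assumptions used purely for illustration.
# Assumed constants for illustration only -- not taken from the original module.
CTCP_DELIMITER = '\x01'
CTCP_ESCAPE_CHAR = '\x10'

parse_ctcp('\x01ACTION waves\x01')   # -> ['ACTION', 'waves']  (command plus argument string)
parse_ctcp('\x01VERSION\x01')        # -> ('VERSION', None)    (no argument present)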
def remove_absolute_impute__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (impute)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9
"""
return __run_measure(measures.remove_impute, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score) | 0.00813 |
def merge_equal_neighbors(self):
""" Merge neighbors with same speaker. """
IDX_LENGTH = 3
merged = self.segs.copy()
current_start = 0
j = 0
seg = self.segs.iloc[0]
for i in range(1, self.num_segments):
seg = self.segs.iloc[i]
last = self.segs.iloc[i - 1]
if seg.speaker == last.speaker:
merged.iat[j, IDX_LENGTH] = seg.start + seg.length - current_start
else:
j += 1
merged.iloc[j] = seg
current_start = seg.start
merged = merged.iloc[:(j+1)]
merged.sort_values('start', inplace = True)
return self.update_segs(merged) | 0.007013 |
def add_tag(self, tag):
"""
Adds a tag to the list of tags and ensures the resulting list contains only unique tags.
"""
self.tags = list(set(self.tags or []) | set([tag])) | 0.014218 |
def storage_uri_for_key(key):
"""Returns a StorageUri for the given key.
:type key: :class:`boto.s3.key.Key` or subclass
:param key: URI naming bucket + optional object.
"""
if not isinstance(key, boto.s3.key.Key):
raise InvalidUriError('Requested key (%s) is not a subclass of '
'boto.s3.key.Key' % str(type(key)))
prov_name = key.bucket.connection.provider.get_provider_name()
uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name)
return storage_uri(uri_str) | 0.001848 |
def get_docstring(node):
"""
Return the docstring for the given node or `None` if no docstring can be
found. If the node provided does not accept docstrings a `TypeError`
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Str):
return node.body[0].s | 0.002309 |
def reshuffle(expr, by=None, sort=None, ascending=True):
"""
Reshuffle data.
:param expr:
:param by: the sequence or scalar to shuffle by. RandomScalar as default
:param sort: the sequence or scalar to sort.
:param ascending: True if ascending else False
:return: collection
"""
by = by or RandomScalar()
grouped = expr.groupby(by)
if sort:
grouped = grouped.sort_values(sort, ascending=ascending)
return ReshuffledCollectionExpr(_input=grouped, _schema=expr._schema) | 0.001898 |
def set_title(self, title):
"""
Set title. You can set multiple titles.
:Args:
- title: Title value
"""
self.title = title
self.add_metadata('DC', 'title', self.title) | 0.008772 |
def get_idx(self, arr:Collection, is_item:bool=True):
"Fetch item or user (based on `is_item`) for all in `arr`. (Set model to `cpu` and no grad.)"
m = self.model.eval().cpu()
requires_grad(m,False)
u_class,i_class = self.data.train_ds.x.classes.values()
classes = i_class if is_item else u_class
c2i = {v:k for k,v in enumerate(classes)}
try: return tensor([c2i[o] for o in arr])
except Exception as e:
print(f"""You're trying to access {'an item' if is_item else 'a user'} that isn't in the training data.
If it was in your original data, it may have been split such that it's only in the validation set now.""") | 0.021157 |
def compute_qpi(self):
"""Compute model data with current parameters
Returns
-------
qpi: qpimage.QPImage
Modeled phase data
Notes
-----
The model image might deviate from the fitted image
because of interpolation during the fitting process.
"""
kwargs = self.model_kwargs.copy()
kwargs["radius"] = self.radius
kwargs["sphere_index"] = self.sphere_index
kwargs["center"] = [self.posx_offset, self.posy_offset]
qpi = self.sphere_method(**kwargs)
# apply phase offset
bg_data = np.ones(qpi.shape) * -self.pha_offset
qpi.set_bg_data(bg_data=bg_data, which_data="phase")
return qpi | 0.002721 |
def to_svg(self, instruction_or_id,
i_promise_not_to_change_the_result=False):
"""Return the SVG for an instruction.
:param instruction_or_id: either an
:class:`~knittingpattern.Instruction.Instruction` or an id
returned by :meth:`get_instruction_id`
:param bool i_promise_not_to_change_the_result:
- :obj:`False`: the result is copied, you can alter it.
- :obj:`True`: the result is directly from the cache. If you change
the result, other calls of this function get the changed result.
:return: an SVGDumper
:rtype: knittingpattern.Dumper.SVGDumper
"""
return self._new_svg_dumper(lambda: self.instruction_to_svg_dict(
instruction_or_id, not i_promise_not_to_change_the_result)) | 0.003667 |
def alias_log_entry(self, log_entry_id, alias_id):
"""Adds an ``Id`` to a ``LogEntry`` for the purpose of creating compatibility.
The primary ``Id`` of the ``LogEntry`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another log entry, it is
reassigned to the given log entry ``Id``.
arg: log_entry_id (osid.id.Id): the ``Id`` of a ``LogEntry``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``log_entry_id`` not found
raise: NullArgument - ``log_entry_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.alias_resources_template
self._alias_id(primary_id=log_entry_id, equivalent_id=alias_id) | 0.002688 |
def cursor(lcd, row, col):
"""
Context manager to control cursor position. DEPRECATED.
"""
warnings.warn('The `cursor` context manager is deprecated', DeprecationWarning)
lcd.cursor_pos = (row, col)
yield | 0.008772 |
def stop_sequence(self):
"""Return the sorted StopTimes for this trip."""
return sorted(
self.stop_times(),
key=lambda x:int(x.get('stop_sequence'))
) | 0.011494 |
def covfilter(args):
"""
%prog covfilter blastfile fastafile
Fastafile is used to get the sizes of the queries. Two filters can be
applied, the id% and cov%.
"""
from jcvi.algorithms.supermap import supermap
from jcvi.utils.range import range_union
allowed_iterby = ("query", "query_sbjct")
p = OptionParser(covfilter.__doc__)
p.set_align(pctid=95, pctcov=50)
p.add_option("--scov", default=False, action="store_true",
help="Subject coverage instead of query [default: %default]")
p.add_option("--supermap", action="store_true",
help="Use supermap instead of union")
p.add_option("--ids", dest="ids", default=None,
help="Print out the ids that satisfy [default: %default]")
p.add_option("--list", dest="list", default=False, action="store_true",
help="List the id% and cov% per gene [default: %default]")
p.add_option("--iterby", dest="iterby", default="query", choices=allowed_iterby,
help="Choose how to iterate through BLAST [default: %default]")
p.set_outfile(outfile=None)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
blastfile, fastafile = args
pctid = opts.pctid
pctcov = opts.pctcov
union = not opts.supermap
scov = opts.scov
sz = Sizes(fastafile)
sizes = sz.mapping
iterby = opts.iterby
qspair = iterby == "query_sbjct"
if not union:
querysupermap = blastfile + ".query.supermap"
if not op.exists(querysupermap):
supermap(blastfile, filter="query")
blastfile = querysupermap
assert op.exists(blastfile)
covered = 0
mismatches = 0
gaps = 0
alignlen = 0
queries = set()
valid = set()
blast = BlastSlow(blastfile)
iterator = blast.iter_hits_pair if qspair else blast.iter_hits
covidstore = {}
for query, blines in iterator():
blines = list(blines)
queries.add(query)
# per gene report
this_covered = 0
this_alignlen = 0
this_mismatches = 0
this_gaps = 0
this_identity = 0
ranges = []
for b in blines:
if scov:
s, start, stop = b.subject, b.sstart, b.sstop
else:
s, start, stop = b.query, b.qstart, b.qstop
cov_id = s
if b.pctid < pctid:
continue
if start > stop:
start, stop = stop, start
this_covered += stop - start + 1
this_alignlen += b.hitlen
this_mismatches += b.nmismatch
this_gaps += b.ngaps
ranges.append(("1", start, stop))
if ranges:
this_identity = 100. - (this_mismatches + this_gaps) * 100. / this_alignlen
if union:
this_covered = range_union(ranges)
this_coverage = this_covered * 100. / sizes[cov_id]
covidstore[query] = (this_identity, this_coverage)
if this_identity >= pctid and this_coverage >= pctcov:
valid.add(query)
covered += this_covered
mismatches += this_mismatches
gaps += this_gaps
alignlen += this_alignlen
if opts.list:
if qspair:
allpairs = defaultdict(list)
for (q, s) in covidstore:
allpairs[q].append((q, s))
allpairs[s].append((q, s))
for id, size in sz.iter_sizes():
if id not in allpairs:
print("\t".join((id, "na", "0", "0")))
else:
for qs in allpairs[id]:
this_identity, this_coverage = covidstore[qs]
print("{0}\t{1:.1f}\t{2:.1f}".format("\t".join(qs), this_identity, this_coverage))
else:
for query, size in sz.iter_sizes():
this_identity, this_coverage = covidstore.get(query, (0, 0))
print("{0}\t{1:.1f}\t{2:.1f}".format(query, this_identity, this_coverage))
mapped_count = len(queries)
valid_count = len(valid)
cutoff_message = "(id={0.pctid}% cov={0.pctcov}%)".format(opts)
m = "Identity: {0} mismatches, {1} gaps, {2} alignlen\n".\
format(mismatches, gaps, alignlen)
total = len(sizes.keys())
m += "Total mapped: {0} ({1:.1f}% of {2})\n".\
format(mapped_count, mapped_count * 100. / total, total)
m += "Total valid {0}: {1} ({2:.1f}% of {3})\n".\
format(cutoff_message, valid_count, valid_count * 100. / total, total)
m += "Average id = {0:.2f}%\n".\
format(100 - (mismatches + gaps) * 100. / alignlen)
queries_combined = sz.totalsize
m += "Coverage: {0} covered, {1} total\n".\
format(covered, queries_combined)
m += "Average coverage = {0:.2f}%".\
format(covered * 100. / queries_combined)
logfile = blastfile + ".covfilter.log"
fw = open(logfile, "w")
for f in (sys.stderr, fw):
print(m, file=f)
fw.close()
if opts.ids:
filename = opts.ids
fw = must_open(filename, "w")
for id in valid:
print(id, file=fw)
logging.debug("Queries beyond cutoffs {0} written to `{1}`.".\
format(cutoff_message, filename))
outfile = opts.outfile
if not outfile:
return
fw = must_open(outfile, "w")
blast = Blast(blastfile)
for b in blast:
query = (b.query, b.subject) if qspair else b.query
if query in valid:
print(b, file=fw) | 0.00341 |
def post(self, request):
"""
POST /consent/api/v1/data_sharing_consent
Requires a JSON object of the following format:
>>> {
>>> "username": "bob",
>>> "course_id": "course-v1:edX+DemoX+Demo_Course",
>>> "enterprise_customer_uuid": "enterprise-uuid-goes-right-here"
>>> }
Keys:
*username*
The edX username from whom to get consent.
*course_id*
The course for which consent is granted.
*enterprise_customer_uuid*
The UUID of the enterprise customer that requires consent.
"""
try:
consent_record = self.get_consent_record(request)
if consent_record is None:
return self.get_no_record_response(request)
if consent_record.consent_required():
# If and only if the given EnterpriseCustomer requires data sharing consent
# for the given course, then, since we've received a POST request, set the
# consent state for the EC/user/course combo.
consent_record.granted = True
# Models don't have return values when saving, but ProxyDataSharingConsent
# objects do - they should return either a model instance, or another instance
# of ProxyDataSharingConsent if representing a multi-course consent record.
consent_record = consent_record.save() or consent_record
except ConsentAPIRequestError as invalid_request:
return Response({'error': str(invalid_request)}, status=HTTP_400_BAD_REQUEST)
return Response(consent_record.serialize()) | 0.004714 |
def update_free_shipping_by_id(cls, free_shipping_id, free_shipping, **kwargs):
"""Update FreeShipping
Update attributes of FreeShipping
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_free_shipping_by_id(free_shipping_id, free_shipping, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_id: ID of freeShipping to update. (required)
:param FreeShipping free_shipping: Attributes of freeShipping to update. (required)
:return: FreeShipping
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs)
else:
(data) = cls._update_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs)
return data | 0.005469 |
def _alter_code(code, **attrs):
"""Create a new code object by altering some of ``code`` attributes
Args:
code: code object
attrs: a mapping of names of code object attrs to their values
"""
PyCode_New = ctypes.pythonapi.PyCode_New
PyCode_New.argtypes = (
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.py_object,
ctypes.c_int,
ctypes.py_object)
PyCode_New.restype = ctypes.py_object
args = [
[code.co_argcount, 'co_argcount'],
[code.co_kwonlyargcount, 'co_kwonlyargcount'],
[code.co_nlocals, 'co_nlocals'],
[code.co_stacksize, 'co_stacksize'],
[code.co_flags, 'co_flags'],
[code.co_code, 'co_code'],
[code.co_consts, 'co_consts'],
[code.co_names, 'co_names'],
[code.co_varnames, 'co_varnames'],
[code.co_freevars, 'co_freevars'],
[code.co_cellvars, 'co_cellvars'],
[code.co_filename, 'co_filename'],
[code.co_name, 'co_name'],
[code.co_firstlineno, 'co_firstlineno'],
[code.co_lnotab, 'co_lnotab']]
for arg in args:
if arg[1] in attrs:
arg[0] = attrs[arg[1]]
return PyCode_New(
args[0][0], # code.co_argcount,
args[1][0], # code.co_kwonlyargcount,
args[2][0], # code.co_nlocals,
args[3][0], # code.co_stacksize,
args[4][0], # code.co_flags,
args[5][0], # code.co_code,
args[6][0], # code.co_consts,
args[7][0], # code.co_names,
args[8][0], # code.co_varnames,
args[9][0], # code.co_freevars,
args[10][0], # code.co_cellvars,
args[11][0], # code.co_filename,
args[12][0], # code.co_name,
args[13][0], # code.co_firstlineno,
args[14][0]) | 0.000489 |
def read_qemu_img_stdout(self):
"""
Reads the standard output of the QEMU-IMG process.
"""
output = ""
if self._qemu_img_stdout_file:
try:
with open(self._qemu_img_stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warning("Could not read {}: {}".format(self._qemu_img_stdout_file, e))
return output | 0.006237 |
def symmetrized_csiszar_function(logu, csiszar_function, name=None):
"""Symmetrizes a Csiszar-function in log-space.
A Csiszar-function is a member of,
```none
F = { f:R_+ to R : f convex }.
```
The symmetrized Csiszar-function is defined as:
```none
f_g(u) = 0.5 g(u) + 0.5 u g (1 / u)
```
where `g` is some other Csiszar-function.
We say the function is "symmetrized" because:
```none
D_{f_g}[p, q] = D_{f_g}[q, p]
```
for all `p << >> q` (i.e., `support(p) = support(q)`).
There exist alternatives for symmetrizing a Csiszar-function. For example,
```none
f_g(u) = max(f(u), f^*(u)),
```
where `f^*` is the dual Csiszar-function, also implies a symmetric
f-Divergence.
Example:
When either of the following functions are symmetrized, we obtain the
Jensen-Shannon Csiszar-function, i.e.,
```none
g(u) = -log(u) - (1 + u) log((1 + u) / 2) + u - 1
h(u) = log(4) + 2 u log(u / (1 + u))
```
implies,
```none
f_g(u) = f_h(u) = u log(u) - (1 + u) log((1 + u) / 2)
= jensen_shannon(log(u)).
```
Warning: this function makes non-log-space calculations and may therefore be
numerically unstable for `|logu| >> 0`.
Args:
logu: `float`-like `Tensor` representing `log(u)` from above.
csiszar_function: Python `callable` representing a Csiszar-function over
log-domain.
name: Python `str` name prefixed to Ops created by this function.
Returns:
symmetrized_g_of_u: `float`-like `Tensor` of the result of applying the
symmetrization of `g` evaluated at `u = exp(logu)`.
"""
with tf.compat.v1.name_scope(name, "symmetrized_csiszar_function", [logu]):
logu = tf.convert_to_tensor(value=logu, name="logu")
return 0.5 * (csiszar_function(logu)
+ dual_csiszar_function(logu, csiszar_function)) | 0.001633 |
def get_unit_property(self, unit_id, property_name):
'''This function returns the data stored under the property name given
from the given unit.
Parameters
----------
unit_id: int
The unit id for which the property will be returned
property_name: str
The name of the property
Returns
----------
value
The data associated with the given property name. Could be many
formats as specified by the user.
'''
if isinstance(unit_id, (int, np.integer)):
if unit_id in self.get_unit_ids():
if unit_id not in self._unit_properties:
self._unit_properties[unit_id] = {}
if isinstance(property_name, str):
if property_name in list(self._unit_properties[unit_id].keys()):
return self._unit_properties[unit_id][property_name]
else:
raise ValueError(str(property_name) + " has not been added to unit " + str(unit_id))
else:
raise ValueError(str(property_name) + " must be a string")
else:
raise ValueError(str(unit_id) + " is not a valid unit_id")
else:
raise ValueError(str(unit_id) + " must be an int") | 0.002937 |
def setChecked(src, ids=[], dpth = 0, key = ''):
""" Recursively find checked item."""
#tabs = lambda n: ' ' * n * 4 # or 2 or 8 or...
#brace = lambda s, n: '%s%s%s' % ('['*n, s, ']'*n)
if isinstance(src, dict):
for key, value in src.iteritems():
setChecked(value, ids, dpth + 1, key)
elif isinstance(src, list):
for litem in src:
if isinstance(litem, types.DictType):
if "id" in litem and litem["id"] in ids:
litem["checked"] = True
litem["select"] = True
setChecked(litem, ids, dpth + 2) | 0.023217 |
def SetDefaultValue(self, scan_object):
"""Sets the default (non-match) value.
Args:
scan_object: a scan object, either a scan tree sub node (instance of
PathFilterScanTreeNode) or a string containing a path.
Raises:
TypeError: if the scan object is of an unsupported type.
ValueError: if the default value is already set.
"""
if (not isinstance(scan_object, PathFilterScanTreeNode) and
not isinstance(scan_object, py2to3.STRING_TYPES)):
raise TypeError('Unsupported scan object type.')
if self.default_value:
raise ValueError('Default value already set.')
self.default_value = scan_object | 0.0059 |
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines | 0.001179 |
def toseries(self, chunk_size='auto'):
"""
Converts to series data.
This method is equivalent to images.toblocks(size).toSeries().
Parameters
----------
chunk_size : str or tuple, size of image chunk used during conversion, default = 'auto'
String interpreted as memory size (in kilobytes, e.g. '64').
The exception is the string 'auto', which will choose a chunk size to make the
resulting blocks ~100 MB in size. Tuple of ints interpreted as 'pixels per dimension'.
Only valid in spark mode.
"""
from thunder.series.series import Series
if chunk_size == 'auto':
chunk_size = str(max([int(1e5/self.shape[0]), 1]))
n = len(self.shape) - 1
index = arange(self.shape[0])
if self.mode == 'spark':
return Series(self.values.swap((0,), tuple(range(n)), size=chunk_size), index=index)
if self.mode == 'local':
return Series(self.values.transpose(tuple(range(1, n+1)) + (0,)), index=index) | 0.006506 |
def add(config, username, filename):
"""Add user's SSH public key to their LDAP entry."""
try:
client = Client()
client.prepare_connection()
user_api = UserApi(client)
key_api = API(client)
key_api.add(username, user_api, filename)
except (ldap3.core.exceptions.LDAPNoSuchAttributeResult,
ldap_tools.exceptions.InvalidResult,
ldap3.core.exceptions.LDAPAttributeOrValueExistsResult
) as err: # pragma: no cover
print('{}: {}'.format(type(err), err.args[0]))
except Exception as err: # pragma: no cover
raise err from None | 0.002915 |
def _compute_diff(configured, expected):
'''Computes the differences between the actual config and the expected config'''
diff = {
'add': {},
'update': {},
'remove': {}
}
configured_users = set(configured.keys())
expected_users = set(expected.keys())
add_usernames = expected_users - configured_users
remove_usernames = configured_users - expected_users
common_usernames = expected_users & configured_users
add = dict((username, expected.get(username)) for username in add_usernames)
remove = dict((username, configured.get(username)) for username in remove_usernames)
update = {}
for username in common_usernames:
user_configuration = configured.get(username)
user_expected = expected.get(username)
if user_configuration == user_expected:
continue
update[username] = {}
for field, field_value in six.iteritems(user_expected):
if user_configuration.get(field) != field_value:
update[username][field] = field_value
diff.update({
'add': add,
'update': update,
'remove': remove
})
return diff | 0.003376 |
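A minimal, hypothetical call showing how the three buckets get populated; the usernames and fields are made up for illustration.
# Hypothetical inputs -- the function only compares dict keys and values.
configured = {
    'alice': {'level': 15, 'sshkeys': []},
    'bob':   {'level': 5,  'sshkeys': []},
}
expected = {
    'alice': {'level': 14, 'sshkeys': []},   # changed field  -> 'update'
    'carol': {'level': 1,  'sshkeys': []},   # new user       -> 'add'
}                                            # 'bob' missing  -> 'remove'
diff = _compute_diff(configured, expected)
# diff == {'add': {'carol': {'level': 1, 'sshkeys': []}},
#          'update': {'alice': {'level': 14}},
#          'remove': {'bob': {'level': 5, 'sshkeys': []}}}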
def get_file_to_bytes(self, share_name, directory_name, file_name,
start_range=None, end_range=None, validate_content=False,
progress_callback=None, max_connections=2, timeout=None):
'''
Downloads a file as an array of bytes, with automatic chunking and
progress notifications. Returns an instance of :class:`File` with
properties, metadata, and content.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If set to true, validates an MD5 hash for each retrieved portion of
the file. This is primarily valuable for detecting bitflips on the wire
if using http instead of https as https (the default) will already
validate. Note that the service will only return transactional MD5s
for chunks 4MB or less so the first get request will be of size
self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be
thrown. As computing the MD5 takes processing time and more requests
will need to be done due to the reduced chunk size there may be some
increase in latency.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transferred so far, and total is
the size of the file if known.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
If set to 2 or greater, an initial get will be done for the first
self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file,
the method returns at this point. If it is not, it will download the
remaining data parallel using the number of threads equal to
max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
If set to 1, a single large get request will be done. This is not
generally recommended but available if very few threads should be
used, network requests are very expensive, or a non-seekable stream
prevents parallel download. This may also be valuable if the file is
being concurrently modified to enforce atomicity or if many files are
expected to be empty as an extra request is required for empty files
if max_connections is greater than 1.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A File with properties, content, and metadata.
:rtype: :class:`~azure.storage.file.models.File`
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
stream = BytesIO()
file = self.get_file_to_stream(
share_name,
directory_name,
file_name,
stream,
start_range,
end_range,
validate_content,
progress_callback,
max_connections,
timeout)
file.content = stream.getvalue()
return file | 0.010593 |
def find_duplicates(items, k=2, key=None):
"""
Find all duplicate items in a list.
Search for all items that appear more than `k` times and return a mapping
from each (k)-duplicate item to the positions it appeared in.
Args:
items (Iterable): hashable items possibly containing duplicates
k (int): only return items that appear at least `k` times (default=2)
key (Callable, optional): Returns indices where `key(items[i])`
maps to a particular value at least k times.
Returns:
dict: maps each duplicate item to the indices at which it appears
CommandLine:
python -m ubelt.util_dict find_duplicates
Example:
>>> import ubelt as ub
>>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
>>> duplicates = ub.find_duplicates(items)
>>> print('items = %r' % (items,))
>>> print('duplicates = %r' % (duplicates,))
>>> assert duplicates == {0: [0, 1, 6], 2: [3, 8], 3: [4, 5]}
>>> assert ub.find_duplicates(items, 3) == {0: [0, 1, 6]}
Example:
>>> import ubelt as ub
>>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]
>>> # note: k can be 0
>>> duplicates = ub.find_duplicates(items, k=0)
>>> print(ub.repr2(duplicates, nl=0))
{0: [0, 1, 6], 1: [2], 2: [3, 8], 3: [4, 5], 9: [9], 12: [7]}
Example:
>>> import ubelt as ub
>>> items = [10, 11, 12, 13, 14, 15, 16]
>>> duplicates = ub.find_duplicates(items, key=lambda x: x // 2)
>>> print(ub.repr2(duplicates, nl=0))
{5: [0, 1], 6: [2, 3], 7: [4, 5]}
"""
# Build mapping from items to the indices at which they appear
# if key is not None:
# items = map(key, items)
duplicates = defaultdict(list)
if key is None:
for count, item in enumerate(items):
duplicates[item].append(count)
else:
for count, item in enumerate(items):
duplicates[key(item)].append(count)
# remove items seen fewer than k times.
for key in list(duplicates.keys()):
if len(duplicates[key]) < k:
del duplicates[key]
duplicates = dict(duplicates)
return duplicates | 0.000453 |
def fits2radec(fitsfn: Path, solve: bool=False, args: str=None) -> xarray.Dataset:
fitsfn = Path(fitsfn).expanduser()
if fitsfn.suffix == '.fits':
WCSfn = fitsfn.with_suffix('.wcs') # using .wcs will also work but gives a spurious warning
elif fitsfn.suffix == '.wcs':
WCSfn = fitsfn
else:
raise ValueError(f'please convert {fitsfn} to GRAYSCALE .fits e.g. with ImageJ or ImageMagick')
if solve:
doSolve(fitsfn, args)
with fits.open(fitsfn, mode='readonly') as f:
yPix, xPix = f[0].shape[-2:]
x, y = meshgrid(range(xPix), range(yPix)) # pixel indices to find RA/dec of
xy = column_stack((x.ravel(order='C'), y.ravel(order='C')))
# %% use astropy.wcs to register pixels to RA/DEC
"""
http://docs.astropy.org/en/stable/api/astropy.wcs.WCS.html#astropy.wcs.WCS
naxis=[0,1] is to take x,y axes in case a color photo was input e.g. to astrometry.net cloud solver
"""
try:
with fits.open(WCSfn, mode='readonly') as f:
# radec = wcs.WCS(hdul[0].header,naxis=[0,1]).all_pix2world(xy, 0)
radec = wcs.WCS(f[0].header).all_pix2world(xy, 0)
except OSError:
raise OSError(f'It appears the WCS solution is not present, was the FITS image solved? looking for: {WCSfn}')
ra = radec[:, 0].reshape((yPix, xPix), order='C')
dec = radec[:, 1].reshape((yPix, xPix), order='C')
# %% collect output
radec = xarray.Dataset({'ra': (('y', 'x'), ra),
'dec': (('y', 'x'), dec), },
{'x': range(xPix), 'y': range(yPix)},
attrs={'filename': fitsfn})
return radec | 0.006567 |
def make_tokens_list(dir_, filters):
"""Find sources.json in <dir_>. It contains a list of tokenized texts. For
each tokenized text listed in sources.json, read its tokens, filter them,
and add them to an aggregated list. Write the aggregated list to disk using
a filename based on the <filters> given.
"""
with open(tokens_dir + dir_ + '/sources.json', 'r') as injson:
data = json.load(injson)
sources = [corpora_dir + fname for fname in data]
with open('data/skipwords.txt', 'r') as f:
skipwords = [line.rstrip() for line in f]
tokens_list = []
for fname in sources:
print("Incorporating tokens from " + fname)
with open(fname, 'r') as injson:
data = json.load(injson)
words = [w.lower() for w in data if not w == '']
filtered = [w for w,p in nltk.pos_tag(words) if p in filters]
sanitized = [w for w in filtered if not w in skipwords]
tokens_list += sanitized
tokens_list = list(set(tokens_list)) # unique
target = tokens_dir + dir_ + '/' + '-'.join(filters) + '.json'
with open(target, 'w') as outjson:
json.dump(tokens_list, outjson) | 0.003342 |
def convert_tree(message, config, indent=0, wrap_alternative=True, charset=None):
"""Recursively convert a potentially-multipart tree.
Returns a tuple of (the converted tree, whether any markdown was found)
"""
ct = message.get_content_type()
cs = message.get_content_subtype()
if charset is None:
charset = get_charset_from_message_fragment(message)
if not message.is_multipart():
# we're on a leaf
converted = None
disposition = message.get('Content-Disposition', 'inline')
if disposition == 'inline' and ct in ('text/plain', 'text/markdown'):
converted = convert_one(message, config, charset)
if converted is not None:
if wrap_alternative:
new_tree = MIMEMultipart('alternative')
_move_headers(message, new_tree)
new_tree.attach(message)
new_tree.attach(converted)
return new_tree, True
else:
return converted, True
return message, False
else:
if ct == 'multipart/signed':
# if this is a multipart/signed message, then let's just
# recurse into the non-signature part
new_root = MIMEMultipart('alternative')
if message.preamble:
new_root.preamble = message.preamble
_move_headers(message, new_root)
converted = None
for part in message.get_payload():
if part.get_content_type() != 'application/pgp-signature':
converted, did_conversion = convert_tree(part, config, indent=indent + 1,
wrap_alternative=False,
charset=charset)
if did_conversion:
new_root.attach(converted)
new_root.attach(message)
return new_root, did_conversion
else:
did_conversion = False
new_root = MIMEMultipart(cs, message.get_charset())
if message.preamble:
new_root.preamble = message.preamble
_move_headers(message, new_root)
for part in message.get_payload():
part, did_this_conversion = convert_tree(part, config, indent=indent + 1, charset=charset)
did_conversion |= did_this_conversion
new_root.attach(part)
return new_root, did_conversion | 0.001985 |
def sealedbox_encrypt(data, **kwargs):
'''
Encrypt data using a public key generated from `nacl.keygen`.
The encrypted data can be decrypted using `nacl.sealedbox_decrypt` only with the secret key.
CLI Examples:
.. code-block:: bash
salt-run nacl.sealedbox_encrypt datatoenc
salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
'''
# ensure data is in bytes
data = salt.utils.stringutils.to_bytes(data)
pk = _get_pk(**kwargs)
b = libnacl.sealed.SealedBox(pk)
return base64.b64encode(b.encrypt(data)) | 0.005658 |
def list_domains(self):
""" Return all domains. Domain is a key, so group by them """
self.connect()
results = self.server.list_domains(self.session_id)
return {i['domain']: i['subdomains'] for i in results} | 0.008368 |
def send_message(self, message, mention_id=None, mentions=[]):
"""
Send the specified message to twitter, with appropriate mentions, tokenized as necessary
:param message: Message to be sent
:param mention_id: In-reply-to mention_id (to link messages to a previous message)
:param mentions: List of usernames to mention in reply
:return:
"""
messages = self.tokenize(message, self.MESSAGE_LENGTH, mentions)
code = 0
for message in messages:
if self.dry_run:
mention_message = ''
if mention_id:
mention_message = " to mention_id '{0}'".format(mention_id)
logging.info("Not posting to Twitter because DRY_RUN is set. Would have posted "
"the following message{0}:\n{1}".format(mention_message, message))
else:
try:
self.twitter.statuses.update(status=message,
in_reply_to_status_id=mention_id)
except TwitterHTTPError as e:
logging.error('Unable to post to twitter: {0}'.format(e))
code = e.response_data['errors'][0]['code']
return code | 0.005447 |
def next_board(board, wrap):
"""Given a board, return the board one interation later.
Adapted from Jack Diedrich's implementation from his 2012 PyCon talk "Stop
Writing Classes"
:arg wrap: A callable which takes a point and transforms it, for example
to wrap to the other edge of the screen. Return None to remove a point.
"""
new_board = {}
# We need consider only the points that are alive and their neighbors:
points_to_recalc = set(board.iterkeys()) | set(chain(*map(neighbors, board)))
for point in points_to_recalc:
count = sum((neigh in board) for neigh in
(wrap(n) for n in neighbors(point) if n))
if count == 3:
state = 0 if point in board else 1
elif count == 2 and point in board:
state = 2
else:
state = None
if state is not None:
wrapped = wrap(point)
if wrapped:
new_board[wrapped] = state
return new_board | 0.001978 |
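A minimal example of the `wrap` argument, assuming an 80x24 toroidal grid (the size is arbitrary); returning the point unchanged would give an unbounded board, while returning None drops cells that leave the region.
# Hypothetical wrap callable: fold every point back onto an 80x24 torus.
def wrap_torus(point, width=80, height=24):
    x, y = point
    return (x % width, y % height)

# board = next_board(board, wrap_torus)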
def find_next(start, stop, i2hits):
"""
which protein has the best hit, the one to the 'right' or to the 'left?'
"""
if start not in i2hits and stop in i2hits:
index = stop
elif stop not in i2hits and start in i2hits:
index = start
elif start not in i2hits and stop not in i2hits:
index = choice([start, stop])
i2hits[index] = [[False]]
else:
A, B = i2hits[start][0], i2hits[stop][0]
if B[10] <= A[10]:
index = stop
else:
index = start
if index == start:
nstart = start - 1
nstop = stop
else:
nstop = stop + 1
nstart = start
match = i2hits[index][0]
rp = match[-1]
return index, nstart, nstop, rp, match | 0.001311 |
def deleted_count(self):
"""The number of documents deleted."""
if isinstance(self.raw_result, list):
return len(self.raw_result)
else:
return self.raw_result | 0.009709 |
def logging_syslog_server_secure(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logging = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
syslog_server = ET.SubElement(logging, "syslog-server")
syslogip_key = ET.SubElement(syslog_server, "syslogip")
syslogip_key.text = kwargs.pop('syslogip')
use_vrf_key = ET.SubElement(syslog_server, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
secure = ET.SubElement(syslog_server, "secure")
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004545 |
def serialise_to_rsh(params: dict) -> str:
"""Преобразование конфигурационного файла в формате JSON в текстовый хедер.
rsh. Хедер можно использовать как конфигурационный файл для lan10-12base
@params -- параметры в формате JSON (dfparser.def_values.DEF_RSH_PARAMS)
@return -- текстовый хедер
"""
out = "// Generated at %s\n\n" % (datetime.now())
def add_val(field, value):
"""Add value to multiple line in rsh format."""
if isinstance(value, bytes):
value = value.decode("cp1251")
val = ''.join('%s, ' % (v) for v in value) if isinstance(value, list) \
else value
if isinstance(val, str) and val.endswith(', '):
val = val[:-2]
return '%s -- %s\n' % (val, field)
for param in params:
if param == "channel":
for i, channel in enumerate(params[param]):
for ch_par in channel:
val = "%s_%s[%s]" % (param, ch_par, i)
if ch_par == "params":
val = "%ss_%s[%s]" % (param, ch_par, i)
out += add_val(val, channel[ch_par])
elif param == "synchro_channel":
for sync_ch_par in params[param]:
if sync_ch_par == "type":
out += add_val(param, params[param][sync_ch_par])
else:
out += add_val("%s_%s" % (param, sync_ch_par),
params[param][sync_ch_par])
else:
out += add_val(param, params[param])
return out | 0.000635 |
def _incremental_compile_module(
optimizer: PythonASTOptimizer,
py_ast: GeneratedPyAST,
mod: types.ModuleType,
source_filename: str,
collect_bytecode: Optional[BytecodeCollector] = None,
) -> None:
"""Incrementally compile a stream of AST nodes in module mod.
The source_filename will be passed to Python's native compile.
Incremental compilation is an integral part of generating a Python module
during the same process as macro-expansion."""
module_body = list(
map(_statementize, itertools.chain(py_ast.dependencies, [py_ast.node]))
)
module = ast.Module(body=list(module_body))
module = optimizer.visit(module)
ast.fix_missing_locations(module)
_emit_ast_string(module)
bytecode = compile(module, source_filename, "exec")
if collect_bytecode:
collect_bytecode(bytecode)
exec(bytecode, mod.__dict__) | 0.001117 |
def _cube_dict(self):
"""dict containing raw cube response, parsed from JSON payload."""
try:
cube_response = self._cube_response_arg
# ---parse JSON to a dict when constructed with JSON---
cube_dict = (
cube_response
if isinstance(cube_response, dict)
else json.loads(cube_response)
)
# ---cube is 'value' item in a shoji response---
return cube_dict.get("value", cube_dict)
except TypeError:
raise TypeError(
"Unsupported type <%s> provided. Cube response must be JSON "
"(str) or dict." % type(self._cube_response_arg).__name__
) | 0.002732 |
def get_url_report(self, this_url, scan='0', allinfo=1):
""" Get the scan results for a URL.
:param this_url: A URL for which you want to retrieve the most recent report. You may also specify a scan_id
(sha256-timestamp as returned by the URL submission API) to access a specific report. At the same time, you
can specify a CSV list made up of a combination of urls and scan_ids (up to 25 items) so as to perform a batch
request with one single call. The CSV list must be separated by new line characters.
:param scan: (optional) This is an optional parameter that when set to "1" will automatically submit the URL
for analysis if no report is found for it in VirusTotal's database. In this case the result will contain a
scan_id field that can be used to query the analysis report later on.
:param allinfo: (optional) If this parameter is specified and set to "1" additional info regarding the URL
(other than the URL scanning engine results) will also be returned. This additional info includes VirusTotal
related metadata (first seen date, last seen date, files downloaded from the given URL, etc.) and the output
of other tools and datasets when fed with the URL.
:return: JSON response
"""
params = {'apikey': self.api_key, 'resource': this_url, 'scan': scan, 'allinfo': allinfo}
try:
response = requests.get(self.base + 'url/report', params=params, proxies=self.proxies)
except requests.RequestException as e:
return dict(error=e.message)
return _return_response_and_status_code(response) | 0.007803 |
def setup(app):
"""
Just connects the docstring pre_processor and should_skip functions to be
applied on all docstrings.
"""
app.connect('autodoc-process-docstring',
lambda *args: pre_processor(*args, namer=audiolazy_namer))
app.connect('autodoc-skip-member', should_skip) | 0.013378 |
def spherical_variogram_model(m, d):
"""Spherical model, m is [psill, range, nugget]"""
psill = float(m[0])
range_ = float(m[1])
nugget = float(m[2])
return np.piecewise(d, [d <= range_, d > range_],
[lambda x: psill * ((3.*x)/(2.*range_) - (x**3.)/(2.*range_**3.)) + nugget, psill + nugget]) | 0.005848 |
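A short evaluation sketch with arbitrary parameters, to show the shape of the curve: the value starts at the nugget at zero lag, climbs inside the range, and flattens at psill + nugget beyond it.
import numpy as np

m = [2.0, 10.0, 0.5]                  # psill, range, nugget (arbitrary example values)
d = np.array([0.0, 5.0, 10.0, 20.0])  # lag distances
spherical_variogram_model(m, d)       # -> array([0.5, 1.875, 2.5, 2.5])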
def set_weekly(self, interval, *, days_of_week, first_day_of_week,
**kwargs):
""" Set to repeat every week on specified days for every x no. of days
:param int interval: no. of days to repeat at
:param str first_day_of_week: starting day for a week
:param list[str] days_of_week: list of days of the week to repeat
:keyword date start: Start date of repetition (kwargs)
:keyword date end: End date of repetition (kwargs)
:keyword int occurrences: no of occurrences (kwargs)
"""
self.set_daily(interval, **kwargs)
self.__days_of_week = set(days_of_week)
self.__first_day_of_week = first_day_of_week | 0.004255 |
def _stage_local_files(local_dir, local_files={}):
"""
Either ``local_files`` and/or ``context`` should be supplied.
Will stage a ``local_files`` dictionary of path:filename pairs where path
is relative to ``local_dir`` into a local tmp staging directory.
Returns a path to the temporary local staging directory
"""
staging_dir = os.path.join(tempfile.mkdtemp(),os.path.basename(local_dir))
os.mkdir(staging_dir)
for root, dirs, files in os.walk(local_dir):
relative_tree = root.replace(local_dir,'')
if relative_tree:
relative_tree = relative_tree[1:]
if local_files:
files = local_files.get(relative_tree,[])
for file in files:
if relative_tree:
filepath = os.path.join(relative_tree,file)
if not os.path.exists(os.path.join(staging_dir,relative_tree)):
os.mkdir(os.path.join(staging_dir,relative_tree))
else: filepath = file
shutil.copy2(os.path.join(root,file),os.path.join(staging_dir,filepath))
return staging_dir | 0.012567 |
def _get_api_key(self):
"""
Get Opsgenie api_key for creating alert
"""
conn = self.get_connection(self.http_conn_id)
api_key = conn.password
if not api_key:
raise AirflowException('Opsgenie API Key is required for this hook, '
'please check your conn_id configuration.')
return api_key | 0.007712 |
def make(self, cwl):
"""Instantiate a CWL object from a CWl document."""
load = load_tool.load_tool(cwl, self.loading_context)
if isinstance(load, int):
raise Exception("Error loading tool")
return Callable(load, self) | 0.007634 |
def emit_function(self, return_type=None, argtypes=[], proxy=True):
"""Compiles code and returns a Python-callable function."""
if argtypes is not None:
make_func = ctypes.CFUNCTYPE(return_type, *argtypes)
else:
make_func = ctypes.CFUNCTYPE(return_type)
# NOTE: An optional way of binding the function is to use cffi.
# It's a tad faster than emit_function_fast:
# import cffi
# ffi = cffi.FFI()
# ...
# code = jit.emit()
# func = ffi.cast("long (*fptr)(long, long)", code.value)
# func(123)
code = self.emit()
func = make_func(code.value)
# Save this in case anyone wants to disassemble using external
# libraries
func.address = code
# Because function code is munmapped when we call _jit_destroy_state,
# we need to return weakrefs to the functions. Otherwise, a user could
# call a function that points to invalid memory.
if proxy:
self.functions.append(func)
return weakref.proxy(func)
else:
return func | 0.001733 |
def get_services(root, hosts_map, view=None):
''' Gets a list of objects representing the Nagios services.
Each object contains the Nagios hostname, service name, service display
name, and service health summary.
'''
services_list = []
mgmt_service = root.get_cloudera_manager().get_service()
services_list.append({"hostname": CM_DUMMY_HOST,
"name": mgmt_service.name,
"display_name": "CM Managed Service: %s" % (mgmt_service.name,),
"status": get_status(mgmt_service),
"url": mgmt_service.serviceUrl,
"health_summary": mgmt_service.healthSummary})
for cm_role in root.get_cloudera_manager().get_service().get_all_roles(view):
services_list.append({"hostname": hosts_map[cm_role.hostRef.hostId]["hostname"],
"name": cm_role.name,
"display_name": "CM Management Service: %s" % (cm_role.name,),
"status": get_status(cm_role),
"url": cm_role.roleUrl,
"health_summary": cm_role.healthSummary})
for cm_host in root.get_all_hosts(view):
services_list.append({"hostname": hosts_map[cm_host.hostId]["hostname"],
"name": "cm-host-%s" % (cm_host.hostname,),
"display_name": "CM Managed Host: %s" % (cm_host.hostname,),
"status": get_status(cm_host),
"url": cm_host.hostUrl,
"health_summary": cm_host.healthSummary})
for cluster in root.get_all_clusters(view):
for service in cluster.get_all_services(view):
services_list.append({"hostname": cluster.name,
"name": service.name,
"display_name": "CM Managed Service: %s" % (service.name,),
"status": get_status(service),
"url": service.serviceUrl,
"health_summary": service.healthSummary})
for role in service.get_all_roles(view):
services_list.append({"hostname": hosts_map[role.hostRef.hostId]["hostname"],
"name": role.name,
"display_name": "%s:%s" % (cluster.name, role.name,),
"status": get_status(role),
"url": role.roleUrl,
"health_summary": role.healthSummary})
return services_list | 0.007031 |
def copy_and_update(dictionary, update):
"""Returns an updated copy of the dictionary without modifying the original"""
newdict = dictionary.copy()
newdict.update(update)
return newdict | 0.00995 |
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i) | 0.005988 |
def edit_required_pull_request_reviews(self, dismissal_users=github.GithubObject.NotSet, dismissal_teams=github.GithubObject.NotSet, dismiss_stale_reviews=github.GithubObject.NotSet, require_code_owner_reviews=github.GithubObject.NotSet, required_approving_review_count=github.GithubObject.NotSet):
"""
:calls: `PATCH /repos/:owner/:repo/branches/:branch/protection/required_pull_request_reviews <https://developer.github.com/v3/repos/branches>`_
:dismissal_users: list of strings
:dismissal_teams: list of strings
:dismiss_stale_reviews: bool
:require_code_owner_reviews: bool
:required_approving_review_count: int
"""
assert dismissal_users is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) or isinstance(element, (str, unicode)) for element in dismissal_users), dismissal_users
assert dismissal_teams is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) or isinstance(element, (str, unicode)) for element in dismissal_teams), dismissal_teams
assert dismiss_stale_reviews is github.GithubObject.NotSet or isinstance(dismiss_stale_reviews, bool), dismiss_stale_reviews
assert require_code_owner_reviews is github.GithubObject.NotSet or isinstance(require_code_owner_reviews, bool), require_code_owner_reviews
assert required_approving_review_count is github.GithubObject.NotSet or isinstance(required_approving_review_count, int), required_approving_review_count
post_parameters = {}
if dismissal_users is not github.GithubObject.NotSet:
post_parameters["dismissal_restrictions"] = {"users": dismissal_users}
if dismissal_teams is not github.GithubObject.NotSet:
if "dismissal_restrictions" not in post_parameters:
post_parameters["dismissal_restrictions"] = {}
post_parameters["dismissal_restrictions"]["teams"] = dismissal_teams
if dismiss_stale_reviews is not github.GithubObject.NotSet:
post_parameters["dismiss_stale_reviews"] = dismiss_stale_reviews
if require_code_owner_reviews is not github.GithubObject.NotSet:
post_parameters["require_code_owner_reviews"] = require_code_owner_reviews
if required_approving_review_count is not github.GithubObject.NotSet:
post_parameters["required_approving_review_count"] = required_approving_review_count
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.protection_url + "/required_pull_request_reviews",
headers={'Accept': Consts.mediaTypeRequireMultipleApprovingReviews},
input=post_parameters
) | 0.005166 |
def dump(self, function_name):
"""
Pretty-dump the bytecode for the function with the given name.
"""
assert isinstance(function_name, str)
self.stdout.write(function_name)
self.stdout.write("\n")
self.stdout.write("-" * len(function_name))
self.stdout.write("\n\n")
byte_code = self.interpreter.compiled_functions[function_name]
self.stdout.write(byte_code.dump())
self.stdout.write("\n") | 0.004167 |
def prepare(self):
"""
Reorganizes the data such that the deployment logic can find it all
where it expects to be.
The raw configuration file is intended to be as human-friendly as
possible partly through the following mechanisms:
- In order to minimize repetition, any attributes that are common
to all server configurations can be specified in the
``server_common_attributes`` stanza even though the stanza itself
does not map directly to a deployable resource.
- For reference locality, each security group stanza contains its
list of rules even though rules are actually created in a
separate stage from the groups themselves.
In order to make the :class:`Config` object more useful to the program
logic, this method performs the following transformations:
- Distributes the ``server_common_attributes`` among all the
members of the ``servers`` stanza.
- Extracts security group rules to a top-level key, and
interpolates all source and target values.
"""
# TODO: take server_common_attributes and disperse it among the various
# server stanzas
# First stage - turn all the dicts (SERVER, SECGROUP, DATABASE, LOADBAL)
# into lists now they're merged properly
for stanza_key, name_key in (
(R.SERVERS, A.server.NAME),
(R.SERVER_SECURITY_GROUPS, A.secgroup.NAME),
(R.LOAD_BALANCERS, A.loadbalancer.NAME),
(R.DATABASES, A.database.NAME),
(R.BUCKETS, A.NAME),
(R.QUEUES, A.NAME)):
self[stanza_key] = self._convert_to_list(stanza_key, name_key)
self._prepare_ssh_keys()
self._prepare_secgroups()
self._prepare_tags()
self._prepare_dbs()
self._prepare_servers()
self._prepare_load_balancers()
self._prepare_ansible() | 0.001474 |
def types(self):
"""
Return a list of all the variable types that exist in the
Variables object.
"""
output = set()
for var in self.values():
if var.has_value():
output.update(var.types())
return list(output) | 0.006849 |
def get_fptr(self):
"""Get the function pointer."""
cmpfunc = ctypes.CFUNCTYPE(ctypes.c_int,
WPARAM,
LPARAM,
ctypes.POINTER(KBDLLHookStruct))
return cmpfunc(self.handle_input) | 0.006579 |
def IBA_calc(TPR, TNR, alpha=1):
"""
Calculate IBA (Index of balanced accuracy).
:param TNR: specificity or true negative rate
:type TNR : float
:param TPR: sensitivity, recall, hit rate, or true positive rate
:type TPR : float
:param alpha : alpha coefficient
:type alpha : float
:return: IBA as float
"""
try:
IBA = (1 + alpha * (TPR - TNR)) * TPR * TNR
return IBA
except Exception:
return "None" | 0.002123 |
def check_access_token(self, openid=None, access_token=None):
"""检查 access_token 有效性
:param openid: 可选,微信 openid,默认获取当前授权用户信息
:param access_token: 可选,access_token,默认使用当前授权用户的 access_token
:return: 有效返回 True,否则 False
"""
openid = openid or self.open_id
access_token = access_token or self.access_token
res = self._get(
'sns/auth',
params={
'access_token': access_token,
'openid': openid
}
)
if res['errcode'] == 0:
return True
return False | 0.003295 |
def set_attrs(self, username, attrs):
""" set user attributes"""
ldap_client = self._bind()
tmp = self._get_user(self._byte_p2(username), ALL_ATTRS)
if tmp is None:
raise UserDoesntExist(username, self.backend_name)
dn = self._byte_p2(tmp[0])
old_attrs = tmp[1]
for attr in attrs:
bcontent = self._byte_p2(attrs[attr])
battr = self._byte_p2(attr)
new = {battr: self._modlist(self._byte_p3(bcontent))}
# if attr is dn entry, use rename
if attr.lower() == self.dn_user_attr.lower():
ldap_client.rename_s(
dn,
ldap.dn.dn2str([[(battr, bcontent, 1)]])
)
dn = ldap.dn.dn2str(
[[(battr, bcontent, 1)]] + ldap.dn.str2dn(dn)[1:]
)
else:
# if attr is already set, replace the value
# (see dict old passed to modifyModlist)
if attr in old_attrs:
if type(old_attrs[attr]) is list:
tmp = []
for value in old_attrs[attr]:
tmp.append(self._byte_p2(value))
bold_value = tmp
else:
bold_value = self._modlist(
self._byte_p3(old_attrs[attr])
)
old = {battr: bold_value}
# attribute is not set, just add it
else:
old = {}
ldif = modlist.modifyModlist(old, new)
if ldif:
try:
ldap_client.modify_s(dn, ldif)
except Exception as e:
ldap_client.unbind_s()
self._exception_handler(e)
ldap_client.unbind_s() | 0.001029 |
def _pc_decode(self, msg):
"""PC: PLC (lighting) change."""
housecode = msg[4:7]
return {'housecode': housecode, 'index': housecode_to_index(housecode),
'light_level': int(msg[7:9])} | 0.009009 |
def get_top_assets(self):
"""
Gets images and videos to populate top assets.
Map is built separately.
"""
images = self.get_all_images()[0:14]
video = []
if supports_video:
video = self.eventvideo_set.all()[0:10]
return list(chain(images, video))[0:15] | 0.006061 |
def recent_all_projects(self, limit=30, offset=0):
"""Return information about recent builds across all projects.
Args:
limit (int), Number of builds to return, max=100, defaults=30.
offset (int): Builds returned from this point, default=0.
Returns:
A list of dictionaries.
"""
method = 'GET'
url = ('/recent-builds?circle-token={token}&limit={limit}&'
'offset={offset}'.format(token=self.client.api_token,
limit=limit,
offset=offset))
json_data = self.client.request(method, url)
return json_data | 0.002882 |
def build(c: Union[Text, 'Choice', Dict[Text, Any]]) -> 'Choice':
"""Create a choice object from different representations."""
if isinstance(c, Choice):
return c
elif isinstance(c, str):
return Choice(c, c)
else:
return Choice(c.get('name'),
c.get('value'),
c.get('disabled', None),
c.get('checked'),
c.get('key')) | 0.004107 |
def delete_message(self, queue_name, message_id, pop_receipt, timeout=None):
'''
Deletes the specified message.
Normally after a client retrieves a message with the get_messages operation,
the client is expected to process and delete the message. To delete the
message, you must have two items of data: id and pop_receipt. The
id is returned from the previous get_messages operation. The
pop_receipt is returned from the most recent :func:`~get_messages` or
:func:`~update_message` operation. In order for the delete_message operation
to succeed, the pop_receipt specified on the request must match the
pop_receipt returned from the :func:`~get_messages` or :func:`~update_message`
operation.
:param str queue_name:
The name of the queue from which to delete the message.
:param str message_id:
The message id identifying the message to delete.
:param str pop_receipt:
A valid pop receipt value returned from an earlier call
to the :func:`~get_messages` or :func:`~update_message`.
:param int timeout:
The server timeout, expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_id', message_id)
_validate_not_none('pop_receipt', pop_receipt)
request = HTTPRequest()
request.method = 'DELETE'
request.host_locations = self._get_host_locations()
request.path = _get_path(queue_name, True, message_id)
request.query = {
'popreceipt': _to_str(pop_receipt),
'timeout': _int_to_str(timeout)
}
self._perform_request(request) | 0.007941 |
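A hedged usage sketch of the get/process/delete cycle described in the docstring (a legacy azure-storage QueueService-style client is assumed; the queue name is illustrative):
messages = queue_service.get_messages('taskqueue')
for message in messages:
    # ... process the message content here ...
    queue_service.delete_message('taskqueue', message.id, message.pop_receipt)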
def kaplan_meier_estimator(event, time_exit, time_enter=None, time_min=None):
"""Kaplan-Meier estimator of survival function.
Parameters
----------
event : array-like, shape = (n_samples,)
Contains binary event indicators.
time_exit : array-like, shape = (n_samples,)
Contains event/censoring times.
time_enter : array-like, shape = (n_samples,), optional
Contains time when each individual entered the study for
left truncated survival data.
time_min : float, optional
Compute estimator conditional on survival at least up to
the specified time.
Returns
-------
time : array, shape = (n_times,)
Unique times.
prob_survival : array, shape = (n_times,)
Survival probability at each unique time point.
If `time_enter` is provided, estimates are conditional probabilities.
Examples
--------
Creating a Kaplan-Meier curve:
>>> x, y = kaplan_meier_estimator(event, time)
>>> plt.step(x, y, where="post")
>>> plt.ylim(0, 1)
>>> plt.show()
References
----------
.. [1] Kaplan, E. L. and Meier, P., "Nonparametric estimation from incomplete observations",
Journal of The American Statistical Association, vol. 53, pp. 457-481, 1958.
"""
event, time_enter, time_exit = check_y_survival(event, time_enter, time_exit, allow_all_censored=True)
check_consistent_length(event, time_enter, time_exit)
if time_enter is None:
uniq_times, n_events, n_at_risk = _compute_counts(event, time_exit)
else:
uniq_times, n_events, n_at_risk = _compute_counts_truncated(event, time_enter, time_exit)
values = 1 - n_events / n_at_risk
if time_min is not None:
mask = uniq_times >= time_min
uniq_times = numpy.compress(mask, uniq_times)
values = numpy.compress(mask, values)
y = numpy.cumprod(values)
return uniq_times, y | 0.002567 |
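A small self-contained example with synthetic data, assuming the estimator above is importable; the survival probabilities are shown approximately:
import numpy as np

event = np.array([True, False, True, True, False])   # True = event observed, False = censored
time = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

x, y = kaplan_meier_estimator(event, time)
# x -> [1. 2. 3. 4. 5.]
# y -> [0.8, 0.8, 0.533, 0.267, 0.267]  (the curve drops only at event times)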
def search_all(self, queries, audio_basename=None, case_sensitive=False,
subsequence=False, supersequence=False, timing_error=0.0,
anagram=False, missing_word_tolerance=0):
"""
Returns a dictionary of all results of all of the queries for all of
the audio files.
All the specified parameters work per query.
Parameters
----------
queries : [str] or str
A list of the strings that'll be searched. If type of queries is
            `str`, it'll be inserted into a list within the body of the
method.
audio_basename : str, optional
Search only within the given audio_basename.
Default is `None`.
case_sensitive : bool
Default is `False`
subsequence : bool, optional
            `True` if the exact word does not need to be detected and larger
            strings that contain the given one are fine.
            If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
supersequence : bool, optional
            `True` if the exact word does not need to be detected and
            smaller strings that are contained within the given one are fine.
            If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
anagram : bool, optional
`True` if it's acceptable for a complete permutation of the word to
be found. e.g. "abcde" would be acceptable for "edbac".
            If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
timing_error : None or float, optional
Sometimes other words (almost always very small) would be detected
between the words of the `query`. This parameter defines the
timing difference/tolerance of the search.
Default is 0.0 i.e. No timing error is tolerated.
missing_word_tolerance : int, optional
The number of words that can be missed within the result.
For example, if the query is "Some random text" and the tolerance
value is `1`, then "Some text" would be a valid response.
Note that the first and last words cannot be missed. Also,
there'll be an error if the value is more than the number of
available words. For the example above, any value more than 1
would have given an error (since there's only one word i.e.
"random" that can be missed)
Default is 0.
Returns
-------
search_results : {str: {str: [(float, float)]}}
A dictionary whose keys are queries and whose values are
            dictionaries whose keys are all the audio files in which the query
is present and whose values are a list whose elements are 2-tuples
whose first element is the starting second of the query and whose
values are the ending second. e.g.
{"apple": {"fruits.wav" : [(1.1, 1.12)]}}
Raises
------
TypeError
if `queries` is neither a list nor a str
"""
search_gen_rest_of_kwargs = {
"audio_basename": audio_basename,
"case_sensitive": case_sensitive,
"subsequence": subsequence,
"supersequence": supersequence,
"timing_error": timing_error,
"anagram": anagram,
"missing_word_tolerance": missing_word_tolerance}
if not isinstance(queries, (list, str)):
raise TypeError("Invalid query type.")
if type(queries) is not list:
queries = [queries]
search_results = _PrettyDefaultDict(lambda: _PrettyDefaultDict(list))
for query in queries:
search_gen = self.search_gen(query=query,
**search_gen_rest_of_kwargs)
for search_result in search_gen:
search_results[query][
search_result["File Name"]].append(search_result["Result"])
return search_results | 0.000921 |
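A hedged usage sketch; `engine` stands for whatever object exposes search_all (it is not named in the snippet), and the file names are illustrative:
results = engine.search_all(["apple", "orange juice"],
                            case_sensitive=False,
                            missing_word_tolerance=0)
# e.g. {"apple": {"fruits.wav": [(1.1, 1.12)]}, "orange juice": {...}}
for query, per_file in results.items():
    for audio_basename, spans in per_file.items():
        print(query, audio_basename, spans)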
def run():
"""
Looks through the docs/ dir and parses each markdown document, looking for
sections to update from Python docstrings. Looks for section headers in
the format:
- ### `ClassName()` class
- ##### `.method_name()` method
- ##### `.attribute_name` attribute
- ### `function_name()` function
The markdown content following these section headers up until the next
section header will be replaced by new markdown generated from the Python
docstrings of the associated source files.
By default maps docs/{name}.md to {modulename}/{name}.py. Allows for
custom mapping via the md_source_map variable.
"""
print('Updating API docs...')
md_files = []
for root, _, filenames in os.walk(os.path.join(package_root, 'docs')):
for filename in filenames:
if not filename.endswith('.md'):
continue
md_files.append(os.path.join(root, filename))
parser = CommonMark.Parser()
for md_file in md_files:
md_file_relative = md_file[len(package_root) + 1:]
if md_file_relative in md_source_map:
py_files = md_source_map[md_file_relative]
py_paths = [os.path.join(package_root, py_file) for py_file in py_files]
else:
py_files = [os.path.basename(md_file).replace('.md', '.py')]
py_paths = [os.path.join(package_root, package_name, py_files[0])]
if not os.path.exists(py_paths[0]):
continue
with open(md_file, 'rb') as f:
markdown = f.read().decode('utf-8')
original_markdown = markdown
md_lines = list(markdown.splitlines())
md_ast = parser.parse(markdown)
last_class = []
last = {}
sections = OrderedDict()
find_sections(md_ast, sections, last, last_class, markdown.count("\n") + 1)
md_chunks = {}
for index, py_file in enumerate(py_files):
py_path = py_paths[index]
with open(os.path.join(py_path), 'rb') as f:
code = f.read().decode('utf-8')
module_ast = ast.parse(code, filename=py_file)
code_lines = list(code.splitlines())
for node in ast.iter_child_nodes(module_ast):
walk_ast(node, code_lines, sections, md_chunks)
added_lines = 0
def _replace_md(key, sections, md_chunk, md_lines, added_lines):
start, end = sections[key]
start -= 1
start += added_lines
end += added_lines
new_lines = md_chunk.split('\n')
added_lines += len(new_lines) - (end - start)
# Ensure a newline above each class header
if start > 0 and md_lines[start][0:4] == '### ' and md_lines[start - 1][0:1] == '>':
added_lines += 1
new_lines.insert(0, '')
md_lines[start:end] = new_lines
return added_lines
for key in sections:
if key not in md_chunks:
raise ValueError('No documentation found for %s' % key[1])
added_lines = _replace_md(key, sections, md_chunks[key], md_lines, added_lines)
markdown = '\n'.join(md_lines).strip() + '\n'
if original_markdown != markdown:
with open(md_file, 'wb') as f:
f.write(markdown.encode('utf-8')) | 0.001471 |
def subseq(self, start_offset=0, end_offset=None):
"""
Return a subset of the sequence
starting at start_offset (defaulting to the beginning)
        ending at end_offset (None representing the end, which is the default)
Raises ValueError if duration_64 is missing on any element
"""
from sebastian.core import DURATION_64
def subseq_iter(start_offset, end_offset):
cur_offset = 0
for point in self._elements:
try:
cur_offset += point[DURATION_64]
except KeyError:
raise ValueError("HSeq.subseq requires all points to have a %s attribute" % DURATION_64)
#Skip until start
if cur_offset < start_offset:
continue
#Yield points start_offset <= point < end_offset
if end_offset is None or cur_offset < end_offset:
yield point
                else:
                    return
return HSeq(subseq_iter(start_offset, end_offset)) | 0.004545 |
def next_basis_label_or_index(self, label_or_index, n=1):
"""Given the label or index of a basis state, return the label
the next basis state.
More generally, if `n` is given, return the `n`'th next basis state
label/index; `n` may also be negative to obtain previous basis state
labels. Returns a :class:`str` label if `label_or_index` is a
:class:`str` or :class:`int`, or a :class:`SpinIndex` if
`label_or_index` is a :class:`SpinIndex`.
Args:
label_or_index (int or str or SpinIndex): If `int`, the
zero-based index of a basis state; if `str`, the label of a
basis state
n (int): The increment
Raises:
IndexError: If going beyond the last or first basis state
ValueError: If `label` is not a label for any basis state in the
Hilbert space
.BasisNotSetError: If the Hilbert space has no defined basis
TypeError: if `label_or_index` is neither a :class:`str` nor an
:class:`int`, nor a :class:`SpinIndex`
Note:
This differs from its super-method only by never returning an
integer index (which is not accepted when instantiating a
:class:`BasisKet` for a :class:`SpinSpace`)
"""
if isinstance(label_or_index, int):
new_index = label_or_index + n
if new_index < 0:
raise IndexError("index %d < 0" % new_index)
if new_index >= self.dimension:
raise IndexError(
"index %d out of range for basis %s"
% (new_index, self._basis))
return self.basis_labels[new_index]
elif isinstance(label_or_index, str):
label_index = self.basis_labels.index(label_or_index)
new_index = label_index + n
if (new_index < 0) or (new_index >= len(self._basis)):
raise IndexError(
"index %d out of range for basis %s"
% (new_index, self._basis))
return self.basis_labels[new_index]
        elif isinstance(label_or_index, SpinIndex):
            return label_or_index.__class__(expr=label_or_index.expr + n)
        else:
            raise TypeError(
                "label_or_index must be an int, str, or SpinIndex, not %s"
                % type(label_or_index)) | 0.000875
def calculate_ecef_velocity(inst):
"""
Calculates spacecraft velocity in ECEF frame.
    Presumes that the spacecraft position in ECEF is in
    the input instrument object as position_ecef_*. Uses a symmetric
difference to calculate the velocity thus endpoints will be
set to NaN. Routine should be run using pysat data padding feature
to create valid end points.
Parameters
----------
inst : pysat.Instrument
Instrument object
Returns
-------
None
Modifies pysat.Instrument object in place to include ECEF velocity
using naming scheme velocity_ecef_* (*=x,y,z)
"""
x = inst['position_ecef_x']
vel_x = (x.values[2:] - x.values[0:-2])/2.
y = inst['position_ecef_y']
vel_y = (y.values[2:] - y.values[0:-2])/2.
z = inst['position_ecef_z']
vel_z = (z.values[2:] - z.values[0:-2])/2.
inst[1:-1, 'velocity_ecef_x'] = vel_x
inst[1:-1, 'velocity_ecef_y'] = vel_y
inst[1:-1, 'velocity_ecef_z'] = vel_z
inst.meta['velocity_ecef_x'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
inst.meta['velocity_ecef_y'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
inst.meta['velocity_ecef_z'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
return | 0.01237 |
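A standalone numpy sketch of the symmetric (central) difference used above, without the pysat Instrument plumbing; note the division by 2 reflects the two-sample spacing between the neighbours used:
import numpy as np

x = np.array([0.0, 1.0, 4.0, 9.0, 16.0])   # position samples
vel = (x[2:] - x[:-2]) / 2.0                # central difference; the two endpoints are dropped
print(vel)                                   # [2. 4. 6.]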
def response_list(data, key):
"""Obtain the relevant response data in a list.
If the response does not already contain the result in a list, a new one
will be created to ease iteration in the parser methods.
Args:
data (dict): API response.
key (str): Attribute of the response that contains the result values.
Returns:
List of response items (usually dict) or None if the key is not present.
"""
if key not in data:
return None
if isinstance(data[key], list):
return data[key]
else:
return [data[key],] | 0.005068 |
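The behavior can be summarized with three quick calls, assuming response_list above is in scope:
response_list({"items": [1, 2]}, "items")  # [1, 2] -- already a list
response_list({"items": 3}, "items")       # [3]    -- scalar wrapped in a list
response_list({}, "items")                 # None   -- key missing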
def validate(self, vat_deets):
"""Validates an existing VAT identification number against VIES."""
request = self._get('validation', vat_deets)
return self.responder(request) | 0.010101 |
def get_hierarchies(self):
"""Gets the hierarchy list resulting from the search.
return: (osid.hierarchy.HierarchyList) - the hierarchy list
raise: IllegalState - the hierarchy list was already retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.HierarchyList(self._results, runtime=self._runtime) | 0.003876 |
def transform(self, X):
r'''
Computes the divergences from X to :attr:`features_`.
Parameters
----------
X : list of bag feature arrays or :class:`skl_groups.features.Features`
The bags to search "from".
Returns
-------
divs : array of shape ``[len(div_funcs), len(Ks), len(X), len(features_)] + ([2] if do_sym else [])``
The divergences from X to :attr:`features_`.
``divs[d, k, i, j]`` is the ``div_funcs[d]`` divergence
            from ``X[i]`` to ``features_[j]`` using a K of ``Ks[k]``.
If ``do_sym``, ``divs[d, k, i, j, 0]`` is
:math:`D_{d,k}( X_i \| \texttt{features_}_j)` and
``divs[d, k, i, j, 1]`` is :math:`D_{d,k}(\texttt{features_}_j \| X_i)`.
'''
X = as_features(X, stack=True, bare=True)
Y = self.features_
Ks = np.asarray(self.Ks)
if X.dim != Y.dim:
msg = "incompatible dimensions: fit with {}, transform with {}"
raise ValueError(msg.format(Y.dim, X.dim))
memory = self.memory
if isinstance(memory, string_types):
memory = Memory(cachedir=memory, verbose=0)
# ignore Y_indices to avoid slow pickling of them
# NOTE: if the indices are approximate, then might not get the same
# results!
est = memory.cache(_est_divs, ignore=['n_jobs', 'Y_indices', 'Y_rhos'])
output, self.rhos_ = est(
X, Y, self.indices_, getattr(self, 'rhos_', None),
self.div_funcs, Ks,
self.do_sym, self.clamp, self.version, self.min_dist,
self._flann_args(), self._n_jobs)
return output | 0.002338 |
def normalize(self,asOf=None,multiplier=100):
"""
        Returns a normalized Series or DataFrame
        Example:
            Series.normalize()
        Returns: Series or DataFrame
        Parameters:
        -----------
        asOf : string
            Date string of the observation to rebase on, e.g.
            '2015-02-27'
        multiplier : int
            Factor by which the results will be adjusted
"""
        if not asOf:
            x0 = self.iloc[0]
        else:
            x0 = self.loc[asOf]
return self/x0*multiplier | 0.069948 |
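The same rebasing can be expressed with plain pandas, which is essentially what the helper above boils down to; dates and values below are illustrative:
import pandas as pd

s = pd.Series([50.0, 55.0, 60.5],
              index=pd.to_datetime(["2015-01-01", "2015-01-02", "2015-01-03"]))
rebased_to_start = s / s.iloc[0] * 100          # 100.0, 110.0, 121.0
rebased_as_of = s / s.loc["2015-01-02"] * 100   # rebased to 100 at the given date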
def _get_any_translated_model(self, meta=None):
"""
Return any available translation.
Returns None if there are no translations at all.
"""
if meta is None:
meta = self._parler_meta.root
tr_model = meta.model
local_cache = self._translations_cache[tr_model]
if local_cache:
            # There is already a language available in the cache. No need for queries.
# Give consistent answers if they exist.
check_languages = [self._current_language] + self.get_fallback_languages()
try:
for fallback_lang in check_languages:
trans = local_cache.get(fallback_lang, None)
if trans and not is_missing(trans):
return trans
return next(t for t in six.itervalues(local_cache) if not is_missing(t))
except StopIteration:
pass
try:
# Use prefetch if available, otherwise perform separate query.
prefetch = self._get_prefetched_translations(meta=meta)
if prefetch is not None:
translation = prefetch[0] # Already a list
else:
translation = self._get_translated_queryset(meta=meta)[0]
except IndexError:
return None
else:
local_cache[translation.language_code] = translation
_cache_translation(translation)
return translation | 0.003333 |
def apply(self, reboot=False):
"""Apply the configuration to iRMC."""
self.root.use_virtual_addresses = True
self.root.manage.manage = True
self.root.mode = 'new'
self.root.init_boot = reboot
self.client.set_profile(self.root.get_json()) | 0.006993 |
def get_organism(self):
"""Select Enrichr organism from below:
Human & Mouse: H. sapiens & M. musculus
Fly: D. melanogaster
Yeast: S. cerevisiae
Worm: C. elegans
Fish: D. rerio
"""
organism = {'default': ['', 'hs', 'mm', 'human','mouse',
'homo sapiens', 'mus musculus',
'h. sapiens', 'm. musculus'],
'Fly': ['fly', 'd. melanogaster', 'drosophila melanogaster'],
'Yeast': ['yeast', 's. cerevisiae', 'saccharomyces cerevisiae'],
'Worm': ['worm', 'c. elegans', 'caenorhabditis elegans', 'nematode'],
'Fish': ['fish', 'd. rerio', 'danio rerio', 'zebrafish']
}
for k, v in organism.items():
if self.organism.lower() in v :
self._organism = k
if self._organism is None:
raise Exception("No supported organism found !!!")
if self._organism == 'default':
self._organism = ''
return | 0.007253 |
def _set_market_trade_type(self, ttype):
"""根据选择的市价交易类型选择对应的下拉选项"""
selects = self._main.child_window(
control_id=self._config.TRADE_MARKET_TYPE_CONTROL_ID,
class_name="ComboBox",
)
        for i, text in enumerate(selects.texts()):
            # skip index 0, because index 0 is the currently selected item
if i == 0:
continue
if ttype in text:
selects.select(i - 1)
break
else:
raise TypeError("不支持对应的市价类型: {}".format(ttype)) | 0.003643 |
def correct_scanpy(adatas, **kwargs):
"""Batch correct a list of `scanpy.api.AnnData`.
Parameters
----------
adatas : `list` of `scanpy.api.AnnData`
Data sets to integrate and/or correct.
kwargs : `dict`
See documentation for the `correct()` method for a full list of
parameters to use for batch correction.
Returns
-------
corrected
By default (`return_dimred=False`), returns a list of
`scanpy.api.AnnData` with batch corrected values in the `.X` field.
corrected, integrated
        When `return_dimred=True`, returns a two-tuple containing a list of
`np.ndarray` with integrated low-dimensional embeddings and a list
of `scanpy.api.AnnData` with batch corrected values in the `.X`
field.
"""
if 'return_dimred' in kwargs and kwargs['return_dimred']:
datasets_dimred, datasets, genes = correct(
[adata.X for adata in adatas],
[adata.var_names.values for adata in adatas],
**kwargs
)
else:
datasets, genes = correct(
[adata.X for adata in adatas],
[adata.var_names.values for adata in adatas],
**kwargs
)
new_adatas = []
for i, adata in enumerate(adatas):
adata.X = datasets[i]
new_adatas.append(adata)
if 'return_dimred' in kwargs and kwargs['return_dimred']:
return datasets_dimred, new_adatas
else:
return new_adatas | 0.000671 |
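A hedged usage sketch of a scanorama-style workflow; the file paths are placeholders:
import scanpy as sc

adatas = [sc.read_h5ad(path) for path in ["batch1.h5ad", "batch2.h5ad"]]
corrected = correct_scanpy(adatas)                              # each AnnData's .X now holds corrected values
dimred, corrected = correct_scanpy(adatas, return_dimred=True)  # plus low-dimensional embeddings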
def setImgShape(self, shape):
'''
image shape must be known for calculating camera matrix
if method==Manual and addPoints is used instead of addImg
this method must be called before .coeffs are obtained
'''
self.img = type('Dummy', (object,), {})
# if imgProcessor.ARRAYS_ORDER_IS_XY:
# self.img.shape = shape[::-1]
# else:
self.img.shape = shape | 0.004545 |
def group_apis(reg, features=None, extensions=None, api=None, profile=None,
support=None):
"""Groups Types, Enums, Commands with their respective Features, Extensions
Similar to :py:func:`import_registry`, but generates a new Registry object
for every feature or extension.
:param Registry reg: Input registry
:param features: Feature names to import, or None to import all.
:type features: Iterable of strs
:param extensions: Extension names to import, or None to import all.
:type extensions: Iterable of strs
:param str profile: Import features which belong in `profile`, or None
to import all.
:param str api: Import features which belong in `api`, or None to
import all.
:param str support: Import extensions which belong in this extension
support string, or None to import all.
:returns: list of :py:class:`Registry` objects
"""
features = (reg.get_features(api) if features is None
else [reg.features[x] for x in features])
if extensions is None:
extensions = sorted(reg.get_extensions(support),
key=extension_sort_key)
else:
extensions = [reg.extensions[x] for x in extensions]
output_symbols = set()
def filter_symbol(type, name):
k = (type, name)
if k in output_symbols:
return False
else:
output_symbols.add(k)
return True
out_apis = []
for x in features:
out = Registry(x.name)
import_feature(out, reg, x.name, api, profile, filter_symbol)
out_apis.append(out)
for x in extensions:
out = Registry(x.name)
import_extension(out, reg, x.name, api, profile, filter_symbol)
out_apis.append(out)
return out_apis | 0.000538 |
def create_entry(group, name, timestamp, **attributes):
"""Create a new ARF entry under group, setting required attributes.
An entry is an abstract collection of data which all refer to the same time
frame. Data can include physiological recordings, sound recordings, and
derived data such as spike times and labels. See add_data() for information
on how data are stored.
name -- the name of the new entry. any valid python string.
timestamp -- timestamp of entry (datetime object, or seconds since
January 1, 1970). Can be an integer, a float, or a tuple
of integers (seconds, microsceconds)
Additional keyword arguments are set as attributes on created entry.
Returns: newly created entry object
"""
# create group using low-level interface to store creation order
from h5py import h5p, h5g, _hl
try:
gcpl = h5p.create(h5p.GROUP_CREATE)
gcpl.set_link_creation_order(
h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED)
except AttributeError:
grp = group.create_group(name)
else:
name, lcpl = group._e(name, lcpl=True)
grp = _hl.group.Group(h5g.create(group.id, name, lcpl=lcpl, gcpl=gcpl))
set_uuid(grp, attributes.pop("uuid", None))
set_attributes(grp,
timestamp=convert_timestamp(timestamp),
**attributes)
return grp | 0.000708 |
def get_bind_processor(column_type, dialect):
"""
Returns a bind processor for a column type and dialect, with special handling
for JSON/JSONB column types to return dictionaries instead of serialized JSON strings.
NOTE: This is a workaround for https://github.com/NerdWalletOSS/savage/issues/8
:param column_type: :py:class:`~sqlalchemy.sql.type_api.TypeEngine`
:param dialect: :py:class:`~sqlalchemy.engine.interfaces.Dialect`
:return: bind processor for given column type and dialect
"""
if column_type.compile(dialect) not in {'JSON', 'JSONB'}:
# For non-JSON/JSONB column types, return the column type's bind processor
return column_type.bind_processor(dialect)
if type(column_type) in {JSON, JSONB}:
# For bare JSON/JSONB types, we simply skip bind processing altogether
return None
elif isinstance(column_type, TypeDecorator) and column_type._has_bind_processor:
# For decorated JSON/JSONB types, we return the custom bind processor (if any)
return partial(column_type.process_bind_param, dialect=dialect)
else:
# For all other cases, we fall back to deserializing the result of the bind processor
def wrapped_bind_processor(value):
json_deserializer = dialect._json_deserializer or json.loads
return json_deserializer(column_type.bind_processor(dialect)(value))
return wrapped_bind_processor | 0.006224 |
def set_brightness(self, percent, group=None):
""" Set brightness.
Percent is int between 0 (minimum brightness) and 100 (maximum brightness), or
float between 0.0 (minimum brightness) and 1.0 (maximum brightness).
See also .nightmode().
If group (1-4) is not specified, brightness of all four groups will be adjusted.
"""
# If input is float, assume it is percent value from 0 to 1.
if isinstance(percent, float):
if percent > 1:
percent = int(percent)
else:
percent = int(percent * 100)
percent, value = self.get_brightness_level(percent)
self.on(group)
self._send_command((b"\x4e", struct.pack("B", value)))
return percent | 0.00625 |
def onTagAdd(self, name, func):
'''
Register a callback for tag addition.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
# TODO allow name wild cards
if '*' in name:
self.ontagaddglobs.add(name, func)
else:
self.ontagadds[name].append(func) | 0.004843 |
def plot_gen_diff(
networkA,
networkB,
leave_out_carriers=[
'geothermal',
'oil',
'other_non_renewable',
'reservoir',
'waste']):
"""
Plot difference in generation between two networks grouped by carrier type
Parameters
----------
networkA : PyPSA network container with switches
networkB : PyPSA network container without switches
leave_out_carriers : list of carriers to leave out (default to all small
carriers)
Returns
-------
Plot
"""
    def gen_by_c(network):
        weighted = network.generators_t.p.mul(
            network.snapshot_weightings, axis=0)
        non_slack = weighted[
            network.generators[network.generators.control != 'Slack'].index]
        slack = weighted[
            network.generators[network.generators.control == 'Slack'].index]\
            .iloc[:, 0].apply(lambda x: x if x > 0 else 0)
        gen = pd.concat([non_slack, slack], axis=1)\
            .groupby(network.generators.carrier, axis=1).sum()
        return gen
gen = gen_by_c(networkB)
gen_switches = gen_by_c(networkA)
diff = gen_switches - gen
colors = coloring()
diff.drop(leave_out_carriers, axis=1, inplace=True)
colors = [colors[col] for col in diff.columns]
plot = diff.plot(kind='line', color=colors, use_index=False)
plot.legend(loc='upper left', ncol=5, prop={'size': 8})
    x = list(range(len(diff)))
    plt.xticks(x, x)
plot.set_xlabel('Timesteps')
plot.set_ylabel('Difference in Generation in MW')
plot.set_title('Difference in Generation')
plt.tight_layout() | 0.002255 |
def _ensure_data(values, dtype=None):
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : array-like
dtype : pandas_dtype, optional
coerce to this dtype
Returns
-------
(ndarray, pandas_dtype, algo dtype as a string)
"""
# we check some simple dtypes first
try:
if is_object_dtype(dtype):
return ensure_object(np.asarray(values)), 'object', 'object'
if is_bool_dtype(values) or is_bool_dtype(dtype):
# we are actually coercing to uint64
# until our algos support uint8 directly (see TODO)
return np.asarray(values).astype('uint64'), 'bool', 'uint64'
elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):
return ensure_int64(values), 'int64', 'int64'
elif (is_unsigned_integer_dtype(values) or
is_unsigned_integer_dtype(dtype)):
return ensure_uint64(values), 'uint64', 'uint64'
elif is_float_dtype(values) or is_float_dtype(dtype):
return ensure_float64(values), 'float64', 'float64'
elif is_object_dtype(values) and dtype is None:
return ensure_object(np.asarray(values)), 'object', 'object'
elif is_complex_dtype(values) or is_complex_dtype(dtype):
# ignore the fact that we are casting to float
# which discards complex parts
with catch_warnings():
simplefilter("ignore", np.ComplexWarning)
values = ensure_float64(values)
return values, 'float64', 'float64'
except (TypeError, ValueError, OverflowError):
# if we are trying to coerce to a dtype
# and it is incompat this will fall thru to here
return ensure_object(values), 'object', 'object'
# datetimelike
if (needs_i8_conversion(values) or
is_period_dtype(dtype) or
is_datetime64_any_dtype(dtype) or
is_timedelta64_dtype(dtype)):
if is_period_dtype(values) or is_period_dtype(dtype):
from pandas import PeriodIndex
values = PeriodIndex(values)
dtype = values.dtype
elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):
from pandas import TimedeltaIndex
values = TimedeltaIndex(values)
dtype = values.dtype
else:
# Datetime
from pandas import DatetimeIndex
values = DatetimeIndex(values)
dtype = values.dtype
return values.asi8, dtype, 'int64'
elif (is_categorical_dtype(values) and
(is_categorical_dtype(dtype) or dtype is None)):
values = getattr(values, 'values', values)
values = values.codes
dtype = 'category'
# we are actually coercing to int64
# until our algos support int* directly (not all do)
values = ensure_int64(values)
return values, dtype, 'int64'
# we have failed, return object
values = np.asarray(values, dtype=np.object)
return ensure_object(values), 'object', 'object' | 0.000296 |
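Illustrative calls against the coercion table above (this is an internal pandas helper; the outputs are paraphrased from the code, not from a live run):
import numpy as np

_ensure_data(np.array([True, False]))             # -> (uint64 array, 'bool',   'uint64')
_ensure_data(np.array([1, 2, 3]))                 # -> (int64 array,  'int64',  'int64')
_ensure_data(np.array(['a', 'b'], dtype=object))  # -> (object array, 'object', 'object')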
def get_previous_dagrun(self, session=None):
"""The previous DagRun, if there is one"""
return session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date < self.execution_date
).order_by(
DagRun.execution_date.desc()
).first() | 0.006289 |
def getmembers(self):
"""Gets members (vars) from all scopes, using both runtime and static.
This method will attempt both static and runtime getmembers. This is the
recommended way of getting available members.
Returns:
Set of available vars.
Raises:
NotImplementedError if any scope fails to implement 'getmembers'.
"""
names = set()
for scope in self.scopes:
if isinstance(scope, type):
names.update(structured.getmembers_static(scope))
else:
names.update(structured.getmembers_runtime(scope))
return names | 0.004511 |
def __make_security_group_api_request(server_context, api, user_ids, group_id, container_path):
"""
Execute a request against the LabKey Security Controller Group Membership apis
:param server_context: A LabKey server context. See utils.create_server_context.
:param api: Action to execute
:param user_ids: user ids to apply action to
:param group_id: group id to apply action to
:param container_path: Additional container context path
:return: Request json object
"""
url = server_context.build_url(security_controller, api, container_path)
# if user_ids is only a single scalar make it an array
if not hasattr(user_ids, "__iter__"):
user_ids = [user_ids]
return server_context.make_request(url, {
'groupId': group_id,
'principalIds': user_ids
}) | 0.004819 |
def dialog_open(self, *, dialog: dict, trigger_id: str, **kwargs) -> SlackResponse:
"""Open a dialog with a user.
Args:
dialog (dict): A dictionary of dialog arguments.
{
"callback_id": "46eh782b0",
"title": "Request something",
"submit_label": "Request",
"state": "Max",
"elements": [
{
"type": "text",
"label": "Origin",
"name": "loc_origin"
},
{
"type": "text",
"label": "Destination",
"name": "loc_destination"
}
]
}
trigger_id (str): The trigger id of a recent message interaction.
e.g. '12345.98765.abcd2358fdea'
"""
kwargs.update({"dialog": dialog, "trigger_id": trigger_id})
return self.api_call("dialog.open", json=kwargs) | 0.00266 |
def get_span_column_count(span):
"""
Find the length of a colspan.
Parameters
----------
span : list of lists of int
The [row, column] pairs that make up the span
Returns
-------
columns : int
The number of columns included in the span
Example
-------
Consider this table::
+------+------------------+
| foo | bar |
+------+--------+---------+
| spam | goblet | berries |
+------+--------+---------+
::
>>> span = [[0, 1], [0, 2]]
>>> print(get_span_column_count(span))
2
"""
columns = 1
first_column = span[0][1]
for i in range(len(span)):
if span[i][1] > first_column:
columns += 1
first_column = span[i][1]
return columns | 0.001211 |