def generate_visualizations(methods, data, true_labels, base_dir = 'visualizations',
figsize=(18,10), **scatter_options):
"""
Generates visualization scatters for all the methods.
Args:
methods: follows same format as run_experiments. List of tuples.
data: genes x cells
true_labels: array of integers
base_dir: base directory to save all the plots
figsize: tuple of ints representing size of figure
scatter_options: options for plt.scatter
"""
plt.figure(figsize=figsize)
for method in methods:
        preproc = method[0]
if isinstance(preproc, Preprocess):
preprocessed, ll = preproc.run(data)
output_names = preproc.output_names
else:
# if the input is a list, only use the first preproc result
p1 = data
output_names = ['']
for p in preproc:
p1, ll = p.run(p1)
p1 = p1[0]
output_names[0] = output_names[0] + p.output_names[0]
preprocessed = [p1]
for r, name in zip(preprocessed, output_names):
# TODO: cluster labels
print(name)
# if it's 2d, just display it... else, do tsne to reduce to 2d
if r.shape[0]==2:
r_dim_red = r
else:
# sometimes the data is too big to do tsne... (for sklearn)
if sparse.issparse(r) and r.shape[0] > 100:
name = 'tsvd_' + name
tsvd = TruncatedSVD(50)
r_dim_red = tsvd.fit_transform(r.T)
try:
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r_dim_red).T
name = 'tsne_' + name
except:
tsvd2 = TruncatedSVD(2)
r_dim_red = tsvd2.fit_transform(r_dim_red).T
else:
name = 'tsne_' + name
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r.T).T
if isinstance(method[1], list):
for clustering_method in method[1]:
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
else:
clustering_method = method[1]
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
output_path = base_dir + '/{0}_true_labels.png'.format(name)
            visualize_dim_red(r_dim_red, true_labels, output_path, **scatter_options)
def get_redirect_url(self, request, callback, parameters=None):
"""Build authentication redirect url."""
args = self.get_redirect_args(request, callback=callback)
additional = parameters or {}
args.update(additional)
params = urlencode(args)
    return '{0}?{1}'.format(self.authorization_url, params)
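
# Hedged standalone sketch of the pattern used by get_redirect_url above: merge the
# provider's base redirect arguments with optional extra parameters and append them as
# a URL-encoded query string. The helper name and example values below are illustrative
# only, not part of the original library.
from urllib.parse import urlencode

def build_redirect_url(authorization_url, args, parameters=None):
    merged = dict(args)
    merged.update(parameters or {})
    return '{0}?{1}'.format(authorization_url, urlencode(merged))

# build_redirect_url('https://provider.example/authorize',
#                    {'client_id': 'abc', 'response_type': 'code'}, {'state': 'xyz'})
# -> 'https://provider.example/authorize?client_id=abc&response_type=code&state=xyz'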
def storage(self, provider='osfstorage'):
"""Return storage `provider`."""
stores = self._json(self._get(self._storages_url), 200)
stores = stores['data']
for store in stores:
provides = self._get_attribute(store, 'attributes', 'provider')
if provides == provider:
return Storage(store, self.session)
raise RuntimeError("Project has no storage "
"provider '{}'".format(provider)) | 0.004107 |
def correct_pairs(p, pf, tag):
"""
Take one pair of reads and correct to generate *.corr.fastq.
"""
from jcvi.assembly.preprocess import correct as cr
logging.debug("Work on {0} ({1})".format(pf, ','.join(p)))
itag = tag[0]
cm = ".".join((pf, itag))
targets = (cm + ".1.corr.fastq", cm + ".2.corr.fastq", \
pf + ".PE-0.corr.fastq")
if not need_update(p, targets):
logging.debug("Corrected reads found: {0}. Skipped.".format(targets))
return
slink(p, pf, tag)
cwd = os.getcwd()
os.chdir(pf)
cr(sorted(glob("*.fastq") + glob("*.fastq.gz")) + ["--nofragsdedup"])
sh("mv {0}.1.corr.fastq ../{1}".format(itag, targets[0]))
sh("mv {0}.2.corr.fastq ../{1}".format(itag, targets[1]))
sh("mv frag_reads_corr.corr.fastq ../{0}".format(targets[2]))
logging.debug("Correction finished: {0}".format(targets))
    os.chdir(cwd)
def folderitem(self, obj, item, index):
"""Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
"""
# ensure we have an object and not a brain
obj = api.get_object(obj)
url = api.get_url(obj)
title = api.get_title(obj)
keyword = obj.getKeyword()
# get the category
if self.show_categories_enabled():
category = obj.getCategoryTitle()
if category not in self.categories:
self.categories.append(category)
item["category"] = category
item["Title"] = title
item["replace"]["Title"] = get_link(url, value=title)
item["choices"]["min_operator"] = self.min_operator_choices
item["choices"]["max_operator"] = self.max_operator_choices
item["allow_edit"] = self.get_editable_columns()
item["required"] = self.get_required_columns()
spec = self.specification.get(keyword, {})
item["selected"] = spec and True or False
item["min_operator"] = spec.get("min_operator", "geq")
item["min"] = spec.get("min", "")
item["max_operator"] = spec.get("max_operator", "leq")
item["max"] = spec.get("max", "")
item["warn_min"] = spec.get("warn_min", "")
item["warn_max"] = spec.get("warn_max", "")
item["hidemin"] = spec.get("hidemin", "")
item["hidemax"] = spec.get("hidemax", "")
item["rangecomment"] = spec.get("rangecomment", "")
# Add methods
methods = obj.getMethods()
if methods:
links = map(
lambda m: get_link(
m.absolute_url(), value=m.Title(), css_class="link"),
methods)
item["replace"]["Methods"] = ", ".join(links)
else:
item["methods"] = ""
# Icons
after_icons = ""
if obj.getAccredited():
after_icons += get_image(
"accredited.png", title=_("Accredited"))
if obj.getAttachmentOption() == "r":
after_icons += get_image(
"attach_reqd.png", title=_("Attachment required"))
if obj.getAttachmentOption() == "n":
after_icons += get_image(
"attach_no.png", title=_("Attachment not permitted"))
if after_icons:
item["after"]["Title"] = after_icons
    return item
def write(self, msg, level=logging.INFO):
""" method implements stream write interface, allowing to redirect stdout to logger """
if msg is not None and len(msg.strip()) > 0:
        self.logger.log(level, msg)
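
# Hedged standalone sketch of the stdout-to-logger redirection that the write() method
# above enables: any object exposing write()/flush() can replace sys.stdout. The class
# name below is made up for illustration; it is not part of the original module.
import logging
import sys

class _LoggerWriter(object):
    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Skip empty fragments such as the bare newline that print() emits separately.
        if msg is not None and len(msg.strip()) > 0:
            self.logger.log(self.level, msg)

    def flush(self):
        # File-like objects are expected to provide flush(); nothing to do here.
        pass

logging.basicConfig(level=logging.INFO)
sys.stdout = _LoggerWriter(logging.getLogger('stdout'))
print('this line now goes through the logger')
sys.stdout = sys.__stdout__  # restore the real stdout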
def last_modified(self, name: str = None) -> str:
"""
Return a compact ISO8601 timestamp (UTC timezone) indicating when an attribute was last modified
Note: if no attribute name is given (the default), the modification time of the most recently modified attribute will be returned
Note: if the attributes do not contain a timestamp, and the mode is 'r+', a new timestamp is created and returned.
Otherwise, the current time in UTC will be returned.
"""
a = ["/row_attrs/", "/col_attrs/"][self.axis]
if self.ds is not None:
if name is None:
if "last_modified" in self.ds._file[a].attrs:
return self.ds._file[a].attrs["last_modified"]
elif self.ds._file.mode == 'r+':
self.ds._file[a].attrs["last_modified"] = timestamp()
self.ds._file.flush()
return self.ds._file[a].attrs["last_modified"]
if name is not None:
if "last_modified" in self.ds._file[a + name].attrs:
return self.ds._file[a + name].attrs["last_modified"]
elif self.ds._file.mode == 'r+':
self.ds._file[a + name].attrs["last_modified"] = timestamp()
self.ds._file.flush()
return self.ds._file[a + name].attrs["last_modified"]
    return timestamp()
def p_select_from_where_statement_1(self, p):
'''
statement : SELECT ANY variable_name FROM INSTANCES OF identifier WHERE expression
| SELECT MANY variable_name FROM INSTANCES OF identifier WHERE expression
'''
p[0] = SelectFromWhereNode(cardinality=p[2],
variable_name=p[3],
key_letter=p[7],
                               where_clause=p[9])
def dataReceived(self, data):
"""Received some data from the local side.
If we have set up the multiplexed connection, sends the data
over the multiplexed connection. Otherwise, buffers.
"""
log.msg("{} bytes of data received locally".format(len(data)))
if self.connection is None:
# we haven't finished connecting yet
log.msg("Connection not made yet, buffering...")
self._buffer.write(data)
else:
log.msg("Sending data...")
        self._sendData(data)
def add_edge_configuration(self, param_name, edge, param_value):
"""
Set a parameter for a given edge
:param param_name: parameter identifier (as specified by the chosen model)
:param edge: edge identifier
:param param_value: parameter value
"""
if param_name not in self.config['edges']:
self.config['edges'][param_name] = {edge: param_value}
else:
        self.config['edges'][param_name][edge] = param_value
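
# Hedged standalone sketch of the nested-dict store that add_edge_configuration above
# maintains: parameters are keyed first by name and then by edge, mirroring
# self.config['edges'][param_name][edge] = param_value. Names and values are illustrative.
if __name__ == '__main__':
    config = {'edges': {}}
    for edge, value in [((0, 1), 0.3), ((1, 2), 0.7)]:
        config['edges'].setdefault('threshold', {})[edge] = value
    print(config['edges']['threshold'])  # -> {(0, 1): 0.3, (1, 2): 0.7}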
def update(self, other):
"""
Update internal dictionary object. This is meant to be an
analog for dict.update().
"""
if self.meta_type == 'list':
raise AssertionError('Cannot update object of `list` base type!')
elif self.meta_type == 'dict':
self._dict = dict(self + composite(other))
        return
def display(self):
"""Displays the symbol table content"""
#Finding the maximum length for each column
sym_name = "Symbol name"
sym_len = max(max(len(i.name) for i in self.table),len(sym_name))
kind_name = "Kind"
kind_len = max(max(len(SharedData.KINDS[i.kind]) for i in self.table),len(kind_name))
type_name = "Type"
type_len = max(max(len(SharedData.TYPES[i.type]) for i in self.table),len(type_name))
attr_name = "Attribute"
attr_len = max(max(len(i.attribute_str()) for i in self.table),len(attr_name))
#print table header
print("{0:3s} | {1:^{2}s} | {3:^{4}s} | {5:^{6}s} | {7:^{8}} | {9:s}".format(" No", sym_name, sym_len, kind_name, kind_len, type_name, type_len, attr_name, attr_len, "Parameters"))
print("-----------------------------" + "-" * (sym_len + kind_len + type_len + attr_len))
#print symbol table
for i,sym in enumerate(self.table):
parameters = ""
for p in sym.param_types:
if parameters == "":
parameters = "{0}".format(SharedData.TYPES[p])
else:
parameters += ", {0}".format(SharedData.TYPES[p])
print("{0:3d} | {1:^{2}s} | {3:^{4}s} | {5:^{6}s} | {7:^{8}} | ({9})".format(i, sym.name, sym_len, SharedData.KINDS[sym.kind], kind_len, SharedData.TYPES[sym.type], type_len, sym.attribute_str(), attr_len, parameters)) | 0.010753 |
def streaming(self, remotefile, localpipe, fmt = 'M3U8_480_360', chunk = 4 * const.OneM):
''' Usage: stream <remotefile> <localpipe> [format] [chunk] - \
stream a video / audio file converted to M3U format at cloud side, to a pipe.
remotefile - remote file at Baidu Yun (after app root directory at Baidu Yun)
localpipe - the local pipe file to write to
format - output video format (M3U8_320_240 | M3U8_480_224 | \
M3U8_480_360 | M3U8_640_480 | M3U8_854_480). Default: M3U8_480_360
chunk - chunk (initial buffering) size for streaming (default: 4M)
To stream a file, you can use the 'mkfifo' trick with omxplayer etc.:
mkfifo /tmp/omx
bypy.py downfile <remotepath> /tmp/omx &
omxplayer /tmp/omx
*** NOT WORKING YET ****
'''
pars = {
'method' : 'streaming',
'path' : get_pcs_path(remotefile),
'type' : fmt }
return self.__get(pcsurl + 'file', pars,
        self.__streaming_act, (localpipe, chunk), stream = True)
def _list_tenants(self, admin):
"""
Returns either a list of all tenants (admin=True), or the tenant for
the currently-authenticated user (admin=False).
"""
resp, resp_body = self.method_get("tenants", admin=admin)
if 200 <= resp.status_code < 300:
tenants = resp_body.get("tenants", [])
return [Tenant(self, tenant) for tenant in tenants]
elif resp.status_code in (401, 403):
raise exc.AuthorizationFailure("You are not authorized to list "
"tenants.")
else:
        raise exc.TenantNotFound("Could not get a list of tenants.")
def get_url_and_revision_from_pip_url(cls, pip_url):
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
    That's required because although they use SSH they sometimes don't
    work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
    parsing. Hence we remove it again afterwards and return it as a stub.
    The manpage for git-clone(1) refers to this as the "scp-like syntax".
"""
if '://' not in pip_url:
assert 'file:' not in pip_url
pip_url = pip_url.replace('git+', 'git+ssh://')
url, rev = super(GitRepo, cls).get_url_and_revision_from_pip_url(pip_url)
url = url.replace('ssh://', '')
elif 'github.com:' in pip_url:
raise exc.LibVCSException(
"Repo %s is malformatted, please use the convention %s for"
"ssh / private GitHub repositories."
% (pip_url, "git+https://github.com/username/repo.git")
)
else:
url, rev = super(GitRepo, cls).get_url_and_revision_from_pip_url(pip_url)
    return url, rev
def _unichr(i):
"""
Helper function for taking a Unicode scalar value and returning a Unicode character.
    :param i: Unicode scalar value to convert.
:return: Unicode character
"""
if not isinstance(i, int):
raise TypeError
try:
return six.unichr(i)
except ValueError:
# Workaround the error "ValueError: unichr() arg not in range(0x10000) (narrow Python build)"
        return struct.pack("i", i).decode("utf-32")
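
# Hedged usage sketch for _unichr above; it assumes the `six` and `struct` imports used
# by the function are available in this module.
if __name__ == '__main__':
    print(_unichr(0x41))     # -> 'A'
    print(_unichr(0x1F600))  # above the BMP; may go through the struct fallback on narrow builds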
def add_patches(self, patches, after=None):
""" Add a list of patches to the patches list """
if after is None:
self.insert_patches(patches)
else:
self._check_patch(after)
patchlines = self._patchlines_before(after)
patchlines.append(self.patch2line[after])
for patch in patches:
patchline = PatchLine(patch)
patchlines.append(patchline)
self.patch2line[patchline.get_patch()] = patchline
patchlines.extend(self._patchlines_after(after))
        self.patchlines = patchlines
def __postCallAction_hwbp(self, event):
"""
Handles hardware breakpoint events on return from the function.
@type event: L{ExceptionEvent}
@param event: Single step event.
"""
# Remove the one shot hardware breakpoint
# at the return address location in the stack.
tid = event.get_tid()
address = event.breakpoint.get_address()
event.debug.erase_hardware_breakpoint(tid, address)
# Call the "post" callback.
try:
self.__postCallAction(event)
# Forget the parameters.
finally:
        self.__pop_params(tid)
def connect_resource(self):
"""
Connect to an AWS API via boto3 high-level resource connection and set
``self.resource_conn`` to the `boto3.resource <https://boto3.readthed
ocs.org/en/latest/reference/core/boto3.html#boto3.resource>`_ object
(a ``boto3.resources.factory.*.ServiceResource`` instance).
If ``self.resource_conn`` is not None,
do nothing. This connects to the API name given by ``self.api_name``.
:returns: None
"""
if self.resource_conn is not None:
return
kwargs = self._boto3_connection_kwargs
self.resource_conn = boto3.resource(self.api_name, **kwargs)
logger.info("Connected to %s (resource) in region %s", self.api_name,
                self.resource_conn.meta.client._client_config.region_name)
def rws_call(ctx, method, default_attr=None):
"""Make request to RWS"""
try:
response = ctx.obj['RWS'].send_request(method)
if ctx.obj['RAW']: # use response from RWS
result = ctx.obj['RWS'].last_result.text
elif default_attr is not None: # human-readable summary
result = ""
for item in response:
result = result + item.__dict__[default_attr] + "\n"
else: # use response from RWS
result = ctx.obj['RWS'].last_result.text
if ctx.obj['OUTPUT']: # write to file
ctx.obj['OUTPUT'].write(result.encode('utf-8'))
else: # echo
click.echo(result)
except RWSException as e:
        click.echo(str(e))
def _eval_num(self, node):
"""
Evaluate a numerical node
:param node: Node to eval
:return: Result of node
"""
if self.floats:
return node.n
else:
        return int(node.n)
def get_juttle_data_url(deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return the juttle data url
"""
return get_data_url(deployment_name,
endpoint_type='juttle',
app_url=app_url,
                        token_manager=token_manager)
def warn_or_error(removal_version, deprecated_entity_description, hint=None,
deprecation_start_version=None,
stacklevel=3, frame_info=None, context=1, ensure_stderr=False):
"""Check the removal_version against the current pants version.
Issues a warning if the removal version is > current pants version, or an error otherwise.
:param string removal_version: The pantsbuild.pants version at which the deprecated entity
will be/was removed.
:param string deprecated_entity_description: A short description of the deprecated entity, that
we can embed in warning/error messages.
:param string hint: A message describing how to migrate from the removed entity.
:param string deprecation_start_version: The pantsbuild.pants version at which the entity will
begin to display a deprecation warning. This must be less
than the `removal_version`. If not provided, the
deprecation warning is always displayed.
:param int stacklevel: The stacklevel to pass to warnings.warn.
:param FrameInfo frame_info: If provided, use this frame info instead of getting one from
`stacklevel`.
:param int context: The number of lines of source code surrounding the selected frame to display
in a warning message.
:param bool ensure_stderr: Whether use warnings.warn, or use warnings.showwarning to print
directly to stderr.
:raises DeprecationApplicationError: if the removal_version parameter is invalid.
:raises CodeRemovedError: if the current version is later than the version marked for removal.
"""
removal_semver = validate_deprecation_semver(removal_version, 'removal version')
if deprecation_start_version:
deprecation_start_semver = validate_deprecation_semver(
deprecation_start_version, 'deprecation start version')
if deprecation_start_semver >= removal_semver:
raise InvalidSemanticVersionOrderingError(
'The deprecation start version {} must be less than the end version {}.'
.format(deprecation_start_version, removal_version))
elif PANTS_SEMVER < deprecation_start_semver:
return
msg = 'DEPRECATED: {} {} removed in version {}.'.format(deprecated_entity_description,
get_deprecated_tense(removal_version), removal_version)
if hint:
msg += '\n {}'.format(hint)
# We need to have filename and line_number for warnings.formatwarning, which appears to be the only
# way to get a warning message to display to stderr. We get that from frame_info -- it's too bad
# we have to reconstruct the `stacklevel` logic ourselves, but we do also gain the ability to have
# multiple lines of context, which is neat.
if frame_info is None:
frame_info = _get_frame_info(stacklevel, context=context)
_, filename, line_number, _, code_context, _ = frame_info
if code_context:
context_lines = ''.join(code_context)
else:
context_lines = '<no code context available>'
if removal_semver > PANTS_SEMVER:
if ensure_stderr:
# No warning filters can stop us from printing this message directly to stderr.
warning_msg = warnings.formatwarning(
msg, DeprecationWarning, filename, line_number, line=context_lines)
print(warning_msg, file=sys.stderr)
else:
# This output is filtered by warning filters.
with _greater_warnings_context(context_lines):
warnings.warn_explicit(
message=DeprecationWarning(msg) if PY2 else msg,
category=DeprecationWarning,
filename=filename,
lineno=line_number)
return msg
else:
    raise CodeRemovedError(msg)
def _process_api_events(self, function, api_events, template, condition=None):
"""
Actually process given API events. Iteratively adds the APIs to Swagger JSON in the respective Serverless::Api
resource from the template
:param SamResource function: SAM Function containing the API events to be processed
:param dict api_events: API Events extracted from the function. These events will be processed
:param SamTemplate template: SAM Template where Serverless::Api resources can be found
:param str condition: optional; this is the condition that is on the function with the API event
"""
for logicalId, event in api_events.items():
event_properties = event.get("Properties", {})
if not event_properties:
continue
self._add_implicit_api_id_if_necessary(event_properties)
api_id = self._get_api_id(event_properties)
try:
path = event_properties["Path"]
method = event_properties["Method"]
except KeyError as e:
raise InvalidEventException(logicalId, "Event is missing key {}.".format(e))
if (not isinstance(path, six.string_types)):
raise InvalidEventException(logicalId,
"Api Event must have a String specified for 'Path'.")
if (not isinstance(method, six.string_types)):
raise InvalidEventException(logicalId,
"Api Event must have a String specified for 'Method'.")
api_dict = self.api_conditions.setdefault(api_id, {})
method_conditions = api_dict.setdefault(path, {})
method_conditions[method] = condition
self._add_api_to_swagger(logicalId, event_properties, template)
api_events[logicalId] = event
# We could have made changes to the Events structure. Write it back to function
        function.properties["Events"].update(api_events)
def translate_to_american_phonetic_alphabet(self, hide_stress_mark=False):
'''
    Convert to the American phonetic alphabet. The stress mark is hidden when the word has only one vowel.
:param hide_stress_mark:
:return:
'''
translations = self.stress.mark_ipa() if (not hide_stress_mark) and self.have_vowel else ""
for phoneme in self._phoneme_list:
translations += phoneme.american
    return translations
def limit(self, count):
"""Limit a query to return a fixed number of results.
If the current query already has a limit set, this will overwrite it.
Args:
count (int): Maximum number of documents to return that match
the query.
Returns:
~.firestore_v1beta1.query.Query: A limited query. Acts as a
copy of the current query, modified with the newly added
"limit" filter.
"""
return self.__class__(
self._parent,
projection=self._projection,
field_filters=self._field_filters,
orders=self._orders,
limit=count,
offset=self._offset,
start_at=self._start_at,
end_at=self._end_at,
    )
def find(self, sought, view='lemma'):
'''
Returns a word instance for the hit if the "sought" word is found in the sentence.
    By default the "lemma" view of the words is compared.
You can specify the desired view with the optional "view" option.
'''
for word in self.wordlist:
if sought == word.views[view]:
            yield word
def validate_url(url):
"""Validate URL is valid
NOTE: only support http & https
"""
schemes = ['http', 'https']
netloc_re = re.compile(
r'^'
r'(?:\S+(?::\S*)?@)?' # user:pass auth
r'(?:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])'
r'(?:\.(?:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9]))*' # host
r'(?::[0-9]{2,5})?' # port
r'$', re.IGNORECASE
)
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
raise Invalid('Invalid URL')
if scheme not in schemes:
raise Invalid('Missing URL scheme')
if not netloc_re.search(netloc):
raise Invalid('Invalid URL')
    return url
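
# Hedged usage sketch for validate_url above; it assumes the `re`, `urlsplit` and
# `Invalid` names used by the function are importable in this module.
if __name__ == '__main__':
    for candidate in ('https://example.com:8080/path?q=1',  # passes both checks
                      'ftp://example.com/file',             # rejected: scheme is not http/https
                      'http://bad host/'):                  # rejected: netloc fails the regex
        try:
            print('ok      ', validate_url(candidate))
        except Invalid as err:
            print('rejected', candidate, '->', err)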
def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
"""
Shows an image in a window.
dtype support::
* ``uint8``: yes; not tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
image : (H,W,3) ndarray
Image to show.
backend : {'matplotlib', 'cv2'}, optional
Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
OpenCV tends to be faster, but apparently causes more technical issues.
"""
do_assert(backend in ["matplotlib", "cv2"], "Expected backend 'matplotlib' or 'cv2', got %s." % (backend,))
if backend == "cv2":
image_bgr = image
if image.ndim == 3 and image.shape[2] in [3, 4]:
image_bgr = image[..., 0:3][..., ::-1]
win_name = "imgaug-default-window"
cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
cv2.imshow(win_name, image_bgr)
cv2.waitKey(0)
cv2.destroyWindow(win_name)
else:
# import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)
import matplotlib.pyplot as plt
dpi = 96
h, w = image.shape[0] / dpi, image.shape[1] / dpi
w = max(w, 6) # if the figure is too narrow, the footer may appear and make the fig suddenly wider (ugly)
fig, ax = plt.subplots(figsize=(w, h), dpi=dpi)
fig.canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
ax.imshow(image, cmap="gray") # cmap is only activate for grayscale images
        plt.show()
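
# Hedged usage sketch for imshow above; it assumes numpy is available in addition to the
# matplotlib/cv2 and do_assert dependencies the function itself relies on.
if __name__ == '__main__':
    import numpy as np
    image = np.random.randint(0, 255, size=(128, 128, 3), dtype=np.uint8)
    imshow(image, backend="matplotlib")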
def from_dict(cls, data, key_extractors=None, content_type=None):
"""Parse a dict using given key extractor return a model.
By default consider key
extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor
and last_rest_key_case_insensitive_extractor)
:param dict data: A dict using RestAPI structure
:param str content_type: JSON by default, set application/xml if XML.
:returns: An instance of this model
:raises: DeserializationError if something went wrong
"""
deserializer = Deserializer(cls._infer_class_models())
deserializer.key_extractors = [
rest_key_case_insensitive_extractor,
attribute_key_case_insensitive_extractor,
last_rest_key_case_insensitive_extractor
] if key_extractors is None else key_extractors
    return deserializer(cls.__name__, data, content_type=content_type)
def update_headers(self, header_args):
'''
Given a set of headers, update both the user-agent
and additional headers for the remote browser.
header_args must be a dict. Keys are the names of
the corresponding HTTP header.
return value is a 2-tuple of the results of the user-agent
update, as well as the extra headers update.
If no 'User-Agent' key is present in the new headers,
the first item in the tuple will be None
'''
assert isinstance(header_args, dict), "header_args must be a dict, passed type was %s" \
% (type(header_args), )
ua = header_args.pop('User-Agent', None)
ret_1 = None
if ua:
ret_1 = self.Network_setUserAgentOverride(userAgent=ua)
ret_2 = self.Network_setExtraHTTPHeaders(headers = header_args)
    return (ret_1, ret_2)
def _divf16(ins):
""" Divides 2 32bit (16.16) fixed point numbers. The result is pushed onto the stack.
Optimizations:
* If 2nd operand is 1, do nothing
* If 2nd operand is -1, do NEG32
"""
op1, op2 = tuple(ins.quad[2:])
if is_float(op2):
if float(op2) == 1:
output = _f16_oper(op1)
output.append('push de')
output.append('push hl')
return output
if float(op2) == -1:
return _negf(ins)
rev = not is_float(op1) and op1[0] != 't' and op2[0] == 't'
output = _f16_oper(op1, op2, reversed=rev)
output.append('call __DIVF16')
output.append('push de')
output.append('push hl')
REQUIRES.add('divf16.asm')
    return output
def cmd(send, msg, args):
"""Insults a user.
Syntax: {command} [nick]
"""
if not msg:
user = choice(get_users(args))
else:
user = msg
    send(gen_insult(user))
def blocks_from_ops(ops):
"""
Group a list of :class:`Op` and :class:`Label` instances by label.
Everytime a label is found, a new :class:`Block` is created. The resulting
blocks are returned as a dictionary to easily access the target block of a
jump operation. The keys of this dictionary will be the labels, the values
will be the :class:`Block` instances. The initial block can be accessed
by getting the ``None`` item from the dictionary.
Arguments:
ops(list): The list of :class:`Op` and :class:`Label` instances (as
returned by :func:`disassemble`.
Returns:
dict: The resulting dictionary of blocks grouped by label.
"""
blocks = {}
current_block = blocks[None] = Block()
for op in ops:
if isinstance(op, Label):
next_block = blocks[op] = Block(op)
current_block.next = next_block
current_block = next_block
continue
current_block.ops.append(op)
    return blocks
def get_subscription(self, topic_name, subscription_name):
'''
Gets an existing subscription.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
    return _convert_response_to_subscription(response)
def get_accessibles(request, roles=None):
"""
    Returns the list of *dictionaries* for which the accounts are
accessibles by ``request.user`` filtered by ``roles`` if present.
"""
results = []
for role_name, organizations in six.iteritems(request.session.get(
'roles', {})):
if roles is None or role_name in roles:
results += organizations
    return results
def get_data_dirs(__pkg: str) -> List[str]:
"""Return all data directories for given package.
Args:
__pkg: Package name
"""
dirs = [user_data(__pkg), ]
dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
for d in getenv('XDG_DATA_DIRS',
'/usr/local/share/:/usr/share/').split(':'))
    return [d for d in dirs if path.isdir(d)]
def get_body_size(params, boundary):
"""Returns the number of bytes that the multipart/form-data encoding
of ``params`` will be."""
size = sum(p.get_size(boundary) for p in MultipartParam.from_params(params))
    return size + len(boundary) + 6
def vstack_tables(filelist, hdus):
"""vstack a set of HDUs from a set of files
Parameters
----------
filelist : list
List of the files to get data from.
hdus : list
Names of the HDU containing the table with the input data.
Returns
-------
out_tables : list
A list with the table with all the requested data extracted.
out_names : list
A list with the names of the tables.
"""
nfiles = len(filelist)
out_tables = []
out_names = []
for hdu in hdus:
sys.stdout.write('Working on %i files for %s: ' % (nfiles, hdu))
sys.stdout.flush()
tlist = []
for f in filelist:
try:
tab = Table.read(f, hdu)
tlist.append(tab)
sys.stdout.write('.')
except KeyError:
sys.stdout.write('x')
sys.stdout.flush()
sys.stdout.write('!\n')
if tlist:
out_table = vstack(tlist)
out_tables.append(out_table)
out_names.append(hdu)
    return (out_tables, out_names)
def env_maker(environment_id):
""" Create a relatively raw atari environment """
env = gym.make(environment_id)
assert 'NoFrameskip' in env.spec.id
# Wait for between 1 and 30 rounds doing nothing on start
env = NoopResetEnv(env, noop_max=30)
# Do the same action for k steps. Return max of last 2 frames. Return sum of rewards
env = MaxAndSkipEnv(env, skip=4)
    return env
def update_range(self, share_name, directory_name, file_name, data,
start_range, end_range, validate_content=False, timeout=None):
'''
Writes the bytes specified by the request body into the specified range.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param bytes data:
Content of the range.
:param int start_range:
Start of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
file.
:param int timeout:
The timeout parameter is expressed in seconds.
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('data', data)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(share_name, directory_name, file_name)
request.query = {
'comp': 'range',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-write': 'update',
}
_validate_and_format_range_headers(
request, start_range, end_range)
request.body = _get_data_bytes_only('data', data)
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
    self._perform_request(request)
def increase_crypto_config(self, crypto_adapters,
crypto_domain_configurations):
"""
Add crypto adapters and/or crypto domains to the crypto configuration
of this partition.
The general principle for maintaining crypto configurations of
partitions is as follows: Each adapter included in the crypto
configuration of a partition has all crypto domains included in the
crypto configuration. Each crypto domain included in the crypto
configuration has the same access mode on all adapters included in the
crypto configuration.
Example: Assume that the current crypto configuration of a partition
includes crypto adapter A and crypto domains 0 and 1. When this method
is called to add adapter B and domain configurations for domains 1 and
2, the resulting crypto configuration of the partition will include
domains 0, 1, and 2 on each of the adapters A and B.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Partition Details" task.
Parameters:
crypto_adapters (:term:`iterable` of :class:`~zhmcclient.Adapter`):
Crypto adapters that should be added to the crypto configuration of
this partition.
crypto_domain_configurations (:term:`iterable` of `domain_config`):
Crypto domain configurations that should be added to the crypto
configuration of this partition.
A crypto domain configuration (`domain_config`) is a dictionary
with the following keys:
* ``"domain-index"`` (:term:`integer`): Domain index of the crypto
domain.
The domain index is a number in the range of 0 to a maximum that
depends on the model of the crypto adapter and the CPC model. For
the Crypto Express 5S adapter in a z13, the maximum domain index
is 84.
* ``"access-mode"`` (:term:`string`): Access mode for the crypto
domain.
The access mode specifies the way the partition can use the
crypto domain on the crypto adapter(s), using one of the
following string values:
* ``"control"`` - The partition can load cryptographic keys into
the domain, but it may not use the domain to perform
cryptographic operations.
* ``"control-usage"`` - The partition can load cryptographic keys
into the domain, and it can use the domain to perform
cryptographic operations.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
crypto_adapter_uris = [a.uri for a in crypto_adapters]
body = {'crypto-adapter-uris': crypto_adapter_uris,
'crypto-domain-configurations': crypto_domain_configurations}
self.manager.session.post(
        self.uri + '/operations/increase-crypto-configuration', body)
def _get_stream_id(self, text):
"""Try to find a stream_id"""
m = self._image_re.search(text)
if m:
        return m.group("stream_id")
def prt_txt_desc2nts(self, prt, desc2nts, prtfmt):
"""Print grouped and sorted GO IDs."""
# 1-D: data to print is a flat list of namedtuples
if 'flat' in desc2nts:
nts = desc2nts.get('flat')
# sys.stdout.write("FLAT NTS: {FLDS}\n".format(FLDS=" ".join(next(iter(nts))._fields)))
prt_txt(prt, nts, prtfmt)
# 2-D: data to print is a list of [(section, nts), ...
else:
for section, nts in desc2nts['sections']:
prt.write("\nSECTION: {SEC}\n".format(SEC=section))
prt_txt(prt, nts, prtfmt)
grprobj = self.sortobj.grprobj
dat = SummarySec2dHdrGos().summarize_sec2hdrnts(desc2nts['sections'])
ugos_y = dat['G'].intersection(grprobj.usrgos)
ugos_n = dat['U'].intersection(grprobj.usrgos)
return {'GO_DESC':'usr', 'SECs':len(dat['S']), 'GOs':len(ugos_y),
                'UNGRP':len(ugos_n), 'undesc':'ungrpd'}
def create_endpoint(port=None, service_name=None, host=None, use_defaults=True):
"""Creates a new Endpoint object.
:param port: TCP/UDP port. Defaults to 0.
:type port: int
:param service_name: service name as a str. Defaults to 'unknown'.
:type service_name: str
:param host: ipv4 or ipv6 address of the host. Defaults to the
current host ip.
:type host: str
:param use_defaults: whether to use defaults.
:type use_defaults: bool
:returns: zipkin Endpoint object
"""
if use_defaults:
if port is None:
port = 0
if service_name is None:
service_name = 'unknown'
if host is None:
try:
host = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
host = '127.0.0.1'
ipv4 = None
ipv6 = None
if host:
# Check ipv4 or ipv6.
try:
socket.inet_pton(socket.AF_INET, host)
ipv4 = host
except socket.error:
# If it's not an ipv4 address, maybe it's ipv6.
try:
socket.inet_pton(socket.AF_INET6, host)
ipv6 = host
except socket.error:
# If it's neither ipv4 or ipv6, leave both ip addresses unset.
pass
return Endpoint(
ipv4=ipv4,
ipv6=ipv6,
port=port,
service_name=service_name,
    )
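
# Hedged usage sketch for create_endpoint above; it assumes the Endpoint type used by the
# function is a namedtuple-like object exposing its fields as attributes.
if __name__ == '__main__':
    ep = create_endpoint(port=8080, service_name='frontend', host='10.0.0.5')
    print(ep.ipv4, ep.ipv6, ep.port, ep.service_name)  # -> 10.0.0.5 None 8080 frontend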
def load_single_dict(pinyin_dict, style='default'):
"""载入用户自定义的单字拼音库
:param pinyin_dict: 单字拼音库。比如: ``{0x963F: u"ā,ē"}``
:param style: pinyin_dict 参数值的拼音库风格. 支持 'default', 'tone2'
:type pinyin_dict: dict
"""
if style == 'tone2':
for k, v in pinyin_dict.items():
v = _replace_tone2_style_dict_to_default(v)
PINYIN_DICT[k] = v
else:
PINYIN_DICT.update(pinyin_dict)
    mmseg.retrain(mmseg.seg)
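
# Hedged usage sketch for load_single_dict above; it mutates the module-level PINYIN_DICT
# and retrains the mmseg segmenter, so it only makes sense inside this module's context.
# 0x963F is the code point of 阿.
if __name__ == '__main__':
    load_single_dict({0x963F: u"ā,ē"})
    load_single_dict({0x963F: u"a1,e1"}, style='tone2')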
def get_object_by_uid(uid, default=_marker):
"""Find an object by a given UID
:param uid: The UID of the object to find
:type uid: string
:returns: Found Object or None
"""
# nothing to do here
if not uid:
if default is not _marker:
return default
fail("get_object_by_uid requires UID as first argument; got {} instead"
.format(uid))
# we defined the portal object UID to be '0'::
if uid == '0':
return get_portal()
brain = get_brain_by_uid(uid)
if brain is None:
if default is not _marker:
return default
fail("No object found for UID {}".format(uid))
    return get_object(brain)
def log(self, *message):
"""
Logs a messate to a defined io stream if available.
"""
if self._logger is None:
return
s = " ".join([str(m) for m in message])
self._logger.write(s+'\n')
    self._logger.flush()
def checksum_identity_card_number(characters):
"""
Calculates and returns a control digit for given list of characters basing on Identity Card Number standards.
"""
weights_for_check_digit = [7, 3, 1, 0, 7, 3, 1, 7, 3]
check_digit = 0
for i in range(3):
check_digit += weights_for_check_digit[i] * (ord(characters[i]) - 55)
for i in range(4, 9):
check_digit += weights_for_check_digit[i] * characters[i]
check_digit %= 10
    return check_digit
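
# Hedged usage sketch for checksum_identity_card_number above: the input mixes letters
# (as str) for the first three positions and digits (as int) for the rest; index 3 is the
# check-digit slot itself and carries weight 0. For 'ABC' followed by 1,2,3,4,5 the sum is
# 7*10 + 3*11 + 1*12 + 7*1 + 3*2 + 1*3 + 7*4 + 3*5 = 174, so the control digit is 4.
if __name__ == '__main__':
    print(checksum_identity_card_number(['A', 'B', 'C', 0, 1, 2, 3, 4, 5]))  # -> 4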
def writeTnetFile(grph, name, modeNameString, weighted = False, sourceMode = None, timeString = None, nodeIndexString = 'tnet-ID', weightString = 'weight'):
"""Writes an edge list designed for reading by the _R_ package [tnet](https://toreopsahl.com/tnet/).
The _networkx_ graph provided must be a pure two-mode network, the modes must be 2 different values for the node attribute accessed by _modeNameString_ and all edges must be between different node types. Each node will be given an integer id, stored in the attribute given by _nodeIndexString_, these ids are then written to the file as the endpoints of the edges. Unless _sourceMode_ is given which mode is the source (first column) and which the target (second column) is random.
**Note** the _grph_ will be modified by this function, the ids of the nodes will be written to the graph at the attribute _nodeIndexString_.
# Parameters
_grph_ : `network Graph`
> The graph that will be written to _name_
_name_ : `str`
> The path of the file to write
_modeNameString_ : `str`
> The name of the attribute _grph_'s modes are stored in
_weighted_ : `optional bool`
> Default `False`, if `True` then the attribute _weightString_ will be written to the weight column
_sourceMode_ : `optional str`
> Default `None`, if given the name of the mode used for the source (first column) in the output file
_timeString_ : `optional str`
> Default `None`, if present the attribute _timeString_ of an edge will be written to the time column surrounded by double quotes (").
    **Note** The format used by tnet for dates is very strict: it uses the ISO format, down to the second and without time zones.
_nodeIndexString_ : `optional str`
> Default `'tnet-ID'`, the name of the attribute to save the id for each node
_weightString_ : `optional str`
> Default `'weight'`, the name of the weight attribute
"""
count = 0
eMax = len(grph.edges())
progArgs = (0, "Writing tnet edge list {}".format(name))
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if sourceMode is not None:
modes = [sourceMode]
else:
modes = []
mode1Set = set()
PBar.updateVal(.1, "Indexing nodes for tnet")
for nodeIndex, node in enumerate(grph.nodes(data = True), start = 1):
try:
nMode = node[1][modeNameString]
except KeyError:
#too many modes so will fail
modes = [1,2,3]
nMode = 4
if nMode not in modes:
if len(modes) < 2:
modes.append(nMode)
else:
raise RCValueError("Too many modes of '{}' found in the network or one of the nodes was missing its mode. There must be exactly 2 modes.".format(modeNameString))
if nMode == modes[0]:
mode1Set.add(node[0])
node[1][nodeIndexString] = nodeIndex
if len(modes) != 2:
raise RCValueError("Too few modes of '{}' found in the network. There must be exactly 2 modes.".format(modeNameString))
with open(name, 'w', encoding = 'utf-8') as f:
edgesCaller = {'data' : True}
if timeString is not None:
edgesCaller['keys'] = True
for *nodes, eDict in grph.edges(**edgesCaller):
if timeString is not None:
n1, n2, keyVal = nodes
else:
n1, n2 = nodes
count += 1
if count % 1000 == 1:
PBar.updateVal(count/ eMax * .9 + .1, "writing edge: '{}'-'{}'".format(n1, n2))
if n1 in mode1Set:
if n2 in mode1Set:
raise RCValueError("The nodes '{}' and '{}' have an edge and the same type. The network must be purely 2-mode.".format(n1, n2))
elif n2 in mode1Set:
n1, n2 = n2, n1
else:
raise RCValueError("The nodes '{}' and '{}' have an edge and the same type. The network must be purely 2-mode.".format(n1, n2))
if timeString is not None:
eTimeString = '"{}" '.format(keyVal)
else:
eTimeString = ''
if weighted:
f.write("{}{} {} {}\n".format(eTimeString, grph.node[n1][nodeIndexString], grph.node[n2][nodeIndexString], eDict[weightString]))
else:
f.write("{}{} {}\n".format(eTimeString, grph.node[n1][nodeIndexString], grph.node[n2][nodeIndexString]))
        PBar.finish("Done writing tnet file '{}'".format(name))
def Start(self, seed_list: List[str] = None, skip_seeds: bool = False) -> None:
"""
Start connecting to the seed list.
Args:
seed_list: a list of host:port strings if not supplied use list from `protocol.xxx.json`
skip_seeds: skip connecting to seed list
"""
if not seed_list:
seed_list = settings.SEED_LIST
logger.debug("Starting up nodeleader")
if not skip_seeds:
logger.debug("Attempting to connect to seed list...")
for bootstrap in seed_list:
if not is_ip_address(bootstrap):
host, port = bootstrap.split(':')
bootstrap = f"{hostname_to_ip(host)}:{port}"
addr = Address(bootstrap)
self.KNOWN_ADDRS.append(addr)
self.SetupConnection(addr)
logger.debug("Starting up nodeleader: starting peer, mempool, and blockheight check loops")
# check in on peers every 10 seconds
self.start_peer_check_loop()
self.start_memcheck_loop()
self.start_blockheight_loop()
if settings.ACCEPT_INCOMING_PEERS and not self.incoming_server_running:
class OneShotFactory(Factory):
def __init__(self, leader):
self.leader = leader
def buildProtocol(self, addr):
print(f"building new protocol for addr: {addr}")
self.leader.AddKnownAddress(Address(f"{addr.host}:{addr.port}"))
p = NeoNode(incoming_client=True)
p.factory = self
return p
def listen_err(err):
print(f"Failed start listening server for reason: {err.value}")
def listen_ok(value):
self.incoming_server_running = True
logger.debug(f"Starting up nodeleader: setting up listen server on port: {settings.NODE_PORT}")
server_endpoint = TCP4ServerEndpoint(self.reactor, settings.NODE_PORT)
listenport_deferred = server_endpoint.listen(OneShotFactory(leader=self))
listenport_deferred.addCallback(listen_ok)
        listenport_deferred.addErrback(listen_err)
def play_auth(f):
"""
    Injects cookies into the requests call over the route.
:return: route
"""
def wrapper(*args, **kwargs):
self = args[0]
if 'cookies' in kwargs:
raise AttributeError("don't set cookies explicitly")
if 'auth' in kwargs:
raise AttributeError("don't set auth token explicitly")
assert self.is_connected, "not connected, call router.connect(email, password) first"
if self._jwt_auth:
kwargs['auth'] = self._jwt_auth
kwargs['cookies'] = None
elif self._cookies:
kwargs['cookies'] = self._cookies
kwargs['auth'] = None
else:
assert False, "no cookies, no JWT, but connected o_O"
return f(*args, **kwargs)
    return wrapper
def get_ast(token):
"""
Recursively unrolls token attributes into dictionaries (token.children
into lists).
Returns:
a dictionary of token's attributes.
"""
node = {}
# Python 3.6 uses [ordered dicts] [1].
# Put in 'type' entry first to make the final tree format somewhat
# similar to [MDAST] [2].
#
# [1]: https://docs.python.org/3/whatsnew/3.6.html
# [2]: https://github.com/syntax-tree/mdast
node['type'] = token.__class__.__name__
node.update({key: token.__dict__[key] for key in token.__dict__})
if 'header' in node:
node['header'] = get_ast(node['header'])
if 'children' in node:
node['children'] = [get_ast(child) for child in node['children']]
    return node
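
# Hedged usage sketch for get_ast above; it assumes this helper lives alongside the
# mistletoe package (whose Document type produces the token tree being unrolled).
if __name__ == '__main__':
    import json
    from mistletoe import Document
    doc = Document(['# Title\n', '\n', 'Some *emphasised* text.\n'])
    print(json.dumps(get_ast(doc), indent=2, default=str))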
def GetEntries(self, parser_mediator, data=None, **unused_kwargs):
"""Extracts uTorrent active torrents.
This is the main parsing engine for the plugin. It determines if
the selected file is the proper file to parse and extracts current
running torrents.
interface.Process() checks for the given BENCODE_KEYS set, ensures
that it matches, and then passes the bencoded data to this function for
parsing. This plugin then parses the entire set of bencoded data to extract
the variable file-name keys to retrieve their values.
  uTorrent creates a file, resume.dat, and a backup, resume.dat.old, for
  all active torrents. This is typically stored in the user's
application data folder.
These files, at a minimum, contain a '.fileguard' key and a dictionary
with a key name for a particular download with a '.torrent' file
extension.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
data (Optional[dict[str, object]]): bencode data values.
"""
# Walk through one of the torrent keys to ensure it's from a valid file.
for key, value in iter(data.items()):
if not '.torrent' in key:
continue
caption = value.get('caption')
path = value.get('path')
seedtime = value.get('seedtime')
if not caption or not path or seedtime < 0:
raise errors.WrongBencodePlugin(self.NAME)
for torrent, value in iter(data.items()):
if not '.torrent' in torrent:
continue
event_data = UTorrentEventData()
event_data.caption = value.get('caption', None)
event_data.path = value.get('path', None)
# Convert seconds to minutes.
seedtime = value.get('seedtime', None)
event_data.seedtime, _ = divmod(seedtime, 60)
# Create timeline events based on extracted values.
for event_key, event_value in iter(value.items()):
if event_key == 'added_on':
date_time = dfdatetime_posix_time.PosixTime(timestamp=event_value)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
elif event_key == 'completed_on':
date_time = dfdatetime_posix_time.PosixTime(timestamp=event_value)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
parser_mediator.ProduceEventWithEventData(event, event_data)
elif event_key == 'modtimes':
for modtime in event_value:
# Some values are stored as 0, skip those.
if not modtime:
continue
date_time = dfdatetime_posix_time.PosixTime(timestamp=modtime)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
          parser_mediator.ProduceEventWithEventData(event, event_data)
def get(self, resource):
"""
Get a resource into the cache,
:param resource: A :class:`Resource` instance.
:return: The pathname of the resource in the cache.
"""
prefix, path = resource.finder.get_cache_info(resource)
if prefix is None:
result = path
else:
result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
dirname = os.path.dirname(result)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if not os.path.exists(result):
stale = True
else:
stale = self.is_stale(resource, path)
if stale:
# write the bytes of the resource to the cache location
with open(result, 'wb') as f:
f.write(resource.bytes)
    return result
def asDict( self ):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
    return dict((k,toItem(v)) for k,v in item_fn())
def next_frame_ae_range(rhp):
"""Autoencoder world model tuning grid."""
rhp.set_float("dropout", 0.3, 0.5)
rhp.set_int("num_compress_steps", 1, 3)
rhp.set_int("num_hidden_layers", 2, 6)
rhp.set_float("learning_rate_constant", 1., 2.)
rhp.set_float("initializer_gain", 0.8, 1.5)
rhp.set_int("filter_double_steps", 2, 3) | 0.024024 |
async def authorize(self, code: str=None) -> None:
"""Getting a new token from server"""
code = await self.get_code(code)
params = {
'client_id': self.app_id,
'client_secret': self.app_secret,
'redirect_uri': self.redirect_uri,
'code': code
}
response = await self.driver.json(self.CODE_URL, params, self.timeout)
if 'error' in response:
raise VkAuthError(response['error'], response['error_description'], self.CODE_URL, params)
    self.access_token = response['access_token']
def relatedness_wid(wid_pairs, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_REL_API):
'''
Get the semantic relatedness among pairs of entities. Entities are indicated by their
Wikipedia ID (an integer).
:param wid_pairs: either one pair or a list of pairs of Wikipedia IDs.
:param gcube_token: the authentication token provided by the D4Science infrastructure.
:param lang: the Wikipedia language.
:param api: the API endpoint.
'''
    return _relatedness("id", wid_pairs, gcube_token, lang, api)
def get_src_or_dst_path(prompt, count):
"""
Let the user choose a path, and store the value.
:return str _path: Target directory
:return str count: Counter for attempted prompts
"""
_path = ""
print(prompt)
option = input("Option: ")
print("\n")
if option == '1':
# Set the path to the system desktop folder.
logger_directory.info("1: desktop")
_path = os.path.expanduser('~/Desktop')
elif option == '2':
# Set the path to the system downloads folder.
logger_directory.info("2: downloads")
_path = os.path.expanduser('~/Downloads')
elif option == '3':
# Current directory
logger_directory.info("3: current")
_path = os.getcwd()
elif option == '4':
# Open up the GUI browse dialog
logger_directory.info("4: browse ")
_path = browse_dialog_dir()
else:
# Something went wrong. Prompt again. Give a couple tries before defaulting to downloads folder
if count == 2:
logger_directory.warn("too many attempts")
print("Too many failed attempts. Defaulting to current working directory.")
_path = os.getcwd()
else:
count += 1
logger_directory.warn("failed attempts: {}".format(count))
print("Invalid option. Try again.")
    return _path, count
def strip_accents(string):
"""
Strip all the accents from the string
"""
return u''.join(
(character for character in unicodedata.normalize('NFD', string)
         if unicodedata.category(character) != 'Mn'))
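
# Hedged usage sketch for strip_accents above; it assumes the `unicodedata` import used by
# the function is present. NFD decomposition separates base characters from combining
# marks (category 'Mn'), which are then dropped.
if __name__ == '__main__':
    print(strip_accents(u'Crème brûlée'))  # -> Creme brulee
    print(strip_accents(u'naïve café'))    # -> naive cafe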
def _paged_search_ext_s(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None, timeout=-1, sizelimit=0, page_size=10):
"""
Behaves similarly to LDAPObject.search_ext_s() but internally uses the
simple paged results control to retrieve search results in chunks.
Taken from the python-ldap paged_search_ext_s.py demo, showing how to use
the paged results control: https://bitbucket.org/jaraco/python-ldap/
"""
request_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
results = []
while True:
msgid = self.conn.search_ext(base, scope, filterstr=filterstr, attrlist=attrlist, attrsonly=attrsonly,
serverctrls=(serverctrls or []) + [request_ctrl], clientctrls=clientctrls,
timeout=timeout, sizelimit=sizelimit)
result_type, result_data, result_msgid, result_ctrls = self.conn.result3(msgid)
results.extend(result_data)
# Extract the simple paged results response control
paged_ctrls = [c for c in result_ctrls if c.controlType == SimplePagedResultsControl.controlType]
if paged_ctrls and paged_ctrls[0].cookie:
# Copy cookie from response control to request control
request_ctrl.cookie = paged_ctrls[0].cookie
else:
break
    return results
def read_full(stream):
"""Read the full contents of the given stream into memory.
:return:
A future containing the complete stream contents.
"""
assert stream, "stream is required"
chunks = []
chunk = yield stream.read()
while chunk:
chunks.append(chunk)
chunk = yield stream.read()
    raise tornado.gen.Return(b''.join(chunks))
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
The maintype and substype are tokens. Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ctype.defects.append(errors.HeaderMissingRequiredValue(
"Missing content type specification"))
return ctype
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content maintype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
    # XXX: If we really want to follow the formal grammar we should make
    # maintype and subtype specialized TokenLists here. Probably not worth it.
if not value or value[0] != '/':
ctype.defects.append(errors.InvalidHeaderDefect(
"Invalid content type"))
if value:
_find_mime_parameters(ctype, value)
return ctype
ctype.maintype = token.value.strip().lower()
ctype.append(ValueTerminal('/', 'content-type-separator'))
value = value[1:]
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content subtype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
ctype.subtype = token.value.strip().lower()
if not value:
return ctype
if value[0] != ';':
ctype.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content type, but "
"found {!r}".format(value)))
# The RFC requires that a syntactically invalid content-type be treated
# as text/plain. Perhaps we should postel this, but we should probably
# only do that if we were checking the subtype value against IANA.
del ctype.maintype, ctype.subtype
_find_mime_parameters(ctype, value)
return ctype
ctype.append(ValueTerminal(';', 'parameter-separator'))
ctype.append(parse_mime_parameters(value[1:]))
    return ctype
def _set_temp(self, v, load=False):
"""
Setter method for temp, mapped from YANG variable /system_monitor/temp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_temp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_temp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=temp.temp, is_container='container', presence=False, yang_name="temp", rest_name="temp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold for component:TEMPERATURE SENSOR', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """temp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=temp.temp, is_container='container', presence=False, yang_name="temp", rest_name="temp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold for component:TEMPERATURE SENSOR', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)""",
})
self.__temp = t
if hasattr(self, '_set'):
self._set() | 0.005981 |
def build_highlight_objects(html, highlights, uniformize_html=True):
'''converts a dict of pretty_name --> [tuple(string, score), ...] to
`Highlight` objects as specified above.
'''
if uniformize_html:
try:
html = uniform_html(html.encode('utf-8')).decode('utf-8')
except Exception, exc:
logger.info('failed to get uniform_html(%d bytes) --> %s',
len(html), exc, exc_info=True)
html = None
highlight_objects = []
for category, phrase_scores in highlights.iteritems():
for (phrase, score) in phrase_scores:
hl = dict(
score=score,
category=category,
)
ranges = make_xpath_ranges(html, phrase)
if ranges:
hl['xranges'] = [{'range': r} for r in ranges]
elif phrase in html:
hl['strings'] = [phrase]
else:
hl['regexes'] = [{
'regex': phrase,
'flags': 'i',
}]
highlight_objects.append(hl)
return highlight_objects | 0.000873 |
def get_label(self):
"""Get the label of the Dataset.
Returns
-------
label : numpy array or None
The label information from the Dataset.
"""
if self.label is None:
self.label = self.get_field('label')
return self.label | 0.006667 |
def stats(self, request):
'''Get stats for the provided request.
:param request dict: A search request that also contains the 'interval'
property.
:returns: :py:class:`planet.api.models.JSON`
:raises planet.api.exceptions.APIException: On API error.
'''
# work-around for API bug
request = _patch_stats_request(request)
body = json.dumps(request)
return self.dispatcher.response(models.Request(
self._url('data/v1/stats'), self.auth,
body_type=models.JSON, data=body, method='POST')).get_body() | 0.003226 |
def ConvCnstrMODOptionsDefaults(method='fista'):
"""Get defaults dict for the ConvCnstrMOD class specified by the
``method`` parameter.
"""
dflt = copy.deepcopy(ccmod_class_label_lookup(method).Options.defaults)
if method == 'fista':
dflt.update({'MaxMainIter': 1, 'BackTrack':
{'gamma_u': 1.2, 'MaxIter': 50}})
else:
dflt.update({'MaxMainIter': 1, 'AutoRho':
{'Period': 10, 'AutoScaling': False,
'RsdlRatio': 10.0, 'Scaling': 2.0,
'RsdlTarget': 1.0}})
return dflt | 0.001681 |
def calc_allowedremoterelieve_v1(self):
"""Get the allowed remote relieve of the last simulation step.
Required log sequence:
|LoggedAllowedRemoteRelieve|
Calculated flux sequence:
|AllowedRemoteRelieve|
Basic equation:
:math:`AllowedRemoteRelieve = LoggedAllowedRemoteRelieve`
Example:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> logs.loggedallowedremoterelieve = 2.0
>>> model.calc_allowedremoterelieve_v1()
>>> fluxes.allowedremoterelieve
allowedremoterelieve(2.0)
"""
flu = self.sequences.fluxes.fastaccess
log = self.sequences.logs.fastaccess
flu.allowedremoterelieve = log.loggedallowedremoterelieve[0] | 0.00137 |
def undefine(self):
"""Undefine the Global.
Python equivalent of the CLIPS undefglobal command.
The object becomes unusable after this method has been called.
"""
if lib.EnvUndefglobal(self._env, self._glb) != 1:
raise CLIPSError(self._env)
self._env = None | 0.006231 |
def _algo_check_for_section_problems(self, ro_rw_zi):
"""! @brief Return a string describing any errors with the layout or None if good"""
s_ro, s_rw, s_zi = ro_rw_zi
if s_ro is None:
return "RO section is missing"
if s_rw is None:
return "RW section is missing"
if s_zi is None:
return "ZI section is missing"
if s_ro.start != 0:
return "RO section does not start at address 0"
if s_ro.start + s_ro.length != s_rw.start:
return "RW section does not follow RO section"
if s_rw.start + s_rw.length != s_zi.start:
return "ZI section does not follow RW section"
return None | 0.004202 |
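A small sketch of feeding the layout check above a valid section tuple; the Section namedtuple and the addresses are hypothetical stand-ins for the algo's real section objects. Since the method never touches self, it can be called as a plain function here.
from collections import namedtuple

Section = namedtuple('Section', 'start length')
# Hypothetical layout: RO at 0, RW immediately after, ZI immediately after RW.
ro, rw, zi = Section(0, 0x20), Section(0x20, 0x10), Section(0x30, 0x08)
print(_algo_check_for_section_problems(None, (ro, rw, zi)))  # None -> layout is valid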
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: text
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
} | 0.001768 |
def load_stream(self, key, binary=False):
"""
:param str key: name of stream to load
:param bool binary: Whether we should treat it as binary
:return:
"""
with open(os.path.join(self.uri, key), 'rb' if binary else 'r') as f:
yield f | 0.006849 |
def get_translation_functions(package_name: str, names: Tuple[str, ...] = ('gettext',)):
"""finds and installs translation functions for package"""
translation = get_translation_for(package_name)
return [getattr(translation, x) for x in names] | 0.007843 |
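A minimal usage sketch; it assumes get_translation_for resolves a gettext-style translation object for the named package, and the package name itself is hypothetical.
# Hypothetical usage: fetch gettext and ngettext for a package.
gettext, ngettext = get_translation_functions('myapp', names=('gettext', 'ngettext'))
print(gettext('Hello'))                        # translated singular form
print(ngettext('%d file', '%d files', 2) % 2)  # translated plural form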
def get(self, session, words, time='DAY|WEEK|MONTH|3MONTH', aFilter='PV|CLICK|AVGCPC|COMPETITION', nick=None):
'''taobao.simba.insight.wordsbase.get
===================================
        Query base data for keywords'''
request = TOPRequest('taobao.simba.insight.wordsbase.get')
request['words'] = words
request['time'] = time
request['filter'] = aFilter
        if nick is not None: request['nick'] = nick
self.create(self.execute(request, session))
return self.result | 0.011742 |
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation.
"""
line = "{sent_id} ||| {target} ||| {score:f} ||| {source} ||| {source_len:d} ||| {target_len:d}\n"
self.stream.write(line.format(sent_id=t_input.sentence_id,
target=" ".join(t_output.tokens),
score=t_output.score,
source=" ".join(t_input.tokens),
source_len=len(t_input.tokens),
target_len=len(t_output.tokens)))
attention_matrix = t_output.attention_matrix.T
for i in range(0, attention_matrix.shape[0]):
attention_vector = attention_matrix[i]
self.stream.write(" ".join(["%f" % value for value in attention_vector]))
self.stream.write("\n")
self.stream.write("\n")
self.stream.flush() | 0.005843 |
def _from_dict(cls, _dict):
"""Initialize a SyntaxResult object from a json dictionary."""
args = {}
if 'tokens' in _dict:
args['tokens'] = [
TokenResult._from_dict(x) for x in (_dict.get('tokens'))
]
if 'sentences' in _dict:
args['sentences'] = [
SentenceResult._from_dict(x) for x in (_dict.get('sentences'))
]
return cls(**args) | 0.004435 |
def netlsd(inp, timescales=np.logspace(-2, 2, 250), kernel='heat', eigenvalues='auto', normalization='empty', normalized_laplacian=True):
"""
Computes NetLSD signature from some given input, timescales, and normalization.
Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
inp: obj
2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
kernel : str
Either 'heat' or 'wave'. Type of a kernel to use for computation.
    eigenvalues : str, int, or tuple
        Number of eigenvalues to compute / use for approximation.
If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the faithful usage. 'full' computes all eigenvalues.
If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.
If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
        If None or any other value, return the unnormalized heat kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
NetLSD signature
"""
    if kernel not in {'heat', 'wave'}:
        raise AttributeError('Unrecognized kernel type: expected one of [\'heat\', \'wave\'], got {0}'.format(kernel))
    if not isinstance(normalized_laplacian, bool):
        raise AttributeError('Unknown Laplacian type: expected bool, got {0}'.format(normalized_laplacian))
    if not isinstance(eigenvalues, (int, tuple, str)):
        raise AttributeError('Unrecognized requested eigenvalue number: expected type of [\'str\', \'tuple\', or \'int\'], got {0}'.format(type(eigenvalues)))
    if not isinstance(timescales, np.ndarray):
        raise AttributeError('Unrecognized timescales data type: expected np.ndarray, got {0}'.format(type(timescales)))
    if timescales.ndim != 1:
        raise AttributeError('Unrecognized timescales dimensionality: expected a vector, got {0}-d array'.format(timescales.ndim))
    if normalization not in {'complete', 'empty', 'none', True, False, None}:
        if not isinstance(normalization, np.ndarray):
            raise AttributeError('Unrecognized normalization type: expected one of [\'complete\', \'empty\', None or np.ndarray], got {0}'.format(normalization))
        if normalization.ndim != 1:
            raise AttributeError('Unrecognized normalization dimensionality: expected a vector, got {0}-d array'.format(normalization.ndim))
        if timescales.shape[0] != normalization.shape[0]:
            raise AttributeError('Unrecognized normalization dimensionality: expected {0}-length vector, got length {1}'.format(timescales.shape[0], normalization.shape[0]))
eivals = check_1d(inp)
if eivals is None:
mat = check_2d(inp)
if mat is None:
mat = graph_to_laplacian(inp, normalized_laplacian)
if mat is None:
            raise ValueError('Unrecognized input type: expected one of [\'np.ndarray\', \'scipy.sparse\', \'networkx.Graph\', \'graph_tool.Graph\', or \'igraph.Graph\'], got {0}'.format(type(inp)))
else:
mat = mat_to_laplacian(inp, normalized_laplacian)
eivals = eigenvalues_auto(mat, eigenvalues)
if kernel == 'heat':
return _hkt(eivals, timescales, normalization, normalized_laplacian)
else:
return _wkt(eivals, timescales, normalization, normalized_laplacian) | 0.005272 |
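A short sketch of calling netlsd on a small adjacency matrix, one of the input types the docstring accepts; the graph here is purely illustrative and the module's own helpers (check_2d, mat_to_laplacian, eigenvalues_auto) are assumed to be defined alongside the function.
import numpy as np

# Hypothetical usage: heat-trace signature of a 4-node path graph.
A = np.array([[0, 1, 0, 0],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [0, 0, 1, 0]], dtype=float)
signature = netlsd(A, timescales=np.logspace(-2, 2, 32), kernel='heat')
print(signature.shape)  # (32,) -- one trace value per timescale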
def _space_examples(self, list_examples, rows, section_value):
""" makes the example text """
examples_with_index = []
for i, _ in list(enumerate(list_examples)):
if len(list_examples[i]) > 1:
examples_with_index.append("[" + str(i + 1) + "] " + list_examples[i][0] +
list_examples[i][1])
example = "".join(exam for exam in examples_with_index)
num_newline = example.count('\n')
page_number = ''
if num_newline > rows * PART_SCREEN_EXAMPLE and rows > PART_SCREEN_EXAMPLE * 10:
len_of_excerpt = math.floor(float(rows) * PART_SCREEN_EXAMPLE)
group = example.split('\n')
end = int(section_value * len_of_excerpt)
begin = int((section_value - 1) * len_of_excerpt)
if end < num_newline:
example = '\n'.join(group[begin:end]) + "\n"
else:
# default chops top off
example = '\n'.join(group[begin:]) + "\n"
while ((section_value - 1) * len_of_excerpt) > num_newline:
self.example_page -= 1
page_number = '\n' + str(section_value) + "/" + str(int(math.ceil(num_newline / len_of_excerpt)))
return example + page_number + ' CTRL+Y (^) CTRL+N (v)' | 0.00372 |
def inplace_reload(method):
"""
Executes the wrapped function and reloads the object
with data returned from the server.
"""
# noinspection PyProtectedMember
def wrapped(obj, *args, **kwargs):
in_place = True if kwargs.get('inplace') in (True, None) else False
api_object = method(obj, *args, **kwargs)
if in_place and api_object:
obj._data = api_object._data
obj._dirty = api_object._dirty
obj._data.fetched = False
return obj
elif api_object:
return api_object
else:
return obj
return wrapped | 0.001572 |
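A sketch of how the decorator above might wrap an API method; the Resource class, its _Data payload, and the save() behaviour are hypothetical stand-ins for a real API object whose server call returns a fresh copy.
class _Data(dict):
    """Hypothetical dict-like payload that also tracks whether it was fetched."""
    fetched = True

class Resource:
    def __init__(self, payload):
        self._data = _Data(payload)
        self._dirty = {}

    @inplace_reload
    def save(self, inplace=True):
        # Pretend the server echoed back an updated copy of this object.
        return Resource(dict(self._data, saved=True))

obj = Resource({'name': 'demo'})
obj.save()                       # default: obj._data is refreshed in place
other = obj.save(inplace=False)  # returns the new object without mutating obj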
def enable_one_shot_hardware_breakpoint(self, dwThreadId, address):
"""
Enables the hardware breakpoint at the given address for only one shot.
@see:
L{define_hardware_breakpoint},
L{has_hardware_breakpoint},
L{get_hardware_breakpoint},
L{enable_hardware_breakpoint},
L{disable_hardware_breakpoint}
L{erase_hardware_breakpoint},
@type dwThreadId: int
@param dwThreadId: Thread global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
t = self.system.get_thread(dwThreadId)
bp = self.get_hardware_breakpoint(dwThreadId, address)
if bp.is_running():
self.__del_running_bp_from_all_threads(bp)
bp.one_shot(None, t) | 0.003659 |
def convertforoutput(self,outputfile):
"""Convert from one of the source formats into target format. Relevant if converters are used in OutputTemplates. Outputfile is a CLAMOutputFile instance."""
super(CharEncodingConverter,self).convertforoutput(outputfile)
return withheaders( flask.make_response( ( line.encode(self.charset) for line in outputfile ) ) , 'text/plain; charset=' + self.charset) | 0.028504 |
def call(self, transaction=None, block_identifier='latest'):
"""
Execute a contract function call using the `eth_call` interface.
This method prepares a ``Caller`` object that exposes the contract
functions and public variables as callable Python functions.
Reading a public ``owner`` address variable example:
.. code-block:: python
ContractFactory = w3.eth.contract(
abi=wallet_contract_definition["abi"]
)
# Not a real contract address
contract = ContractFactory("0x2f70d3d26829e412A602E83FE8EeBF80255AEeA5")
# Read "owner" public variable
addr = contract.functions.owner().call()
:param transaction: Dictionary of transaction info for web3 interface
:return: ``Caller`` object that has contract public functions
and variables exposed as Python methods
"""
if transaction is None:
call_transaction = {}
else:
call_transaction = dict(**transaction)
if 'data' in call_transaction:
raise ValueError("Cannot set data in call transaction")
if self.address:
call_transaction.setdefault('to', self.address)
if self.web3.eth.defaultAccount is not empty:
call_transaction.setdefault('from', self.web3.eth.defaultAccount)
if 'to' not in call_transaction:
if isinstance(self, type):
raise ValueError(
"When using `Contract.[methodtype].[method].call()` from"
" a contract factory you "
"must provide a `to` address with the transaction"
)
else:
raise ValueError(
"Please ensure that this contract instance has an address."
)
block_id = parse_block_identifier(self.web3, block_identifier)
return call_contract_function(
self.web3,
self.address,
self._return_data_normalizers,
self.function_identifier,
call_transaction,
block_id,
self.contract_abi,
self.abi,
*self.args,
**self.kwargs
) | 0.001315 |
def _build_color_variants(cls):
""" Build colorized variants of all frames and return a list of
all frame object names.
"""
# Get the basic frame types first.
frametypes = cls.sets(registered=False)
_colornames = [
# 'black', disabled for now, it won't show on my terminal.
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white',
]
_colornames.extend('light{}'.format(s) for s in _colornames[:])
for colorname in _colornames:
for framesobj in frametypes:
framename = '{}_{}'.format(framesobj.name, colorname)
cls.register(
framesobj.as_colr(fore=colorname),
name=framename,
) | 0.001312 |
def assign(self, variables=None, **variables_kwargs):
"""Assign new data variables to a Dataset, returning a new object
with all the original variables in addition to the new ones.
Parameters
----------
variables : mapping, value pairs
Mapping from variables names to the new values. If the new values
are callable, they are computed on the Dataset and assigned to new
data variables. If the values are not callable, (e.g. a DataArray,
scalar, or array), they are simply assigned.
**variables_kwargs:
The keyword arguments form of ``variables``.
One of variables or variables_kwarg must be provided.
Returns
-------
ds : Dataset
A new Dataset with the new variables in addition to all the
existing variables.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign`` is
possible, but you cannot reference other variables created within the
same ``assign`` call.
See Also
--------
pandas.DataFrame.assign
"""
variables = either_dict_or_kwargs(
variables, variables_kwargs, 'assign')
data = self.copy()
# do all calculations first...
results = data._calc_assign_results(variables)
# ... and then assign
data.update(results)
return data | 0.001239 |
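A quick usage sketch mirroring xarray's public Dataset.assign; the variable names are illustrative.
import numpy as np
import xarray as xr

ds = xr.Dataset({'temperature_c': ('x', np.array([10.0, 20.0, 30.0]))})
# Callables are evaluated against the dataset; plain values are assigned as-is.
ds2 = ds.assign(temperature_f=lambda d: d.temperature_c * 9 / 5 + 32)
print(list(ds2.data_vars))  # ['temperature_c', 'temperature_f']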
def _add_page(self, text):
""" Helper function for PDFText, to have the document
add a page, and retry adding a large block of
            text that would otherwise have been too long for the
page.
"""
save_cursor = self.parent.document.page.cursor.copy()
save_cursor.x_reset()
save_cursor.y_reset()
self.parent.document.add_page()
self.parent.document.set_cursor(save_cursor)
self.parent.document.add_text(text) | 0.003906 |
async def async_fetch(url: str, **kwargs) -> Selector:
"""
Do the fetch in an async style.
Args:
url (str): The url of the site.
Returns:
Selector: allows you to select parts of HTML text using CSS or XPath expressions.
"""
kwargs.setdefault('headers', DEFAULT_HEADERS)
async with aiohttp.ClientSession(**kwargs) as ses:
async with ses.get(url, **kwargs) as res:
html = await res.text()
tree = Selector(text=html)
return tree | 0.003883 |
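A minimal driver for the coroutine above; the URL is a placeholder and the CSS query assumes parsel's Selector API, so treat both as illustrative.
import asyncio

async def demo():
    # Placeholder URL; any reachable HTML page would do.
    tree = await async_fetch('https://example.com')
    return tree.css('title::text').get()

print(asyncio.run(demo()))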
def _get_facvar(self, polynomial):
"""Return dense vector representation of a polynomial. This function is
nearly identical to __push_facvar_sparse, but instead of pushing
sparse entries to the constraint matrices, it returns a dense
vector.
"""
facvar = [0] * (self.n_vars + 1)
# Preprocess the polynomial for uniform handling later
if is_number_type(polynomial):
facvar[0] = polynomial
return facvar
polynomial = polynomial.expand()
if polynomial.is_Mul:
elements = [polynomial]
else:
elements = polynomial.as_coeff_mul()[1][0].as_coeff_add()[1]
for element in elements:
results = self._get_index_of_monomial(element)
for (k, coeff) in results:
facvar[k] += coeff
return facvar | 0.002296 |
def has_option(self, target):
"""
Return ``True`` if the actual arguments include
the specified ``target`` option or,
if ``target`` is a list of options,
at least one of them.
:param target: the option or a list of options
:type target: Unicode string or list of Unicode strings
:rtype: bool
"""
if isinstance(target, list):
target_set = set(target)
else:
target_set = set([target])
return len(target_set & set(self.actual_arguments)) > 0 | 0.003571 |
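A tiny sketch of the option check; FakeCommand is a hypothetical holder for actual_arguments that simply borrows the function above as its method.
class FakeCommand:
    """Hypothetical holder for parsed CLI arguments."""
    def __init__(self, args):
        self.actual_arguments = args
    has_option = has_option  # borrow the function defined above as a method

cmd = FakeCommand(['--verbose', '--output', 'out.txt'])
print(cmd.has_option('--verbose'))              # True
print(cmd.has_option(['--quiet', '--output']))  # True: at least one matches
print(cmd.has_option('--quiet'))                # False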
def run_all(logdir, verbose=False):
"""Perform random search over the hyperparameter space.
Arguments:
logdir: The top-level directory into which to write data. This
directory should be empty or nonexistent.
verbose: If true, print out each run's name as it begins.
"""
data = prepare_data()
rng = random.Random(0)
base_writer = tf.summary.create_file_writer(logdir)
with base_writer.as_default():
experiment = hp.Experiment(hparams=HPARAMS, metrics=METRICS)
experiment_string = experiment.summary_pb().SerializeToString()
tf.summary.experimental.write_raw_pb(experiment_string, step=0)
base_writer.flush()
base_writer.close()
sessions_per_group = 2
num_sessions = flags.FLAGS.num_session_groups * sessions_per_group
session_index = 0 # across all session groups
for group_index in xrange(flags.FLAGS.num_session_groups):
hparams = {h: sample_uniform(h.domain, rng) for h in HPARAMS}
hparams_string = str(hparams)
group_id = hashlib.sha256(hparams_string.encode("utf-8")).hexdigest()
for repeat_index in xrange(sessions_per_group):
session_id = str(session_index)
session_index += 1
if verbose:
print(
"--- Running training session %d/%d"
% (session_index, num_sessions)
)
print(hparams_string)
print("--- repeat #: %d" % (repeat_index + 1))
run(
data=data,
base_logdir=logdir,
session_id=session_id,
group_id=group_id,
hparams=hparams,
) | 0.00969 |
def item_show(item, item_id=None, item_type=None, show='show', extra_args=None, cibfile=None):
'''
Show an item via pcs command
(mainly for use with the pcs state module)
item
config, property, resource, constraint etc.
item_id
id of the item
item_type
item type
show
show command (probably None, default: show)
extra_args
additional options for the pcs command
cibfile
use cibfile instead of the live CIB
'''
cmd = ['pcs']
if isinstance(cibfile, six.string_types):
cmd += ['-f', cibfile]
if isinstance(item, six.string_types):
cmd += [item]
elif isinstance(item, (list, tuple)):
cmd += item
# constraint command follows a different order
if item in ['constraint']:
cmd += [item_type]
if isinstance(show, six.string_types):
cmd += [show]
elif isinstance(show, (list, tuple)):
cmd += show
if isinstance(item_id, six.string_types):
cmd += [item_id]
if isinstance(extra_args, (list, tuple)):
cmd += extra_args
# constraint command only shows id, when using '--full'-parameter
if item in ['constraint']:
if not isinstance(extra_args, (list, tuple)) or '--full' not in extra_args:
cmd += ['--full']
return __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) | 0.002843 |
def _pick_statement(self, block_address, stmt_idx):
"""
Include a statement in the final slice.
:param int block_address: Address of the basic block.
:param int stmt_idx: Statement ID.
"""
# TODO: Support context-sensitivity
# Sanity check
if not isinstance(block_address, int):
raise AngrBackwardSlicingError("Invalid block address %s." % block_address)
if not isinstance(stmt_idx, int):
raise AngrBackwardSlicingError("Invalid statement ID %s." % stmt_idx)
self.chosen_statements[block_address].add(stmt_idx) | 0.00638 |
def capfirst(x):
'''Capitalise the first letter of ``x``.
'''
x = to_string(x).strip()
if x:
return x[0].upper() + x[1:].lower()
else:
return x | 0.005587 |
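A behaviour sketch for capfirst, assuming the module's to_string helper performs a str-like coercion.
print(capfirst('  hello WORLD  '))  # 'Hello world'
print(capfirst(''))                 # '' (empty input is returned unchanged)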
def _update_external_tool(self, context, context_id, external_tool_id,
json_data):
"""
Update the external tool identified by external_tool_id with the passed
json data.
context is either COURSES_API or ACCOUNTS_API.
context_id is the course_id or account_id, depending on context
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update
"""
url = context.format(context_id) + "/external_tools/{}".format(
external_tool_id)
return self._put_resource(url, body=json_data) | 0.004862 |
def main(self, signature=''):
"""
A decorator that is used to register the main function with the given
`signature`::
@app.main()
def main(context):
# do something
pass
The main function is called, after any options and if no command has
been called.
"""
signature = Signature.from_string(signature, option=False)
def decorator(function):
if self.main_func is not None:
raise RuntimeError('main is already defined')
try:
function = annotations()(function)
except RuntimeError:
pass
self.main_func = function
self.main_signature = signature
if function.__doc__:
self.description = textwrap.dedent(function.__doc__).strip()
return function
return decorator | 0.002141 |
def setup_injection_workflow(workflow, output_dir=None,
inj_section_name='injections', exttrig_file=None,
tags =None):
"""
This function is the gateway for setting up injection-generation jobs in a
workflow. It should be possible for this function to support a number
of different ways/codes that could be used for doing this, however as this
will presumably stay as a single call to a single code (which need not be
    inspinj) there are currently no subfunctions in this module.
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the coincidence jobs will be added to.
output_dir : path
The directory in which injection files will be stored.
inj_section_name : string (optional, default='injections')
The string that corresponds to the option describing the exe location
in the [executables] section of the .ini file and that corresponds to
the section (and sub-sections) giving the options that will be given to
the code at run time.
tags : list of strings (optional, default = [])
A list of the tagging strings that will be used for all jobs created
by this call to the workflow. This will be used in output names.
Returns
--------
inj_files : pycbc.workflow.core.FileList
The list of injection files created by this call.
inj_tags : list of strings
The tag corresponding to each injection file and used to uniquely
identify them. The FileList class contains functions to search
based on tags.
"""
if tags is None:
tags = []
logging.info("Entering injection module.")
make_analysis_dir(output_dir)
# Get full analysis segment for output file naming
full_segment = workflow.analysis_time
ifos = workflow.ifos
# Identify which injections to do by presence of sub-sections in
# the configuration file
inj_tags = []
inj_files = FileList([])
for section in workflow.cp.get_subsections(inj_section_name):
inj_tag = section.upper()
curr_tags = tags + [inj_tag]
# FIXME: Remove once fixed in pipedown
# TEMPORARILY we require inj tags to end in "INJ"
if not inj_tag.endswith("INJ"):
err_msg = "Currently workflow requires injection names to end with "
err_msg += "a inj suffix. Ie. bnslininj or bbhinj. "
err_msg += "%s is not good." %(inj_tag.lower())
raise ValueError(err_msg)
# Parse for options in ini file
injection_method = workflow.cp.get_opt_tags("workflow-injections",
"injections-method",
curr_tags)
if injection_method in ["IN_WORKFLOW", "AT_RUNTIME"]:
# FIXME: Add ability to specify different exes
inj_job = LalappsInspinjExecutable(workflow.cp, inj_section_name,
out_dir=output_dir, ifos='HL',
tags=curr_tags)
node = inj_job.create_node(full_segment)
if injection_method == "AT_RUNTIME":
workflow.execute_node(node)
else:
workflow.add_node(node)
inj_file = node.output_files[0]
inj_files.append(inj_file)
elif injection_method == "PREGENERATED":
injectionFilePath = workflow.cp.get_opt_tags("workflow-injections",
"injections-pregenerated-file", curr_tags)
injectionFilePath = resolve_url(injectionFilePath)
file_url = urlparse.urljoin('file:',
urllib.pathname2url(injectionFilePath))
inj_file = File('HL', 'PREGEN_inj_file', full_segment, file_url,
tags=curr_tags)
inj_file.PFN(injectionFilePath, site='local')
inj_files.append(inj_file)
elif injection_method in ["IN_COH_PTF_WORKFLOW", "AT_COH_PTF_RUNTIME"]:
inj_job = LalappsInspinjExecutable(workflow.cp, inj_section_name,
out_dir=output_dir, ifos=ifos,
tags=curr_tags)
node = inj_job.create_node(full_segment, exttrig_file)
if injection_method == "AT_COH_PTF_RUNTIME":
workflow.execute_node(node)
else:
workflow.add_node(node)
inj_file = node.output_files[0]
if workflow.cp.has_option("workflow-injections",
"em-bright-only"):
em_filter_job = PycbcDarkVsBrightInjectionsExecutable(
workflow.cp,
'em_bright_filter',
tags=curr_tags,
out_dir=output_dir,
ifos=ifos)
node = em_filter_job.create_node(inj_file, full_segment,
curr_tags)
if injection_method == "AT_COH_PTF_RUNTIME":
workflow.execute_node(node)
else:
workflow.add_node(node)
inj_file = node.output_files[0]
if workflow.cp.has_option("workflow-injections",
"do-jitter-skyloc"):
jitter_job = LigolwCBCJitterSkylocExecutable(workflow.cp,
'jitter_skyloc',
tags=curr_tags,
out_dir=output_dir,
ifos=ifos)
node = jitter_job.create_node(inj_file, full_segment, curr_tags)
if injection_method == "AT_COH_PTF_RUNTIME":
workflow.execute_node(node)
else:
workflow.add_node(node)
inj_file = node.output_files[0]
if workflow.cp.has_option("workflow-injections",
"do-align-total-spin"):
align_job = LigolwCBCAlignTotalSpinExecutable(workflow.cp,
'align_total_spin', tags=curr_tags, out_dir=output_dir,
ifos=ifos)
node = align_job.create_node(inj_file, full_segment, curr_tags)
if injection_method == "AT_COH_PTF_RUNTIME":
workflow.execute_node(node)
else:
workflow.add_node(node)
inj_file = node.output_files[0]
inj_files.append(inj_file)
else:
err = "Injection method must be one of IN_WORKFLOW, "
err += "AT_RUNTIME or PREGENERATED. Got %s." % (injection_method)
raise ValueError(err)
inj_tags.append(inj_tag)
logging.info("Leaving injection module.")
return inj_files, inj_tags | 0.001509 |
def _str_desc(self, reader):
"""String containing information about the current GO DAG."""
data_version = reader.data_version
if data_version is not None:
data_version = data_version.replace("releases/", "")
desc = "{OBO}: fmt({FMT}) rel({REL}) {N:,} GO Terms".format(
OBO=reader.obo_file, FMT=reader.format_version,
REL=data_version, N=len(self))
if reader.optobj:
desc = "{D}; optional_attrs({A})".format(D=desc, A=" ".join(sorted(reader.optobj.optional_attrs)))
return desc | 0.005245 |
def instant_articles(self, **kwargs):
"""
QuerySet including all published content approved for instant articles.
Instant articles are configured via FeatureType. FeatureType.instant_article = True.
"""
eqs = self.search(**kwargs).sort('-last_modified', '-published')
return eqs.filter(InstantArticle()) | 0.008523 |
def theme_color(self):
"""
A member of :ref:`MsoThemeColorIndex` or |None| if no theme color is
specified. When :attr:`type` is `MSO_COLOR_TYPE.THEME`, the value of
this property will always be a member of :ref:`MsoThemeColorIndex`.
When :attr:`type` has any other value, the value of this property is
|None|.
Assigning a member of :ref:`MsoThemeColorIndex` causes :attr:`type`
to become `MSO_COLOR_TYPE.THEME`. Any existing RGB value is retained
but ignored by Word. Assigning |None| causes any color specification
to be removed such that the effective color is inherited from the
style hierarchy.
"""
color = self._color
if color is None or color.themeColor is None:
return None
return color.themeColor | 0.002389 |