text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def list_supported_categories():
"""
Prints a list of supported external account category names.
For example, "AWS" is a supported external account category name.
"""
categories = get_supported_categories(api)
category_names = [category.name for category in categories]
print ("Supported account categories by name: {0}".format(
COMMA_WITH_SPACE.join(map(str, category_names)))) | 0.015152 |
def feature_assert(*feas):
"""
Takes some feature patterns (like in `feature_needs`).
Raises a fuse.FuseError if your underlying FUSE lib fails
to have some of the matching features.
(Note: use a ``has_foo`` type feature assertion only if lib support
for method ``foo`` is *necessary* for your fs. Don't use this assertion
just because your fs implements ``foo``. The usefulness of ``has_foo``
is limited by the fact that we can't guarantee that your FUSE kernel
module also supports ``foo``.)
"""
fav = APIVersion()
for fea in feas:
fn = feature_needs(fea)
if fav < fn:
raise FuseError(
"FUSE API version %d is required for feature `%s' but only %d is available" % \
(fn, str(fea), fav)) | 0.003774 |
def remove_file(self, filepath):
"""
Removes the DataFrameModel from being registered.
:param filepath: (str)
The filepath to delete from the DataFrameModelManager.
:return: None
"""
self._models.pop(filepath)
self._updates.pop(filepath, None)
self.signalModelDestroyed.emit(filepath) | 0.005435 |
def run_step(context):
"""Get, set, unset $ENVs.
Context is a dictionary or dictionary-like. context is mandatory.
Input context is:
env:
get: {dict}
set: {dict}
unset: [list]
At least one of env's sub-keys (get, set or unset) must exist.
This step will run whatever combination of Get, Set and Unset you specify.
Regardless of combination, execution order is Get, Set, Unset.
"""
logger.debug("started")
assert context, f"context must have value for {__name__}"
deprecated(context)
context.assert_key_has_value('env', __name__)
found_get = env_get(context)
found_set = env_set(context)
found_unset = env_unset(context)
# at least 1 of envGet, envSet or envUnset must exist in context
if not (found_get or found_set or found_unset):
raise KeyNotInContextError(
"context must contain any combination of "
"env['get'], env['set'] or env['unset'] for "
f"{__name__}")
logger.debug("done") | 0.000953 |
def n1qlQueryAll(self, *args, **kwargs):
"""
Execute a N1QL query, retrieving all rows.
This method returns a :class:`Deferred` object which is executed
with a :class:`~.N1QLRequest` object. The object may be iterated
over to yield the rows in the result set.
This method is similar to :meth:`~couchbase.bucket.Bucket.n1ql_query`
in its arguments.
Example::
def handler(req):
for row in req:
# ... handle row
d = cb.n1qlQueryAll('SELECT * from `travel-sample` WHERE city=$1',
'Reno')
d.addCallback(handler)
:return: A :class:`Deferred`
.. seealso:: :meth:`~couchbase.bucket.Bucket.n1ql_query`
"""
if not self.connected:
cb = lambda x: self.n1qlQueryAll(*args, **kwargs)
return self.connect().addCallback(cb)
kwargs['itercls'] = BatchedN1QLRequest
o = super(RawBucket, self).n1ql_query(*args, **kwargs)
o.start()
return o._getDeferred() | 0.00274 |
def set_sensor_thresholds(self, sensor_number, lun=0,
unr=None, ucr=None, unc=None,
lnc=None, lcr=None, lnr=None):
"""Set the sensor thresholds that are not 'None'
`sensor_number`
`unr` for upper non-recoverable
`ucr` for upper critical
`unc` for upper non-critical
`lnc` for lower non-critical
`lcr` for lower critical
`lnr` for lower non-recoverable
"""
req = create_request_by_name('SetSensorThresholds')
req.sensor_number = sensor_number
req.lun = lun
thresholds = dict(unr=unr, ucr=ucr, unc=unc, lnc=lnc, lcr=lcr, lnr=lnr)
for key, value in thresholds.items():
if value is not None:
setattr(req.set_mask, key, 1)
setattr(req.threshold, key, value)
rsp = self.send_message(req)
check_completion_code(rsp.completion_code) | 0.004167 |
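A standalone sketch of the mask-building idea used above: only thresholds that are not None have their mask bit set before the request is sent. The values below are illustrative, not taken from any real sensor:

```python
# Only thresholds that are not None are written; the mask marks which ones.
thresholds = dict(unr=None, ucr=85.0, unc=75.0, lnc=10.0, lcr=None, lnr=None)
set_mask = {key: int(value is not None) for key, value in thresholds.items()}
payload = {key: value for key, value in thresholds.items() if value is not None}
print(set_mask)  # {'unr': 0, 'ucr': 1, 'unc': 1, 'lnc': 1, 'lcr': 0, 'lnr': 0}
print(payload)   # {'ucr': 85.0, 'unc': 75.0, 'lnc': 10.0}
```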
def _decrypt(self, hexified_value):
"""The exact opposite of _encrypt
"""
encrypted_value = binascii.unhexlify(hexified_value)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
jsonified_value = self.cipher.decrypt(
encrypted_value).decode('ascii')
value = json.loads(jsonified_value)
return value | 0.005013 |
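For context, a minimal sketch of the forward operation the docstring alludes to, assuming `cipher` is a symmetric cipher object exposing `encrypt()`/`decrypt()` (e.g. Fernet-like); the JSON and hex steps mirror `_decrypt` above:

```python
import binascii
import json

def _encrypt(cipher, value):
    # JSON-encode, encrypt, then hexlify -- the reverse of _decrypt above.
    jsonified_value = json.dumps(value)
    encrypted_value = cipher.encrypt(jsonified_value.encode('ascii'))
    return binascii.hexlify(encrypted_value)
```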
def grant_role(self, role_name, principal_name, principal_type, grantor, grantorType, grant_option):
"""
Parameters:
- role_name
- principal_name
- principal_type
- grantor
- grantorType
- grant_option
"""
self.send_grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option)
return self.recv_grant_role() | 0.007792 |
def ext_pillar(minion_id,
pillar, # pylint: disable=W0613
collection='pillar',
id_field='_id',
re_pattern=None,
re_replace='',
fields=None):
'''
Connect to a mongo database and read per-node pillar information.
Parameters:
* `collection`: The mongodb collection to read data from. Defaults to
``'pillar'``.
* `id_field`: The field in the collection that represents an individual
minion id. Defaults to ``'_id'``.
* `re_pattern`: If your naming convention in the collection is shorter
than the minion id, you can use this to trim the name.
`re_pattern` will be used to match the name, and `re_replace` will
be used to replace it. Backrefs are supported as they are in the
Python standard library. If ``None``, no mangling of the name will
be performed - the collection will be searched with the entire
minion id. Defaults to ``None``.
* `re_replace`: Use as the replacement value in node ids matched with
`re_pattern`. Defaults to ''. Feel free to use backreferences here.
* `fields`: The specific fields in the document to use for the pillar
data. If ``None``, will use the entire document. If using the
entire document, the ``_id`` field will be converted to string. Be
careful with other fields in the document as they must be string
serializable. Defaults to ``None``.
'''
host = __opts__['mongo.host']
port = __opts__['mongo.port']
log.info('connecting to %s:%s for mongo ext_pillar', host, port)
conn = pymongo.MongoClient(host, port)
log.debug('using database \'%s\'', __opts__['mongo.db'])
mdb = conn[__opts__['mongo.db']]
user = __opts__.get('mongo.user')
password = __opts__.get('mongo.password')
if user and password:
log.debug('authenticating as \'%s\'', user)
mdb.authenticate(user, password)
# Do the regex string replacement on the minion id
if re_pattern:
minion_id = re.sub(re_pattern, re_replace, minion_id)
log.info(
'ext_pillar.mongo: looking up pillar def for {\'%s\': \'%s\'} '
'in mongo', id_field, minion_id
)
result = mdb[collection].find_one({id_field: minion_id}, projection=fields)
if result:
if fields:
log.debug(
'ext_pillar.mongo: found document, returning fields \'%s\'',
fields
)
else:
log.debug('ext_pillar.mongo: found document, returning whole doc')
if '_id' in result:
# Converting _id to a string
# will avoid the most common serialization error cases, but DBRefs
# and whatnot will still cause problems.
result['_id'] = six.text_type(result['_id'])
return result
else:
# If we can't find the minion in the database, it's not necessarily
# an error.
log.debug(
'ext_pillar.mongo: no document found in collection %s',
collection
)
return {} | 0.000315 |
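To make the `re_pattern`/`re_replace` mangling concrete, a small standalone illustration (the values are invented, not from any real deployment):

```python
import re

minion_id = 'web01.prod.example.com'
re_pattern = r'\.example\.com$'
re_replace = ''

lookup_key = re.sub(re_pattern, re_replace, minion_id)
print(lookup_key)  # 'web01.prod' -> the value matched against id_field in MongoDB
```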
def owner(self):
"""
Username of document creator
"""
if self._owner:
return self._owner
elif not self.abstract:
return self.read_meta()._owner
raise EmptyDocumentException() | 0.008097 |
def _init_tmatrix(self):
"""Initialize the T-matrix.
"""
if self.radius_type == Scatterer.RADIUS_MAXIMUM:
# Maximum radius is not directly supported in the original T-matrix code,
# so we convert it to the equal-volume radius
radius_type = Scatterer.RADIUS_EQUAL_VOLUME
radius = self.equal_volume_from_maximum()
else:
radius_type = self.radius_type
radius = self.radius
self.nmax = pytmatrix.calctmat(radius, radius_type,
self.wavelength, self.m.real, self.m.imag, self.axis_ratio,
self.shape, self.ddelt, self.ndgs)
self._tm_signature = (self.radius, self.radius_type, self.wavelength,
self.m, self.axis_ratio, self.shape, self.ddelt, self.ndgs) | 0.006369 |
def keep_folder(raw_path):
"""
Keep only folders that don't contain patterns in `DIR_EXCLUDE_PATTERNS`.
"""
keep = True
for pattern in DIR_EXCLUDE_PATTERNS:
if pattern in raw_path:
LOGGER.debug('rejecting %s', raw_path)
keep = False
return keep | 0.003367 |
def execute(self, method, args, ref):
""" Execute the method with args """
response = {'result': None, 'error': None, 'ref': ref}
fun = self.methods.get(method)
if not fun:
response['error'] = 'Method `{}` not found'.format(method)
else:
try:
response['result'] = fun(*args)
except Exception as exception:
logging.error(exception, exc_info=1)
response['error'] = str(exception)
return response | 0.003802 |
def get_selection(cls, strings, title="Select an option", subtitle=None, exit_option=True, _menu=None):
"""
Single-method way of getting a selection out of a list of strings.
Args:
strings (:obj:`list` of :obj:`str`): The list of strings this menu should be built from.
title (str): The title of the menu.
subtitle (str): The subtitle of the menu.
exit_option (bool): Specifies whether this menu should show an exit item by default. Defaults to True.
_menu: Intended mainly for testing; pass in a list and the menu created internally by
the method will be appended to it.
Returns:
int: The index of the selected option.
"""
menu = cls(strings, title, subtitle, exit_option)
if _menu is not None:
_menu.append(menu)
menu.show()
menu.join()
return menu.selected_option | 0.006205 |
def get_asset_repository_assignment_session(self):
"""Gets the session for assigning asset to repository mappings.
return: (osid.repository.AssetRepositoryAssignmentSession) - an
``AssetRepositoryAssignmentSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_asset_repository_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_repository_assignment()`` is ``true``.*
"""
if not self.supports_asset_repository_assignment():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.AssetRepositoryAssignmentSession(runtime=self._runtime) | 0.002581 |
def difference(self, other):
"""difference(x, y) = x(t) - y(t)."""
return self.operation(other, lambda x, y: x - y) | 0.015267 |
def edges(self, nodes=None):
"""
Returns a ``tuple`` of all edges in the ``DictGraph``; an edge is a pair
of **node objects**.
Arguments:
- nodes(iterable) [default: ``None``] iterable of **node objects** if
specified the edges will be limited to those outgoing from one of
the specified nodes.
"""
# If a Node has been directly updated (__not__ recommended)
# then the Graph will not know the added nodes and therefore will
# miss half of their edges.
edges = set()
for node in (nodes or self.iterkeys()):
ends = self[node].nodes()
edges.update([(node, end) for end in ends])
return tuple(edges) | 0.009044 |
def read_geo(self, key, info):
"""Read angles.
"""
pairs = {('satellite_azimuth_angle', 'satellite_zenith_angle'):
("SatelliteAzimuthAngle", "SatelliteZenithAngle"),
('solar_azimuth_angle', 'solar_zenith_angle'):
("SolarAzimuthAngle", "SolarZenithAngle"),
('dnb_solar_azimuth_angle', 'dnb_solar_zenith_angle'):
("SolarAzimuthAngle", "SolarZenithAngle"),
('dnb_lunar_azimuth_angle', 'dnb_lunar_zenith_angle'):
("LunarAzimuthAngle", "LunarZenithAngle"),
}
for pair, fkeys in pairs.items():
if key.name in pair:
if (self.cache.get(pair[0]) is None or
self.cache.get(pair[1]) is None):
angles = self.angles(*fkeys)
self.cache[pair[0]], self.cache[pair[1]] = angles
if key.name == pair[0]:
return xr.DataArray(self.cache[pair[0]], name=key.name,
attrs=self.mda, dims=('y', 'x'))
else:
return xr.DataArray(self.cache[pair[1]], name=key.name,
attrs=self.mda, dims=('y', 'x'))
if info.get('standard_name') in ['latitude', 'longitude']:
if self.lons is None or self.lats is None:
self.lons, self.lats = self.navigate()
mda = self.mda.copy()
mda.update(info)
if info['standard_name'] == 'longitude':
return xr.DataArray(self.lons, attrs=mda, dims=('y', 'x'))
else:
return xr.DataArray(self.lats, attrs=mda, dims=('y', 'x'))
if key.name == 'dnb_moon_illumination_fraction':
mda = self.mda.copy()
mda.update(info)
return xr.DataArray(self.geostuff["MoonIllumFraction"].value,
attrs=mda) | 0.001009 |
def niceStringify( self ):
" Returns a string representation with new lines and shifts "
out = ""
if self.docstring is not None:
out += str( self.docstring )
if self.encoding is not None:
if out != "":
out += '\n'
out += str( self.encoding )
for item in self.imports:
if out != "":
out += '\n'
out += str( item )
for item in self.globals:
if out != "":
out += '\n'
out += str( item )
for item in self.functions:
if out != "":
out += '\n'
out += item.niceStringify( 0 )
for item in self.classes:
if out != "":
out += '\n'
out += item.niceStringify( 0 )
return out | 0.020071 |
def namedb_select_where_unexpired_names(current_block, only_registered=True):
"""
Generate part of a WHERE clause that selects from name records joined with namespaces
(or projections of them) that are not expired.
Also limit to names that are registered at this block, if only_registered=True.
If only_registered is False, then the name will be returned as long as current_block is before the expire block (even though the name may not have existed at that block)
"""
ns_lifetime_multiplier = get_epoch_namespace_lifetime_multiplier(current_block, '*')
ns_grace_period = get_epoch_namespace_lifetime_grace_period(current_block, '*')
unexpired_query_fragment = "(" + \
"(" + \
"namespaces.op = ? AND " + \
"(" + \
"(namespaces.ready_block + ((namespaces.lifetime * {}) + {}) > ?) OR ".format(ns_lifetime_multiplier, ns_grace_period) + \
"(name_records.last_renewed + ((namespaces.lifetime * {}) + {}) >= ?)".format(ns_lifetime_multiplier, ns_grace_period) + \
")" + \
") OR " + \
"(" + \
"namespaces.op = ? AND namespaces.reveal_block <= ? AND ? < namespaces.reveal_block + ?" + \
")" + \
")"
unexpired_query_args = (NAMESPACE_READY,
current_block,
current_block,
NAMESPACE_REVEAL, current_block, current_block, NAMESPACE_REVEAL_EXPIRE)
if only_registered:
# also limit to only names registered before this block
unexpired_query_fragment = '(name_records.first_registered <= ? AND {})'.format(unexpired_query_fragment)
unexpired_query_args = (current_block,) + unexpired_query_args
return (unexpired_query_fragment, unexpired_query_args) | 0.008447 |
def as_dict(self, **kwargs):
"""Return an error dict for self.args and kwargs."""
error, reason, details, err_kwargs = self.args
result = {
key: val
for key, val in {
'error': error, 'reason': reason, 'details': details,
}.items()
if val is not None
}
result.update(err_kwargs)
result.update(kwargs)
return result | 0.004619 |
def archive_query_interval(self, _from, to):
'''
:param _from: Start of interval (int) (inclusive)
:param to: End of interval (int) (exclusive)
:raises: IOError
'''
with self.session as session:
table = self.tables.archive
try:
results = session.query(table)\
.filter(table.dateTime >= _from)\
.filter(table.dateTime < to)\
.all()
return [self.archive_schema.dump(entry).data for entry in results]
except SQLAlchemyError as exc:
session.rollback()
print_exc()
raise IOError(exc) | 0.004261 |
def result_summary(self, result):
"""
Return a summary of the results.
"""
return "{} examples, {} errors, {} failures\n".format(
result.testsRun, len(result.errors), len(result.failures),
) | 0.008197 |
def matches_prefix(ip, prefix):
"""
Returns True if the given IP address is part of the given
network, returns False otherwise.
:type ip: string
:param ip: An IP address.
:type prefix: string
:param prefix: An IP prefix.
:rtype: bool
:return: True if the IP is in the prefix, False otherwise.
"""
ip_int = ip2int(ip)
network, pfxlen = parse_prefix(prefix)
network_int = ip2int(network)
mask_int = pfxlen2mask_int(pfxlen)
return ip_int&mask_int == network_int&mask_int | 0.00565 |
def _str2array(d):
""" Reconstructs a numpy array from a plain-text string """
if type(d) == list:
return np.asarray([_str2array(s) for s in d])
ins = StringIO(d)
return np.loadtxt(ins) | 0.004785 |
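A minimal sketch of the round trip implied by `_str2array`, assuming the text was produced with `numpy.savetxt` (the exact writer is not shown in the snippet):

```python
import numpy as np
from io import StringIO

def _array2str(arr):
    # Hypothetical counterpart: serialise an array to the plain text loadtxt expects.
    out = StringIO()
    np.savetxt(out, np.atleast_2d(arr))
    return out.getvalue()

original = np.array([[1.0, 2.0], [3.0, 4.0]])
restored = np.loadtxt(StringIO(_array2str(original)))
assert np.allclose(original, restored)
```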
def update_payload(self, fields=None):
"""Wrap submitted data within an extra dict."""
payload = super(ConfigTemplate, self).update_payload(fields)
if 'template_combinations' in payload:
payload['template_combinations_attributes'] = payload.pop(
'template_combinations')
return {u'config_template': payload} | 0.00545 |
def find_video_by_id(self, video_id):
"""doc: http://open.youku.com/docs/doc?id=44
"""
url = 'https://openapi.youku.com/v2/videos/show_basic.json'
params = {
'client_id': self.client_id,
'video_id': video_id
}
r = requests.get(url, params=params)
check_error(r)
return r.json() | 0.005479 |
def add(self, event_state, event_type, event_value,
proc_list=None, proc_desc="", peak_time=6):
"""Add a new item to the logs list.
If 'event' is a 'new one', add it at the beginning of the list.
If 'event' is not a 'new one', update the list.
If event < peak_time then the alert is not set.
"""
proc_list = proc_list or glances_processes.getlist()
# Add or update the log
event_index = self.__event_exist(event_type)
if event_index < 0:
# Event did not exist, add it
self._create_event(event_state, event_type, event_value,
proc_list, proc_desc, peak_time)
else:
# Event exist, update it
self._update_event(event_index, event_state, event_type, event_value,
proc_list, proc_desc, peak_time)
return self.len() | 0.004334 |
def p_pkg_file_name(self, p):
"""pkg_file_name : PKG_FILE_NAME LINE"""
try:
if six.PY2:
value = p[2].decode(encoding='utf-8')
else:
value = p[2]
self.builder.set_pkg_file_name(self.document, value)
except OrderError:
self.order_error('PackageFileName', 'PackageName', p.lineno(1))
except CardinalityError:
self.more_than_one_error('PackageFileName', p.lineno(1)) | 0.004115 |
def replace(iterable, pred, substitutes, count=None, window_size=1):
"""Yield the items from *iterable*, replacing the items for which *pred*
returns ``True`` with the items from the iterable *substitutes*.
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
>>> pred = lambda x: x == 0
>>> substitutes = (2, 3)
>>> list(replace(iterable, pred, substitutes))
[1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
If *count* is given, the number of replacements will be limited:
>>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
>>> pred = lambda x: x == 0
>>> substitutes = [None]
>>> list(replace(iterable, pred, substitutes, count=2))
[1, 1, None, 1, 1, None, 1, 1, 0]
Use *window_size* to control the number of items passed as arguments to
*pred*. This allows for locating and replacing subsequences.
>>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
>>> window_size = 3
>>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
>>> substitutes = [3, 4] # Splice in these items
>>> list(replace(iterable, pred, substitutes, window_size=window_size))
[3, 4, 5, 3, 4, 5]
"""
if window_size < 1:
raise ValueError('window_size must be at least 1')
# Save the substitutes iterable, since it's used more than once
substitutes = tuple(substitutes)
# Add padding such that the number of windows matches the length of the
# iterable
it = chain(iterable, [_marker] * (window_size - 1))
windows = windowed(it, window_size)
n = 0
for w in windows:
# If the current window matches our predicate (and we haven't hit
# our maximum number of replacements), splice in the substitutes
# and then consume the following windows that overlap with this one.
# For example, if the iterable is (0, 1, 2, 3, 4...)
# and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
# If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
if pred(*w):
if (count is None) or (n < count):
n += 1
yield from substitutes
consume(windows, window_size - 1)
continue
# If there was no match (or we've reached the replacement limit),
# yield the first item from the window.
if w and (w[0] is not _marker):
yield w[0] | 0.000412 |
def get_manifest(self, repo_name, tag):
'''return the image manifest via the aws client, saved in self.manifest
'''
image = None
repo = self.aws.describe_images(repositoryName=repo_name)
if 'imageDetails' in repo:
for contender in repo.get('imageDetails'):
if tag in contender['imageTags']:
image = contender
break
# if the image isn't found, we need to exit
if image is None:
bot.exit('Cannot find %s:%s, is the uri correct?' % (repo_name, tag))
digest = image['imageDigest']
digests = self.aws.batch_get_image(repositoryName=repo_name,
imageIds=[{"imageDigest": digest,
"imageTag": tag}])
self.manifest = json.loads(digests['images'][0]['imageManifest'])
return self.manifest | 0.003421 |
def validate_arrangement_version(self):
"""Validate if the arrangement_version is supported
This is for autorebuilds to fail early; otherwise they may fail
on workers because of osbs-client validation checks.
Method should be called after self.adjust_build_kwargs
Shows a warning when version is deprecated
:raises ValueError: when version is not supported
"""
arrangement_version = self.build_kwargs['arrangement_version']
if arrangement_version is None:
return
if arrangement_version <= 5:
# TODO: raise as ValueError in release 1.6.38+
self.log.warning("arrangement_version <= 5 is deprecated and will be removed"
" in release 1.6.38") | 0.003802 |
def get_p_value(transcript, rates, iterations, consequence, de_novos):
""" find the probability of getting de novos with a mean conservation
The probability is the number of simulations where the mean conservation
between simulated de novos is less than the observed conservation.
Args:
transcript: Transcript object for the current gene.
rates: SiteRates object, which contains WeightedChoice entries for
different consequence categories.
iterations: number of simulations to perform
consequence: string to indicate the consequence type e.g. "missense",
"lof", "synonymous" etc. The full list is "missense", "nonsense",
"synonymous", "lof", "loss_of_function", "splice_lof",
"splice_region".
de_novos: list of de novos within a gene
Returns:
tuple of mean proximity for the observed de novos and probability of
obtaining a value less than or equal to the observed proximity from the
null distribution.
"""
if len(de_novos) < 2:
return (float('nan'), float('nan'))
rename = {"lof": "loss_of_function"}
if consequence in rename:
consequence = rename[consequence]
weights = rates[consequence]
cds_positions = [ transcript.get_coding_distance(x)['pos'] for x in de_novos ]
distances = get_distances(cds_positions)
observed = geomean(distances)
# call a cython wrapped C++ library to handle the simulations
sim_prob = analyse_de_novos(weights, iterations, len(de_novos), observed)
observed = "{0:0.1f}".format(observed)
return (observed, sim_prob) | 0.008279 |
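A toy illustration of the clustering statistic described above, with stand-in helpers for pairwise CDS distances and their geometric mean; the library's real `get_distances`/`geomean` may differ in detail:

```python
from itertools import combinations
from math import exp, log

def pairwise_distances(positions):
    # All pairwise distances between coding positions.
    return [abs(a - b) for a, b in combinations(positions, 2)]

def geometric_mean(values):
    return exp(sum(log(v) for v in values) / len(values))

cds_positions = [100, 105, 400]
distances = pairwise_distances(cds_positions)  # [5, 300, 295]
print(round(geometric_mean(distances), 1))     # smaller values mean tighter clustering
```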
def export_plotter_vtkjs(plotter, filename, compress_arrays=False):
"""Export a plotter's rendering window to the VTKjs format.
"""
sceneName = os.path.split(filename)[1]
doCompressArrays = compress_arrays
# Generate timestamp and use it to make subdirectory within the top level output dir
timeStamp = time.strftime("%a-%d-%b-%Y-%H-%M-%S")
root_output_directory = os.path.split(filename)[0]
output_dir = os.path.join(root_output_directory, timeStamp)
mkdir_p(output_dir)
renderers = plotter.ren_win.GetRenderers()
scDirs = []
sceneComponents = []
textureToSave = {}
for rIdx in range(renderers.GetNumberOfItems()):
renderer = renderers.GetItemAsObject(rIdx)
renProps = renderer.GetViewProps()
for rpIdx in range(renProps.GetNumberOfItems()):
renProp = renProps.GetItemAsObject(rpIdx)
if not renProp.GetVisibility():
continue
if hasattr(renProp, 'GetMapper') and renProp.GetMapper() is not None:
mapper = renProp.GetMapper()
dataObject = mapper.GetInputDataObject(0, 0)
dataset = None
if dataObject is None:
continue
if dataObject.IsA('vtkCompositeDataSet'):
if dataObject.GetNumberOfBlocks() == 1:
dataset = dataObject.GetBlock(0)
else:
gf = vtk.vtkCompositeDataGeometryFilter()
gf.SetInputData(dataObject)
gf.Update()
dataset = gf.GetOutput()
else:
dataset = mapper.GetInput()
if dataset and not isinstance(dataset, (vtk.vtkPolyData, vtk.vtkImageData)):
# All data must be PolyData surfaces
gf = vtk.vtkGeometryFilter()
gf.SetInputData(dataset)
gf.Update()
dataset = gf.GetOutputDataObject(0)
if dataset:# and dataset.GetPoints(): # NOTE: vtkImageData does not have points
componentName = 'data_%d_%d' % (
rIdx, rpIdx) # getComponentName(renProp)
scalarVisibility = mapper.GetScalarVisibility()
#arrayAccessMode = mapper.GetArrayAccessMode()
#colorArrayName = mapper.GetArrayName() #TODO: if arrayAccessMode == 1 else mapper.GetArrayId()
colorMode = mapper.GetColorMode()
scalarMode = mapper.GetScalarMode()
lookupTable = mapper.GetLookupTable()
dsAttrs = None
arrayLocation = ''
if scalarVisibility:
if scalarMode == 3 or scalarMode == 1: # VTK_SCALAR_MODE_USE_POINT_FIELD_DATA or VTK_SCALAR_MODE_USE_POINT_DATA
dsAttrs = dataset.GetPointData()
arrayLocation = 'pointData'
# VTK_SCALAR_MODE_USE_CELL_FIELD_DATA or VTK_SCALAR_MODE_USE_CELL_DATA
elif scalarMode == 4 or scalarMode == 2:
dsAttrs = dataset.GetCellData()
arrayLocation = 'cellData'
colorArray = None
dataArray = None
if dsAttrs:
dataArray = dsAttrs.GetArray(0) # Force getting the active array
if dataArray:
# component = -1 => let specific instance get scalar from vector before mapping
colorArray = lookupTable.MapScalars(
dataArray, colorMode, -1)
colorArrayName = '__CustomRGBColorArray__'
colorArray.SetName(colorArrayName)
colorMode = 0
else:
colorArrayName = ''
color_array_info = {
'colorArray': colorArray,
'location': arrayLocation
}
scDirs.append(write_data_set('', dataset, output_dir, color_array_info,
new_name=componentName, compress=doCompressArrays))
# Handle texture if any
textureName = None
if renProp.GetTexture() and renProp.GetTexture().GetInput():
textureData = renProp.GetTexture().GetInput()
textureName = 'texture_%d' % get_object_id(textureData)
textureToSave[textureName] = textureData
representation = renProp.GetProperty().GetRepresentation(
) if hasattr(renProp, 'GetProperty') else 2
colorToUse = renProp.GetProperty().GetDiffuseColor(
) if hasattr(renProp, 'GetProperty') else [1, 1, 1]
if representation == 1:
colorToUse = renProp.GetProperty().GetColor() if hasattr(
renProp, 'GetProperty') else [1, 1, 1]
pointSize = renProp.GetProperty().GetPointSize(
) if hasattr(renProp, 'GetProperty') else 1.0
opacity = renProp.GetProperty().GetOpacity() if hasattr(
renProp, 'GetProperty') else 1.0
edgeVisibility = renProp.GetProperty().GetEdgeVisibility(
) if hasattr(renProp, 'GetProperty') else False
p3dPosition = renProp.GetPosition() if renProp.IsA(
'vtkProp3D') else [0, 0, 0]
p3dScale = renProp.GetScale() if renProp.IsA(
'vtkProp3D') else [1, 1, 1]
p3dOrigin = renProp.GetOrigin() if renProp.IsA(
'vtkProp3D') else [0, 0, 0]
p3dRotateWXYZ = renProp.GetOrientationWXYZ(
) if renProp.IsA('vtkProp3D') else [0, 0, 0, 0]
sceneComponents.append({
"name": componentName,
"type": "httpDataSetReader",
"httpDataSetReader": {
"url": componentName
},
"actor": {
"origin": p3dOrigin,
"scale": p3dScale,
"position": p3dPosition,
},
"actorRotation": p3dRotateWXYZ,
"mapper": {
"colorByArrayName": colorArrayName,
"colorMode": colorMode,
"scalarMode": scalarMode
},
"property": {
"representation": representation,
"edgeVisibility": edgeVisibility,
"diffuseColor": colorToUse,
"pointSize": pointSize,
"opacity": opacity
},
"lookupTable": {
"tableRange": lookupTable.GetRange(),
"hueRange": lookupTable.GetHueRange() if hasattr(lookupTable, 'GetHueRange') else [0.5, 0]
}
})
if textureName:
sceneComponents[-1]['texture'] = textureName
# Save texture data if any
for key, val in textureToSave.items():
write_data_set('', val, output_dir, None, new_name=key,
compress=doCompressArrays)
cameraClippingRange = plotter.camera.GetClippingRange()
sceneDescription = {
"fetchGzip": doCompressArrays,
"background": plotter.background_color,
"camera": {
"focalPoint": plotter.camera.GetFocalPoint(),
"position": plotter.camera.GetPosition(),
"viewUp": plotter.camera.GetViewUp(),
"clippingRange": [ elt for elt in cameraClippingRange ]
},
"centerOfRotation": plotter.camera.GetFocalPoint(),
"scene": sceneComponents
}
indexFilePath = os.path.join(output_dir, 'index.json')
with open(indexFilePath, 'w') as outfile:
json.dump(sceneDescription, outfile, indent=4)
# -----------------------------------------------------------------------------
# Now zip up the results and get rid of the temp directory
sceneFileName = os.path.join(
root_output_directory, '%s%s' % (sceneName, FILENAME_EXTENSION))
try:
import zlib
compression = zipfile.ZIP_DEFLATED
except ImportError:
compression = zipfile.ZIP_STORED
zf = zipfile.ZipFile(sceneFileName, mode='w')
try:
for dirName, subdirList, fileList in os.walk(output_dir):
for fname in fileList:
fullPath = os.path.join(dirName, fname)
relPath = '%s/%s' % (sceneName,
os.path.relpath(fullPath, output_dir))
zf.write(fullPath, arcname=relPath, compress_type=compression)
finally:
zf.close()
shutil.rmtree(output_dir)
print('Finished exporting dataset to: ', sceneFileName) | 0.002765 |
def save_object(self, obj):
"""
Save object to disk as JSON.
Generally shouldn't be called directly.
"""
obj.pre_save(self.jurisdiction.jurisdiction_id)
filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-')
self.info('save %s %s as %s', obj._type, obj, filename)
self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())),
cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': ')))
self.output_names[obj._type].add(filename)
with open(os.path.join(self.datadir, filename), 'w') as f:
json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus)
# validate after writing, allows for inspection on failure
try:
obj.validate()
except ValueError as ve:
if self.strict_validation:
raise ve
else:
self.warning(ve)
# after saving and validating, save subordinate objects
for obj in obj._related:
self.save_object(obj) | 0.00276 |
def get_instance_aws_context(ec2_client):
"""
Returns: a dictionary of aws context
dictionary will contain these entries:
region, instance_id, account, role, env, env_short, service
Raises: IOError if couldn't read metadata or lookup attempt failed
"""
result = {}
try:
result["region"] = http_get_metadata("placement/availability-zone/")
result["region"] = result["region"][:-1]
result["instance_id"] = http_get_metadata('instance-id')
except IOError as error:
raise IOError("Error looking up metadata:availability-zone or instance-id: " + repr(error))
try:
instance_desc = ec2_client.describe_instances(InstanceIds=[result["instance_id"]])
except Exception as error:
raise IOError("Error calling describe_instances: " + repr(error))
result["account"] = instance_desc["Reservations"][0]["OwnerId"]
arn = instance_desc["Reservations"][0]["Instances"][0]["IamInstanceProfile"]["Arn"]
result["role"] = arn.split(":")[5].split("/")[1]
env = re.search("^(" + EFConfig.VALID_ENV_REGEX + ")-", result["role"])
if not env:
raise IOError("Did not find environment in role name: " + result["role"])
result["env"] = env.group(1)
result["env_short"] = result["env"].strip(".0123456789")
result["service"] = "-".join(result["role"].split("-")[1:])
return result | 0.014383 |
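To make the role-name parsing above concrete, a standalone illustration with a made-up role and a simplified pattern standing in for `EFConfig.VALID_ENV_REGEX`:

```python
import re

role = 'prod-web-server'
env_match = re.search(r'^(prod|staging|dev[0-9]*)-', role)  # simplified env regex
env = env_match.group(1)                 # 'prod'
env_short = env.strip('.0123456789')     # 'prod'
service = '-'.join(role.split('-')[1:])  # 'web-server'
print(env, env_short, service)
```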
def render(self, doc, context=None, math_option=False, img_path='',
css_path=CSS_PATH):
"""Start thread to render a given documentation"""
# If the thread is already running wait for it to finish before
# starting it again.
if self.wait():
self.doc = doc
self.context = context
self.math_option = math_option
self.img_path = img_path
self.css_path = css_path
# This causes run() to be executed in separate thread
self.start() | 0.005396 |
def sms_login(self, client_id, phone_number, code, scope='openid'):
"""Login using phone number/verification code.
"""
return self.post(
'https://{}/oauth/ro'.format(self.domain),
data={
'client_id': client_id,
'connection': 'sms',
'grant_type': 'password',
'username': phone_number,
'password': code,
'scope': scope,
},
headers={'Content-Type': 'application/json'}
) | 0.003676 |
def _remove(self, xer, primary):
"""
Private method for removing a descriptor from the event loop.
It does the inverse job of _add, and also adds a check in case the fd
has gone away.
"""
if xer in primary:
notifier = primary.pop(xer)
notifier.shutdown() | 0.006079 |
def _raw_records(self, identifier=None, rtype=None, name=None, content=None):
"""Return list of record dicts in the netcup API convention."""
record_fields = {
'id': identifier,
'type': rtype,
'hostname': name and self._relative_name(name),
'destination': content,
}
# type/hostname/destination of the dnsrecord type are mandatory (even
# when deleting), and must be queried if not all were specified:
if all(record_fields.values()):
return [record_fields]
data = self._apicall('infoDnsRecords', domainname=self.domain)
records = data.get('dnsrecords', [])
return [
record for record in records
if all(record[k] == v for k, v in record_fields.items() if v)
] | 0.002433 |
def makeUserLoginMethod(username, password, locale=None):
'''Return a function that will call the vim.SessionManager.Login() method
with the given parameters. The result of this function can be passed as
the "loginMethod" to a SessionOrientedStub constructor.'''
def _doLogin(soapStub):
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
si.content.sessionManager.Login(username, password, locale)
return _doLogin | 0.014706 |
def deactivate_users(server_context, target_ids, container_path=None):
"""
Deactivate but do not delete user accounts
:param server_context: A LabKey server context. See utils.create_server_context.
:param target_ids:
:param container_path:
:return:
"""
# This action responds with HTML so we just check if it responds OK
response = __make_user_api_request(server_context, target_ids=target_ids, api='deactivateUsers.view',
container_path=container_path)
if response is not None and response['status_code'] == 200:
return dict(success=True)
else:
raise ValueError("Unable to deactivate users {0}".format(target_ids)) | 0.004196 |
def connecting_vars(self):
"""
Returns a dictionary with the variables that must be added to the
input file in order to connect this :class:`Node` to its dependencies.
"""
vars = {}
for prod in self.products:
vars.update(prod.connecting_vars())
return vars | 0.006154 |
def getChargeTimeElapsed(self):
"""Returns the charge time elapsed (in seconds), or 0 if is not currently charging"""
command = '$GS'
status = self.sendCommand(command)
if int(status[1]) == 3:
return int(status[2])
else:
return 0 | 0.015326 |
def shutdown(self, how):
"""
Shut down one or both halves of the connection. If ``how`` is 0,
further receives are disallowed. If ``how`` is 1, further sends
are disallowed. If ``how`` is 2, further sends and receives are
disallowed. This closes the stream in one or both directions.
:param int how:
0 (stop receiving), 1 (stop sending), or 2 (stop receiving and
sending).
"""
if (how == 0) or (how == 2):
# feign "read" shutdown
self.eof_received = 1
if (how == 1) or (how == 2):
self.lock.acquire()
try:
m = self._send_eof()
finally:
self.lock.release()
if m is not None:
self.transport._send_user_message(m) | 0.002401 |
def _configure_manager(self):
"""
Create the manager to handle the instances, and also another
to handle flavors.
"""
self._manager = CloudBlockStorageManager(self,
resource_class=CloudBlockStorageVolume, response_key="volume",
uri_base="volumes")
self._types_manager = BaseManager(self,
resource_class=CloudBlockStorageVolumeType,
response_key="volume_type", uri_base="types")
self._snapshot_manager = CloudBlockStorageSnapshotManager(self,
resource_class=CloudBlockStorageSnapshot,
response_key="snapshot", uri_base="snapshots") | 0.01173 |
def update(self, agent=None, metadata=None):
"""
Only the agent_id and metadata are able to be updated via the API.
"""
self.manager.update_entity(self, agent=agent, metadata=metadata) | 0.009259 |
def remove_zero_points(self):
"""Remove all elements where the norms and points are zero.
Note
----
This returns nothing and updates the NormalCloud in-place.
"""
points_of_interest = np.where((np.linalg.norm(self.point_cloud.data, axis=0) != 0.0) &
(np.linalg.norm(self.normal_cloud.data, axis=0) != 0.0) &
(np.isfinite(self.normal_cloud.data[0,:])))[0]
self.point_cloud._data = self.point_cloud.data[:, points_of_interest]
self.normal_cloud._data = self.normal_cloud.data[:, points_of_interest] | 0.010972 |
def load_figure(d, new_fig=True):
"""Create a figure from what is returned by :meth:`inspect_figure`"""
import matplotlib.pyplot as plt
subplotpars = d.pop('subplotpars', None)
if subplotpars is not None:
subplotpars.pop('validate', None)
subplotpars = mfig.SubplotParams(**subplotpars)
if new_fig:
nums = plt.get_fignums()
if d.get('num') in nums:
d['num'] = next(
i for i in range(max(plt.get_fignums()) + 1, 0, -1)
if i not in nums)
return plt.figure(subplotpars=subplotpars, **d) | 0.00315 |
def compare(expr, value, regex_expr=False):
"""
Compares a string or regular expression against a given value.
Arguments:
expr (str|regex): string or regular expression value to compare.
value (str): value to compare against to.
regex_expr (bool, optional): enables string based regex matching.
Raises:
AssertionError: in case of assertion error.
Returns:
bool
"""
# Strict equality comparison
if expr == value:
return True
# Infer negate expression to match, if needed
negate = False
if isinstance(expr, str):
negate = expr.startswith(NEGATE)
expr = strip_negate(expr) if negate else expr
try:
# RegExp or strict equality comparison
test(expr, value, regex_expr=regex_expr)
except Exception as err:
if negate:
return True
else:
raise err
return True | 0.001071 |
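A self-contained illustration of the negation convention used above, assuming `NEGATE` is a literal prefix such as `'!'` and that matching is regex-based (both are defined elsewhere in the library):

```python
import re

NEGATE = '!'

def toy_compare(expr, value):
    # A match should fail when the expression is negated, and vice versa.
    negate = expr.startswith(NEGATE)
    pattern = expr[len(NEGATE):] if negate else expr
    matched = re.search(pattern, value) is not None
    return matched != negate

print(toy_compare(r'hello \w+', 'hello world'))   # True
print(toy_compare(r'!hello \w+', 'hello world'))  # False
```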
def queue_pop(self, key, **kwargs):
"""
Remove and return the first item queue.
:param key: The document ID
:param kwargs: Arguments passed to :meth:`mutate_in`
:return: A :class:`ValueResult`
:raise: :cb_exc:`QueueEmpty` if there are no items in the queue.
:raise: :cb_exc:`NotFoundError` if the queue does not exist.
"""
while True:
try:
itm = self.list_get(key, -1)
except IndexError:
raise E.QueueEmpty
kwargs['cas'] = itm.cas
try:
self.list_remove(key, -1, **kwargs)
return itm
except E.KeyExistsError:
pass
except IndexError:
raise E.QueueEmpty | 0.002525 |
def _AnalyzeEvents(self, storage_writer, analysis_plugins, event_filter=None):
"""Analyzes events in a plaso storage.
Args:
storage_writer (StorageWriter): storage writer.
analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that
should be run and their names.
event_filter (Optional[FilterObject]): event filter.
Returns:
collections.Counter: counter containing information about the events
processed and filtered.
Raises:
RuntimeError: if a non-recoverable situation is encountered.
"""
self._status = definitions.STATUS_INDICATOR_RUNNING
self._number_of_consumed_events = 0
self._number_of_consumed_reports = 0
self._number_of_consumed_sources = 0
self._number_of_consumed_warnings = 0
self._number_of_produced_events = 0
self._number_of_produced_reports = 0
self._number_of_produced_sources = 0
self._number_of_produced_warnings = 0
number_of_filtered_events = 0
logger.debug('Processing events.')
filter_limit = getattr(event_filter, 'limit', None)
for event in storage_writer.GetSortedEvents():
event_data_identifier = event.GetEventDataIdentifier()
if event_data_identifier:
event_data = storage_writer.GetEventDataByIdentifier(
event_data_identifier)
if event_data:
for attribute_name, attribute_value in event_data.GetAttributes():
setattr(event, attribute_name, attribute_value)
event_identifier = event.GetIdentifier()
event.tag = self._event_tag_index.GetEventTagByIdentifier(
storage_writer, event_identifier)
if event_filter:
filter_match = event_filter.Match(event)
else:
filter_match = None
# pylint: disable=singleton-comparison
if filter_match == False:
number_of_filtered_events += 1
continue
for event_queue in self._event_queues.values():
# TODO: Check for premature exit of analysis plugins.
event_queue.PushItem(event)
self._number_of_consumed_events += 1
if (event_filter and filter_limit and
filter_limit == self._number_of_consumed_events):
break
logger.debug('Finished pushing events to analysis plugins.')
# Signal that we have finished adding events.
for event_queue in self._event_queues.values():
event_queue.PushItem(plaso_queue.QueueAbort(), block=False)
logger.debug('Processing analysis plugin results.')
# TODO: use a task based approach.
plugin_names = [plugin_name for plugin_name in analysis_plugins.keys()]
while plugin_names:
for plugin_name in list(plugin_names):
if self._abort:
break
# TODO: temporary solution.
task = tasks.Task()
task.identifier = plugin_name
merge_ready = storage_writer.CheckTaskReadyForMerge(task)
if merge_ready:
storage_writer.PrepareMergeTaskStorage(task)
self._status = definitions.STATUS_INDICATOR_MERGING
event_queue = self._event_queues[plugin_name]
del self._event_queues[plugin_name]
event_queue.Close()
storage_merge_reader = storage_writer.StartMergeTaskStorage(task)
storage_merge_reader.MergeAttributeContainers(
callback=self._MergeEventTag)
# TODO: temporary solution.
plugin_names.remove(plugin_name)
self._status = definitions.STATUS_INDICATOR_RUNNING
self._number_of_produced_event_tags = (
storage_writer.number_of_event_tags)
self._number_of_produced_reports = (
storage_writer.number_of_analysis_reports)
try:
storage_writer.StopTaskStorage(abort=self._abort)
except (IOError, OSError) as exception:
logger.error('Unable to stop task storage with error: {0!s}'.format(
exception))
if self._abort:
logger.debug('Processing aborted.')
else:
logger.debug('Processing completed.')
events_counter = collections.Counter()
events_counter['Events filtered'] = number_of_filtered_events
events_counter['Events processed'] = self._number_of_consumed_events
return events_counter | 0.008032 |
def _cache(self, key, val):
"""
Request that a key/value pair be considered for caching.
"""
cache_size = (1 if util.dimensionless_contents(self.streams, self.kdims)
else self.cache_size)
if len(self) >= cache_size:
first_key = next(k for k in self.data)
self.data.pop(first_key)
self[key] = val | 0.007712 |
def configure_replicator_database(host, port, username=None, password=None):
"""
Connects to the database, checks the version and creates the
design document used by feat (if it doesn't exist).
@returns: IDatabaseConnection bound to _replicator database
"""
database = driver.Database(host, port, '_replicator', username, password)
connection = database.get_connection()
version = yield database.get_version()
if version < (1, 1, 0):
database.disconnect()
raise ValueError("Found couchdb version %r. "
"_replicator database has been introduced in 1.1.0." %
(version, ))
design_docs = view.DesignDocument.generate_from_views([Replications])
for doc in design_docs:
try:
doc2 = yield connection.get_document(doc.doc_id)
if doc.views != doc2.views or doc.filters != doc2.filters:
doc.rev = doc2.rev
yield connection.save_document(doc)
except NotFoundError:
yield connection.save_document(doc)
defer.returnValue(connection) | 0.000897 |
def _handle_fetch_response(self, responses):
"""The callback handling the successful response from the fetch request
Delivers the message list to the processor, handles per-message errors
(ConsumerFetchSizeTooSmall), triggers another fetch request
If the processor is still processing the last batch of messages, we
defer this processing until it's done. Otherwise, we start another
fetch request and submit the messages to the processor
"""
# Successful fetch, reset our retry delay
self.retry_delay = self.retry_init_delay
self._fetch_attempt_count = 1
# Check to see if we are still processing the last block we fetched...
if self._msg_block_d:
# We are still working through the last block of messages...
# We have to wait until it's done, then process this response
self._msg_block_d.addCallback(
lambda _: self._handle_fetch_response(responses))
return
# No ongoing processing, great, let's get some started.
# Request no longer outstanding, clear the deferred tracker so we
# can refetch
self._request_d = None
messages = []
try:
for resp in responses: # We should really only ever get one...
if resp.partition != self.partition:
log.warning(
"%r: Got response with partition: %r not our own: %r",
self, resp.partition, self.partition)
continue
# resp.messages is a KafkaCodec._decode_message_set_iter
# Note that 'message' here is really an OffsetAndMessage
for message in resp.messages:
# Check for messages included which are from prior to our
# desired offset: can happen due to compressed message sets
if message.offset < self._fetch_offset:
log.debug(
'Skipping message at offset: %d, because its '
'offset is less that our fetch offset: %d.',
message.offset, self._fetch_offset)
continue
# Create a 'SourcedMessage' and add it to the messages list
messages.append(
SourcedMessage(
message=message.message,
offset=message.offset, topic=self.topic,
partition=self.partition))
# Update our notion of from where to fetch.
self._fetch_offset = message.offset + 1
except ConsumerFetchSizeTooSmall:
# A message was too large for us to receive, given our current
# buffer size. Grow it until it works, or we hit our max
# Grow by 16x up to 1MB (could result in 16MB buf), then by 2x
factor = 2
if self.buffer_size <= 2**20:
factor = 16
if self.max_buffer_size is None:
# No limit, increase until we succeed or fail to alloc RAM
self.buffer_size *= factor
elif (self.max_buffer_size is not None and
self.buffer_size < self.max_buffer_size):
# Limited, but currently below it.
self.buffer_size = min(
self.buffer_size * factor, self.max_buffer_size)
else:
# We failed, and are already at our max. Nothing we can do but
# create a Failure and errback() our start() deferred
log.error("Max fetch size %d too small", self.max_buffer_size)
failure = Failure(
ConsumerFetchSizeTooSmall(
"Max buffer size:%d too small for message",
self.max_buffer_size))
self._start_d.errback(failure)
return
log.debug(
"Next message larger than fetch size, increasing "
"to %d (~2x) and retrying", self.buffer_size)
finally:
# If we were able to extract any messages, deliver them to the
# processor now.
if messages:
self._msg_block_d = Deferred()
self._process_messages(messages)
# start another fetch, if needed, but use callLater to avoid recursion
self._retry_fetch(0) | 0.00044 |
def hira2kata(text, ignore=''):
"""Convert Hiragana to Full-width (Zenkaku) Katakana.
Parameters
----------
text : str
Hiragana string.
ignore : str
Characters to be ignored in converting.
Return
------
str
Katakana string.
Examples
--------
>>> print(jaconv.hira2kata('ともえまみ'))
トモエマミ
>>> print(jaconv.hira2kata('まどまぎ', ignore='ど'))
マどマギ
"""
if ignore:
h2k_map = _exclude_ignorechar(ignore, H2K_TABLE.copy())
return _convert(text, h2k_map)
return _convert(text, H2K_TABLE) | 0.001715 |
def get(method, hmc, uri, uri_parms, logon_required):
"""Operation: Get CPC Energy Management Data (any CPC mode)."""
cpc_oid = uri_parms[0]
try:
cpc = hmc.cpcs.lookup_by_oid(cpc_oid)
except KeyError:
raise InvalidResourceError(method, uri)
energy_props = {
'cpc-power-cap-allowed':
cpc.properties.get('cpc-power-cap-allowed'),
'cpc-power-cap-current':
cpc.properties.get('cpc-power-cap-current'),
'cpc-power-cap-maximum':
cpc.properties.get('cpc-power-cap-maximum'),
'cpc-power-cap-minimum':
cpc.properties.get('cpc-power-cap-minimum'),
'cpc-power-capping-state':
cpc.properties.get('cpc-power-capping-state'),
'cpc-power-consumption':
cpc.properties.get('cpc-power-consumption'),
'cpc-power-rating':
cpc.properties.get('cpc-power-rating'),
'cpc-power-save-allowed':
cpc.properties.get('cpc-power-save-allowed'),
'cpc-power-saving':
cpc.properties.get('cpc-power-saving'),
'cpc-power-saving-state':
cpc.properties.get('cpc-power-saving-state'),
'zcpc-ambient-temperature':
cpc.properties.get('zcpc-ambient-temperature'),
'zcpc-dew-point':
cpc.properties.get('zcpc-dew-point'),
'zcpc-exhaust-temperature':
cpc.properties.get('zcpc-exhaust-temperature'),
'zcpc-heat-load':
cpc.properties.get('zcpc-heat-load'),
'zcpc-heat-load-forced-air':
cpc.properties.get('zcpc-heat-load-forced-air'),
'zcpc-heat-load-water':
cpc.properties.get('zcpc-heat-load-water'),
'zcpc-humidity':
cpc.properties.get('zcpc-humidity'),
'zcpc-maximum-potential-heat-load':
cpc.properties.get('zcpc-maximum-potential-heat-load'),
'zcpc-maximum-potential-power':
cpc.properties.get('zcpc-maximum-potential-power'),
'zcpc-power-cap-allowed':
cpc.properties.get('zcpc-power-cap-allowed'),
'zcpc-power-cap-current':
cpc.properties.get('zcpc-power-cap-current'),
'zcpc-power-cap-maximum':
cpc.properties.get('zcpc-power-cap-maximum'),
'zcpc-power-cap-minimum':
cpc.properties.get('zcpc-power-cap-minimum'),
'zcpc-power-capping-state':
cpc.properties.get('zcpc-power-capping-state'),
'zcpc-power-consumption':
cpc.properties.get('zcpc-power-consumption'),
'zcpc-power-rating':
cpc.properties.get('zcpc-power-rating'),
'zcpc-power-save-allowed':
cpc.properties.get('zcpc-power-save-allowed'),
'zcpc-power-saving':
cpc.properties.get('zcpc-power-saving'),
'zcpc-power-saving-state':
cpc.properties.get('zcpc-power-saving-state'),
}
cpc_data = {
'error-occurred': False,
'object-uri': cpc.uri,
'object-id': cpc.oid,
'class': 'cpcs',
'properties': energy_props,
}
result = {'objects': [cpc_data]}
return result | 0.00058 |
def unindex_template(self, tpl):
"""
Unindex a template from the `templates` container.
:param tpl: The template to un-index
:type tpl: alignak.objects.item.Item
:return: None
"""
name = getattr(tpl, 'name', '')
try:
del self.name_to_template[name]
except KeyError: # pragma: no cover, simple protection
pass | 0.004926 |
def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
privileged=False, user='', detach=False, stream=False,
socket=False, environment=None, workdir=None, demux=False):
"""
Run a command inside this container. Similar to
``docker exec``.
Args:
cmd (str or list): Command to be executed
stdout (bool): Attach to stdout. Default: ``True``
stderr (bool): Attach to stderr. Default: ``True``
stdin (bool): Attach to stdin. Default: ``False``
tty (bool): Allocate a pseudo-TTY. Default: False
privileged (bool): Run as privileged.
user (str): User to execute command as. Default: root
detach (bool): If true, detach from the exec command.
Default: False
stream (bool): Stream response data. Default: False
socket (bool): Return the connection socket to allow custom
read/write operations. Default: False
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
workdir (str): Path to working directory for this exec session
demux (bool): Return stdout and stderr separately
Returns:
(ExecResult): A tuple of (exit_code, output)
exit_code: (int):
Exit code for the executed command or ``None`` if
either ``stream`` or ``socket`` is ``True``.
output: (generator, bytes, or tuple):
If ``stream=True``, a generator yielding response chunks.
If ``socket=True``, a socket object for the connection.
If ``demux=True``, a tuple of two bytes: stdout and stderr.
A bytestring containing response data otherwise.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.exec_create(
self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
privileged=privileged, user=user, environment=environment,
workdir=workdir,
)
exec_output = self.client.api.exec_start(
resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket,
demux=demux
)
if socket or stream:
return ExecResult(None, exec_output)
return ExecResult(
self.client.api.exec_inspect(resp['Id'])['ExitCode'],
exec_output
) | 0.001494 |
def reassembly(self, info):
"""Reassembly procedure.
Positional arguments:
* info -- Info, info dict of packets to be reassembled
"""
BUFID = info.bufid # Buffer Identifier
FO = info.fo # Fragment Offset
IHL = info.ihl # Internet Header Length
MF = info.mf # More Fragments flag
TL = info.tl # Total Length
# when non-fragmented (possibly discarded) packet received
if not FO and not MF:
if BUFID in self._buffer:
self._dtgram += self.submit(self._buffer[BUFID])
del self._buffer[BUFID]
return
# initialise buffer with BUFID
if BUFID not in self._buffer:
self._buffer[BUFID] = dict(
TDL=0, # Total Data Length
RCVBT=bytearray(8191), # Fragment Received Bit Table
index=list(), # index record
header=bytearray(), # header buffer
datagram=bytearray(65535), # data buffer
)
# append packet index
self._buffer[BUFID]['index'].append(info.num)
# put data into data buffer
start = FO
stop = TL - IHL + FO
self._buffer[BUFID]['datagram'][start:stop] = info.payload
# set RCVBT bits (in 8 octets)
start = FO // 8
stop = FO // 8 + (TL - IHL + 7) // 8
self._buffer[BUFID]['RCVBT'][start:stop] = b'\x01' * (stop - start + 1)
# get total data length (header excludes)
if not MF:
TDL = TL - IHL + FO
# put header into header buffer
if not FO:
self._buffer[BUFID]['header'] = info.header
# when datagram is reassembled in whole
start = 0
stop = (TDL + 7) // 8
if TDL and all(self._buffer[BUFID]['RCVBT'][start:stop]):
self._dtgram += self.submit(self._buffer[BUFID], checked=True)
del self._buffer[BUFID] | 0.000971 |
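A worked example (illustrative only) of the buffer arithmetic above, for the second fragment of a datagram with a 20-byte header, total length 1500 and a fragment offset of 185 eight-octet units (1480 bytes):

```python
IHL, TL, FO = 20, 1500, 185 * 8  # FO expressed in bytes, as used above

data_start, data_stop = FO, TL - IHL + FO              # payload bytes 1480..2959
rcvbt_start = FO // 8                                  # 185
rcvbt_stop = FO // 8 + (TL - IHL + 7) // 8             # 370
print(data_start, data_stop, rcvbt_start, rcvbt_stop)  # 1480 2960 185 370
```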
async def get_friendly_name(self) -> Text:
"""
Let's use the first name of the user as friendly name. In some cases
the user object is incomplete, and in those cases the full user is
fetched.
"""
if 'first_name' not in self._user:
user = await self._get_full_user()
else:
user = self._user
return user.get('first_name') | 0.00489 |
def get_requires(self, requires_types):
"""Extracts requires of given types from metadata file, filter windows
specific requires.
"""
if not isinstance(requires_types, list):
requires_types = list(requires_types)
extracted_requires = []
for requires_name in requires_types:
for requires in self.json_metadata.get(requires_name, []):
if 'win' in requires.get('environment', {}):
continue
extracted_requires.extend(requires['requires'])
return extracted_requires | 0.003378 |
def _make_grid_of_axes(self,
bounding_rect=cfg.bounding_rect_default,
num_rows=cfg.num_rows_per_view_default,
num_cols=cfg.num_cols_grid_default,
axis_pad=cfg.axis_pad_default,
commn_annot=None,
**axis_kwargs):
"""Creates a grid of axes bounded within a given rectangle."""
axes_in_grid = list()
extents = self._compute_cell_extents_grid(bounding_rect=bounding_rect,
num_cols=num_cols,
num_rows=num_rows, axis_pad=axis_pad)
for cell_ext in extents:
ax_cell = self.fig.add_axes(cell_ext, frameon=False, visible=False,
**axis_kwargs)
if commn_annot is not None:
ax_cell.set_title(commn_annot)
ax_cell.set_axis_off()
axes_in_grid.append(ax_cell)
return axes_in_grid | 0.008411 |
def delay(self, seconds=0, minutes=0, msg=None):
""" Delay protocol execution for a specific amount of time.
:param float seconds: A time to delay in seconds
:param float minutes: A time to delay in minutes
If both `seconds` and `minutes` are specified, they will be added.
"""
delay_time = seconds + minutes * 60
self._hw_manager.hardware.delay(delay_time) | 0.004819 |
def connect(self, dialect=None, timeout=60):
"""
Will connect to the target server and negotiate the capabilities
with the client. Once setup, the client MUST call the disconnect()
function to close the listener thread. This function will populate
various connection properties that denote the capabilities of the
server.
:param dialect: If specified, forces the dialect that is negotiated
with the server, if not set, then the newest dialect supported by
the server is used up to SMB 3.1.1
:param timeout: The timeout in seconds to wait for the initial
negotiation process to complete
"""
log.info("Setting up transport connection")
self.transport.connect()
log.info("Starting negotiation with SMB server")
smb_response = self._send_smb2_negotiate(dialect, timeout)
log.info("Negotiated dialect: %s"
% str(smb_response['dialect_revision']))
self.dialect = smb_response['dialect_revision'].get_value()
self.max_transact_size = smb_response['max_transact_size'].get_value()
self.max_read_size = smb_response['max_read_size'].get_value()
self.max_write_size = smb_response['max_write_size'].get_value()
self.server_guid = smb_response['server_guid'].get_value()
self.gss_negotiate_token = smb_response['buffer'].get_value()
if not self.require_signing and \
smb_response['security_mode'].has_flag(
SecurityMode.SMB2_NEGOTIATE_SIGNING_REQUIRED):
self.require_signing = True
log.info("Connection require signing: %s" % self.require_signing)
capabilities = smb_response['capabilities']
# SMB 2.1
if self.dialect >= Dialects.SMB_2_1_0:
self.supports_file_leasing = \
capabilities.has_flag(Capabilities.SMB2_GLOBAL_CAP_LEASING)
self.supports_multi_credit = \
capabilities.has_flag(Capabilities.SMB2_GLOBAL_CAP_LARGE_MTU)
# SMB 3.x
if self.dialect >= Dialects.SMB_3_0_0:
self.supports_directory_leasing = capabilities.has_flag(
Capabilities.SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
self.supports_multi_channel = capabilities.has_flag(
Capabilities.SMB2_GLOBAL_CAP_MULTI_CHANNEL)
# TODO: SMB2_GLOBAL_CAP_PERSISTENT_HANDLES
self.supports_persistent_handles = False
self.supports_encryption = capabilities.has_flag(
Capabilities.SMB2_GLOBAL_CAP_ENCRYPTION) \
and self.dialect < Dialects.SMB_3_1_1
self.server_capabilities = capabilities
self.server_security_mode = \
smb_response['security_mode'].get_value()
# TODO: Check/add server to server_list in Client Page 203
# SMB 3.1
if self.dialect >= Dialects.SMB_3_1_1:
for context in smb_response['negotiate_context_list']:
if context['context_type'].get_value() == \
NegotiateContextType.SMB2_ENCRYPTION_CAPABILITIES:
cipher_id = context['data']['ciphers'][0]
self.cipher_id = Ciphers.get_cipher(cipher_id)
self.supports_encryption = self.cipher_id != 0
else:
hash_id = context['data']['hash_algorithms'][0]
self.preauth_integrity_hash_id = \
HashAlgorithms.get_algorithm(hash_id) | 0.000559 |
def register(name, fn=None):
"""
Decorator to register a function as a hook
Register hook for ``hook_name``. Can be used as a decorator::
@register('hook_name')
def my_hook(...):
pass
or as a function call::
def my_hook(...):
pass
register('hook_name', my_hook)
:param name: str Name of the hook/callback to register it as
:param fn: function to register in the hook/callback
:return: function Decorator if applied as a decorator
"""
def _hook_add(func):
if name not in _hooks:
logger.debug("Creating new hook %s" % name)
_hooks[name] = []
        logger.debug('Registering hook %s for function %s' % (name, func))
_hooks[name].append(func)
if fn is None:
# Behave like a decorator
def decorator(func):
_hook_add(func)
return func
return decorator
else:
# Behave like a function, just register hook
_hook_add(fn) | 0.000981 |
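
A minimal dispatch sketch to show how the `_hooks` mapping built above might be consumed; the `trigger` helper below is an assumption, not part of the snippet:

def trigger(name, *args, **kwargs):
    # hypothetical dispatcher: invoke every function registered under `name`
    for func in _hooks.get(name, []):
        func(*args, **kwargs)

@register('on_save')
def log_save(path):
    print("saved %s" % path)

trigger('on_save', '/tmp/report.txt')  # -> saved /tmp/report.txt
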
def change_object_link_card(obj, perms):
"""
If the user has permission to change `obj`, show a link to its Admin page.
obj -- An object like Movie, Play, ClassicalWork, Publication, etc.
perms -- The `perms` object that it's the template.
"""
# eg: 'movie' or 'classicalwork':
name = obj.__class__.__name__.lower()
permission = 'spectator.can_edit_{}'.format(name)
# eg: 'admin:events_classicalwork_change':
change_url_name = 'admin:{}_{}_change'.format(obj._meta.app_label, name)
return {
'display_link': (permission in perms),
'change_url': reverse(change_url_name, args=[obj.id])
} | 0.001538 |
def get(self, sid):
"""
Constructs a FunctionContext
:param sid: The sid
:returns: twilio.rest.serverless.v1.service.function.FunctionContext
:rtype: twilio.rest.serverless.v1.service.function.FunctionContext
"""
return FunctionContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) | 0.008287 |
def parse_endnotes(document, xmlcontent):
"""Parse endnotes document.
Endnotes are defined in file 'endnotes.xml'
"""
endnotes = etree.fromstring(xmlcontent)
document.endnotes = {}
for note in endnotes.xpath('.//w:endnote', namespaces=NAMESPACES):
paragraphs = [parse_paragraph(document, para) for para in note.xpath('.//w:p', namespaces=NAMESPACES)]
document.endnotes[note.attrib[_name('{{{w}}}id')]] = paragraphs | 0.004367 |
def _ssl_wrap_socket(self, sock):
"""Wrap SSLSocket around the Socket.
:param socket.socket sock:
:rtype: SSLSocket
"""
context = self._parameters['ssl_options'].get('context')
if context is not None:
hostname = self._parameters['ssl_options'].get('server_hostname')
return context.wrap_socket(
sock, do_handshake_on_connect=True,
server_hostname=hostname
)
if 'ssl_version' not in self._parameters['ssl_options']:
self._parameters['ssl_options']['ssl_version'] = (
compatibility.DEFAULT_SSL_VERSION
)
return ssl.wrap_socket(
sock, do_handshake_on_connect=True,
**self._parameters['ssl_options']
) | 0.002494 |
def _validate_min(self, min_value, field, value):
""" {'nullable': False } """
try:
if value < min_value:
self._error(field, errors.MIN_VALUE)
except TypeError:
pass | 0.008734 |
def journal_event(events):
"""Group multiple events into a single one."""
reasons = set(chain.from_iterable(e.reasons for e in events))
attributes = set(chain.from_iterable(e.file_attributes for e in events))
return JrnlEvent(events[0].file_reference_number,
events[0].parent_file_reference_number,
events[0].file_name,
events[0].timestamp,
list(reasons), list(attributes)) | 0.00211 |
def cost_zerg_corrected(self) -> "Cost":
""" This returns 25 for extractor and 200 for spawning pool instead of 75 and 250 respectively """
if self.race == Race.Zerg and Attribute.Structure.value in self.attributes:
# a = self._game_data.units(UnitTypeId.ZERGLING)
# print(a)
# print(vars(a))
return Cost(
self._proto.mineral_cost - 50,
self._proto.vespene_cost,
self._proto.build_time
)
else:
return self.cost | 0.00722 |
def QA_indicator_BBI(DataFrame, N1=3, N2=6, N3=12, N4=24):
    'Bull and Bear Index (BBI) indicator'
C = DataFrame['close']
bbi = (MA(C, N1) + MA(C, N2) + MA(C, N3) + MA(C, N4)) / 4
DICT = {'BBI': bbi}
return pd.DataFrame(DICT) | 0.004695 |
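
A worked sketch of the same BBI formula with plain pandas, assuming `MA` is a simple rolling mean (the sample prices below are illustrative):

import pandas as pd

def MA(series, n):
    # assumed helper: simple rolling mean over n bars
    return series.rolling(n).mean()

close = pd.Series([10.0, 10.2, 10.1, 10.4, 10.6, 10.5, 10.8, 11.0,
                   10.9, 11.2, 11.1, 11.3, 11.5, 11.4, 11.6, 11.8,
                   11.7, 11.9, 12.0, 12.1, 12.3, 12.2, 12.4, 12.5])
bbi = (MA(close, 3) + MA(close, 6) + MA(close, 12) + MA(close, 24)) / 4
print(bbi.iloc[-1])  # the last bar, the only one here where all four averages are defined
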
  def queue(self, new_job_id=None, new_job_name=None, queue_name=None):
"""Sets the status of this job to 'queued' or 'waiting'."""
# update the job id (i.e., when the job is executed in the grid)
if new_job_id is not None:
self.id = new_job_id
if new_job_name is not None:
self.name = new_job_name
if queue_name is not None:
self.queue_name = queue_name
new_status = 'queued'
self.result = None
# check if we have to wait for another job to finish
for job in self.get_jobs_we_wait_for():
if job.status not in ('success', 'failure'):
new_status = 'waiting'
elif self.stop_on_failure and job.status == 'failure':
new_status = 'failure'
# reset the queued jobs that depend on us to waiting status
for job in self.get_jobs_waiting_for_us():
if job.status == 'queued':
job.status = 'failure' if new_status == 'failure' else 'waiting'
self.status = new_status
for array_job in self.array:
if array_job.status not in ('success', 'failure'):
array_job.status = new_status | 0.012739 |
def setModelData(self, editor, model, index):
"""Updates the model after changing data in the editor.
Args:
editor (QtGui.QComboBox): The current editor for the item. Should be
a `QtGui.QComboBox` as defined in `createEditor`.
model (ColumnDtypeModel): The model which holds the displayed data.
index (QtCore.QModelIndex): The index of the current item of the model.
"""
model.setData(index, editor.itemText(editor.currentIndex())) | 0.007737 |
def get_sources(src_dir='src', ending='.cpp'):
"""Function to get a list of files ending with `ending` in `src_dir`."""
return [os.path.join(src_dir, fnm) for fnm in os.listdir(src_dir) if fnm.endswith(ending)] | 0.009174 |
def compute(self, runner_results, setup=False, poll=False, ignore_errors=False):
''' walk through all results and increment stats '''
for (host, value) in runner_results.get('contacted', {}).iteritems():
if not ignore_errors and (('failed' in value and bool(value['failed'])) or
('rc' in value and value['rc'] != 0)):
self._increment('failures', host)
elif 'skipped' in value and bool(value['skipped']):
self._increment('skipped', host)
elif 'changed' in value and bool(value['changed']):
if not setup and not poll:
self._increment('changed', host)
self._increment('ok', host)
else:
if not poll or ('finished' in value and bool(value['finished'])):
self._increment('ok', host)
for (host, value) in runner_results.get('dark', {}).iteritems():
self._increment('dark', host) | 0.007049 |
def _remove_overlapped_date_str(self, results: List[List[dict]]) -> List[Extraction]:
"""
        Some strings may be matched by multiple date templates;
        deduplicate the results and return a single list.
"""
res = []
all_results = []
for x in results:
all_results = all_results + x
if not all_results or len(all_results) == 0:
return list()
all_results.sort(key=lambda k: k['start'])
cur_max = None
i = 0
while i < len(all_results) and not cur_max:
if self._post_check(all_results[i]):
cur_max = all_results[i]
i += 1
if not cur_max:
return res
while i < len(all_results):
x = all_results[i]
i += 1
if not self._post_check(x):
continue
if cur_max['end'] <= x['start']:
parsed_date = self._parse_date(cur_max)
if parsed_date:
if self._settings[EXTRACT_FIRST_DATE_ONLY]:
return res
res.append(parsed_date)
cur_max = x
else:
if len(x['value']) > len(cur_max['value']):
cur_max = x
elif len(x['value']) == len(cur_max['value']):
if x['order'] in ['SINGLE_YEAR']:
cur_max = x
elif len(x['order']) == len(cur_max['order']):
if len(x['groups']) < len(cur_max['groups']):
cur_max = x
elif len(x['groups']) == len(cur_max['groups']):
if sum(ele is not None for ele in x['groups']) < sum(ele is not None for ele in cur_max['groups']):
cur_max = x
elif self._settings[PREFER_LANGUAGE_DATE_ORDER] and self._lan in language_date_order:
if x['order'] == language_date_order[self._lan]:
cur_max = x
elif x['order'] == self._settings[PREFERRED_DATE_ORDER]:
cur_max = x
elif x['order'] == self._settings[PREFERRED_DATE_ORDER]:
cur_max = x
parsed_date = self._parse_date(cur_max)
if parsed_date:
if self._settings[EXTRACT_FIRST_DATE_ONLY]:
return res
res.append(parsed_date)
return res | 0.003106 |
def setup_logging(name):
"""Setup logging according to environment variables."""
logger = logging.getLogger(__name__)
if 'NVIM_PYTHON_LOG_FILE' in os.environ:
prefix = os.environ['NVIM_PYTHON_LOG_FILE'].strip()
major_version = sys.version_info[0]
logfile = '{}_py{}_{}'.format(prefix, major_version, name)
handler = logging.FileHandler(logfile, 'w', 'utf-8')
handler.formatter = logging.Formatter(
'%(asctime)s [%(levelname)s @ '
'%(filename)s:%(funcName)s:%(lineno)s] %(process)s - %(message)s')
logging.root.addHandler(handler)
level = logging.INFO
if 'NVIM_PYTHON_LOG_LEVEL' in os.environ:
lvl = getattr(logging,
os.environ['NVIM_PYTHON_LOG_LEVEL'].strip(),
level)
if isinstance(lvl, int):
level = lvl
logger.setLevel(level) | 0.001079 |
def add(self, pattern_txt):
"""Add a pattern to the list.
Args:
pattern_txt (str list): the pattern, as a list of lines.
"""
self.patterns[len(pattern_txt)] = pattern_txt
low = 0
high = len(pattern_txt) - 1
while not pattern_txt[low]:
low += 1
while not pattern_txt[high]:
high -= 1
min_pattern = pattern_txt[low:high + 1]
self.min_patterns[len(min_pattern)] = min_pattern | 0.004057 |
def get_mac_address_table_output_mac_address_table_mac_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_mac_address_table = ET.Element("get_mac_address_table")
config = get_mac_address_table
output = ET.SubElement(get_mac_address_table, "output")
mac_address_table = ET.SubElement(output, "mac-address-table")
vlanid_key = ET.SubElement(mac_address_table, "vlanid")
vlanid_key.text = kwargs.pop('vlanid')
mac_address_key = ET.SubElement(mac_address_table, "mac-address")
mac_address_key.text = kwargs.pop('mac_address')
mac_state = ET.SubElement(mac_address_table, "mac-state")
mac_state.text = kwargs.pop('mac_state')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.002358 |
def _energy_distance_imp(x, y, exponent=1):
"""
Real implementation of :func:`energy_distance`.
This function is used to make parameter ``exponent`` keyword-only in
Python 2.
"""
x = _transform_to_2d(x)
y = _transform_to_2d(y)
_check_valid_energy_exponent(exponent)
distance_xx = distances.pairwise_distances(x, exponent=exponent)
distance_yy = distances.pairwise_distances(y, exponent=exponent)
distance_xy = distances.pairwise_distances(x, y, exponent=exponent)
return _energy_distance_from_distance_matrices(distance_xx=distance_xx,
distance_yy=distance_yy,
distance_xy=distance_xy) | 0.001351 |
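
The implementation above relies on distance-matrix helpers that are not shown; a self-contained sketch of the same statistic, 2·E‖X−Y‖ − E‖X−X′‖ − E‖Y−Y′‖, with the exponent fixed at 1 (an illustration, not the library's code):

import numpy as np
from scipy.spatial.distance import cdist

def energy_distance_sketch(x, y):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    if x.ndim == 1:
        x = x[:, None]
    if y.ndim == 1:
        y = y[:, None]
    # mean pairwise Euclidean distances between and within the two samples
    d_xy = cdist(x, y).mean()
    d_xx = cdist(x, x).mean()
    d_yy = cdist(y, y).mean()
    return 2 * d_xy - d_xx - d_yy

rng = np.random.default_rng(0)
a = rng.normal(0.0, 1.0, size=(200, 2))
b = rng.normal(0.0, 1.0, size=(200, 2))
c = rng.normal(1.0, 1.0, size=(200, 2))
print(energy_distance_sketch(a, b))  # close to 0: same distribution
print(energy_distance_sketch(a, c))  # clearly positive: shifted distribution
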
def _add_job_from_spec(self, job_json, use_job_id=True):
""" Add a single job to the Dagobah from a spec. """
job_id = (job_json['job_id']
if use_job_id
else self.backend.get_new_job_id())
self.add_job(str(job_json['name']), job_id)
job = self.get_job(job_json['name'])
if job_json.get('cron_schedule', None):
job.schedule(job_json['cron_schedule'])
for task in job_json.get('tasks', []):
self.add_task_to_job(job,
str(task['command']),
str(task['name']),
soft_timeout=task.get('soft_timeout', 0),
hard_timeout=task.get('hard_timeout', 0),
hostname=task.get('hostname', None))
dependencies = job_json.get('dependencies', {})
for from_node, to_nodes in dependencies.iteritems():
for to_node in to_nodes:
job.add_dependency(from_node, to_node)
if job_json.get('notes', None):
job.update_job_notes(job_json['notes']) | 0.001733 |
def command(self, *args, **kwargs):
"""A shortcut decorator for declaring and attaching a command to
the group. This takes the same arguments as :func:`command` but
immediately registers the created command with this instance by
calling into :meth:`add_command`.
"""
def decorator(f):
cmd = command(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator | 0.004338 |
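
A usage sketch in the click style this shortcut mirrors (the group, command, and option names are illustrative):

import click

@click.group()
def cli():
    """Top-level command group."""

@cli.command()  # the shortcut above: declare and attach in one step
@click.option("--name", default="world")
def hello(name):
    click.echo("Hello, %s!" % name)

if __name__ == "__main__":
    cli()
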
def _configure_port_binding(self, is_provider_vlan, duplicate_type,
is_native,
switch_ip, vlan_id,
intf_type, nexus_port, vni):
"""Conditionally calls vlan and port Nexus drivers."""
# This implies VLAN, VNI, and Port are all duplicate.
# Then there is nothing to configure in Nexus.
if duplicate_type == const.DUPLICATE_PORT:
return
auto_create, auto_trunk = self._gather_config_parms(
is_provider_vlan, vlan_id)
# if type DUPLICATE_VLAN, don't create vlan
if duplicate_type == const.DUPLICATE_VLAN:
auto_create = False
if auto_create and auto_trunk:
LOG.debug("Nexus: create vlan %s and add to interface", vlan_id)
self.driver.create_and_trunk_vlan(
switch_ip, vlan_id, intf_type,
nexus_port, vni, is_native)
elif auto_create:
LOG.debug("Nexus: create vlan %s", vlan_id)
self.driver.create_vlan(switch_ip, vlan_id, vni)
elif auto_trunk:
LOG.debug("Nexus: trunk vlan %s", vlan_id)
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, vlan_id,
intf_type, nexus_port, is_native) | 0.003759 |
def dir_name_changed(self, widget, data=None):
"""
        Controls the "Full Directory project name" label and stores
        the current project directory in the configuration manager.
"""
config_manager.set_config_value("da.project_dir", self.dir_name.get_text())
self.update_full_label() | 0.008596 |
async def save(self, fp, *, seek_begin=True, use_cached=False):
"""|coro|
Saves this attachment into a file-like object.
Parameters
-----------
fp: Union[BinaryIO, :class:`os.PathLike`]
The file-like object to save this attachment to or the filename
to use. If a filename is passed then a file is created with that
filename and used instead.
seek_begin: :class:`bool`
Whether to seek to the beginning of the file after saving is
successfully done.
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
on some type of attachments.
Raises
--------
HTTPException
Saving the attachment failed.
NotFound
The attachment was deleted.
Returns
--------
:class:`int`
The number of bytes written.
"""
url = self.proxy_url if use_cached else self.url
data = await self._http.get_from_cdn(url)
if isinstance(fp, io.IOBase) and fp.writable():
written = fp.write(data)
if seek_begin:
fp.seek(0)
return written
else:
with open(fp, 'wb') as f:
return f.write(data) | 0.004159 |
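
A usage sketch assuming discord.py 2.x; the event handler and filenames are illustrative:

import discord

client = discord.Client(intents=discord.Intents.default())

@client.event
async def on_message(message):
    # save every attachment on the incoming message under its original filename
    for attachment in message.attachments:
        written = await attachment.save(attachment.filename)
        print("wrote %d bytes to %s" % (written, attachment.filename))

# client.run(TOKEN) would start the bot; omitted here
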
def _user(self, user, real_name):
"""
Sends the USER message.
Required arguments:
* user - Username to send.
* real_name - Real name to send.
"""
with self.lock:
self.send('USER %s 0 * :%s' % (user, real_name))
if self.readable():
self._recv()
self.stepback() | 0.005391 |
def add_ignore_patterns(self, *patterns):
"""
Adds an ignore pattern to the list for ignore patterns.
Ignore patterns are used to filter out unwanted files or directories
from the file system model.
A pattern is a Unix shell-style wildcards. See :mod:`fnmatch` for a
deeper explanation about the shell-style wildcards.
"""
for ptrn in patterns:
if isinstance(ptrn, list):
for p in ptrn:
self._ignored_patterns.append(p)
else:
self._ignored_patterns.append(ptrn) | 0.003317 |
def write_registers(self, registeraddress, values):
"""Write integers to 16-bit registers in the slave.
The slave register can hold integer values in the range 0 to 65535 ("Unsigned INT16").
Uses Modbus function code 16.
The number of registers that will be written is defined by the length of the ``values`` list.
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* values (list of int): The values to store in the slave registers.
Any scaling of the register data, or converting it to negative number (two's complement)
must be done manually.
Returns:
None
Raises:
ValueError, TypeError, IOError
"""
if not isinstance(values, list):
raise TypeError('The "values parameter" must be a list. Given: {0!r}'.format(values))
_checkInt(len(values), minvalue=1, description='length of input list')
# Note: The content of the list is checked at content conversion.
self._genericCommand(16, registeraddress, values, numberOfRegisters=len(values), payloadformat='registers') | 0.006711 |
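
A usage sketch in the style of the minimalmodbus API this method comes from; the serial port, baudrate, slave address, and register address are assumptions:

import minimalmodbus

instrument = minimalmodbus.Instrument('/dev/ttyUSB0', slaveaddress=1)
instrument.serial.baudrate = 19200

# write three consecutive 16-bit holding registers starting at address 100
instrument.write_registers(100, [10, 20, 30])
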
def record_modify_controlfield(rec, tag, controlfield_value,
field_position_global=None,
field_position_local=None):
"""Modify controlfield at position specified by tag and field number."""
field = record_get_field(
rec, tag,
field_position_global=field_position_global,
field_position_local=field_position_local)
new_field = (field[0], field[1], field[2], controlfield_value, field[4])
record_replace_field(
rec, tag, new_field,
field_position_global=field_position_global,
field_position_local=field_position_local) | 0.00155 |
def camel_to_snake(name):
"""Converts CamelCase to snake_case.
Args:
name (string): The name to convert from CamelCase to snake_case.
Returns:
string: Converted string.
"""
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() | 0.003135 |
def _proxy(self):
"""
        Generate an instance context for the instance; the context is capable of
        performing various actions. All instance actions are proxied to the context.
:returns: ServiceContext for this ServiceInstance
:rtype: twilio.rest.preview.acc_security.service.ServiceContext
"""
if self._context is None:
self._context = ServiceContext(self._version, sid=self._solution['sid'], )
return self._context | 0.010246 |
def loads_msgpack(buf):
"""
Args:
buf: the output of `dumps`.
"""
# Since 0.6, the default max size was set to 1MB.
# We change it to approximately 1G.
return msgpack.loads(buf, raw=False,
max_bin_len=MAX_MSGPACK_LEN,
max_array_len=MAX_MSGPACK_LEN,
max_map_len=MAX_MSGPACK_LEN,
max_str_len=MAX_MSGPACK_LEN) | 0.002283 |
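
A round-trip sketch for the helper above, assuming the helper and its MAX_MSGPACK_LEN constant are in scope; `msgpack.dumps` is the library's alias for `packb`, and the payload is illustrative:

import msgpack

payload = {"step": 3, "loss": 0.25, "tags": ["train", "baseline"]}
buf = msgpack.dumps(payload, use_bin_type=True)  # bytes accepted by loads_msgpack
restored = loads_msgpack(buf)
assert restored == payload
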
def is_mixed_script(string, allowed_aliases=['COMMON']):
"""Checks if ``string`` contains mixed-scripts content, excluding script
blocks aliases in ``allowed_aliases``.
E.g. ``B. C`` is not considered mixed-scripts by default: it contains characters
from **Latin** and **Common**, but **Common** is excluded by default.
>>> confusables.is_mixed_script('Abç')
False
>>> confusables.is_mixed_script('ρτ.τ')
False
>>> confusables.is_mixed_script('ρτ.τ', allowed_aliases=[])
True
>>> confusables.is_mixed_script('Alloτ')
True
:param string: A unicode string
:type string: str
:param allowed_aliases: Script blocks aliases not to consider.
:type allowed_aliases: list(str)
:return: Whether ``string`` is considered mixed-scripts or not.
:rtype: bool
"""
allowed_aliases = [a.upper() for a in allowed_aliases]
cats = unique_aliases(string) - set(allowed_aliases)
return len(cats) > 1 | 0.002066 |
def compute_metric(self, components):
"""Compute recall from `components`"""
numerator = components[RECALL_RELEVANT_RETRIEVED]
denominator = components[RECALL_RELEVANT]
if denominator == 0.:
if numerator == 0:
return 1.
else:
                raise ValueError('relevant retrieved count is nonzero but relevant count is zero')
else:
return numerator/denominator | 0.005141 |
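
A worked instance of the recall formula above with illustrative keys standing in for RECALL_RELEVANT_RETRIEVED and RECALL_RELEVANT (8 relevant documents retrieved out of 10 relevant overall):

components = {"relevant_retrieved": 8.0, "relevant": 10.0}  # hypothetical component keys
recall = components["relevant_retrieved"] / components["relevant"]
print(recall)  # 0.8
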
def _resolve_jars_info(self, targets, classpath_products):
"""Consults ivy_jar_products to export the external libraries.
:return: mapping of jar_id -> { 'default' : <jar_file>,
'sources' : <jar_file>,
'javadoc' : <jar_file>,
<other_confs> : <jar_file>,
}
"""
mapping = defaultdict(dict)
jar_products = classpath_products.get_artifact_classpath_entries_for_targets(
targets, respect_excludes=False)
for conf, jar_entry in jar_products:
conf = jar_entry.coordinate.classifier or 'default'
mapping[self._jar_id(jar_entry.coordinate)][conf] = jar_entry.cache_path
return mapping | 0.005155 |
def get_3d_markers_residual(
self, component_info=None, data=None, component_position=None
):
"""Get 3D markers with residual."""
return self._get_3d_markers(
RT3DMarkerPositionResidual, component_info, data, component_position
) | 0.01444 |
def convert_documentation(nb_path):
"""Run only the document conversion portion of the notebook conversion
The final document will not be completel
"""
with open(nb_path) as f:
nb = nbformat.reads(f.read(), as_version=4)
doc = ExtractInlineMetatabDoc(package_url="metapack+file:" + dirname(nb_path)).run(nb)
package_name = doc.as_version(None)
output_dir = join(getcwd(), package_name)
de = DocumentationExporter(config=Config(), log=logger, metadata=doc_metadata(doc))
prt('Converting documentation')
output, resources = de.from_filename(nb_path)
fw = FilesWriter()
fw.build_directory = join(output_dir, 'docs')
fw.write(output, resources, notebook_name='notebook')
prt("Wrote documentation to {}".format(fw.build_directory)) | 0.00375 |