text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def update(self, *, name=None, show_headers=None, show_totals=None, style=None):
"""
Updates this table
:param str name: the name of the table
:param bool show_headers: whether or not to show the headers
:param bool show_totals: whether or not to show the totals
:param str style: the style of the table
:return: Success or Failure
"""
if name is None and show_headers is None and show_totals is None and style is None:
raise ValueError('Provide at least one parameter to update')
data = {}
if name is not None:
data['name'] = name
if show_headers is not None:
data['showHeaders'] = show_headers
if show_totals is not None:
data['showTotals'] = show_totals
if style is not None:
data['style'] = style
response = self.session.patch(self.build_url(''), data=data)
if not response:
return False
data = response.json()
self.name = data.get('name', self.name)
self.show_headers = data.get('showHeaders', self.show_headers)
self.show_totals = data.get('showTotals', self.show_totals)
self.style = data.get('style', self.style)
return True | 0.003239 |
def degree_histogram(G, t=None):
"""Return a list of the frequency of each degree value.
Parameters
----------
G : Graph object
DyNetx graph object
t : snapshot id (default=None)
snapshot id
Returns
-------
hist : list
A list of frequencies of degrees.
The degree values are the index in the list.
Notes
-----
Note: the bins are width one, hence len(list) can be large
(Order(number_of_edges))
"""
counts = Counter(d for n, d in G.degree(t=t).items())
return [counts.get(i, 0) for i in range(max(counts) + 1)] | 0.001493 |
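# The histogram construction above is just a Counter over degree values; a minimal
# standalone sketch, with a hand-built degree mapping standing in for G.degree(t=t):
from collections import Counter

degrees = {'a': 2, 'b': 1, 'c': 2, 'd': 3}  # node -> degree (illustrative values)
counts = Counter(degrees.values())
hist = [counts.get(i, 0) for i in range(max(counts) + 1)]
print(hist)  # [0, 1, 2, 1]: one node of degree 1, two of degree 2, one of degree 3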
def main(fw_name, args=None, items=None):
"""
Reusable entry point. Arguments are parsed
via the argparse-subcommands configured via
each Command class found in globals(). Stop
exceptions are propagated to callers.
The name of the framework will be used in logging
and similar.
"""
global DEBUG_LEVEL
global FRAMEWORK_NAME
debug_name = "%s_DEBUG_LEVEL" % fw_name.upper()
if debug_name in os.environ:
try:
DEBUG_LEVEL = int(os.environ.get(debug_name))
except ValueError:
DEBUG_LEVEL = 10 # Assume poorly formatted means "debug"
FRAMEWORK_NAME = fw_name
if args is None:
args = sys.argv[1:]
if items is None:
items = list(globals().items())
yaclifw_parser, sub_parsers = parsers()
for name, MyCommand in sorted(items):
if not isinstance(MyCommand, type):
continue
if not issubclass(MyCommand, Command):
continue
if MyCommand.NAME == "abstract":
continue
MyCommand(sub_parsers)
ns = yaclifw_parser.parse_args(args)
ns.func(ns)
if hasattr(ns, 'callback'):
if callable(ns.callback):
ns.callback()
else:
raise Stop(3, "Callback not callable") | 0.000776 |
def path_filter(extensions, exclude_paths=None):
"""
Returns a function that returns True if a filepath is acceptable.
@param extensions An array of strings. Specifies what file
extensions should be accepted by the
filter. If None, we default to the Unix glob
`*` and match every file extension.
@param exclude_paths An array of strings which represents filepaths
that should never be accepted by the filter. Unix
shell-style wildcards are supported.
@return function A filter function that will only return True
when a filepath is acceptable under the above
conditions.
>>> list(map(path_filter(extensions=['js', 'php']),
... ['./profile.php', './q.jjs']))
[True, False]
>>> list(map(path_filter(extensions=['*'],
... exclude_paths=['html']),
... ['./html/x.php', './lib/y.js']))
[False, True]
>>> list(map(path_filter(extensions=['js', 'BUILD']),
... ['./a.js', './BUILD', './profile.php']))
[True, True, False]
>>> list(map(path_filter(extensions=['js'],
... exclude_paths=['*/node_modules/*']),
... ['./a.js', './tools/node_modules/dep.js']))
[True, False]
"""
exclude_paths = exclude_paths or []
def the_filter(path):
if not any(matches_extension(path, extension)
for extension in extensions):
return False
if exclude_paths:
for excluded in exclude_paths:
if (path.startswith(excluded) or
path.startswith('./' + excluded) or
fnmatch.fnmatch(path, excluded)):
return False
return True
return the_filter | 0.000532 |
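# A simplified, self-contained sketch of the same filtering idea using only fnmatch
# (this treats extensions as plain glob suffixes and is not the matches_extension
# helper used above):
import fnmatch

def make_filter(extensions, exclude_paths=()):
    def accept(path):
        ok = '*' in extensions or any(
            fnmatch.fnmatch(path, '*.' + ext) for ext in extensions)
        if not ok:
            return False
        return not any(fnmatch.fnmatch(path, pat) for pat in exclude_paths)
    return accept

js_only = make_filter(['js'], exclude_paths=['*/node_modules/*'])
print([js_only(p) for p in ['./a.js', './tools/node_modules/dep.js']])  # [True, False]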
def types(subtag):
"""
Get the types of a subtag string (excludes redundant and grandfathered).
:param str subtag: subtag.
:return: list of types. The return list can be empty.
"""
if subtag in index:
types = index[subtag]
return [type for type in types.keys() if type not in ('redundant', 'grandfathered')]
else:
return [] | 0.009501 |
def a(self, text, count=1):
"""
Return the appropriate indefinite article followed by text.
The indefinite article is either 'a' or 'an'.
If count is not one, then return count followed by text
instead of 'a' or 'an'.
Whitespace at the start and end is preserved.
"""
mo = re.search(r"\A(\s*)(?:an?\s+)?(.+?)(\s*)\Z", text, re.IGNORECASE)
if mo:
word = mo.group(2)
if not word:
return text
pre = mo.group(1)
post = mo.group(3)
result = self._indef_article(word, count)
return "{}{}{}".format(pre, result, post)
return "" | 0.00289 |
def send(self, request):
""" Send a request to the server and wait for its response.
Args:
request (Request): Reference to a request object that is sent to the server.
Returns:
Response: The response from the server to the request.
"""
self._connection.connection.rpush(self._request_key, pickle.dumps(request))
resp_key = '{}:{}'.format(SIGNAL_REDIS_PREFIX, request.uid)
while True:
if self._connection.polling_time > 0.0:
sleep(self._connection.polling_time)
response_data = self._connection.connection.get(resp_key)
if response_data is not None:
self._connection.connection.delete(resp_key)
break
return pickle.loads(response_data) | 0.004932 |
def find_subgraphs_by_preds(xmrs, preds, connected=None):
"""
Yield subgraphs matching a list of predicates.
Predicates may match multiple EPs/nodes in the *xmrs*, meaning that
more than one subgraph is possible. Also, predicates in *preds*
match in number, so if a predicate appears twice in *preds*, there
will be two matching EPs/nodes in each subgraph.
Args:
xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to
use
preds: iterable of predicates to include in subgraphs
connected (bool, optional): if `True`, all yielded subgraphs
must be connected, as determined by
:meth:`Xmrs.is_connected() <delphin.mrs.xmrs.Xmrs.is_connected>`.
Yields:
A :class:`~delphin.mrs.xmrs.Xmrs` object for each subgraph found.
"""
preds = list(preds)
count = len(preds)
# find all lists of nodeids such that the lists have no repeated nids;
# keep them as a list (fixme: why not just get sets?)
nidsets = set(
tuple(sorted(ns))
for ns in filter(
lambda ns: len(set(ns)) == count,
product(*[select_nodeids(xmrs, pred=p) for p in preds])
)
)
for nidset in nidsets:
sg = xmrs.subgraph(nidset)
if connected is None or sg.is_connected() == connected:
yield sg | 0.000736 |
def create_pipe_workers(configfile, directory):
"""
Creates the workers based on the given configfile to provide named pipes in the directory.
"""
type_map = {'service': ServiceSearch,
'host': HostSearch, 'range': RangeSearch,
'user': UserSearch}
config = configparser.ConfigParser()
config.read(configfile)
if not len(config.sections()):
print_error("No named pipes configured")
return
print_notification("Starting {} pipes in directory {}".format(
len(config.sections()), directory))
workers = []
for name in config.sections():
section = config[name]
query = create_query(section)
object_type = type_map[section['type']]
args = (name, os.path.join(directory, name), object_type, query,
section['format'], bool(section.get('unique', 0)))
workers.append(multiprocessing.Process(target=pipe_worker, args=args))
return workers | 0.002024 |
def iter_equivalent_nodes(self, node: BaseEntity) -> Iterable[BaseEntity]:
"""Iterate over nodes that are equivalent to the given node, including the original."""
yield node
yield from self._equivalent_node_iterator_helper(node, {node}) | 0.011538 |
def run(self, port=5000, background=False):
""" Runs this application with builtin server for testing.
This is only for test usage, do not use in production stage.
:param port: Port number
:param background: Flag to run in background
:type port: int
:type background: bool
"""
target = os.path.dirname(os.path.abspath(sys.argv[0]))
driver = Driver(self, port, target, 1)
if background:
driver.run_background()
else:
driver.run() | 0.00369 |
def yeardec2datetime(atime: float) -> datetime.datetime:
"""
Convert atime (a float) to a datetime.datetime
This is the inverse of datetime2yeardec.
assert dt2t(t2dt(atime)) == atime
http://stackoverflow.com/questions/19305991/convert-fractional-years-to-a-real-date-in-python
Authored by "unutbu" http://stackoverflow.com/users/190597/unutbu
In Python, go from decimal year (YYYY.YYY) to datetime,
and from datetime to decimal year.
"""
# %%
if isinstance(atime, (float, int)): # typically a float
year = int(atime)
remainder = atime - year
boy = datetime.datetime(year, 1, 1)
eoy = datetime.datetime(year + 1, 1, 1)
seconds = remainder * (eoy - boy).total_seconds()
T = boy + datetime.timedelta(seconds=seconds)
assert isinstance(T, datetime.datetime)
elif isinstance(atime[0], float):
return np.asarray([yeardec2datetime(t) for t in atime])
else:
raise TypeError('expecting float, not {}'.format(type(atime)))
return T | 0.000959 |
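# A quick check of the scalar branch with a stripped-down standalone version
# (no numpy handling), using an arbitrary example value:
import datetime

def year_fraction_to_datetime(atime):
    year = int(atime)
    boy = datetime.datetime(year, 1, 1)
    eoy = datetime.datetime(year + 1, 1, 1)
    return boy + datetime.timedelta(seconds=(atime - year) * (eoy - boy).total_seconds())

print(year_fraction_to_datetime(2020.5))  # 2020-07-02 00:00:00 (2020 has 366 days)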
def patch_data(data, L=100, try_diag=True, verbose=False):
'''Patch ``data`` (for example Markov chain output) into parts of
length ``L``. Return a Gaussian mixture where each component gets
the empirical mean and covariance of one patch.
:param data:
Matrix-like array; the points to be patched. Expect ``data[i]``
as the d-dimensional i-th point.
:param L:
Integer; the length of one patch. The last patch will be shorter
if ``L`` is not a divisor of ``len(data)``.
:param try_diag:
Bool; If some patch does not define a proper covariance matrix,
it cannot define a Gaussian component. ``try_diag`` defines how
to handle that case:
If ``True`` (default), the off-diagonal elements are set to zero
and it is tried to form a Gaussian with that matrix again. If
that fails as well, the patch is skipped.
If ``False`` the patch is skipped directly.
:param verbose:
Bool; If ``True`` print all status information.
'''
# patch data into length L patches
patches = _np.array([data[patch_start:patch_start + L] for patch_start in range(0, len(data), L)])
# calculate means and covs
means = _np.array([_np.mean(patch, axis=0) for patch in patches])
covs = _np.array([_np.cov (patch, rowvar=0) for patch in patches])
# form gaussian components
components = []
skipped = []
for i, (mean, cov) in enumerate(zip(means, covs)):
try:
this_comp = Gauss(mean, cov)
components.append(this_comp)
except _np.linalg.LinAlgError as error1:
if verbose:
print("Could not form Gauss from patch %i. Reason: %s" % (i, repr(error1)))
if try_diag:
cov = _np.diag(_np.diag(cov))
try:
this_comp = Gauss(mean, cov)
components.append(this_comp)
if verbose:
print('Diagonal covariance attempt succeeded.')
except _np.linalg.LinAlgError as error2:
skipped.append(i)
if verbose:
print("Diagonal covariance attempt failed. Reason: %s" % repr(error2))
else: # if not try_diag
skipped.append(i)
# print skipped components if any
if skipped:
print("WARNING: Could not form Gaussians from: %s" % skipped)
# create and return mixture
return MixtureDensity(components) | 0.003151 |
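# The patching step itself is just slicing plus per-patch moments; a minimal numpy
# sketch of that part alone (without the Gauss/MixtureDensity wrapping above):
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(1000, 2))  # 1000 two-dimensional sample points
L = 100
patches = [data[start:start + L] for start in range(0, len(data), L)]
means = np.array([patch.mean(axis=0) for patch in patches])
covs = np.array([np.cov(patch, rowvar=False) for patch in patches])
print(means.shape, covs.shape)  # (10, 2) (10, 2, 2)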
def cmd_reverse_lookup(command_name):
'''returns 0 if key not found'''
for key, value in miss_cmds.items():
if (value.upper() == command_name.upper()):
return key
return 0 | 0.004926 |
def execute(self, context):
"""Execute the python dataflow job."""
bucket_helper = GoogleCloudBucketHelper(
self.gcp_conn_id, self.delegate_to)
self.py_file = bucket_helper.google_cloud_to_local(self.py_file)
hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
poll_sleep=self.poll_sleep)
dataflow_options = self.dataflow_default_options.copy()
dataflow_options.update(self.options)
# Convert argument names from lowerCamelCase to snake case.
camel_to_snake = lambda name: re.sub(
r'[A-Z]', lambda x: '_' + x.group(0).lower(), name)
formatted_options = {camel_to_snake(key): dataflow_options[key]
for key in dataflow_options}
hook.start_python_dataflow(
self.job_name, formatted_options,
self.py_file, self.py_options) | 0.003128 |
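# The option-name conversion used above is a one-line regex substitution; standalone:
import re

def camel_to_snake(name):
    # Prefix every uppercase letter with '_' and lowercase it.
    return re.sub(r'[A-Z]', lambda m: '_' + m.group(0).lower(), name)

print(camel_to_snake('tempLocation'))   # temp_location
print(camel_to_snake('maxNumWorkers'))  # max_num_workers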
def _default(self, key, default=None):
"""
Lookup value of *key* themeable
If *key* not in themeable or value is None,
return the *default* value.
"""
try:
value = self.theme.themeables.property(key)
except KeyError:
value = None
return value if value is not None else default | 0.005464 |
async def create(source_id: str, name: str, schema_id: str, payment_handle: int):
"""
Creates a new CredentialDef object that is written to the ledger
:param source_id: Institution's unique ID for the credential definition
:param name: Name of credential definition
:param schema_id: The schema ID given during the creation of the schema
:param payment_handle: NYI - payment of ledger fee is taken from wallet automatically
Example:
source_id = 'foobar123'
schema_name = 'Schema Name'
payment_handle = 0
credential_def1 = await CredentialDef.create(source_id, name, schema_id, payment_handle)
:return: credential_def object, written to ledger
"""
constructor_params = (source_id, name, schema_id)
c_source_id = c_char_p(source_id.encode('utf-8'))
c_schema_id = c_char_p(schema_id.encode('utf-8'))
c_name = c_char_p(name.encode('utf-8'))
# default institution_did in config is used as issuer_did
c_issuer_did = None
c_payment = c_uint32(payment_handle)
# Todo: add params for tag and config
c_tag = c_char_p('tag1'.encode('utf-8'))
c_config = c_char_p('{"support_revocation":false}'.encode('utf-8'))
c_params = (c_source_id, c_name, c_schema_id, c_issuer_did, c_tag, c_config, c_payment)
return await CredentialDef._create("vcx_credentialdef_create",
constructor_params,
c_params) | 0.003827 |
def run_marionette_script(script, chrome=False, async=False, host='localhost', port=2828):
"""Create a Marionette instance and run the provided script"""
m = DeviceHelper.getMarionette(host, port)
m.start_session()
if chrome:
m.set_context(marionette.Marionette.CONTEXT_CHROME)
if not async:
result = m.execute_script(script)
else:
result = m.execute_async_script(script)
m.delete_session()
return result | 0.008696 |
def wipe(ctx):
"""Drop the mongo database given."""
LOG.info("Running scout wipe")
db_name = ctx.obj['mongodb']
LOG.info("Dropping database %s", db_name)
try:
ctx.obj['client'].drop_database(db_name)
except Exception as err:
LOG.warning(err)
ctx.abort()
LOG.info("Dropped whole database") | 0.002941 |
def getFeatures(self, referenceName=None, start=None, end=None,
startIndex=None, maxResults=None,
featureTypes=None, parentId=None,
name=None, geneSymbol=None):
"""
method passed to runSearchRequest to fulfill the request
:param str referenceName: name of reference (ex: "chr1")
:param start: castable to int, start position on reference
:param end: castable to int, end position on reference
:param startIndex: none or castable to int
:param maxResults: none or castable to int
:param featureTypes: array of str
:param parentId: none or featureID of parent
:param name: the name of the feature
:param geneSymbol: the symbol for the gene the features are on
:return: yields a protocol.Feature at a time
"""
with self._db as dataSource:
features = dataSource.searchFeaturesInDb(
startIndex, maxResults,
referenceName=referenceName,
start=start, end=end,
parentId=parentId, featureTypes=featureTypes,
name=name, geneSymbol=geneSymbol)
for feature in features:
gaFeature = self._gaFeatureForFeatureDbRecord(feature)
yield gaFeature | 0.003743 |
def split_type(self, typename):
"""
Given a potentially complex type, split it into its base type and specializers
"""
name = self._canonicalize_type(typename)
if '(' not in name:
return name, False, []
base, sub = name.split('(')
if len(sub) == 0 or sub[-1] != ')':
raise ArgumentError("syntax error in complex type, no matching ) found", passed_type=typename, basetype=base, subtype_string=sub)
sub = sub[:-1]
subs = sub.split(',')
return base, True, subs | 0.00708 |
def validate(self, metadata, path, value):
"""
Validate this requirement.
"""
if isinstance(value, Requirement):
# if the RHS is still a Requirement object, it was not set
if metadata.testing and self.mock_value is not None:
value = self.mock_value
elif self.default_value is not None:
value = self.default_value
elif not value.required:
return None
else:
raise ValidationError(f"Missing required configuration for: {'.'.join(path)}")
try:
return self.type(value)
except ValueError:
raise ValidationError(f"Missing required configuration for: {'.'.join(path)}: {value}") | 0.005208 |
def _connectionLost(self, reason):
"""Called when the protocol connection is lost
- Log the disconnection.
- Mark any outstanding requests as unsent so they will be sent when
a new connection is made.
- If closing the broker client, mark completion of that process.
:param reason:
Failure that indicates the reason for disconnection.
"""
log.info('%r: Connection closed: %r', self, reason)
# Reset our proto so we don't try to send to a down connection
self.proto = None
# Mark any in-flight requests as unsent.
for tReq in self.requests.values():
tReq.sent = False
if self._dDown:
self._dDown.callback(None)
elif self.requests:
self._connect() | 0.002466 |
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING]) | 0.015464 |
def main(argv=None):
"""
Entry point
:param argv: Script arguments (None for sys.argv)
:return: An exit code or None
"""
# Parse arguments
parser = argparse.ArgumentParser(
prog="pelix.shell.console",
parents=[make_common_parser()],
description="Pelix Shell Console",
)
# Parse arguments
args = parser.parse_args(argv)
# Handle arguments
init = handle_common_arguments(args)
# Set the initial bundles
bundles = [
"pelix.ipopo.core",
"pelix.shell.core",
"pelix.shell.ipopo",
"pelix.shell.completion.pelix",
"pelix.shell.completion.ipopo",
"pelix.shell.console",
]
bundles.extend(init.bundles)
# Use the utility method to create, run and delete the framework
framework = pelix.create_framework(
remove_duplicates(bundles), init.properties
)
framework.start()
# Instantiate components
init.instantiate_components(framework.get_bundle_context())
try:
framework.wait_for_stop()
except KeyboardInterrupt:
framework.stop() | 0.000898 |
def filter_facet_queryset(self, queryset):
"""
Given a search queryset, filter it with whichever facet filter backends
in use.
"""
for backend in list(self.facet_filter_backends):
queryset = backend().filter_queryset(self.request, queryset, self)
if self.load_all:
queryset = queryset.load_all()
return queryset | 0.005089 |
def _patch(self, uri, data):
"""
Simple PATCH operation for a given path.
The body is expected to list operations to perform to update
the data. Operations include:
- add
- remove
- replace
- move
- copy
- test
[
{ "op": "test", "path": "/a/b/c", "value": "foo" },
]
"""
headers = self._get_headers()
response = self.session.patch(uri, headers=headers,
data=json.dumps(data))
# Will return a 204 on successful patch
if response.status_code == 204:
return response
else:
logging.error(response.content)
response.raise_for_status() | 0.003916 |
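# The body such a PATCH expects is a JSON Patch (RFC 6902) document: an ordered list
# of operations. Building one (the paths and values here are only illustrative):
import json

operations = [
    {"op": "test", "path": "/a/b/c", "value": "foo"},
    {"op": "replace", "path": "/a/b/c", "value": "bar"},
    {"op": "add", "path": "/a/b/d", "value": [1, 2, 3]},
    {"op": "remove", "path": "/a/b/e"},
]
print(json.dumps(operations))  # this string becomes the PATCH request body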
def create_release_vcs(path, vcs_name=None):
"""Return a new release VCS that can release from this source path."""
from rez.plugin_managers import plugin_manager
vcs_types = get_release_vcs_types()
if vcs_name:
if vcs_name not in vcs_types:
raise ReleaseVCSError("Unknown version control system: %r" % vcs_name)
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
return cls(path)
classes_by_level = {}
for vcs_name in vcs_types:
cls = plugin_manager.get_plugin_class('release_vcs', vcs_name)
result = cls.find_vcs_root(path)
if not result:
continue
vcs_path, levels_up = result
classes_by_level.setdefault(levels_up, []).append((cls, vcs_path))
if not classes_by_level:
raise ReleaseVCSError("No version control system for package "
"releasing is associated with the path %s" % path)
# it's ok to have multiple results, as long as there is only one at the
# "closest" directory up from this dir - ie, if we start at:
# /blah/foo/pkg_root
# and these dirs exist:
# /blah/.hg
# /blah/foo/.git
# ...then this is ok, because /blah/foo/.git is "closer" to the original
# dir, and will be picked. However, if these two directories exist:
# /blah/foo/.git
# /blah/foo/.hg
# ...then we error, because we can't decide which to use
lowest_level = sorted(classes_by_level)[0]
clss = classes_by_level[lowest_level]
if len(clss) > 1:
clss_str = ", ".join(x[0].name() for x in clss)
raise ReleaseVCSError("Several version control systems are associated "
"with the path %s: %s. Use rez-release --vcs to "
"choose." % (path, clss_str))
else:
cls, vcs_root = clss[0]
return cls(pkg_root=path, vcs_root=vcs_root) | 0.001558 |
def _get_child_relation(self, child_pid):
"""Retrieve the relation between this node and a child PID."""
return PIDRelation.query.filter_by(
parent=self._resolved_pid,
child=child_pid,
relation_type=self.relation_type.id).one() | 0.007168 |
def on_success(self, retval, task_id, args, kwargs):
"""on_success
http://docs.celeryproject.org/en/latest/reference/celery.app.task.html
:param retval: return value
:param task_id: celery task id
:param args: arguments passed into task
:param kwargs: keyword arguments passed into task
"""
log.info(("{} SUCCESS - retval={} task_id={} "
"args={} kwargs={}")
.format(
self.log_label,
retval,
task_id,
args,
kwargs)) | 0.003215 |
def __replace_verbs(sentence, counts):
"""Lets find and replace all instances of #VERB
:param _sentence:
:param counts:
"""
if sentence is not None:
while sentence.find('#VERB') != -1:
sentence = sentence.replace('#VERB', str(__get_verb(counts)), 1)
if sentence.find('#VERB') == -1:
return sentence
return sentence
else:
return sentence | 0.002347 |
def _match_line(self, city_name, lines):
"""
The lookup is case insensitive and returns the first matching line,
stripped.
:param city_name: str
:param lines: list of str
:return: str
"""
for line in lines:
toponym = line.split(',')[0]
if toponym.lower() == city_name.lower():
return line.strip()
return None | 0.004762 |
def _publish_stats(self, counter_prefix, stats):
"""Given a stats dictionary from _get_stats_from_socket,
publish the individual values.
"""
for stat_name, stat_value in flatten_dictionary(
stats,
prefix=counter_prefix,
):
self.publish_gauge(stat_name, stat_value) | 0.005882 |
def iter_links(self, file, encoding=None, context=False):
'''Return the links.
This function is a convenience function for calling :meth:`iter_text`
and returning only the links.
'''
if context:
return [item for item in self.iter_text(file, encoding) if item[1]]
else:
return [item[0] for item in self.iter_text(file, encoding) if item[1]] | 0.007282 |
def pixel(self, func:PixelFunc, *args, **kwargs)->'Image':
"Equivalent to `image.px = func(image.px)`."
self.px = func(self.px, *args, **kwargs)
return self | 0.022222 |
def hellinger(Ks, dim, required, clamp=True, to_self=False):
r'''
Estimate the Hellinger distance between distributions, based on kNN
distances: \sqrt{1 - \int \sqrt{p q}}
Always enforces 0 <= H, to be able to sqrt; if clamp, also enforces
H <= 1.
Returns a vector: one element for each K.
'''
bc = required
est = 1 - bc
np.maximum(est, 0, out=est)
if clamp:
np.minimum(est, 1, out=est)
np.sqrt(est, out=est)
return est | 0.002075 |
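# With some made-up Bhattacharyya-coefficient estimates, the clamping and square
# root above behave like this:
import numpy as np

bc = np.array([0.97, 1.02, 0.48, -0.01])  # one estimate per K; values are illustrative
est = 1 - bc
np.maximum(est, 0, out=est)  # enforce H**2 >= 0 so the sqrt is defined
np.minimum(est, 1, out=est)  # clamp to H <= 1
np.sqrt(est, out=est)
print(est)  # [0.1732... 0. 0.7211... 1.]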
def dataset_search(self, dataset_returning_query):
"""
Run a dataset query against Citrination.
:param dataset_returning_query: :class:`DatasetReturningQuery` to execute.
:type dataset_returning_query: :class:`DatasetReturningQuery`
:return: Dataset search result object with the results of the query.
:rtype: :class:`DatasetSearchResult`
"""
self._validate_search_query(dataset_returning_query)
return self._execute_search_query(
dataset_returning_query,
DatasetSearchResult
) | 0.005146 |
def get_description(self, lang='en'):
"""
Retrieve the description in a certain language
:param lang: The Wikidata language the description should be retrieved for
:return: Returns the description string
"""
if self.fast_run:
return list(self.fast_run_container.get_language_data(self.wd_item_id, lang, 'description'))[0]
if 'descriptions' not in self.wd_json_representation or lang not in self.wd_json_representation['descriptions']:
return ''
else:
return self.wd_json_representation['descriptions'][lang]['value'] | 0.008117 |
async def delete(self, turn_context: TurnContext) -> None:
"""
Delete any state currently stored in this state scope.
:param turn_context: The context object for this turn.
:return: None
"""
if turn_context is None:
raise TypeError('BotState.delete(): turn_context cannot be None.')
turn_context.turn_state.pop(self._context_service_key)
storage_key = self.get_storage_key(turn_context)
await self._storage.delete({ storage_key }) | 0.014679 |
def map(self, func):
"""
:param func:
:type func: (K, T) -> U
:rtype: TList[U]
Usage:
>>> sorted(TDict(k1=1, k2=2, k3=3).map(lambda k, v: v*2))
[2, 4, 6]
"""
return TList([func(k, v) for k, v in self.items()]) | 0.006873 |
def remove(path):
'''
Runs os.remove(path) and suppresses the OSError if the file doesn't exist
'''
try:
os.remove(path)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise | 0.004405 |
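# On Python 3 the same "ignore a missing file" behaviour is often written with
# contextlib.suppress; FileNotFoundError is the OSError subclass whose errno is ENOENT:
import contextlib
import os

with contextlib.suppress(FileNotFoundError):
    os.remove('/tmp/does-not-exist')  # no exception even if the path is absent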
def is_in_intervall(value, min_value, max_value, name='variable'):
"""
Raise an exception if value is not in an interval.
Parameters
----------
value : orderable
min_value : orderable
max_value : orderable
name : str
Name of the variable to print in exception.
"""
if not (min_value <= value <= max_value):
raise ValueError('{}={} is not in [{}, {}]'
.format(name, value, min_value, max_value)) | 0.002101 |
def get_logger():
"""
Instantiate a logger.
"""
root = logging.getLogger()
root.setLevel(logging.WARNING)
ch = logging.StreamHandler(sys.stderr)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
return root | 0.003135 |
def _decorator(store_name, store_values):
"""
Return a class decorator that:
1) Defines a new class method, `wait_for_js`
2) Defines a new class list variable, `store_name` and adds
`store_values` to the list.
"""
def decorator(clz): # pylint: disable=missing-docstring
# Add a `wait_for_js` method to the class
if not hasattr(clz, 'wait_for_js'):
setattr(clz, 'wait_for_js', _wait_for_js) # pylint: disable= literal-used-as-attribute
# Store the RequireJS module names in the class
if not hasattr(clz, store_name):
setattr(clz, store_name, set())
getattr(clz, store_name).update(store_values)
return clz
return decorator | 0.002717 |
def get_wallets(self, fetch=False):
"""Return this Applications's wallets object, populating it if fetch
is True."""
return Wallets(
self.resource.wallets, self.client, populate=fetch, application=self) | 0.012605 |
def write_directory(self, directory: str) -> bool:
"""Write a BEL namespace for identifiers, names, name hash, and mappings to the given directory."""
current_md5_hash = self.get_namespace_hash()
md5_hash_path = os.path.join(directory, f'{self.module_name}.belns.md5')
if not os.path.exists(md5_hash_path):
old_md5_hash = None
else:
with open(md5_hash_path) as file:
old_md5_hash = file.read().strip()
if old_md5_hash == current_md5_hash:
return False
with open(os.path.join(directory, f'{self.module_name}.belns'), 'w') as file:
self.write_bel_namespace(file, use_names=False)
with open(md5_hash_path, 'w') as file:
print(current_md5_hash, file=file)
if self.has_names:
with open(os.path.join(directory, f'{self.module_name}-names.belns'), 'w') as file:
self.write_bel_namespace(file, use_names=True)
with open(os.path.join(directory, f'{self.module_name}.belns.mapping'), 'w') as file:
self.write_bel_namespace_mappings(file, desc='writing mapping')
return True | 0.005922 |
def commit(self):
"""Send buffered requests and refresh all indexes."""
self.send_buffered_operations()
retry_until_ok(self.elastic.indices.refresh, index="") | 0.010989 |
def __calculate_perimeter_scoring():
"""Return a 512 element vector which gives the perimeter given surrounding pts
"""
#
# This is the array from the paper - a 256 - element array leaving out
# the center point. The first value is the index, the second, the perimeter
#
prashker = np.array([
[0 ,4 ],[32,4 ],[64,3 ],[96 ,1.414],[128,4 ],[160,4 ],[192,1.414],[224,2.828],
[1 ,4 ],[33,4 ],[65,3 ],[97 ,1.414],[129,4 ],[161,4 ],[193,3 ],[225,3 ],
[2 ,3 ],[34,3 ],[66,2 ],[98 ,2 ],[130,3 ],[162,3 ],[194,2 ],[226,2 ],
[3 ,1.414],[35,1.414],[67,2 ],[99 ,2 ],[131,3 ],[163,3 ],[195,2 ],[227,2 ],
[4 ,4 ],[36,4 ],[68,3 ],[100,3 ],[132,4 ],[164,4 ],[196,1.414],[228,3 ],
[5 ,4 ],[37,4 ],[69,3 ],[101,3 ],[133,4 ],[165,4 ],[197,3 ],[229,3 ],
[6 ,1.414],[38,3 ],[70,2 ],[102,2 ],[134,1.414],[166,3 ],[198,2 ],[230,2 ],
[7 ,2.828],[39,3 ],[71,2 ],[103,2 ],[135,3 ],[167,3 ],[199,2 ],[231,1.414],
[8 ,3 ],[40,1.414],[72,2 ],[104,2 ],[136,3 ],[168,1.414],[200,1.414],[232,1.414],
[9 ,1.414],[41,2.828],[73,1.414],[105,1.414],[137,3 ],[169,3 ],[201,1.414],[233,1.414],
[10,2 ],[42,1.414],[74,1 ],[106,1 ],[138,2 ],[170,2 ],[202,1 ],[234,1.414],
[11,2 ],[43,1.414],[75,1 ],[107,1 ],[139,2 ],[171,2 ],[203,1 ],[235,1 ],
[12,3 ],[44,3 ],[76,2 ],[108,2 ],[140,3 ],[172,3 ],[204,2 ],[236,2 ],
[13,1.414],[45,3 ],[77,2 ],[109,2 ],[141,3 ],[173,3 ],[205,1.414],[237,1.414],
[14,1.414],[46,1.414],[78,1 ],[110,1 ],[142,2 ],[174,1.414],[206,2 ],[238,1 ],
[15,1.414],[47,1.414],[79,1.414],[111,1 ],[143,2 ],[175,1.414],[207,1 ],[239,1 ],
[16,3 ],[48,3 ],[80,2 ],[112,1.414],[144,1.414],[176,1.414],[208,2 ],[240,1.414],
[17,3 ],[49,3 ],[81,2 ],[113,2 ],[145,3 ],[177,3 ],[209,2 ],[241,2 ],
[18,2 ],[50,2 ],[82,1 ],[114,1 ],[146,1.414],[178,2 ],[210,1 ],[242,1.414],
[19,1.414],[51,2 ],[83,1 ],[115,2 ],[147,1.414],[179,1.414],[211,1 ],[243,1 ],
[20,1.414],[52,3 ],[84,1.414],[116,1.414],[148,2.828],[180,3 ],[212,1.414],[244,1.414],
[21,1.414],[53,3 ],[85,2 ],[117,1.414],[149,3 ],[181,3 ],[213,2 ],[245,1.414],
[22,2 ],[54,2 ],[86,1 ],[118,1 ],[150,1.414],[182,2 ],[214,1 ],[246,1 ],
[23,1.414],[55,2 ],[87,1.414],[119,1 ],[151,1.414],[183,1.414],[215,1 ],[247,1 ],
[24,2 ],[56,2 ],[88,1 ],[120,1 ],[152,2 ],[184,2 ],[216,1 ],[248,1 ],
[25,2 ],[57,2 ],[89,1 ],[121,1.414],[153,2 ],[185,2 ],[217,1 ],[249,1 ],
[26,1 ],[58,1 ],[90,0 ],[122,0 ],[154,1 ],[186,2 ],[218,0 ],[250,0 ],
[27,1 ],[59,1.414],[91,0 ],[123,0 ],[155,1 ],[187,1 ],[219,0 ],[251,0 ],
[28,2 ],[60,2 ],[92,1 ],[124,1 ],[156,2 ],[188,2 ],[220,1.414],[252,1 ],
[29,2 ],[61,2 ],[93,2 ],[125,1 ],[157,2 ],[189,1.414],[221,1 ],[253,1 ],
[30,1 ],[62,1 ],[94,0 ],[126,0 ],[158,1.414],[190,1 ],[222,0 ],[254,0 ],
[31,1 ],[63,1 ],[95,0 ],[127,0 ],[159,1 ],[191,1 ],[223,0 ],[255,0]])
score = np.zeros((512,))
i = np.zeros((prashker.shape[0]),int)
for j in range(4): # 1,2,4,8
i = i+((prashker[:,0].astype(int) // 2**j)%2)*2**j
i = i+16
for j in range(4,8):
i = i+((prashker[:,0].astype(int) // 2**j)%2)*2**(j+1)
score[i.astype(int)] = prashker[:,1]
return score | 0.18441 |
def draw(self, milliseconds):
"""Draws all of the objects in our world."""
cam = Ragnarok.get_world().Camera
camPos = cam.get_world_pos()
self.__sort_draw()
self.clear_backbuffer()
for obj in self.__draw_objects:
#Check to see if the object is visible to the camera before doing anything to it.
if obj.is_static or obj.is_visible_to_camera(cam):
#Offset all of our objects by the camera offset.
old_pos = obj.coords
xVal = obj.coords.X - camPos.X
yVal = obj.coords.Y - camPos.Y
obj.coords = Vector2(xVal, yVal)
obj.draw(milliseconds, self.backbuffer)
obj.coords = old_pos | 0.006623 |
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False | 0.01105 |
def _endLineupsNode(self, name, content):
"""Process the end of a node under xtvd/lineups"""
if name == 'map':
if not self._error:
self._importer.new_mapping(self._lineupId, self._stationId,
self._channel, self._channelMinor,
self._validFrom, self._validTo,
self._onAirFrom, self._onAirTo) | 0.01087 |
def mark_flags_as_mutual_exclusive(flag_names, required=False,
flag_values=_flagvalues.FLAGS):
"""Ensures that only one flag among flag_names is not None.
Important note: This validator checks if flag values are None, and it does not
distinguish between default and explicit values. Therefore, this validator
does not make sense when applied to flags with default values other than None,
including other false values (e.g. False, 0, '', []). That includes multi
flags with a default value of [] instead of None.
Args:
flag_names: [str], names of the flags.
required: bool. If true, exactly one of the flags must have a value other
than None. Otherwise, at most one of the flags can have a value other
than None, and it is valid for all of the flags to be None.
flag_values: flags.FlagValues, optional FlagValues instance where the flags
are defined.
"""
for flag_name in flag_names:
if flag_values[flag_name].default is not None:
warnings.warn(
'Flag --{} has a non-None default value. That does not make sense '
'with mark_flags_as_mutual_exclusive, which checks whether the '
'listed flags have a value other than None.'.format(flag_name))
def validate_mutual_exclusion(flags_dict):
flag_count = sum(1 for val in flags_dict.values() if val is not None)
if flag_count == 1 or (not required and flag_count == 0):
return True
raise _exceptions.ValidationError(
'{} one of ({}) must have a value other than None.'.format(
'Exactly' if required else 'At most', ', '.join(flag_names)))
register_multi_flags_validator(
flag_names, validate_mutual_exclusion, flag_values=flag_values) | 0.005119 |
def iter_referents(self):
""" Generates target sets that are compatible with the current beliefstate. """
tlow, thigh = self['targetset_arity'].get_tuple()
clow, chigh = self['contrast_arity'].get_tuple()
referents = list(self.iter_singleton_referents())
t = len(referents)
low = max(1, tlow)
high = min([t, thigh])
for targets in itertools.chain.from_iterable(itertools.combinations(referents, r) \
for r in reversed(xrange(low, high+1))):
if clow <= t-len(targets) <= chigh:
yield targets | 0.013378 |
def delete_all_hosting_devices(self, context, force_delete=False):
"""Deletes all hosting devices."""
for item in self._get_collection_query(
context, hd_models.HostingDeviceTemplate):
self.delete_all_hosting_devices_by_template(
context, template=item, force_delete=force_delete) | 0.005882 |
def press(self, key_code):
""" Sends a 'down' event for the specified scan code """
if key_code >= 128:
# Media key
ev = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
14, # type
(0, 0), # location
0xa00, # flags
0, # timestamp
0, # window
0, # ctx
8, # subtype
((key_code-128) << 16) | (0xa << 8), # data1
-1 # data2
)
Quartz.CGEventPost(0, ev.CGEvent())
else:
# Regular key
# Apply modifiers if necessary
event_flags = 0
if self.current_modifiers["shift"]:
event_flags += Quartz.kCGEventFlagMaskShift
if self.current_modifiers["caps"]:
event_flags += Quartz.kCGEventFlagMaskAlphaShift
if self.current_modifiers["alt"]:
event_flags += Quartz.kCGEventFlagMaskAlternate
if self.current_modifiers["ctrl"]:
event_flags += Quartz.kCGEventFlagMaskControl
if self.current_modifiers["cmd"]:
event_flags += Quartz.kCGEventFlagMaskCommand
# Update modifiers if necessary
if key_code == 0x37: # cmd
self.current_modifiers["cmd"] = True
elif key_code == 0x38 or key_code == 0x3C: # shift or right shift
self.current_modifiers["shift"] = True
elif key_code == 0x39: # caps lock
self.current_modifiers["caps"] = True
elif key_code == 0x3A: # alt
self.current_modifiers["alt"] = True
elif key_code == 0x3B: # ctrl
self.current_modifiers["ctrl"] = True
event = Quartz.CGEventCreateKeyboardEvent(None, key_code, True)
Quartz.CGEventSetFlags(event, event_flags)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
time.sleep(0.01) | 0.008691 |
def area(poly):
"""Area of a polygon poly"""
if len(poly) < 3: # not a plane - no area
return 0
total = [0, 0, 0]
num = len(poly)
for i in range(num):
vi1 = poly[i]
vi2 = poly[(i+1) % num]
prod = np.cross(vi1, vi2)
total[0] += prod[0]
total[1] += prod[1]
total[2] += prod[2]
if total == [0, 0, 0]: # points are in a straight line - no area
return 0
result = np.dot(total, unit_normal(poly[0], poly[1], poly[2]))
return abs(result/2) | 0.003781 |
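# For a planar polygon the summed cross products are parallel to the plane normal,
# so an equivalent self-contained version (no unit_normal helper) can simply take
# the norm of that sum:
import numpy as np

def polygon_area_3d(poly):
    pts = np.asarray(poly, dtype=float)
    if len(pts) < 3:
        return 0.0
    total = np.zeros(3)
    for i in range(len(pts)):
        total += np.cross(pts[i], pts[(i + 1) % len(pts)])
    return float(np.linalg.norm(total)) / 2.0

print(polygon_area_3d([(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]))  # 1.0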
def untokenize(tokens):
"""
Converts the output of tokenize.generate_tokens back into a human-readable
string (that doesn't contain oddly-placed whitespace everywhere).
.. note::
Unlike :meth:`tokenize.untokenize`, this function requires the 3rd and
4th items in each token tuple (though we can use lists *or* tuples).
"""
out = ""
last_lineno = -1
last_col = 0
for tok in tokens:
token_string = tok[1]
start_line, start_col = tok[2]
end_line, end_col = tok[3]
# The following two conditionals preserve indentation:
if start_line > last_lineno:
last_col = 0
if start_col > last_col and token_string != '\n':
out += (" " * (start_col - last_col))
out += token_string
last_col = end_col
last_lineno = end_line
return out | 0.001147 |
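# Each token from tokenize.generate_tokens is a TokenInfo 5-tuple whose 3rd and 4th
# items are the (row, col) start and end positions the function above relies on:
import io
import tokenize

source = "if x:\n    y = f(x , 1)\n"
for tok in list(tokenize.generate_tokens(io.StringIO(source).readline))[:4]:
    print(tok.type, repr(tok.string), tok.start, tok.end)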
def move_to_destination(source, destination, job_name, sagemaker_session):
"""move source to destination. Can handle uploading to S3
Args:
source (str): root directory to move
destination (str): file:// or s3:// URI that source will be moved to.
job_name (str): SageMaker job name.
sagemaker_session (sagemaker.Session): a sagemaker_session to interact with S3 if needed
Returns:
(str): destination URI
"""
parsed_uri = urlparse(destination)
if parsed_uri.scheme == 'file':
recursive_copy(source, parsed_uri.path)
final_uri = destination
elif parsed_uri.scheme == 's3':
bucket = parsed_uri.netloc
path = "%s%s" % (parsed_uri.path.lstrip('/'), job_name)
final_uri = 's3://%s/%s' % (bucket, path)
sagemaker_session.upload_data(source, bucket, path)
else:
raise ValueError('Invalid destination URI, must be s3:// or file://, got: %s' % destination)
shutil.rmtree(source)
return final_uri | 0.002933 |
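# The scheme/netloc/path split that drives the branching above comes straight from
# urlparse (the URIs here are only examples):
from urllib.parse import urlparse

for uri in ('file:///tmp/output', 's3://my-bucket/prefix/run-1'):
    parsed = urlparse(uri)
    print(parsed.scheme, parsed.netloc, parsed.path)
# file  /tmp/output
# s3 my-bucket /prefix/run-1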
def init_run_context(self, raw=False):
"""
Sets the log context.
"""
self.raw = raw
self._set_context(self) | 0.013605 |
def del_from_groups(self, username, groups):
"""Delete user from groups"""
# it follows the same logic as add_to_groups
# but with MOD_DELETE
ldap_client = self._bind()
tmp = self._get_user(self._byte_p2(username), ALL_ATTRS)
if tmp is None:
raise UserDoesntExist(username, self.backend_name)
dn = tmp[0]
attrs = tmp[1]
attrs['dn'] = dn
self._normalize_group_attrs(attrs)
dn = self._byte_p2(tmp[0])
for group in groups:
group = self._byte_p2(group)
for attr in self.group_attrs:
content = self._byte_p2(self.group_attrs[attr] % attrs)
ldif = [(ldap.MOD_DELETE, attr, self._byte_p3(content))]
try:
ldap_client.modify_s(group, ldif)
except ldap.NO_SUCH_ATTRIBUTE as e:
self._logger(
severity=logging.INFO,
msg="%(backend)s: user '%(user)s'"
" wasn't member of group '%(group)s'"
" (attribute '%(attr)s')" % {
'user': username,
'group': self._uni(group),
'attr': attr,
'backend': self.backend_name
}
)
except Exception as e:
ldap_client.unbind_s()
self._exception_handler(e)
ldap_client.unbind_s() | 0.001289 |
def GetMacAddresses(self):
"""MAC addresses from all interfaces."""
result = set()
for interface in self.interfaces:
if (interface.mac_address and
interface.mac_address != b"\x00" * len(interface.mac_address)):
result.add(Text(interface.mac_address.human_readable_address))
return sorted(result) | 0.008955 |
def map_peaks_to_image(peaks, r=4, vox_dims=(2, 2, 2), dims=(91, 109, 91),
header=None):
""" Take a set of discrete foci (i.e., 2-D array of xyz coordinates)
and generate a corresponding image, convolving each focus with a
hard sphere of radius r."""
data = np.zeros(dims)
for p in peaks:
valid = get_sphere(p, r, vox_dims, dims)
valid = valid[:, ::-1]
data[tuple(valid.T)] = 1
return nifti1.Nifti1Image(data, None, header=header) | 0.001996 |
def expected_cost(numobj):
"""Gets the expected cost category of a short number (however, nothing is
implied about its validity). If the country calling code is unique to a
region, this method behaves exactly the same as
expected_cost_for_region. However, if the country calling code is
shared by multiple regions, then it returns the highest cost in the
sequence PREMIUM_RATE, UNKNOWN_COST, STANDARD_RATE, TOLL_FREE. The reason
for the position of UNKNOWN_COST in this order is that if a number is
UNKNOWN_COST in one region but STANDARD_RATE or TOLL_FREE in another, its
expected cost cannot be estimated as one of the latter since it might be a
PREMIUM_RATE number.
For example, if a number is STANDARD_RATE in the US, but TOLL_FREE in
Canada, the expected cost returned by this method will be STANDARD_RATE,
since the NANPA countries share the same country calling code.
Note: If the region from which the number is dialed is known, it is highly preferable to call
expected_cost_for_region instead.
Arguments:
numobj -- the short number for which we want to know the expected cost category
Return the highest expected cost category of the short number in the
region(s) with the given country calling code
"""
region_codes = region_codes_for_country_code(numobj.country_code)
if len(region_codes) == 0:
return ShortNumberCost.UNKNOWN_COST
if len(region_codes) == 1:
return expected_cost_for_region(numobj, region_codes[0])
cost = ShortNumberCost.TOLL_FREE
for region_code in region_codes:
cost_for_region = expected_cost_for_region(numobj, region_code)
if cost_for_region == ShortNumberCost.PREMIUM_RATE:
return ShortNumberCost.PREMIUM_RATE
elif cost_for_region == ShortNumberCost.UNKNOWN_COST:
return ShortNumberCost.UNKNOWN_COST
elif cost_for_region == ShortNumberCost.STANDARD_RATE:
if cost != ShortNumberCost.UNKNOWN_COST:
cost = ShortNumberCost.STANDARD_RATE
elif cost_for_region == ShortNumberCost.TOLL_FREE:
# Do nothing
pass
else: # pragma no cover
raise Exception("Unrecognized cost for region: %s", cost_for_region)
return cost | 0.001738 |
def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""):
"""Dynamically add syntaxtical elements to query.
This functions adds syntactical elements to the query string, and
report title, based on the types and number of items added thus far.
Args:
flag_filt (bool): at least one filter item specified.
qry_string (str): portion of the query constructed thus far.
param_str (str): the title to display before the list.
flag_id (bool): optional - instance-id was specified.
filt_st (str): optional - syntax to add on end if filter specified.
Returns:
qry_string (str): the portion of the query that was passed in with
the appropriate syntactical elements added.
param_str (str): the title to display before the list.
"""
if flag_id or flag_filt:
qry_string += ", "
param_str += ", "
if not flag_filt:
qry_string += filt_st
return (qry_string, param_str) | 0.000983 |
def alias_resolving(self):
"""
Alias definitions with resolved values as defined on this engine.
Aliases can be used in rules to simplify multiple object creation
::
fw = Engine('myfirewall')
for alias in fw.alias_resolving():
print(alias, alias.resolved_value)
...
(Alias(name=$$ Interface ID 0.ip), [u'10.10.0.1'])
(Alias(name=$$ Interface ID 0.net), [u'10.10.0.0/24'])
(Alias(name=$$ Interface ID 1.ip), [u'10.10.10.1'])
:return: generator of aliases
:rtype: Alias
"""
alias_list = list(Alias.objects.all())
for alias in self.make_request(resource='alias_resolving'):
yield Alias._from_engine(alias, alias_list) | 0.002538 |
def create_routertype(self, context, routertype):
"""Creates a router type.
Also binds it to the specified hosting device template.
"""
LOG.debug("create_routertype() called. Contents %s", routertype)
rt = routertype['routertype']
with context.session.begin(subtransactions=True):
routertype_db = l3_models.RouterType(
id=self._get_id(rt),
tenant_id=rt['tenant_id'],
name=rt['name'],
description=rt['description'],
template_id=rt['template_id'],
ha_enabled_by_default=rt['ha_enabled_by_default'],
shared=rt['shared'],
slot_need=rt['slot_need'],
scheduler=rt['scheduler'],
driver=rt['driver'],
cfg_agent_service_helper=rt['cfg_agent_service_helper'],
cfg_agent_driver=rt['cfg_agent_driver'])
context.session.add(routertype_db)
return self._make_routertype_dict(routertype_db) | 0.00191 |
def filter_params(target):
"""Decorator which filters params to remove non-oauth_* parameters
Assumes the decorated method takes a params dict or list of tuples as its
first argument.
"""
def wrapper(params, *args, **kwargs):
params = filter_oauth_params(params)
return target(params, *args, **kwargs)
wrapper.__doc__ = target.__doc__
return wrapper | 0.002532 |
def watsonsV(Dir1, Dir2):
"""
calculates Watson's V statistic for two sets of directions
"""
counter, NumSims = 0, 500
#
# first calculate the fisher means and cartesian coordinates of each set of Directions
#
pars_1 = fisher_mean(Dir1)
pars_2 = fisher_mean(Dir2)
#
# get V statistic for these
#
V = vfunc(pars_1, pars_2)
#
# do monte carlo simulation of datasets with same kappas, but common mean
#
Vp = [] # set of Vs from simulations
print("Doing ", NumSims, " simulations")
for k in range(NumSims):
counter += 1
if counter == 50:
print(k + 1)
counter = 0
Dirp = []
# get a set of N1 fisher distributed vectors with k1, calculate fisher stats
for i in range(pars_1["n"]):
Dirp.append(fshdev(pars_1["k"]))
pars_p1 = fisher_mean(Dirp)
# get a set of N2 fisher distributed vectors with k2, calculate fisher stats
Dirp = []
for i in range(pars_2["n"]):
Dirp.append(fshdev(pars_2["k"]))
pars_p2 = fisher_mean(Dirp)
# get the V for these
Vk = vfunc(pars_p1, pars_p2)
Vp.append(Vk)
#
# sort the Vs, get Vcrit (95th one)
#
Vp.sort()
k = int(.95 * NumSims)
return V, Vp[k] | 0.001604 |
def all_substrings(s):
''' yields all substrings of a string '''
join = ''.join
for i in range(1, len(s) + 1):
for sub in window(s, i):
yield join(sub) | 0.005464 |
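# The window helper it relies on is not shown above; a self-contained sketch using a
# simple slicing window (which already yields strings, so no join is needed):
def window(seq, n):
    for i in range(len(seq) - n + 1):
        yield seq[i:i + n]

def all_substrings(s):
    for n in range(1, len(s) + 1):
        yield from window(s, n)

print(list(all_substrings('abc')))  # ['a', 'b', 'c', 'ab', 'bc', 'abc']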
def main(): # noqa
"""Attempt to fully destroy AWS Resources for a Spinnaker Application."""
logging.basicConfig(format=LOGGING_FORMAT)
parser = argparse.ArgumentParser(description=main.__doc__)
add_debug(parser)
add_app(parser)
args = parser.parse_args()
if args.debug == logging.DEBUG:
logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
else:
LOG.setLevel(args.debug)
for env in ENVS:
for region in REGIONS:
LOG.info('DESTROY %s:%s', env, region)
try:
destroy_dns(app=args.app, env=env)
except botocore.exceptions.ClientError as error:
LOG.warning('DNS issue for %s in %s: %s', env, region, error)
try:
destroy_elb(app=args.app, env=env, region=region)
except SpinnakerError:
pass
try:
destroy_iam(app=args.app, env=env)
except botocore.exceptions.ClientError as error:
LOG.warning('IAM issue for %s in %s: %s', env, region, error)
try:
destroy_s3(app=args.app, env=env)
except botocore.exceptions.ClientError as error:
LOG.warning('S3 issue for %s in %s: %s', env, region, error)
try:
destroy_sg(app=args.app, env=env, region=region)
except SpinnakerError:
pass
LOG.info('Destroyed %s:%s', env, region)
LOG.info('Destruction complete.') | 0.000652 |
def get_remote_data(data, settings, mode, more_excluded_names=None):
"""
Return globals according to filter described in *settings*:
* data: data to be filtered (dictionary)
* settings: variable explorer settings (dictionary)
* mode (string): 'editable' or 'picklable'
* more_excluded_names: additional excluded names (list)
"""
supported_types = get_supported_types()
assert mode in list(supported_types.keys())
excluded_names = settings['excluded_names']
if more_excluded_names is not None:
excluded_names += more_excluded_names
return globalsfilter(data, check_all=settings['check_all'],
filters=tuple(supported_types[mode]),
exclude_private=settings['exclude_private'],
exclude_uppercase=settings['exclude_uppercase'],
exclude_capitalized=settings['exclude_capitalized'],
exclude_unsupported=settings['exclude_unsupported'],
excluded_names=excluded_names) | 0.000925 |
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes,
# Absolute paths are turned to relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if it is valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo | 0.000577 |
def GetDefaultContract(self):
"""
Get the default contract.
Returns:
contract (Contract): if Successful, a contract of type neo.SmartContract.Contract, otherwise an Exception.
Raises:
Exception: if no default contract is found.
Note:
Prints a warning to the console if the default contract could not be found.
"""
try:
return self.GetContracts()[0]
except Exception as e:
logger.error("Could not find default contract: %s" % str(e))
raise | 0.006932 |
def Popen(self, cmd, **kwargs):
"""
Remote Popen
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen)
:return: handle to subprocess
"""
masked_cmd = ' '.join(self.cmd_mask_password(cmd))
self.log.info("Executing command: {}".format(masked_cmd))
self.sp = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**kwargs)
for line in iter(self.sp.stdout):
self.log.info(line.strip())
self.sp.wait()
self.log.info("Command exited with return code %s", self.sp.returncode)
if self.sp.returncode:
raise AirflowException("Sqoop command failed: {}".format(masked_cmd)) | 0.003704 |
def from_pty(cls, stdout, true_color=False, ansi_colors_only=None, term=None):
"""
Create an Output class from a pseudo terminal.
(This will take the dimensions by reading the pseudo
terminal attributes.)
"""
assert stdout.isatty()
def get_size():
rows, columns = _get_size(stdout.fileno())
# If terminal (incorrectly) reports its size as 0, pick a reasonable default.
# See https://github.com/ipython/ipython/issues/10071
return Size(rows=(rows or 24), columns=(columns or 80))
return cls(stdout, get_size, true_color=true_color,
ansi_colors_only=ansi_colors_only, term=term) | 0.00565 |
def _parse_args():
"""Parse and return command line arguments."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=_CliFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='Enable verbose output.')
fb_group = parser.add_argument_group('FogBugz arguments')
fb_group.add_argument(
'-u', '--url', help=(
'URL for bugzscout requests to be sent. Should be something '
'like .../scoutSubmit.asp.'))
fb_group.add_argument(
'--user', help='User to designate when submitting via bugzscout.')
fb_group.add_argument(
'--project', help='Fogbugz project to file cases under.')
fb_group.add_argument(
'--area', help='Fogbugz area to file cases under.')
error_group = parser.add_argument_group('error arguments')
error_group.add_argument('-e', '--extra',
help='Extra data to send with error.')
error_group.add_argument('--default-message',
help='Set default message if case is new.')
error_group.add_argument('description',
help=('Description of error. Will be matched '
'against existing cases.'))
parser.set_defaults(**_defaults())
return parser.parse_args() | 0.000731 |
def managed(name, roles=None, profiles=None, authorizations=None):
'''
Manage RBAC properties for user
name : string
username
roles : list
list of roles for user
profiles : list
list of profiles for user
authorizations : list
list of authorizations for user
.. warning::
All existing roles, profiles and authorizations will be replaced!
An empty list will remove everything.
Set the property to `None` to not manage it.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
## check properties
if name not in __salt__['user.list_users']():
ret['result'] = False
ret['comment'] = 'User {0} does not exist!'.format(name)
return ret
if roles and not isinstance(roles, (list)):
ret['result'] = False
ret['comment'] = 'Property roles is not None or list!'
return ret
if profiles and not isinstance(profiles, (list)):
ret['result'] = False
ret['comment'] = 'Property profiles is not None or list!'
return ret
if authorizations and not isinstance(authorizations, (list)):
ret['result'] = False
ret['comment'] = 'Property authorizations is not None or list!'
return ret
log.debug('rbac.managed - roles=%s, profiles=%s, authorizations=%s',
roles, profiles, authorizations
)
## update roles
if isinstance(roles, (list)):
# compute changed
roles_current = __salt__['rbac.role_get'](name)
roles_add = [r for r in roles if r not in roles_current]
roles_rm = [r for r in roles_current if r not in roles]
# execute and verify changes
if roles_add:
res_roles_add = __salt__['rbac.role_add'](name, ','.join(roles_add).strip())
roles_current = __salt__['rbac.role_get'](name)
for role in roles_add:
if 'roles' not in ret['changes']:
ret['changes']['roles'] = {}
ret['changes']['roles'][role] = 'Added' if role in roles_current else 'Failed'
if ret['changes']['roles'][role] == 'Failed':
ret['result'] = False
if roles_rm:
res_roles_rm = __salt__['rbac.role_rm'](name, ','.join(roles_rm).strip())
roles_current = __salt__['rbac.role_get'](name)
for role in roles_rm:
if 'roles' not in ret['changes']:
ret['changes']['roles'] = {}
ret['changes']['roles'][role] = 'Removed' if role not in roles_current else 'Failed'
if ret['changes']['roles'][role] == 'Failed':
ret['result'] = False
## update profiles
if isinstance(profiles, (list)):
# compute changed
profiles_current = __salt__['rbac.profile_get'](name)
profiles_add = [r for r in profiles if r not in profiles_current]
profiles_rm = [r for r in profiles_current if r not in profiles]
# execute and verify changes
if profiles_add:
res_profiles_add = __salt__['rbac.profile_add'](name, ','.join(profiles_add).strip())
profiles_current = __salt__['rbac.profile_get'](name)
for profile in profiles_add:
if 'profiles' not in ret['changes']:
ret['changes']['profiles'] = {}
ret['changes']['profiles'][profile] = 'Added' if profile in profiles_current else 'Failed'
if ret['changes']['profiles'][profile] == 'Failed':
ret['result'] = False
if profiles_rm:
res_profiles_rm = __salt__['rbac.profile_rm'](name, ','.join(profiles_rm).strip())
profiles_current = __salt__['rbac.profile_get'](name)
for profile in profiles_rm:
if 'profiles' not in ret['changes']:
ret['changes']['profiles'] = {}
ret['changes']['profiles'][profile] = 'Removed' if profile not in profiles_current else 'Failed'
if ret['changes']['profiles'][profile] == 'Failed':
ret['result'] = False
## update auths
if isinstance(authorizations, (list)):
# compute changed
auths_current = __salt__['rbac.auth_get'](name, False)
auths_add = [r for r in authorizations if r not in auths_current]
auths_rm = [r for r in auths_current if r not in authorizations]
# execute and verify changes
if auths_add:
res_auths_add = __salt__['rbac.auth_add'](name, ','.join(auths_add).strip())
auths_current = __salt__['rbac.auth_get'](name)
for auth in auths_add:
if 'authorizations' not in ret['changes']:
ret['changes']['authorizations'] = {}
ret['changes']['authorizations'][auth] = 'Added' if auth in auths_current else 'Failed'
if ret['changes']['authorizations'][auth] == 'Failed':
ret['result'] = False
if auths_rm:
res_auths_rm = __salt__['rbac.auth_rm'](name, ','.join(auths_rm).strip())
auths_current = __salt__['rbac.auth_get'](name)
for auth in auths_rm:
if 'authorizations' not in ret['changes']:
ret['changes']['authorizations'] = {}
ret['changes']['authorizations'][auth] = 'Removed' if auth not in auths_current else 'Failed'
if ret['changes']['authorizations'][auth] == 'Failed':
ret['result'] = False
return ret | 0.00319 |
def header(self, name, value):
"""
Store all message headers, optionally clean them up.
This simply stores all message headers so we can send them to DSPAM.
Additionally, headers that have the same prefix as the ones we're
about to add are deleted.
"""
self.message += "{}: {}\r\n".format(name, value)
logger.debug('<{}> Received {} header'.format(self.id, name))
if name.lower().startswith(self.header_prefix.lower()):
self.remove_headers.append(name)
logger.debug('<{}> Going to remove {} header'.format(
self.id, name))
return Milter.CONTINUE | 0.002999 |
def get_modules(folder):
"""Find all valid modules in the given folder which must be in
in the same directory as this loader.py module. A valid module
has a .py extension, and is importable.
@return: all loaded valid modules
@rtype: iterator of module
"""
if is_frozen():
# find modules in library.zip filename
zipname = os.path.dirname(os.path.dirname(__file__))
parentmodule = os.path.basename(os.path.dirname(__file__))
with zipfile.ZipFile(zipname, 'r') as f:
prefix = "%s/%s/" % (parentmodule, folder)
modnames = [os.path.splitext(n[len(prefix):])[0]
for n in f.namelist()
if n.startswith(prefix) and "__init__" not in n]
else:
dirname = os.path.join(os.path.dirname(__file__), folder)
modnames = get_importable_modules(dirname)
for modname in modnames:
try:
name ="..%s.%s" % (folder, modname)
yield importlib.import_module(name, __name__)
except ImportError as msg:
out.error("could not load module %s: %s" % (modname, msg)) | 0.003575 |
def Close(self):
"""Flushes the flow and all its requests to the data_store."""
self._CheckLeaseAndFlush()
super(FlowBase, self).Close()
# Writing the messages queued in the queue_manager of the runner always has
# to be the last thing that happens or we will have a race condition.
self.FlushMessages() | 0.003058 |
def centralManager_didDisconnectPeripheral_error_(self, manager, peripheral, error):
"""Called when a device is disconnected."""
logger.debug('centralManager_didDisconnectPeripheral called')
# Get the device and remove it from the device list, then fire its
# disconnected event.
device = device_list().get(peripheral)
if device is not None:
# Fire disconnected event and remove device from device list.
device._set_disconnected()
device_list().remove(peripheral) | 0.005484 |
def _trj_store_trajectory(self, traj, only_init=False, store_data=pypetconstants.STORE_DATA,
max_depth=None):
""" Stores a trajectory to an hdf5 file
Stores all groups, parameters and results
"""
if not only_init:
self._logger.info('Start storing Trajectory `%s`.' % self._trajectory_name)
else:
self._logger.info('Initialising storage or updating meta data of Trajectory `%s`.' %
self._trajectory_name)
store_data = pypetconstants.STORE_NOTHING
        # In case we accidentally chose a trajectory name that already exists,
        # we do not want to mess up the stored trajectory but raise an error.
        if not traj._stored and self._trajectory_group is not None:
            raise RuntimeError('You want to store a completely new trajectory with name'
                               ' `%s` but this trajectory is already found in file `%s`. '
                               'Did you accidentally try to overwrite existing data? If '
                               'you DO want to overwrite existing data, use `overwrite_file=True`. '
                               'Note that this deletes the whole HDF5 file, not just the particular '
                               'trajectory therein! ' %
                               (traj.v_name, self._filename))
# Extract HDF5 settings from the trajectory
self._srvc_check_hdf_properties(traj)
# Store the trajectory for the first time if necessary:
if self._trajectory_group is None:
self._trajectory_group = self._hdf5file.create_group(where='/',
name=self._trajectory_name,
title=self._trajectory_name,
filters=self._all_get_filters())
# Store meta information
self._trj_store_meta_data(traj)
# # Store recursively the config subtree
# self._tree_store_recursively(pypetconstants.LEAF,traj.config,self._trajectory_group)
if store_data in (pypetconstants.STORE_DATA_SKIPPING,
pypetconstants.STORE_DATA,
pypetconstants.OVERWRITE_DATA):
counter = 0
maximum_display_other = 10
name_set = set(['parameters', 'config', 'derived_parameters', 'results'])
for child_name in traj._children:
if child_name in name_set:
self._logger.info('Storing branch `%s`.' % child_name)
else:
if counter < maximum_display_other:
self._logger.info('Storing branch/node `%s`.' % child_name)
elif counter == maximum_display_other:
                    self._logger.info('Too many branches or nodes at root for display. '
'I will not inform you about storing anymore. '
'Branches are stored silently in the background. '
'Do not worry, I will not freeze! Pinky promise!!!')
counter += 1
# Store recursively the elements
self._tree_store_sub_branch(traj, child_name, store_data=store_data,
with_links=True,
recursive=True, max_depth=max_depth,
hdf5_group=self._trajectory_group)
self._logger.info('Finished storing Trajectory `%s`.' % self._trajectory_name)
else:
self._logger.info('Finished init or meta data update for `%s`.' %
self._trajectory_name)
traj._stored = True | 0.006674 |
def trending(limit=DEFAULT_SEARCH_LIMIT, api_key=GIPHY_PUBLIC_KEY,
strict=False, rating=None):
"""
Shorthand for creating a Giphy api wrapper with the given api key
and then calling the trending method. Note that this will return
a generator
"""
return Giphy(api_key=api_key, strict=strict).trending(
limit=limit, rating=rating) | 0.002681 |
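A hedged usage sketch for the shorthand above; the API key is a placeholder and the shape of the yielded items depends on the Giphy wrapper.

# Illustrative only: print the first few trending results.
for gif in trending(limit=5, rating='g', api_key='YOUR_GIPHY_KEY'):
    print(gif)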
def authorization_code_grant_flow(credentials, storage_filename):
"""Get an access token through Authorization Code Grant.
Parameters
credentials (dict)
All your app credentials and information
imported from the configuration file.
storage_filename (str)
Filename to store OAuth 2.0 Credentials.
Returns
(UberRidesClient)
An UberRidesClient with OAuth 2.0 Credentials.
"""
auth_flow = AuthorizationCodeGrant(
credentials.get('client_id'),
credentials.get('scopes'),
credentials.get('client_secret'),
credentials.get('redirect_url'),
)
auth_url = auth_flow.get_authorization_url()
login_message = 'Login as a driver and grant access by going to:\n\n{}\n'
login_message = login_message.format(auth_url)
response_print(login_message)
redirect_url = 'Copy the URL you are redirected to and paste here:\n\n'
result = input(redirect_url).strip()
try:
session = auth_flow.get_session(result)
except (ClientError, UberIllegalState) as error:
fail_print(error)
return
credential = session.oauth2credential
credential_data = {
'client_id': credential.client_id,
'redirect_url': credential.redirect_url,
'access_token': credential.access_token,
'expires_in_seconds': credential.expires_in_seconds,
'scopes': list(credential.scopes),
'grant_type': credential.grant_type,
'client_secret': credential.client_secret,
'refresh_token': credential.refresh_token,
}
with open(storage_filename, 'w') as yaml_file:
yaml_file.write(safe_dump(credential_data, default_flow_style=False))
return UberRidesClient(session, sandbox_mode=True) | 0.000556 |
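A minimal sketch of a caller, assuming the credentials YAML holds the keys the function reads ('client_id', 'client_secret', 'scopes', 'redirect_url'); the file names below are placeholders.

# Illustrative only: load app credentials, run the flow interactively,
# and keep the resulting client if authorization succeeded.
from yaml import safe_load

with open('config.driver.yaml') as f:
    credentials = safe_load(f)

client = authorization_code_grant_flow(credentials, 'oauth_session_store.yaml')
if client is not None:
    print('Authorized UberRidesClient created (sandbox mode).')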
def visit_FunctionDeclaration(self, node):
"""Visitor for `FunctionDeclaration` AST node."""
for parameter in node.parameters:
try:
var_name = parameter.variable.identifier.name
var_is_mutable = parameter.variable.is_mutable
except AttributeError:
raise SementicError('Invalid parameter.')
var_symbol = VariableSymbol(var_name, var_is_mutable)
if self.table[var_name] is not None:
raise SementicError(f"Duplicated `{var_name}` function parameter.")
self.table[var_name] = var_symbol
self.visit(node.body) | 0.004573 |
def session_token(self):
"""Return OAuth session token."""
session_token = None
if self.user_id is not None:
session_token = token_getter(self.remote)
if session_token:
token = RemoteToken.get(
self.user_id, self.remote.consumer_key,
access_token=session_token[0]
)
return token
return None | 0.004878 |
def _get_token(
request=None, allowed_auth_schemes=('OAuth', 'Bearer'),
allowed_query_keys=('bearer_token', 'access_token')):
"""Get the auth token for this request.
Auth token may be specified in either the Authorization header or
as a query param (either access_token or bearer_token). We'll check in
this order:
1. Authorization header.
2. bearer_token query param.
3. access_token query param.
Args:
request: The current request, or None.
Returns:
The token in the request or None.
"""
allowed_auth_schemes = _listlike_guard(
allowed_auth_schemes, 'allowed_auth_schemes', iterable_only=True)
# Check if the token is in the Authorization header.
auth_header = os.environ.get('HTTP_AUTHORIZATION')
if auth_header:
for auth_scheme in allowed_auth_schemes:
if auth_header.startswith(auth_scheme):
return auth_header[len(auth_scheme) + 1:]
# If an auth header was specified, even if it's an invalid one, we won't
# look for the token anywhere else.
return None
# Check if the token is in the query string.
if request:
allowed_query_keys = _listlike_guard(
allowed_query_keys, 'allowed_query_keys', iterable_only=True)
for key in allowed_query_keys:
token, _ = request.get_unrecognized_field_info(key)
if token:
return token | 0.008869 |
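A quick sketch of the header path, assuming the module above is importable: when an Authorization header is present in the CGI environment, the token after the scheme name is returned and the query string is never consulted.

# Illustrative only.
import os
os.environ['HTTP_AUTHORIZATION'] = 'Bearer abc123'
print(_get_token())  # prints 'abc123'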
def delete(args):
"""
cdstarcat delete OID
Delete an object specified by OID from CDSTAR.
"""
with _catalog(args) as cat:
n = len(cat)
cat.delete(args.args[0])
args.log.info('{0} objects deleted'.format(n - len(cat)))
return n - len(cat) | 0.003448 |
def list_volume_attachment(self, **kwargs):
"""
list or watch objects of kind VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_volume_attachment(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1VolumeAttachmentList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_volume_attachment_with_http_info(**kwargs)
else:
(data) = self.list_volume_attachment_with_http_info(**kwargs)
return data | 0.001978 |
def _print_bar(self):
'''Print a progress bar.'''
self._print('[')
for position in range(self._bar_width):
position_fraction = position / (self._bar_width - 1)
position_bytes = position_fraction * self.max_value
if position_bytes < (self.continue_value or 0):
self._print('+')
elif position_bytes <= (self.continue_value or 0) + self.current_value:
self._print('=')
else:
self._print(' ')
self._print(']') | 0.005474 |
def print_list(cls, l, output='table'):
"""
prints a list
:param l: the list
:param output: the output, default is a table
:return:
"""
def dict_from_list(l):
"""
returns a dict from a list for printing
:param l: the list
:return:
"""
d = dict([(idx, item) for idx, item in enumerate(l)])
return d
if output == 'table':
x = PrettyTable(["Index", "Host"])
for (idx, item) in enumerate(l):
x.add_row([idx, item])
x.align = "l"
x.align["Index"] = "r"
return x
elif output == 'csv':
return ",".join(l)
elif output == 'dict':
d = dict_from_list(l)
return d
elif output == 'json':
d = dict_from_list(l)
result = json.dumps(d, indent=4)
return result
elif output == 'yaml':
d = dict_from_list(l)
result = yaml.dump(d, default_flow_style=False)
return result
elif output == 'txt':
return "\n".join(l) | 0.005085 |
def connection_service_name(service, *args):
''' the name of a service that manages the connection between services '''
# if the service is a string
if isinstance(service, str):
return service
return normalize_string(type(service).__name__) | 0.003774 |
def punct(self, text):
"""Push punctuation onto the token queue."""
cls = self.PUNCTUATION[text]
self.push_token(cls(text, self.lineno, self.offset)) | 0.011561 |
def create_basic_op_node(op_name, node, kwargs):
"""Helper function to create a basic operator
node that doesn't contain op specific attrs"""
name, input_nodes, _ = get_inputs(node, kwargs)
node = onnx.helper.make_node(
op_name,
input_nodes,
[name],
name=name
)
return [node] | 0.003012 |
def etree_replace_namespace(etree_obj, ns_str):
"""In-place change the namespace of elements in an ElementTree.
Args:
etree_obj: ElementTree
ns_str : str
The namespace to set. E.g.: ``http://ns.dataone.org/service/types/v1``.
"""
def _replace_recursive(el, n):
el.tag = re.sub(r'{.*\}', '{{{}}}'.format(n), el.tag)
el.text = el.text.strip() if el.text else None
el.tail = el.tail.strip() if el.tail else None
for child_el in el:
_replace_recursive(child_el, n)
_replace_recursive(etree_obj, ns_str) | 0.001704 |
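A self-contained sketch, assuming the function above is importable: parse a small document and rewrite every element's namespace in place.

# Illustrative only.
import xml.etree.ElementTree as ET

root = ET.fromstring('<d1:obj xmlns:d1="http://old/ns"><d1:item>x</d1:item></d1:obj>')
etree_replace_namespace(root, 'http://ns.dataone.org/service/types/v1')
print(root.tag)  # {http://ns.dataone.org/service/types/v1}obj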
def encode(cls, value):
"""
take a valid unicode string and turn it into utf-8 bytes
:param value: unicode, str
:return: bytes
"""
coerced = unicode(value)
if coerced == value:
return coerced.encode(cls._encoding)
raise InvalidValue('not text') | 0.006211 |
def find_next_comma(self, node, sub):
"""Find comma after sub andd add NodeWithPosition in node"""
position = (sub.last_line, sub.last_col)
first, last = find_next_comma(self.lcode, position)
if first: # comma exists
node.op_pos.append(NodeWithPosition(last, first)) | 0.006431 |
def submit_completion(self, user, course_key, block_key, completion):
"""
Update the completion value for the specified record.
Parameters:
* user (django.contrib.auth.models.User): The user for whom the
completion is being submitted.
* course_key (opaque_keys.edx.keys.CourseKey): The course in
which the submitted block is found.
* block_key (opaque_keys.edx.keys.UsageKey): The block that has had
its completion changed.
* completion (float in range [0.0, 1.0]): The fractional completion
value of the block (0.0 = incomplete, 1.0 = complete).
Return Value:
(BlockCompletion, bool): A tuple comprising the created or updated
BlockCompletion object and a boolean value indicating whether the
object was newly created by this call.
Raises:
ValueError:
If the wrong type is passed for one of the parameters.
django.core.exceptions.ValidationError:
If a float is passed that is not between 0.0 and 1.0.
django.db.DatabaseError:
If there was a problem getting, creating, or updating the
BlockCompletion record in the database.
This will also be a more specific error, as described here:
https://docs.djangoproject.com/en/1.11/ref/exceptions/#database-exceptions.
IntegrityError and OperationalError are relatively common
subclasses.
"""
# Raise ValueError to match normal django semantics for wrong type of field.
if not isinstance(course_key, CourseKey):
raise ValueError(
"course_key must be an instance of `opaque_keys.edx.keys.CourseKey`. Got {}".format(type(course_key))
)
try:
block_type = block_key.block_type
except AttributeError:
raise ValueError(
"block_key must be an instance of `opaque_keys.edx.keys.UsageKey`. Got {}".format(type(block_key))
)
if waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
try:
with transaction.atomic():
obj, is_new = self.get_or_create( # pylint: disable=unpacking-non-sequence
user=user,
course_key=course_key,
block_key=block_key,
defaults={
'completion': completion,
'block_type': block_type,
},
)
except IntegrityError:
# The completion was created concurrently by another process
log.info(
"An IntegrityError was raised when trying to create a BlockCompletion for %s:%s:%s. "
"Falling back to get().",
user,
course_key,
block_key,
)
obj = self.get(
user=user,
course_key=course_key,
block_key=block_key,
)
is_new = False
if not is_new and obj.completion != completion:
obj.completion = completion
obj.full_clean()
obj.save(update_fields={'completion', 'modified'})
else:
# If the feature is not enabled, this method should not be called.
# Error out with a RuntimeError.
raise RuntimeError(
"BlockCompletion.objects.submit_completion should not be \
called when the feature is disabled."
)
return obj, is_new | 0.001828 |
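A hedged sketch of a call site; the user, course, and block identifiers below are placeholders, and the completion-tracking switch mentioned in the code must be enabled for the call to succeed.

# Illustrative only.
from django.contrib.auth import get_user_model
from opaque_keys.edx.keys import CourseKey, UsageKey

user = get_user_model().objects.get(username='learner')
course_key = CourseKey.from_string('course-v1:edX+DemoX+Demo_Course')
block_key = UsageKey.from_string(
    'block-v1:edX+DemoX+Demo_Course+type@html+block@intro')
completion_obj, created = BlockCompletion.objects.submit_completion(
    user=user, course_key=course_key, block_key=block_key, completion=1.0)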
def getobject_use_prevfield(idf, idfobject, fieldname):
"""field=object_name, prev_field=object_type. Return the object"""
if not fieldname.endswith("Name"):
return None
# test if prevfieldname ends with "Object_Type"
fdnames = idfobject.fieldnames
ifieldname = fdnames.index(fieldname)
prevfdname = fdnames[ifieldname - 1]
if not prevfdname.endswith("Object_Type"):
return None
objkey = idfobject[prevfdname].upper()
objname = idfobject[fieldname]
try:
foundobj = idf.getobject(objkey, objname)
    except KeyError:
return None
return foundobj | 0.0016 |
def _compile_mothur_script(self):
"""Returns a Mothur batch script as a string"""
fasta = self._input_filename
required_params = ["reference", "taxonomy"]
for p in required_params:
if self.Parameters[p].Value is None:
raise ValueError("Must provide value for parameter %s" % p)
optional_params = ["ksize", "cutoff", "iters"]
args = self._format_function_arguments(
required_params + optional_params)
script = '#classify.seqs(fasta=%s, %s)' % (fasta, args)
return script | 0.003484 |